From 174f42a0da42f0113266d33f3e1681125ea9f78e Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 6 Aug 2024 16:37:57 +0000 Subject: [PATCH 1/7] chore(schema): update (#3631) Co-authored-by: github-actions --- samtranslator/schema/schema.json | 260 +++---- schema_source/cloudformation-docs.json | 852 ++++++++++++++++++----- schema_source/cloudformation.schema.json | 260 +++---- 3 files changed, 936 insertions(+), 436 deletions(-) diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index c96762e32..648fb27b8 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -26184,7 +26184,7 @@ "type": "object" }, "BackupVaultName": { - "markdownDescription": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.", + "markdownDescription": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created.", "title": "BackupVaultName", "type": "string" }, @@ -33254,7 +33254,7 @@ "items": { "$ref": "#/definitions/AWS::CleanRooms::ConfiguredTable.AnalysisRule" }, - "markdownDescription": "The entire created analysis rule.", + "markdownDescription": "The analysis rule that was created for the configured table.", "title": "AnalysisRules", "type": "array" }, @@ -33818,7 +33818,7 @@ "properties": { "S3": { "$ref": "#/definitions/AWS::CleanRooms::Membership.ProtectedQueryS3OutputConfiguration", - "markdownDescription": "Required configuration for a protected query with an `S3` output type.", + "markdownDescription": "Required configuration for a protected query with an `s3` output type.", "title": "S3" } }, @@ -33932,7 +33932,7 @@ }, "Parameters": { "$ref": "#/definitions/AWS::CleanRooms::PrivacyBudgetTemplate.Parameters", - "markdownDescription": "Specifies the epislon and noise parameters for the privacy budget template.", + "markdownDescription": "Specifies the epsilon and noise parameters for the privacy budget template.", "title": "Parameters" }, "PrivacyBudgetType": { @@ -35488,7 +35488,7 @@ "additionalProperties": false, "properties": { "AccountFilterType": { - "markdownDescription": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSets deploys to the accounts specified in `Accounts` parameter.\n- `DIFFERENCE` : StackSets excludes the accounts specified in `Accounts` parameter. This enables user to avoid certain accounts within an OU such as suspended accounts.\n- `UNION` : StackSets includes additional accounts deployment targets.\n\nThis is the default value if `AccountFilterType` is not provided. 
This enables user to update an entire OU and individual accounts from a different OU in one request, which used to be two separate requests.\n- `NONE` : Deploys to all the accounts in specified organizational units (OU).", + "markdownDescription": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSet deploys to the accounts specified in the `Accounts` parameter.\n- `DIFFERENCE` : StackSet deploys to the OU, excluding the accounts specified in the `Accounts` parameter.\n- `UNION` : StackSet deploys to the OU, and the accounts specified in the `Accounts` parameter. `UNION` is not supported for create operations when using StackSet as a resource.", "title": "AccountFilterType", "type": "string" }, @@ -36322,12 +36322,12 @@ "additionalProperties": false, "properties": { "Header": { - "markdownDescription": "", + "markdownDescription": "The name of the HTTP header to configure for the single header policy.", "title": "Header", "type": "string" }, "Value": { - "markdownDescription": "", + "markdownDescription": "Specifies the value to assign to the header for a single header policy.", "title": "Value", "type": "string" } }, @@ -36362,11 +36362,11 @@ "properties": { "SessionStickinessConfig": { "$ref": "#/definitions/AWS::CloudFront::ContinuousDeploymentPolicy.SessionStickinessConfig", - "markdownDescription": "", + "markdownDescription": "Enable session stickiness for the associated origin or cache settings.", "title": "SessionStickinessConfig" }, "Weight": { - "markdownDescription": "", + "markdownDescription": "The percentage of requests that CloudFront sends to the associated origin or cache settings.", "title": "Weight", "type": "number" } }, @@ -36835,7 +36835,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "An alias for the CloudFront distribution's domain name.\n\n> This property is legacy. We recommend that you use [Aliases](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-distributionconfig.html#cfn-cloudfront-distribution-distributionconfig-aliases) instead.", "title": "CNAMEs", "type": "array" }, @@ -36867,7 +36867,7 @@ }, "CustomOrigin": { "$ref": "#/definitions/AWS::CloudFront::Distribution.LegacyCustomOrigin", - "markdownDescription": "", + "markdownDescription": "The user-defined HTTP server that serves as the origin for content that CloudFront distributes.\n\n> This property is legacy. We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", "title": "CustomOrigin" }, "DefaultCacheBehavior": { @@ -36925,7 +36925,7 @@ }, "S3Origin": { "$ref": "#/definitions/AWS::CloudFront::Distribution.LegacyS3Origin", - "markdownDescription": "", + "markdownDescription": "The origin as an Amazon S3 bucket.\n\n> This property is legacy. 
We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", "title": "S3Origin" }, "Staging": { @@ -37048,22 +37048,22 @@ "additionalProperties": false, "properties": { "DNSName": { - "markdownDescription": "", + "markdownDescription": "The domain name assigned to your CloudFront distribution.", "title": "DNSName", "type": "string" }, "HTTPPort": { - "markdownDescription": "", + "markdownDescription": "The HTTP port that CloudFront uses to connect to the origin. Specify the HTTP port that the origin listens on.", "title": "HTTPPort", "type": "number" }, "HTTPSPort": { - "markdownDescription": "", + "markdownDescription": "The HTTPS port that CloudFront uses to connect to the origin. Specify the HTTPS port that the origin listens on.", "title": "HTTPSPort", "type": "number" }, "OriginProtocolPolicy": { - "markdownDescription": "", + "markdownDescription": "Specifies the protocol (HTTP or HTTPS) that CloudFront uses to connect to the origin.", "title": "OriginProtocolPolicy", "type": "string" }, @@ -37071,7 +37071,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "The minimum SSL/TLS protocol version that CloudFront uses when communicating with your origin server over HTTPS.\n\nFor more information, see [Minimum Origin SSL Protocol](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginSSLProtocols) in the *Amazon CloudFront Developer Guide* .", "title": "OriginSSLProtocols", "type": "array" } @@ -37087,12 +37087,12 @@ "additionalProperties": false, "properties": { "DNSName": { - "markdownDescription": "", + "markdownDescription": "The domain name assigned to your CloudFront distribution.", "title": "DNSName", "type": "string" }, "OriginAccessIdentity": { - "markdownDescription": "", + "markdownDescription": "The CloudFront origin access identity to associate with the distribution. Use an origin access identity to configure the distribution so that end users can only access objects in an Amazon S3 bucket through CloudFront.\n\n> This property is legacy. We recommend that you use [OriginAccessControl](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudfront-originaccesscontrol.html) instead.", "title": "OriginAccessIdentity", "type": "string" } @@ -39305,7 +39305,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. 
This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. 
To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::One::UKey`\n- `AWS::One::User`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::RUM::AppMonitor`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", "title": "Field", "type": "string" }, @@ -39628,7 +39628,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::One::UKey`\n- `AWS::One::User`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::RUM::AppMonitor`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. 
For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", "title": "Field", "type": "string" }, @@ -44531,7 +44531,7 @@ "additionalProperties": false, "properties": { "Authentication": { - "markdownDescription": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", + "markdownDescription": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be masked in the response. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", "title": "Authentication", "type": "string" }, @@ -44614,7 +44614,7 @@ "type": "string" }, "SecretToken": { - "markdownDescription": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. 
Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities.", + "markdownDescription": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be masked in the response.", "title": "SecretToken", "type": "string" } @@ -64650,7 +64650,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name of the farm.", + "markdownDescription": "The display name of the farm.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -64732,7 +64732,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name of the fleet summary to update.", + "markdownDescription": "The display name of the fleet summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -65379,7 +65379,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name of the queue summary to update.", + "markdownDescription": "The display name of the queue summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -65728,7 +65728,7 @@ "additionalProperties": false, "properties": { "DisplayName": { - "markdownDescription": "The display name of the storage profile summary to update.", + "markdownDescription": "The display name of the storage profile summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -69829,7 +69829,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", + "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. 
If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -71198,7 +71198,7 @@ "type": "string" }, "Locale": { - "markdownDescription": "The locale of the IPAM pool. In IPAM, the locale is the AWS Region where you want to make an IPAM pool available for allocations. Only resources in the same Region as the locale of the pool can get IP address allocations from the pool. You can only allocate a CIDR for a VPC, for example, from an IPAM pool that shares a locale with the VPC\u2019s Region. Note that once you choose a Locale for a pool, you cannot modify it. If you choose an AWS Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.", + "markdownDescription": "The locale of the IPAM pool.\n\nThe locale for the pool should be one of the following:\n\n- An AWS Region where you want this IPAM pool to be available for allocations.\n- The network border group for an AWS Local Zone where you want this IPAM pool to be available for allocations ( [supported Local Zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#byoip-zone-avail) ). This option is only available for IPAM IPv4 pools in the public scope.\n\nIf you choose an AWS Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.", "title": "Locale", "type": "string" }, @@ -77684,7 +77684,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", + "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowestPrice` and `priceCapacityOptimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -77907,7 +77907,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type. 
These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.", + "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowestPrice` and `priceCapacityOptimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -78232,7 +78232,7 @@ "type": "string" }, "EnableDns64": { - "markdownDescription": "Indicates whether DNS queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses for IPv4-only destinations. For more information, see [DNS64 and NAT64](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-nat64-dns64) in the *Amazon Virtual Private Cloud User Guide* .", + "markdownDescription": "Indicates whether DNS queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses for IPv4-only destinations.\n\n> You must first configure a NAT gateway in a public subnet (separate from the subnet containing the IPv6-only workloads). For example, the subnet containing the NAT gateway should have a `0.0.0.0/0` route pointing to the internet gateway. For more information, see [Configure DNS64 and NAT64](https://docs.aws.amazon.com/vpc/latest/userguide/nat-gateway-nat64-dns64.html#nat-gateway-nat64-dns64-walkthrough) in the *Amazon Virtual Private Cloud User Guide* .", "title": "EnableDns64", "type": "boolean" }, @@ -82813,7 +82813,7 @@ "additionalProperties": false, "properties": { "EncryptionType": { - "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. 
For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", + "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", "title": "EncryptionType", "type": "string" }, @@ -82894,37 +82894,37 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "A list of enumerable Strings representing the repository creation scenarios that this template will apply towards. The two supported scenarios are PULL_THROUGH_CACHE and REPLICATION.", "title": "AppliedFor", "type": "array" }, "Description": { - "markdownDescription": "", + "markdownDescription": "The description associated with the repository creation template.", "title": "Description", "type": "string" }, "EncryptionConfiguration": { "$ref": "#/definitions/AWS::ECR::RepositoryCreationTemplate.EncryptionConfiguration", - "markdownDescription": "The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest.\n\nBy default, when no encryption configuration is set or the `AES256` encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts your data at rest using an AES-256 encryption algorithm. This does not require any action on your part.\n\nFor more control over the encryption of the contents of your repository, you can use server-side encryption with AWS Key Management Service key stored in AWS Key Management Service ( AWS KMS ) to encrypt your images. For more information, see [Amazon ECR encryption at rest](https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) in the *Amazon Elastic Container Registry User Guide* .", + "markdownDescription": "The encryption configuration associated with the repository creation template.", "title": "EncryptionConfiguration" }, "ImageTagMutability": { - "markdownDescription": "", + "markdownDescription": "The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. 
If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten.", "title": "ImageTagMutability", "type": "string" }, "LifecyclePolicy": { - "markdownDescription": "", + "markdownDescription": "The lifecycle policy to use for repositories created using the template.", "title": "LifecyclePolicy", "type": "string" }, "Prefix": { - "markdownDescription": "", + "markdownDescription": "The repository namespace prefix associated with the repository creation template.", "title": "Prefix", "type": "string" }, "RepositoryPolicy": { - "markdownDescription": "", + "markdownDescription": "The repository policy to apply to repositories created using the template. A repository policy is a permissions policy associated with a repository to control access permissions.", "title": "RepositoryPolicy", "type": "string" }, @@ -82932,7 +82932,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The tags attached to the resource.", + "markdownDescription": "The metadata to apply to the repository to help you categorize and organize. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.", "title": "ResourceTags", "type": "array" } @@ -82968,7 +82968,7 @@ "additionalProperties": false, "properties": { "EncryptionType": { - "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", + "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm. 
For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", "title": "EncryptionType", "type": "string" }, @@ -90375,12 +90375,12 @@ "type": "string" }, "AutoMinorVersionUpgrade": { - "markdownDescription": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "markdownDescription": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", "title": "AutoMinorVersionUpgrade", "type": "boolean" }, "CacheNodeType": { - "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . 
\n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis configuration variables `appendonly` and `appendfsync` are not supported on Redis version 2.8.22 and later.", + "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . 
\n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis OSS configuration variables `appendonly` and `appendfsync` are not supported on Redis OSS version 2.8.22 and later.", "title": "CacheNodeType", "type": "string" }, @@ -90418,7 +90418,7 @@ "type": "string" }, "IpDiscovery": { - "markdownDescription": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "IpDiscovery", "type": "string" }, @@ -90431,7 +90431,7 @@ "type": "array" }, "NetworkType": { - "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "NetworkType", "type": "string" }, @@ -90472,12 +90472,12 @@ "items": { "type": "string" }, - "markdownDescription": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "markdownDescription": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). 
The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "title": "SnapshotArns", "type": "array" }, "SnapshotName": { - "markdownDescription": "The name of a Redis snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", + "markdownDescription": "The name of a Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", "title": "SnapshotName", "type": "string" }, @@ -90653,7 +90653,7 @@ "additionalProperties": false, "properties": { "AutomaticFailoverEnabled": { - "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis (cluster mode enabled) replication groups.", + "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.", "title": "AutomaticFailoverEnabled", "type": "boolean" }, @@ -90668,7 +90668,7 @@ "type": "string" }, "EngineVersion": { - "markdownDescription": "The Elasticache Redis engine version.", + "markdownDescription": "The Elasticache Redis OSS engine version.", "title": "EngineVersion", "type": "string" }, @@ -90779,7 +90779,7 @@ "additionalProperties": false, "properties": { "NodeGroupId": { - "markdownDescription": "Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "markdownDescription": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "title": "NodeGroupId", "type": "string" }, @@ -90922,22 +90922,22 @@ "additionalProperties": false, "properties": { "AtRestEncryptionEnabled": { - "markdownDescription": "A flag that enables encryption at rest when set to `true` .\n\nYou cannot modify the value of `AtRestEncryptionEnabled` after the replication group is created. To enable encryption at rest on a replication group you must set `AtRestEncryptionEnabled` to `true` when you create the replication group.\n\n*Required:* Only available when creating a replication group in an Amazon VPC using redis version `3.2.6` or `4.x` onward.\n\nDefault: `false`", + "markdownDescription": "A flag that enables encryption at rest when set to `true` .\n\nYou cannot modify the value of `AtRestEncryptionEnabled` after the replication group is created. 
To enable encryption at rest on a replication group you must set `AtRestEncryptionEnabled` to `true` when you create the replication group.\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`", "title": "AtRestEncryptionEnabled", "type": "boolean" }, "AuthToken": { - "markdownDescription": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "markdownDescription": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis OSS AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](http://redis.io/commands/AUTH) .\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "title": "AuthToken", "type": "string" }, "AutoMinorVersionUpgrade": { - "markdownDescription": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "markdownDescription": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt in to the next minor version upgrade campaign. 
This parameter is disabled for previous versions.", "title": "AutoMinorVersionUpgrade", "type": "boolean" }, "AutomaticFailoverEnabled": { - "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis (cluster mode enabled) replication groups.\n\nDefault: false", + "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.\n\nDefault: false", "title": "AutomaticFailoverEnabled", "type": "boolean" }, @@ -90947,7 +90947,7 @@ "type": "string" }, "CacheParameterGroupName": { - "markdownDescription": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", + "markdownDescription": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis OSS version 3.2.4 or later, have only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis OSS (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis OSS (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", "title": "CacheParameterGroupName", "type": "string" }, @@ -90965,7 +90965,7 @@ "type": "string" }, "ClusterMode": { - "markdownDescription": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", + "markdownDescription": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", "title": "ClusterMode", "type": "string" }, @@ -90990,7 +90990,7 @@ "type": "string" }, "IpDiscovery": { - "markdownDescription": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . 
IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "IpDiscovery", "type": "string" }, @@ -91013,7 +91013,7 @@ "type": "boolean" }, "NetworkType": { - "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "NetworkType", "type": "string" }, @@ -91021,7 +91021,7 @@ "items": { "$ref": "#/definitions/AWS::ElastiCache::ReplicationGroup.NodeGroupConfiguration" }, - "markdownDescription": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "markdownDescription": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis OSS cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "title": "NodeGroupConfiguration", "type": "array" }, @@ -91036,7 +91036,7 @@ "type": "number" }, "NumNodeGroups": { - "markdownDescription": "An optional parameter that specifies the number of node groups (shards) for this Redis (cluster mode enabled) replication group. For Redis (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. 
When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", + "markdownDescription": "An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", "title": "NumNodeGroups", "type": "number" }, @@ -91090,7 +91090,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "markdownDescription": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "title": "SnapshotArns", "type": "array" }, @@ -91110,7 +91110,7 @@ "type": "string" }, "SnapshottingClusterId": { - "markdownDescription": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups.", + "markdownDescription": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups.", "title": "SnapshottingClusterId", "type": "string" }, @@ -91123,12 +91123,12 @@ "type": "array" }, "TransitEncryptionEnabled": { - "markdownDescription": "A flag that enables in-transit encryption when set to `true` .\n\nYou cannot modify the value of `TransitEncryptionEnabled` after the cluster is created. 
To enable in-transit encryption on a cluster you must set `TransitEncryptionEnabled` to `true` when you create a cluster.\n\nThis parameter is valid only if the `Engine` parameter is `redis` , the `EngineVersion` parameter is `3.2.6` or `4.x` onward, and the cluster is being created in an Amazon VPC.\n\nIf you enable in-transit encryption, you must also specify a value for `CacheSubnetGroup` .\n\n*Required:* Only available when creating a replication group in an Amazon VPC using redis version `3.2.6` or `4.x` onward.\n\nDefault: `false`\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` .", + "markdownDescription": "A flag that enables in-transit encryption when set to `true` .\n\nYou cannot modify the value of `TransitEncryptionEnabled` after the cluster is created. To enable in-transit encryption on a cluster you must set `TransitEncryptionEnabled` to `true` when you create a cluster.\n\nThis parameter is valid only if the `Engine` parameter is `redis` , the `EngineVersion` parameter is `3.2.6` or `4.x` onward, and the cluster is being created in an Amazon VPC.\n\nIf you enable in-transit encryption, you must also specify a value for `CacheSubnetGroup` .\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` .", "title": "TransitEncryptionEnabled", "type": "boolean" }, "TransitEncryptionMode": { - "markdownDescription": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", + "markdownDescription": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. 
Once you migrate all your Redis OSS clients to use encrypted connections, you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` ; after that, you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", "title": "TransitEncryptionMode", "type": "string" }, @@ -91247,7 +91247,7 @@ "additionalProperties": false, "properties": { "NodeGroupId": { - "markdownDescription": "Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "markdownDescription": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "title": "NodeGroupId", "type": "string" }, @@ -91471,7 +91471,7 @@ "title": "CacheUsageLimits" }, "DailySnapshotTime": { - "markdownDescription": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis and Serverless Memcached only.", + "markdownDescription": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis OSS and Serverless Memcached only.", "title": "DailySnapshotTime", "type": "string" }, @@ -91532,7 +91532,7 @@ "type": "array" }, "SnapshotRetentionLimit": { - "markdownDescription": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis and Serverless Memcached only.", + "markdownDescription": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis OSS and Serverless Memcached only.", "title": "SnapshotRetentionLimit", "type": "number" }, @@ -91553,7 +91553,7 @@ "type": "array" }, "UserGroupId": { - "markdownDescription": "The identifier of the user group associated with the serverless cache. Available for Redis only. Default is NULL.", + "markdownDescription": "The identifier of the user group associated with the serverless cache. Available for Redis OSS only. Default is NULL.", "title": "UserGroupId", "type": "string" } @@ -95084,7 +95084,7 @@ }, "IdMappingTechniques": { "$ref": "#/definitions/AWS::EntityResolution::IdMappingWorkflow.IdMappingTechniques", - "markdownDescription": "An object which defines the `idMappingType` and the `providerProperties` .", + "markdownDescription": "An object which defines the ID mapping technique and any additional configurations.", "title": "IdMappingTechniques" }, "InputSourceConfig": { @@ -95171,7 +95171,7 @@ "additionalProperties": false, "properties": { "InputSourceARN": { - "markdownDescription": "An AWS Glue table ARN for the input source table.", + "markdownDescription": "An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.", "title": "InputSourceARN", "type": "string" }, @@ -95181,7 +95181,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of ID namespace. 
There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` which all `sourceIds` will resolve to.", "title": "Type", "type": "string" } @@ -95329,7 +95329,7 @@ "type": "array" }, "Type": { - "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve to.", + "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` which all `sourceIds` will resolve to.", "title": "Type", "type": "string" } @@ -95384,7 +95384,7 @@ "additionalProperties": false, "properties": { "InputSourceARN": { - "markdownDescription": "An AWS Glue table ARN for the input source table.", + "markdownDescription": "An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.", "title": "InputSourceARN", "type": "string" }, @@ -95703,7 +95703,7 @@ "additionalProperties": false, "properties": { "AttributeMatchingModel": { - "markdownDescription": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the AttributeMatchingModel. When choosing `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of `BusinessEmail` field of Profile B matches, the two profiles are matched on the `Email` type. When choosing `ONE_TO_ONE` ,the system can only match if the sub-types are exact matches. For example, only when the value of the `Email` field of Profile A and the value of the `Email` field of Profile B matches, the two profiles are matched on the `Email` type.", + "markdownDescription": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the `attributeMatchingModel` .\n\nIf you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of `BusinessEmail` field of Profile B matches, the two profiles are matched on the `Email` attribute type.\n\nIf you choose `ONE_TO_ONE` , the system can only match attributes if the sub-types are an exact match. For example, for the `Email` attribute type, the system will only consider it a match if the value of the `Email` field of Profile A matches the value of the `Email` field of Profile B.", "title": "AttributeMatchingModel", "type": "string" }, @@ -95924,7 +95924,7 @@ "type": "string" }, "MatchKey": { - "markdownDescription": "A key that allows grouping of multiple input attributes into a unified matching group. For example, consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . By assigning a `matchKey` called `address` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group. 
If no `matchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.", "title": "MatchKey", "type": "string" }, @@ -103381,7 +103381,7 @@ "items": { "$ref": "#/definitions/AWS::GameLift::Fleet.LocationConfiguration" }, - "markdownDescription": "A set of remote locations to deploy additional instances to and manage as part of the fleet. This parameter can only be used when creating fleets in AWS Regions that support multiple locations. You can add any Amazon GameLift-supported AWS Region as a remote location, in the form of an AWS Region code, such as `us-west-2` or Local Zone code. To create a fleet with instances in the home Region only, don't set this parameter.\n\nWhen using this parameter, Amazon GameLift requires you to include your home location in the request.", + "markdownDescription": "A set of remote locations to deploy additional instances to and manage as a multi-location fleet. Use this parameter when creating a fleet in AWS Regions that support multiple locations. You can add any AWS Region or Local Zone that's supported by Amazon GameLift. Provide a list of one or more AWS Region codes, such as `us-west-2` , or Local Zone names. When using this parameter, Amazon GameLift requires you to include your home location in the request. For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "title": "Locations", "type": "array" }, @@ -103627,7 +103627,7 @@ "additionalProperties": false, "properties": { "Location": { - "markdownDescription": "An AWS Region code, such as `us-west-2` .", + "markdownDescription": "An AWS Region code, such as `us-west-2` . For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "title": "Location", "type": "string" }, @@ -104220,7 +104220,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of labels to assign to the new matchmaking configuration resource. Tags are developer-defined key-value pairs. Tagging AWS resources are useful for resource management, access management and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Rareference* .", + "markdownDescription": "A list of labels to assign to the new resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management, and cost allocation. 
For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference* .", "title": "Tags", "type": "array" } @@ -114633,7 +114633,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .", + "markdownDescription": "A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .\n\nThis property is optional. If it is not included, IAM will retrieve and use the top intermediate certificate authority (CA) thumbprint of the OpenID Connect identity provider server certificate.", "title": "ThumbprintList", "type": "array" }, @@ -129708,7 +129708,7 @@ "type": "array" }, "AssetModelName": { - "markdownDescription": "A unique, friendly name for the asset model.", + "markdownDescription": "A unique name for the asset model.", "title": "AssetModelName", "type": "string" }, @@ -130243,7 +130243,7 @@ "type": "array" }, "GatewayName": { - "markdownDescription": "A unique, friendly name for the gateway.", + "markdownDescription": "A unique name for the gateway.", "title": "GatewayName", "type": "string" }, @@ -140506,7 +140506,7 @@ }, "ProcessingConfiguration": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.ProcessingConfiguration", - "markdownDescription": "", + "markdownDescription": "Specifies configuration for Snowflake.", "title": "ProcessingConfiguration" }, "RetryOptions": { @@ -153456,7 +153456,7 @@ "type": "object" }, "AirflowVersion": { - "markdownDescription": "The version of Apache Airflow to use for the environment. If no value is specified, defaults to the latest version.\n\nIf you specify a newer version number for an existing environment, the version update requires some service interruption before taking effect.\n\n*Allowed Values* : `1.10.12` | `2.0.2` | `2.2.2` | `2.4.3` | `2.5.1` | `2.6.3` | `2.7.2` (latest)", + "markdownDescription": "The version of Apache Airflow to use for the environment. If no value is specified, defaults to the latest version.\n\nIf you specify a newer version number for an existing environment, the version update requires some service interruption before taking effect.\n\n*Allowed Values* : `1.10.12` | `2.0.2` | `2.2.2` | `2.4.3` | `2.5.1` | `2.6.3` | `2.7.2` | `2.8.1` | `2.9.2` (latest)", "title": "AirflowVersion", "type": "string" }, @@ -167377,7 +167377,7 @@ "properties": { "LogDestination": { "additionalProperties": true, - "markdownDescription": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` . The following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . 
The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", + "markdownDescription": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` .\n\nThe following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -182807,7 +182807,7 @@ "items": { "type": "string" }, - "markdownDescription": "Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.", + "markdownDescription": "Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups.", "title": "SecurityGroup", "type": "array" }, @@ -224627,7 +224627,7 @@ "type": "array" }, "BacktrackWindow": { - "markdownDescription": "The target backtrack window, in seconds. To disable backtracking, set this value to 0.\n\n> Currently, Backtrack is only supported for Aurora MySQL DB clusters. \n\nDefault: 0\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).\n\nValid for: Aurora MySQL DB clusters only", + "markdownDescription": "The target backtrack window, in seconds. To disable backtracking, set this value to `0` .\n\nValid for Cluster Type: Aurora MySQL DB clusters only\n\nDefault: `0`\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).", "title": "BacktrackWindow", "type": "number" }, @@ -224810,7 +224810,7 @@ "type": "string" }, "PubliclyAccessible": { - "markdownDescription": "Specifies whether the DB cluster is publicly accessible.\n\nWhen the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. 
That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.\n\nWhen the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nDefault: The default behavior varies depending on whether `DBSubnetGroupName` is specified.\n\nIf `DBSubnetGroupName` isn't specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the default VPC in the target Region doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.\n\nIf `DBSubnetGroupName` is specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the subnets are part of a VPC that doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.", + "markdownDescription": "Specifies whether the DB cluster is publicly accessible.\n\nWhen the DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB cluster, the endpoint resolves to the private IP address. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.\n\nWhen the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nDefault: The default behavior varies depending on whether `DBSubnetGroupName` is specified.\n\nIf `DBSubnetGroupName` isn't specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the default VPC in the target Region doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.\n\nIf `DBSubnetGroupName` is specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the subnets are part of a VPC that doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.", "title": "PubliclyAccessible", "type": "boolean" }, @@ -224868,7 +224868,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB cluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", + "markdownDescription": "Tags to assign to the DB cluster.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", "title": "Tags", "type": "array" }, @@ -224952,7 +224952,7 @@ "type": "string" }, "SecretArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the secret.", + "markdownDescription": "The Amazon Resource Name (ARN) of the secret. This parameter is a return value that you can retrieve using the `Fn::GetAtt` intrinsic function. 
For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html#aws-resource-rds-dbcluster-return-values) .", "title": "SecretArn", "type": "string" } @@ -225058,17 +225058,17 @@ "additionalProperties": false, "properties": { "DBClusterParameterGroupName": { - "markdownDescription": "The name of the DB cluster parameter group.\n\nConstraints:\n\n- Must not match the name of an existing DB cluster parameter group.\n\nIf you don't specify a value for `DBClusterParameterGroupName` property, a name is automatically created for the DB cluster parameter group.\n\n> This value is stored as a lowercase string.", + "markdownDescription": "The name of the DB cluster parameter group.\n\nConstraints:\n\n- Must not match the name of an existing DB cluster parameter group.\n\n> This value is stored as a lowercase string.", "title": "DBClusterParameterGroupName", "type": "string" }, "Description": { - "markdownDescription": "A friendly description for this DB cluster parameter group.", + "markdownDescription": "The description for the DB cluster parameter group.", "title": "Description", "type": "string" }, "Family": { - "markdownDescription": "The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a DB engine and engine version compatible with that DB cluster parameter group family.\n\n> The DB cluster parameter group family can't be changed when updating a DB cluster parameter group. \n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBClusterParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBClusterParameterGroup.html)` .", + "markdownDescription": "The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.\n\n*Aurora MySQL*\n\nExample: `aurora-mysql5.7` , `aurora-mysql8.0`\n\n*Aurora PostgreSQL*\n\nExample: `aurora-postgresql14`\n\n*RDS for MySQL*\n\nExample: `mysql8.0`\n\n*RDS for PostgreSQL*\n\nExample: `postgres13`\n\nTo list all of the available parameter group families for a DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine `\n\nFor example, to list all of the available parameter group families for the Aurora PostgreSQL DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine aurora-postgresql`\n\n> The output contains duplicates. 
\n\nThe following are the valid DB engine values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `mysql`\n- `postgres`", "title": "Family", "type": "string" }, @@ -225081,7 +225081,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB cluster parameter group.", + "markdownDescription": "Tags to assign to the DB cluster parameter group.", "title": "Tags", "type": "array" } @@ -225178,7 +225178,7 @@ "type": "string" }, "AutomaticBackupReplicationRegion": { - "markdownDescription": "", + "markdownDescription": "The AWS Region associated with the automated backup.", "title": "AutomaticBackupReplicationRegion", "type": "string" }, @@ -225223,7 +225223,7 @@ "type": "string" }, "DBClusterIdentifier": { - "markdownDescription": "The identifier of the DB cluster that the instance will belong to.", + "markdownDescription": "The identifier of the DB cluster that this DB instance will belong to.\n\nThis setting doesn't apply to RDS Custom DB instances.", "title": "DBClusterIdentifier", "type": "string" }, @@ -225261,12 +225261,12 @@ "type": "array" }, "DBSnapshotIdentifier": { - "markdownDescription": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n\nBy specifying this property, you can create a DB instance from the specified DB snapshot. If the `DBSnapshotIdentifier` property is an empty string or the `AWS::RDS::DBInstance` declaration has no `DBSnapshotIdentifier` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n\nSome DB instance properties aren't valid when you restore from a snapshot, such as the `MasterUsername` and `MasterUserPassword` properties. For information about the properties that you can specify, see the `RestoreDBInstanceFromDBSnapshot` action in the *Amazon RDS API Reference* .\n\nAfter you restore a DB instance with a `DBSnapshotIdentifier` property, you must specify the same `DBSnapshotIdentifier` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the `DBSnapshotIdentifier` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified `DBSnapshotIdentifier` property, and the original DB instance is deleted.\n\nIf you specify the `DBSnapshotIdentifier` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n\n- `CharacterSetName`\n- `DBClusterIdentifier`\n- `DBName`\n- `DeleteAutomatedBackups`\n- `EnablePerformanceInsights`\n- `KmsKeyId`\n- `MasterUsername`\n- `MasterUserPassword`\n- `PerformanceInsightsKMSKeyId`\n- `PerformanceInsightsRetentionPeriod`\n- `PromotionTier`\n- `SourceDBInstanceIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an encrypted snapshot)\n- `Timezone`\n\n*Amazon Aurora*\n\nNot applicable. 
Snapshot restore is managed by the DB cluster.", + "markdownDescription": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n\nBy specifying this property, you can create a DB instance from the specified DB snapshot. If the `DBSnapshotIdentifier` property is an empty string or the `AWS::RDS::DBInstance` declaration has no `DBSnapshotIdentifier` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n\nSome DB instance properties aren't valid when you restore from a snapshot, such as the `MasterUsername` and `MasterUserPassword` properties. For information about the properties that you can specify, see the `RestoreDBInstanceFromDBSnapshot` action in the *Amazon RDS API Reference* .\n\nAfter you restore a DB instance with a `DBSnapshotIdentifier` property, you must specify the same `DBSnapshotIdentifier` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the `DBSnapshotIdentifier` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified `DBSnapshotIdentifier` property, and the original DB instance is deleted.\n\nIf you specify the `DBSnapshotIdentifier` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n\n- `CharacterSetName`\n- `DBClusterIdentifier`\n- `DBName`\n- `DeleteAutomatedBackups`\n- `KmsKeyId`\n- `MasterUsername`\n- `MasterUserPassword`\n- `PerformanceInsightsKMSKeyId`\n- `PerformanceInsightsRetentionPeriod`\n- `PromotionTier`\n- `SourceDBInstanceIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an encrypted snapshot)\n- `Timezone`\n\n*Amazon Aurora*\n\nNot applicable. Snapshot restore is managed by the DB cluster.", "title": "DBSnapshotIdentifier", "type": "string" }, "DBSubnetGroupName": { - "markdownDescription": "A DB subnet group to associate with the DB instance. If you update this value, the new subnet group must be a subnet group in a new VPC.\n\nIf there's no DB subnet group, then the DB instance isn't a VPC DB instance.\n\nFor more information about using Amazon RDS in a VPC, see [Using Amazon RDS with Amazon Virtual Private Cloud (VPC)](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. The DB subnet group is managed by the DB cluster. If specified, the setting must match the DB cluster setting.", + "markdownDescription": "A DB subnet group to associate with the DB instance. 
If you update this value, the new subnet group must be a subnet group in a new VPC.\n\nIf there's no DB subnet group, then the DB instance isn't a VPC DB instance.\n\nFor more information about using Amazon RDS in a VPC, see [Amazon VPC and Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\nThis setting doesn't apply to Amazon Aurora DB instances. The DB subnet group is managed by the DB cluster. If specified, the setting must match the DB cluster setting.", "title": "DBSubnetGroupName", "type": "string" }, @@ -225281,7 +225281,7 @@ "type": "boolean" }, "DeletionProtection": { - "markdownDescription": "A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. For more information, see [Deleting a DB Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html) .\n\n*Amazon Aurora*\n\nNot applicable. You can enable or disable deletion protection for the DB cluster. For more information, see `CreateDBCluster` . DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.", + "markdownDescription": "Specifies whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see [Deleting a DB Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html) .\n\nThis setting doesn't apply to Amazon Aurora DB instances. You can enable or disable deletion protection for the DB cluster. For more information, see `CreateDBCluster` . DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.", "title": "DeletionProtection", "type": "boolean" }, @@ -225392,7 +225392,7 @@ "type": "number" }, "MonitoringInterval": { - "markdownDescription": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify 0. The default is 0.\n\nIf `MonitoringRoleArn` is specified, then you must set `MonitoringInterval` to a value other than 0.\n\nThis setting doesn't apply to RDS Custom.\n\nValid Values: `0, 1, 5, 10, 15, 30, 60`", + "markdownDescription": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify `0` .\n\nIf `MonitoringRoleArn` is specified, then you must set `MonitoringInterval` to a value other than `0` .\n\nThis setting doesn't apply to RDS Custom DB instances.\n\nValid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60`\n\nDefault: `0`", "title": "MonitoringInterval", "type": "number" }, @@ -225402,7 +225402,7 @@ "type": "string" }, "MultiAZ": { - "markdownDescription": "Specifies whether the database instance is a Multi-AZ DB instance deployment. You can't set the `AvailabilityZone` parameter if the `MultiAZ` parameter is set to true.\n\nFor more information, see [Multi-AZ deployments for high availability](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZ.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. 
Amazon Aurora storage is replicated across all of the Availability Zones and doesn't require the `MultiAZ` option to be set.", + "markdownDescription": "Specifies whether the DB instance is a Multi-AZ deployment. You can't set the `AvailabilityZone` parameter if the DB instance is a Multi-AZ deployment.\n\nThis setting doesn't apply to the following DB instances:\n\n- Amazon Aurora (DB instance Availability Zones (AZs) are managed by the DB cluster.)\n- RDS Custom", "title": "MultiAZ", "type": "boolean" }, @@ -225432,7 +225432,7 @@ "type": "number" }, "Port": { - "markdownDescription": "The port number on which the database accepts connections.\n\n*Amazon Aurora*\n\nNot applicable. The port number is managed by the DB cluster.\n\n*Db2*\n\nDefault value: `50000`", + "markdownDescription": "The port number on which the database accepts connections.\n\nThis setting doesn't apply to Aurora DB instances. The port number is managed by the cluster.\n\nValid Values: `1150-65535`\n\nDefault:\n\n- RDS for Db2 - `50000`\n- RDS for MariaDB - `3306`\n- RDS for Microsoft SQL Server - `1433`\n- RDS for MySQL - `3306`\n- RDS for Oracle - `1521`\n- RDS for PostgreSQL - `5432`\n\nConstraints:\n\n- For RDS for Microsoft SQL Server, the value can't be `1234` , `1434` , `3260` , `3343` , `3389` , `47001` , or `49152-49156` .", "title": "Port", "type": "string" }, @@ -225518,7 +225518,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB instance.", + "markdownDescription": "Tags to assign to the DB instance.", "title": "Tags", "type": "array" }, @@ -225634,7 +225634,7 @@ "type": "string" }, "SecretArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the secret.", + "markdownDescription": "The Amazon Resource Name (ARN) of the secret. This parameter is a return value that you can retrieve using the `Fn::GetAtt` intrinsic function. For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html#aws-resource-rds-dbinstance-return-values) .", "title": "SecretArn", "type": "string" } @@ -225703,12 +225703,12 @@ "type": "string" }, "Family": { - "markdownDescription": "The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a DB engine and engine version compatible with that DB parameter group family.\n\n> The DB parameter group family can't be changed when updating a DB parameter group. \n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBParameterGroup.html)` .", + "markdownDescription": "The DB parameter group family name. 
A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.\n\nTo list all of the available parameter group families for a DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine `\n\nFor example, to list all of the available parameter group families for the MySQL DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine mysql`\n\n> The output contains duplicates. \n\nThe following are the valid DB engine values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `db2-ae`\n- `db2-se`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", "title": "Family", "type": "string" }, "Parameters": { - "markdownDescription": "An array of parameter names and values for the parameter update. At least one parameter name and value must be supplied. Subsequent arguments are optional.\n\nRDS for Db2 requires you to bring your own Db2 license. You must enter your IBM customer ID ( `rds.ibm_customer_id` ) and site number ( `rds.ibm_site_id` ) before starting a Db2 instance.\n\nFor more information about DB parameters and DB parameter groups for Amazon RDS DB engines, see [Working with DB Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* .\n\nFor more information about DB cluster and DB instance parameters and parameter groups for Amazon Aurora DB engines, see [Working with DB Parameter Groups and DB Cluster Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", + "markdownDescription": "An array of parameter names and values for the parameter update. You must specify at least one parameter name and value.\n\nFor more information about parameter groups, see [Working with parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* , or [Working with parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", "title": "Parameters", "type": "object" }, @@ -225716,7 +225716,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB parameter group.\n\n> Currently, this is the only property that supports drift detection.", + "markdownDescription": "Tags to assign to the DB parameter group.", "title": "Tags", "type": "array" } @@ -225802,7 +225802,7 @@ "type": "boolean" }, "EngineFamily": { - "markdownDescription": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. 
For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL` . For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL` . For RDS for Microsoft SQL Server, specify `SQLSERVER` .\n\n*Valid Values* : `MYSQL` | `POSTGRESQL` | `SQLSERVER`",
+        "markdownDescription": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL` . For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL` . For RDS for Microsoft SQL Server, specify `SQLSERVER` .",
        "title": "EngineFamily",
        "type": "string"
      },
@@ -225880,7 +225880,7 @@
    "additionalProperties": false,
    "properties": {
      "AuthScheme": {
-        "markdownDescription": "The type of authentication that the proxy uses for connections from the proxy to the underlying database.\n\nValid Values: `SECRETS`",
+        "markdownDescription": "The type of authentication that the proxy uses for connections from the proxy to the underlying database.",
        "title": "AuthScheme",
        "type": "string"
      },
      "Description": {
        "markdownDescription": "A user-specified description about the authentication used by a proxy to log in as a specific database user.",
        "title": "Description",
        "type": "string"
      },
      "IAMAuth": {
-        "markdownDescription": "Whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. The `ENABLED` value is valid only for proxies with RDS for Microsoft SQL Server.\n\nValid Values: `ENABLED | DISABLED | REQUIRED`",
+        "markdownDescription": "A value that indicates whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. The `ENABLED` value is valid only for proxies with RDS for Microsoft SQL Server.",
        "title": "IAMAuth",
        "type": "string"
      },
@@ -225911,12 +225911,12 @@
    "additionalProperties": false,
    "properties": {
      "Key": {
-        "markdownDescription": "A key is the required name of the tag. The string value can be 1-128 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").",
+        "markdownDescription": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").",
        "title": "Key",
        "type": "string"
      },
      "Value": {
-        "markdownDescription": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").",
+        "markdownDescription": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").",
        "title": "Value",
        "type": "string"
      }
    },
@@ -225977,7 +225977,7 @@
        "type": "array"
      },
      "TargetRole": {
-        "markdownDescription": "A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.\n\nValid Values: `READ_WRITE | READ_ONLY`",
+        "markdownDescription": "A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.",
        "title": "TargetRole",
        "type": "string"
      },
@@ -226030,12 +226030,12 @@
    "additionalProperties": false,
    "properties": {
      "Key": {
-        "markdownDescription": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").",
+        "markdownDescription": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").",
        "title": "Key",
        "type": "string"
      },
      "Value": {
-        "markdownDescription": "Metadata assigned to a DB instance consisting of a key-value pair.",
+        "markdownDescription": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").",
        "title": "Value",
        "type": "string"
      }
@@ -226079,7 +226079,7 @@
    "properties": {
      "ConnectionPoolConfigurationInfo": {
        "$ref": "#/definitions/AWS::RDS::DBProxyTargetGroup.ConnectionPoolConfigurationInfoFormat",
-        "markdownDescription": "Settings that control the size and behavior of the connection pool associated with a `DBProxyTargetGroup` .",
+        "markdownDescription": "Displays the settings that control the size and behavior of the connection pool associated with a `DBProxyTarget` .",
        "title": "ConnectionPoolConfigurationInfo"
      },
      "DBClusterIdentifiers": {
@@ -226214,7 +226214,7 @@
        "type": "array"
      },
      "EC2VpcId": {
-        "markdownDescription": "The identifier of an Amazon VPC. This property indicates the VPC that this DB security group belongs to.\n\n> The `EC2VpcId` property is for backward compatibility with older regions, and is no longer recommended for providing security information to an RDS DB instance.",
+        "markdownDescription": "The identifier of an Amazon virtual private cloud (VPC). 
This property indicates the VPC that this DB security group belongs to.\n\n> This property is included for backwards compatibility and is no longer recommended for providing security information to an RDS DB instance.", "title": "EC2VpcId", "type": "string" }, @@ -226227,7 +226227,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB security group.", + "markdownDescription": "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n\nFor more information, see [Tagging Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide* or [Tagging Amazon Aurora and Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html) in the *Amazon Aurora User Guide* .", "title": "Tags", "type": "array" } @@ -226413,7 +226413,7 @@ "type": "string" }, "DBSubnetGroupName": { - "markdownDescription": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints: Must contain no more than 255 lowercase alphanumeric characters or hyphens. Must not be \"Default\".\n\nExample: `mysubnetgroup`", + "markdownDescription": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints:\n\n- Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens.\n- Must not be default.\n- First character must be a letter.\n\nExample: `mydbsubnetgroup`", "title": "DBSubnetGroupName", "type": "string" }, @@ -226429,7 +226429,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB subnet group.", + "markdownDescription": "Tags to assign to the DB subnet group.", "title": "Tags", "type": "array" } @@ -226518,12 +226518,12 @@ "items": { "type": "string" }, - "markdownDescription": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If a `SourceIds` value is supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.", + "markdownDescription": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. 
It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If `SourceIds` are supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.\n- If the source type is an RDS Proxy, a `DBProxyName` value must be supplied.", "title": "SourceIds", "type": "array" }, "SourceType": { - "markdownDescription": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, set this parameter to `db-instance` . If this value isn't specified, all events are returned.\n\nValid values: `db-instance` | `db-cluster` | `db-parameter-group` | `db-security-group` | `db-snapshot` | `db-cluster-snapshot`", + "markdownDescription": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to `db-instance` . For RDS Proxy events, specify `db-proxy` . If this value isn't specified, all events are returned.\n\nValid Values: `db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy | zero-etl | custom-engine-version | blue-green-deployment`", "title": "SourceType", "type": "string" }, @@ -226730,7 +226730,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of tags. For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide.* .", + "markdownDescription": "An optional array of key-value pairs to apply to this integration.", "title": "Tags", "type": "array" }, @@ -226816,7 +226816,7 @@ "items": { "$ref": "#/definitions/AWS::RDS::OptionGroup.OptionConfiguration" }, - "markdownDescription": "A list of options and the settings for each option.", + "markdownDescription": "A list of all available options for an option group.", "title": "OptionConfigurations", "type": "array" }, @@ -226834,7 +226834,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this option group.", + "markdownDescription": "Tags to assign to the option group.", "title": "Tags", "type": "array" } @@ -226874,7 +226874,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of DBSecurityGroupMembership name strings used for this option.", + "markdownDescription": "A list of DB security groups used for this option.", "title": "DBSecurityGroupMemberships", "type": "array" }, @@ -226905,7 +226905,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of VpcSecurityGroupMembership name strings used for this option.", + "markdownDescription": "A list of VPC security group names used for this option.", "title": "VpcSecurityGroupMemberships", "type": "array" } @@ -235666,7 +235666,7 @@ }, "VersioningConfiguration": { "$ref": "#/definitions/AWS::S3::Bucket.VersioningConfiguration", - "markdownDescription": "Enables multiple versions of all objects in this bucket. 
You might enable versioning to prevent objects from being deleted or overwritten by mistake or to archive objects so that you can retrieve previous versions of them.", + "markdownDescription": "Enables multiple versions of all objects in this bucket. You might enable versioning to prevent objects from being deleted or overwritten by mistake or to archive objects so that you can retrieve previous versions of them.\n\n> When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. We recommend that you wait for 15 minutes after enabling versioning before issuing write operations ( `PUT` or `DELETE` ) on objects in the bucket.", "title": "VersioningConfiguration" }, "WebsiteConfiguration": { @@ -242381,7 +242381,7 @@ "type": "array" }, "RejectedPatchesAction": { - "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *BLOCK* : Packages in the *Rejected patches* list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the *Rejected patches* list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as *InstalledRejected* .", + "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- **ALLOW_AS_DEPENDENCY** - *Linux and macOS* : A package in the rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `INSTALLED_OTHER` . This is the default action if no option is specified.\n\n*Windows Server* : Windows Server doesn't support the concept of package dependencies. If a package in the rejected patches list and already installed on the node, its status is reported as `INSTALLED_OTHER` . Any package not already installed on the node is skipped. This is the default action if no option is specified.\n- **BLOCK** - *All OSs* : Packages in the rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as `INSTALLED_REJECTED` .", "title": "RejectedPatchesAction", "type": "string" }, @@ -261095,7 +261095,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of Amazon CloudWatch alarms to be monitored during the deployment. The deployment fails and rolls back if any of these alarms go into the `ALARM` state.", + "markdownDescription": "A list of Amazon CloudWatch alarm names to be monitored during the deployment. The deployment fails and rolls back if any of these alarms go into the `ALARM` state.\n\n> Amazon CloudWatch considers nonexistent alarms to have an `OK` state. 
If you provide an invalid alarm name or provide the ARN of an alarm instead of its name, your deployment may not roll back correctly.", "title": "Alarms", "type": "array" }, diff --git a/schema_source/cloudformation-docs.json b/schema_source/cloudformation-docs.json index f49fa532d..735064c16 100644 --- a/schema_source/cloudformation-docs.json +++ b/schema_source/cloudformation-docs.json @@ -254,6 +254,9 @@ "Key": "The key of the tag. Must not begin with `aws:` .", "Value": "The value of the tag." }, + "AWS::ARCZonalShift::AutoshiftObserverNotificationStatus": { + "Status": "" + }, "AWS::ARCZonalShift::ZonalAutoshiftConfiguration": { "PracticeRunConfiguration": "A practice run configuration for a resource includes the Amazon CloudWatch alarms that you've specified for a practice run, as well as any blocked dates or blocked windows for the practice run. When a resource has a practice run configuration, Route 53 ARC shifts traffic for the resource weekly for practice runs.\n\nPractice runs are required for zonal autoshift. The zonal shifts that Route 53 ARC starts for practice runs help you to ensure that shifting away traffic from an Availability Zone during an autoshift is safe for your application.\n\nYou can update or delete a practice run configuration. Before you delete a practice run configuration, you must disable zonal autoshift for the resource. A practice run configuration is required when zonal autoshift is enabled.", "ResourceIdentifier": "The identifier for the resource that AWS shifts traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.\n\nAt this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.", @@ -3288,17 +3291,17 @@ "TargetLocation": "The target location of the input file." }, "AWS::AppTest::TestCase M2ManagedActionProperties": { - "ForceStop": "Force stops the AWS Mainframe Modernization managed action properties.", - "ImportDataSetLocation": "The import data set location of the AWS Mainframe Modernization managed action properties." + "ForceStop": "Force stops the Mainframe Modernization managed action properties.", + "ImportDataSetLocation": "The import data set location of the Mainframe Modernization managed action properties." }, "AWS::AppTest::TestCase M2ManagedApplicationAction": { - "ActionType": "The action type of the AWS Mainframe Modernization managed application action.", - "Properties": "The properties of the AWS Mainframe Modernization managed application action.", - "Resource": "The resource of the AWS Mainframe Modernization managed application action." + "ActionType": "The action type of the Mainframe Modernization managed application action.", + "Properties": "The properties of the Mainframe Modernization managed application action.", + "Resource": "The resource of the Mainframe Modernization managed application action." }, "AWS::AppTest::TestCase M2NonManagedApplicationAction": { - "ActionType": "The action type of the AWS Mainframe Modernization non-managed application action.", - "Resource": "The resource of the AWS Mainframe Modernization non-managed application action." + "ActionType": "The action type of the Mainframe Modernization non-managed application action.", + "Resource": "The resource of the Mainframe Modernization non-managed application action." 
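As a rough illustration of how the `M2ManagedApplicationAction` properties documented above compose inside a test case, consider the following sketch. The action type, resource name, and S3 prefix are hypothetical placeholders, not values confirmed by this schema update:

```json
{
  "ResourceAction": {
    "M2ManagedApplicationAction": {
      "ActionType": "Configure",
      "Resource": "MyMainframeApp",
      "Properties": {
        "ForceStop": false,
        "ImportDataSetLocation": "s3://example-bucket/test-datasets/"
      }
    }
  }
}
```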
}, "AWS::AppTest::TestCase MainframeAction": { "ActionType": "The action type of the mainframe action.", @@ -3320,8 +3323,8 @@ }, "AWS::AppTest::TestCase ResourceAction": { "CloudFormationAction": "The CloudFormation action of the resource action.", - "M2ManagedApplicationAction": "The AWS Mainframe Modernization managed application action of the resource action.", - "M2NonManagedApplicationAction": "The AWS Mainframe Modernization non-managed application action of the resource action." + "M2ManagedApplicationAction": "The Mainframe Modernization managed application action of the resource action.", + "M2NonManagedApplicationAction": "The Mainframe Modernization non-managed application action of the resource action." }, "AWS::AppTest::TestCase Script": { "ScriptLocation": "The script location of the scripts.", @@ -4327,7 +4330,7 @@ }, "AWS::Backup::BackupVault": { "AccessPolicy": "A resource-based policy that is used to manage access permissions on the target backup vault.", - "BackupVaultName": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.", + "BackupVaultName": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created.", "BackupVaultTags": "The tags to assign to the backup vault.", "EncryptionKeyArn": "A server-side encryption key you can specify to encrypt your backups from services that support full AWS Backup management; for example, `arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` . If you specify a key, you must specify its ARN, not its alias. If you do not specify a key, AWS Backup creates a KMS key for you by default.\n\nTo learn which AWS Backup services support full AWS Backup management and how AWS Backup handles encryption for backups from services that do not yet support full AWS Backup , see [Encryption for backups in AWS Backup](https://docs.aws.amazon.com/aws-backup/latest/devguide/encryption.html)", "LockConfiguration": "Configuration for [AWS Backup Vault Lock](https://docs.aws.amazon.com/aws-backup/latest/devguide/vault-lock.html) .", @@ -4663,6 +4666,7 @@ "AWS::Batch::JobDefinition NodeRangeProperty": { "Container": "The container details for the node range.", "EcsProperties": "This is an object that represents the properties of the node range for a multi-node parallel job.", + "EksProperties": "This is an object that represents the properties of the node range for a multi-node parallel job.", "InstanceTypes": "The instance types of the underlying host infrastructure of a multi-node parallel job.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources.\n> \n> In addition, this list object is currently limited to one element.", "TargetNodes": "The range of nodes, using node index values. A range of `0:3` indicates nodes with index values of `0` through `3` . If the starting range value is omitted ( `:n` ), then `0` is used to start the range. If the ending range value is omitted ( `n:` ), then the highest possible node index is used to end the range. Your accumulative node ranges must account for all nodes ( `0:n` ). You can nest node ranges (for example, `0:10` and `4:5` ). In this case, the `4:5` range properties override the `0:10` properties." 
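To illustrate the `TargetNodes` range semantics described above, here is a minimal sketch of a multi-node parallel job's `NodeProperties` (image names and node counts are invented for illustration, and the container definitions are abbreviated). Nodes 4 and 5 match both ranges, so the `4:5` container settings override the `0:10` ones:

```json
{
  "NodeProperties": {
    "NumNodes": 11,
    "MainNode": 0,
    "NodeRangeProperties": [
      {
        "TargetNodes": "0:10",
        "Container": { "Image": "example/base:latest" }
      },
      {
        "TargetNodes": "4:5",
        "Container": { "Image": "example/profiler:latest" }
      }
    ]
  }
}
```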
   },
@@ -4899,10 +4903,244 @@
   "AWS::Bedrock::DataSource VectorIngestionConfiguration": {
     "ChunkingConfiguration": "Details about how to chunk the documents in the data source. A *chunk* refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried."
   },
+  "AWS::Bedrock::Flow": {
+    "CustomerEncryptionKeyArn": "The Amazon Resource Name (ARN) of the KMS key that the flow is encrypted with.",
+    "Definition": "The definition of the nodes and connections between the nodes in the flow.",
+    "DefinitionS3Location": "The Amazon S3 location of the flow definition.",
+    "DefinitionString": "The definition of the flow as a JSON-formatted string. The string must match the format in [FlowDefinition](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-bedrock-flow-flowdefinition.html) .",
+    "DefinitionSubstitutions": "A map that specifies the mappings for placeholder variables in the prompt flow definition. This enables the customer to inject values obtained at runtime. Variables can be template parameter names, resource logical IDs, resource attributes, or a variable in a key-value map. Only supported with the `DefinitionString` and `DefinitionS3Location` fields.\n\nSubstitutions must follow the syntax: `${key_name}` or `${variable_1,variable_2,...}` .",
+    "Description": "A description of the flow.",
+    "ExecutionRoleArn": "The Amazon Resource Name (ARN) of the service role with permissions to create a flow. For more information, see [Create a service role for flows](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-permissions.html) in the Amazon Bedrock User Guide.",
+    "Name": "The name of the flow.",
+    "Tags": "Metadata that you can assign to a resource as key-value pairs. For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)",
+    "TestAliasTags": ""
+  },
+  "AWS::Bedrock::Flow ConditionFlowNodeConfiguration": {
+    "Conditions": "An array of conditions. Each member contains the name of a condition and an expression that defines the condition."
+  },
+  "AWS::Bedrock::Flow FlowCondition": {
+    "Expression": "Defines the condition. You must refer to at least one of the inputs in the condition. For more information, expand the Condition node section in [Node types in prompt flows](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-how-it-works.html#flows-nodes) .",
+    "Name": "A name for the condition that you can reference."
+  },
+  "AWS::Bedrock::Flow FlowConditionalConnectionConfiguration": {
+    "Condition": "The condition that triggers this connection. For more information about how to write conditions, see the *Condition* node type in the [Node types](https://docs.aws.amazon.com/bedrock/latest/userguide/node-types.html) topic in the Amazon Bedrock User Guide."
+  },
+  "AWS::Bedrock::Flow FlowConnection": {
+    "Configuration": "The configuration of the connection.",
+    "Name": "A name for the connection that you can reference.",
+    "Source": "The node that the connection starts at.",
+    "Target": "The node that the connection ends at.",
+    "Type": "Whether the source node that the connection begins from is a condition node ( `Conditional` ) or not ( `Data` )."
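Putting the `FlowConnection` fields documented above together, a data connection between two hypothetical nodes could be sketched as follows. Only the keys come from the definitions above; the node, input, and output names are invented for illustration:

```json
{
  "Name": "PromptToOutput",
  "Source": "SummarizePrompt",
  "Target": "FlowOutput",
  "Type": "Data",
  "Configuration": {
    "Data": {
      "SourceOutput": "modelCompletion",
      "TargetInput": "document"
    }
  }
}
```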
+ }, + "AWS::Bedrock::Flow FlowConnectionConfiguration": { + "Conditional": "The configuration of a connection originating from a Condition node.", + "Data": "The configuration of a connection originating from a node that isn't a Condition node." + }, + "AWS::Bedrock::Flow FlowDataConnectionConfiguration": { + "SourceOutput": "The name of the output in the source node that the connection begins from.", + "TargetInput": "The name of the input in the target node that the connection ends at." + }, + "AWS::Bedrock::Flow FlowDefinition": { + "Connections": "An array of connection definitions in the flow.", + "Nodes": "An array of node definitions in the flow." + }, + "AWS::Bedrock::Flow FlowNode": { + "Configuration": "Contains configurations for the node.", + "Inputs": "An array of objects, each of which contains information about an input into the node.", + "Name": "A name for the node.", + "Outputs": "A list of objects, each of which contains information about an output from the node.", + "Type": "The type of node. This value must match the name of the key that you provide in the configuration you provide in the `FlowNodeConfiguration` field." + }, + "AWS::Bedrock::Flow FlowNodeConfiguration": { + "Condition": "Contains configurations for a Condition node in your flow. Defines conditions that lead to different branches of the flow.", + "Input": "Contains configurations for an input flow node in your flow. The first node in the flow. `inputs` can't be specified for this node.", + "KnowledgeBase": "Contains configurations for a knowledge base node in your flow. Queries a knowledge base and returns the retrieved results or generated response.", + "LambdaFunction": "Contains configurations for a Lambda function node in your flow. Invokes an AWS Lambda function.", + "Lex": "Contains configurations for a Lex node in your flow. Invokes an Amazon Lex bot to identify the intent of the input and return the intent as the output.", + "Output": "Contains configurations for an output flow node in your flow. The last node in the flow. `outputs` can't be specified for this node.", + "Prompt": "Contains configurations for a prompt node in your flow. Runs a prompt and generates the model response as the output. You can use a prompt from Prompt management or you can configure one in this node." + }, + "AWS::Bedrock::Flow FlowNodeInput": { + "Expression": "An expression that formats the input for the node. For an explanation of how to create expressions, see [Expressions in Prompt flows in Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-expressions.html) .", + "Name": "A name for the input that you can reference.", + "Type": "The data type of the input. If the input doesn't match this type at runtime, a validation error will be thrown." + }, + "AWS::Bedrock::Flow FlowNodeOutput": { + "Name": "A name for the output that you can reference.", + "Type": "The data type of the output. If the output doesn't match this type at runtime, a validation error will be thrown." + }, + "AWS::Bedrock::Flow KnowledgeBaseFlowNodeConfiguration": { + "KnowledgeBaseId": "The unique identifier of the knowledge base to query.", + "ModelId": "The unique identifier of the model to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array." + }, + "AWS::Bedrock::Flow LambdaFunctionFlowNodeConfiguration": { + "LambdaArn": "The Amazon Resource Name (ARN) of the Lambda function to invoke." 
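A node definition ties these pieces together: the `Type` value names the key used inside `Configuration`, as described above. The following sketch of a knowledge base node uses only keys documented here; the knowledge base ID, model ID, data types, and the `$.data` expression are illustrative assumptions:

```json
{
  "Name": "QueryKnowledgeBase",
  "Type": "KnowledgeBase",
  "Configuration": {
    "KnowledgeBase": {
      "KnowledgeBaseId": "EXAMPLEKB01",
      "ModelId": "anthropic.claude-3-sonnet-20240229-v1:0"
    }
  },
  "Inputs": [
    { "Name": "retrievalQuery", "Type": "String", "Expression": "$.data" }
  ],
  "Outputs": [
    { "Name": "outputText", "Type": "String" }
  ]
}
```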
+  },
+  "AWS::Bedrock::Flow LexFlowNodeConfiguration": {
+    "BotAliasArn": "The Amazon Resource Name (ARN) of the Amazon Lex bot alias to invoke.",
+    "LocaleId": "The Region to invoke the Amazon Lex bot in."
+  },
+  "AWS::Bedrock::Flow PromptFlowNodeConfiguration": {
+    "SourceConfiguration": "Specifies whether the prompt is from Prompt management or defined inline."
+  },
+  "AWS::Bedrock::Flow PromptFlowNodeInlineConfiguration": {
+    "InferenceConfiguration": "Contains inference configurations for the prompt.",
+    "ModelId": "The unique identifier of the model to run inference with.",
+    "TemplateConfiguration": "Contains a prompt and variables in the prompt that can be replaced with values at runtime.",
+    "TemplateType": "The type of prompt template."
+  },
+  "AWS::Bedrock::Flow PromptFlowNodeResourceConfiguration": {
+    "PromptArn": "The Amazon Resource Name (ARN) of the prompt from Prompt management."
+  },
+  "AWS::Bedrock::Flow PromptFlowNodeSourceConfiguration": {
+    "Inline": "Contains configurations for a prompt that is defined inline.",
+    "Resource": "Contains configurations for a prompt from Prompt management."
+  },
+  "AWS::Bedrock::Flow PromptInferenceConfiguration": {
+    "Text": "Contains inference configurations for a text prompt."
+  },
+  "AWS::Bedrock::Flow PromptInputVariable": {
+    "Name": "The name of the variable."
+  },
+  "AWS::Bedrock::Flow PromptModelInferenceConfiguration": {
+    "MaxTokens": "The maximum number of tokens to return in the response.",
+    "StopSequences": "A list of strings that define sequences after which the model will stop generating.",
+    "Temperature": "Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.",
+    "TopK": "The number of most-likely candidates that the model considers for the next token during generation.",
+    "TopP": "The percentage of most-likely candidates that the model considers for the next token."
+  },
+  "AWS::Bedrock::Flow PromptTemplateConfiguration": {
+    "Text": "Contains configurations for the text in a message for a prompt."
+  },
+  "AWS::Bedrock::Flow S3Location": {
+    "Bucket": "The S3 bucket containing the flow definition.",
+    "Key": "The object key for the S3 location containing the definition.",
+    "Version": "The version of the Amazon S3 location to use."
+  },
+  "AWS::Bedrock::Flow TextPromptTemplateConfiguration": {
+    "InputVariables": "An array of the variables in the prompt template.",
+    "Text": "The message for the prompt."
+  },
+  "AWS::Bedrock::FlowAlias": {
+    "Description": "A description of the alias.",
+    "FlowArn": "The Amazon Resource Name (ARN) of the flow.",
+    "Name": "The name of the alias.",
+    "RoutingConfiguration": "A list of configurations about the versions that the alias maps to. Currently, you can only specify one.",
+    "Tags": "Metadata that you can assign to a resource as key-value pairs. For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)"
+  },
+  "AWS::Bedrock::FlowAlias FlowAliasRoutingConfigurationListItem": {
+    "FlowVersion": "The version that the alias maps to."
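As a hedged sketch of how `AWS::Bedrock::FlowAlias` might be declared in a template using the properties above (the logical ID `MyFlow`, the alias name, and the use of `Fn::GetAtt` for the flow ARN are assumptions, not confirmed by this schema update):

```json
{
  "MyFlowAlias": {
    "Type": "AWS::Bedrock::FlowAlias",
    "Properties": {
      "Name": "live",
      "Description": "Routes invocations to a pinned flow version.",
      "FlowArn": { "Fn::GetAtt": ["MyFlow", "Arn"] },
      "RoutingConfiguration": [
        { "FlowVersion": "1" }
      ]
    }
  }
}
```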
+ }, + "AWS::Bedrock::FlowVersion": { + "Description": "The description of the flow version.", + "FlowArn": "The Amazon Resource Name (ARN) of the flow that the version belongs to." + }, + "AWS::Bedrock::FlowVersion ConditionFlowNodeConfiguration": { + "Conditions": "An array of conditions. Each member contains the name of a condition and an expression that defines the condition." + }, + "AWS::Bedrock::FlowVersion FlowCondition": { + "Expression": "Defines the condition. You must refer to at least one of the inputs in the condition. For more information, expand the Condition node section in [Node types in prompt flows](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-how-it-works.html#flows-nodes) .", + "Name": "A name for the condition that you can reference." + }, + "AWS::Bedrock::FlowVersion FlowConditionalConnectionConfiguration": { + "Condition": "The condition that triggers this connection. For more information about how to write conditions, see the *Condition* node type in the [Node types](https://docs.aws.amazon.com/bedrock/latest/userguide/node-types.html) topic in the Amazon Bedrock User Guide." + }, + "AWS::Bedrock::FlowVersion FlowConnection": { + "Configuration": "The configuration of the connection.", + "Name": "A name for the connection that you can reference.", + "Source": "The node that the connection starts at.", + "Target": "The node that the connection ends at.", + "Type": "Whether the source node that the connection begins from is a condition node ( `Conditional` ) or not ( `Data` )." + }, + "AWS::Bedrock::FlowVersion FlowConnectionConfiguration": { + "Conditional": "The configuration of a connection originating from a Condition node.", + "Data": "The configuration of a connection originating from a node that isn't a Condition node." + }, + "AWS::Bedrock::FlowVersion FlowDataConnectionConfiguration": { + "SourceOutput": "The name of the output in the source node that the connection begins from.", + "TargetInput": "The name of the input in the target node that the connection ends at." + }, + "AWS::Bedrock::FlowVersion FlowDefinition": { + "Connections": "An array of connection definitions in the flow.", + "Nodes": "An array of node definitions in the flow." + }, + "AWS::Bedrock::FlowVersion FlowNode": { + "Configuration": "Contains configurations for the node.", + "Inputs": "An array of objects, each of which contains information about an input into the node.", + "Name": "A name for the node.", + "Outputs": "A list of objects, each of which contains information about an output from the node.", + "Type": "The type of node. This value must match the name of the key that you provide in the configuration you provide in the `FlowNodeConfiguration` field." + }, + "AWS::Bedrock::FlowVersion FlowNodeConfiguration": { + "Condition": "Contains configurations for a Condition node in your flow. Defines conditions that lead to different branches of the flow.", + "Input": "Contains configurations for an input flow node in your flow. The first node in the flow. `inputs` can't be specified for this node.", + "KnowledgeBase": "Contains configurations for a knowledge base node in your flow. Queries a knowledge base and returns the retrieved results or generated response.", + "LambdaFunction": "Contains configurations for a Lambda function node in your flow. Invokes an AWS Lambda function.", + "Lex": "Contains configurations for a Lex node in your flow. 
Invokes an Amazon Lex bot to identify the intent of the input and return the intent as the output.", + "Output": "Contains configurations for an output flow node in your flow. The last node in the flow. `outputs` can't be specified for this node.", + "Prompt": "Contains configurations for a prompt node in your flow. Runs a prompt and generates the model response as the output. You can use a prompt from Prompt management or you can configure one in this node." + }, + "AWS::Bedrock::FlowVersion FlowNodeInput": { + "Expression": "An expression that formats the input for the node. For an explanation of how to create expressions, see [Expressions in Prompt flows in Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-expressions.html) .", + "Name": "A name for the input that you can reference.", + "Type": "The data type of the input. If the input doesn't match this type at runtime, a validation error will be thrown." + }, + "AWS::Bedrock::FlowVersion FlowNodeOutput": { + "Name": "A name for the output that you can reference.", + "Type": "The data type of the output. If the output doesn't match this type at runtime, a validation error will be thrown." + }, + "AWS::Bedrock::FlowVersion KnowledgeBaseFlowNodeConfiguration": { + "KnowledgeBaseId": "The unique identifier of the knowledge base to query.", + "ModelId": "The unique identifier of the model to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array." + }, + "AWS::Bedrock::FlowVersion LambdaFunctionFlowNodeConfiguration": { + "LambdaArn": "The Amazon Resource Name (ARN) of the Lambda function to invoke." + }, + "AWS::Bedrock::FlowVersion LexFlowNodeConfiguration": { + "BotAliasArn": "The Amazon Resource Name (ARN) of the Amazon Lex bot alias to invoke.", + "LocaleId": "The Region to invoke the Amazon Lex bot in." + }, + "AWS::Bedrock::FlowVersion PromptFlowNodeConfiguration": { + "SourceConfiguration": "Specifies whether the prompt is from Prompt management or defined inline." + }, + "AWS::Bedrock::FlowVersion PromptFlowNodeInlineConfiguration": { + "InferenceConfiguration": "Contains inference configurations for the prompt.", + "ModelId": "The unique identifier of the model to run inference with.", + "TemplateConfiguration": "Contains a prompt and variables in the prompt that can be replaced with values at runtime.", + "TemplateType": "The type of prompt template." + }, + "AWS::Bedrock::FlowVersion PromptFlowNodeResourceConfiguration": { + "PromptArn": "The Amazon Resource Name (ARN) of the prompt from Prompt management." + }, + "AWS::Bedrock::FlowVersion PromptFlowNodeSourceConfiguration": { + "Inline": "Contains configurations for a prompt that is defined inline", + "Resource": "Contains configurations for a prompt from Prompt management." + }, + "AWS::Bedrock::FlowVersion PromptInferenceConfiguration": { + "Text": "Contains inference configurations for a text prompt." + }, + "AWS::Bedrock::FlowVersion PromptInputVariable": { + "Name": "The name of the variable." + }, + "AWS::Bedrock::FlowVersion PromptModelInferenceConfiguration": { + "MaxTokens": "The maximum number of tokens to return in the response.", + "StopSequences": "A list of strings that define sequences after which the model will stop generating.", + "Temperature": "Controls the randomness of the response. 
Choose a lower value for more predictable outputs and a higher value for more surprising outputs.",
+    "TopK": "The number of most-likely candidates that the model considers for the next token during generation.",
+    "TopP": "The percentage of most-likely candidates that the model considers for the next token."
+  },
+  "AWS::Bedrock::FlowVersion PromptTemplateConfiguration": {
+    "Text": "Contains configurations for the text in a message for a prompt."
+  },
+  "AWS::Bedrock::FlowVersion TextPromptTemplateConfiguration": {
+    "InputVariables": "An array of the variables in the prompt template.",
+    "Text": "The message for the prompt."
+  },
   "AWS::Bedrock::Guardrail": {
     "BlockedInputMessaging": "The message to return when the guardrail blocks a prompt.",
     "BlockedOutputsMessaging": "The message to return when the guardrail blocks a model response.",
     "ContentPolicyConfig": "The content filter policies to configure for the guardrail.",
+    "ContextualGroundingPolicyConfig": "",
     "Description": "A description of the guardrail.",
     "KmsKeyArn": "The ARN of the AWS KMS key that you use to encrypt the guardrail.",
     "Name": "The name of the guardrail.",
@@ -4919,6 +5157,13 @@
   "AWS::Bedrock::Guardrail ContentPolicyConfig": {
     "FiltersConfig": "Contains the type of the content filter and how strongly it should apply to prompts and model responses."
   },
+  "AWS::Bedrock::Guardrail ContextualGroundingFilterConfig": {
+    "Threshold": "",
+    "Type": ""
+  },
+  "AWS::Bedrock::Guardrail ContextualGroundingPolicyConfig": {
+    "FiltersConfig": ""
+  },
   "AWS::Bedrock::Guardrail ManagedWordsConfig": {
     "Type": "The managed word type to configure for the guardrail."
   },
@@ -4968,10 +5213,30 @@
     "StorageConfiguration": "Contains details about the storage configuration of the knowledge base.",
     "Tags": "Metadata that you can assign to a resource as key-value pairs. For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)"
   },
+  "AWS::Bedrock::KnowledgeBase BedrockEmbeddingModelConfiguration": {
+    "Dimensions": "The dimensions details for the vector configuration used on the Bedrock embeddings model."
+  },
+  "AWS::Bedrock::KnowledgeBase EmbeddingModelConfiguration": {
+    "BedrockEmbeddingModelConfiguration": "The vector configuration details on the Bedrock embeddings model."
+  },
   "AWS::Bedrock::KnowledgeBase KnowledgeBaseConfiguration": {
     "Type": "The type of data that the data source is converted into for the knowledge base.",
     "VectorKnowledgeBaseConfiguration": "Contains details about the embeddings model that's used to convert the data source."
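Read together, the embedding-model entries above nest as in the following sketch, assuming the `VECTOR` knowledge base type; the Titan embedding model ARN and the 1024-dimension value are illustrative assumptions rather than values stated in this schema update:

```json
{
  "KnowledgeBaseConfiguration": {
    "Type": "VECTOR",
    "VectorKnowledgeBaseConfiguration": {
      "EmbeddingModelArn": "arn:aws:bedrock:us-east-1::foundation-model/amazon.titan-embed-text-v2:0",
      "EmbeddingModelConfiguration": {
        "BedrockEmbeddingModelConfiguration": {
          "Dimensions": 1024
        }
      }
    }
  }
}
```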
}, + "AWS::Bedrock::KnowledgeBase MongoDbAtlasConfiguration": { + "CollectionName": "The collection name of the knowledge base in MongoDB Atlas.", + "CredentialsSecretArn": "The Amazon Resource Name (ARN) of the secret that you created in AWS Secrets Manager that contains user credentials for your MongoDB Atlas cluster.", + "DatabaseName": "The database name in your MongoDB Atlas cluster for your knowledge base.", + "Endpoint": "The endpoint URL of your MongoDB Atlas cluster for your knowledge base.", + "EndpointServiceName": "The name of the VPC endpoint service in your account that is connected to your MongoDB Atlas cluster.", + "FieldMapping": "Contains the names of the fields to which to map information about the vector store.", + "VectorIndexName": "The name of the MongoDB Atlas vector search index." + }, + "AWS::Bedrock::KnowledgeBase MongoDbAtlasFieldMapping": { + "MetadataField": "The name of the field in which Amazon Bedrock stores metadata about the vector store.", + "TextField": "The name of the field in which Amazon Bedrock stores the raw text from your data. The text is split according to the chunking strategy you choose.", + "VectorField": "The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources." + }, "AWS::Bedrock::KnowledgeBase OpenSearchServerlessConfiguration": { "CollectionArn": "The Amazon Resource Name (ARN) of the OpenSearch Service vector store.", "FieldMapping": "Contains the names of the fields to which to map information about the vector store.", @@ -5006,13 +5271,87 @@ "VectorField": "The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources." }, "AWS::Bedrock::KnowledgeBase StorageConfiguration": { + "MongoDbAtlasConfiguration": "Contains the storage configuration of the knowledge base in MongoDB Atlas.", "OpensearchServerlessConfiguration": "Contains the storage configuration of the knowledge base in Amazon OpenSearch Service.", "PineconeConfiguration": "Contains the storage configuration of the knowledge base in Pinecone.", "RdsConfiguration": "Contains details about the storage configuration of the knowledge base in Amazon RDS. For more information, see [Create a vector index in Amazon RDS](https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base-setup-rds.html) .", "Type": "The vector store service in which the knowledge base is stored." }, "AWS::Bedrock::KnowledgeBase VectorKnowledgeBaseConfiguration": { - "EmbeddingModelArn": "The Amazon Resource Name (ARN) of the model used to create vector embeddings for the knowledge base." + "EmbeddingModelArn": "The Amazon Resource Name (ARN) of the model used to create vector embeddings for the knowledge base.", + "EmbeddingModelConfiguration": "The embeddings model configuration details for the vector model used in Knowledge Base." + }, + "AWS::Bedrock::Prompt": { + "CustomerEncryptionKeyArn": "The Amazon Resource Name (ARN) of the KMS key that the prompt is encrypted with.", + "DefaultVariant": "The name of the default variant for the prompt. This value must match the `name` field in the relevant [PromptVariant](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent_PromptVariant.html) object.", + "Description": "The description of the prompt.", + "Name": "The name of the prompt.", + "Tags": "Metadata that you can assign to a resource as key-value pairs. 
For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)", + "Variants": "A list of objects, each containing details about a variant of the prompt." + }, + "AWS::Bedrock::Prompt PromptInferenceConfiguration": { + "Text": "Contains inference configurations for a text prompt." + }, + "AWS::Bedrock::Prompt PromptInputVariable": { + "Name": "The name of the variable." + }, + "AWS::Bedrock::Prompt PromptModelInferenceConfiguration": { + "MaxTokens": "The maximum number of tokens to return in the response.", + "StopSequences": "A list of strings that define sequences after which the model will stop generating.", + "Temperature": "Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.", + "TopK": "The number of most-likely candidates that the model considers for the next token during generation.", + "TopP": "The percentage of most-likely candidates that the model considers for the next token." + }, + "AWS::Bedrock::Prompt PromptTemplateConfiguration": { + "Text": "Contains configurations for the text in a message for a prompt." + }, + "AWS::Bedrock::Prompt PromptVariant": { + "InferenceConfiguration": "Contains inference configurations for the prompt variant.", + "ModelId": "The unique identifier of the model with which to run inference on the prompt.", + "Name": "The name of the prompt variant.", + "TemplateConfiguration": "Contains configurations for the prompt template.", + "TemplateType": "The type of prompt template to use." + }, + "AWS::Bedrock::Prompt TextPromptTemplateConfiguration": { + "InputVariables": "An array of the variables in the prompt template.", + "Text": "The message for the prompt.", + "TextS3Location": "The Amazon S3 location of the prompt text." + }, + "AWS::Bedrock::Prompt TextS3Location": { + "Bucket": "The Amazon S3 bucket containing the prompt text.", + "Key": "The object key for the Amazon S3 location.", + "Version": "The version of the Amazon S3 location to use." + }, + "AWS::Bedrock::PromptVersion": { + "Description": "The description of the prompt version.", + "PromptArn": "The Amazon Resource Name (ARN) of the version of the prompt." + }, + "AWS::Bedrock::PromptVersion PromptInferenceConfiguration": { + "Text": "Contains inference configurations for a text prompt." + }, + "AWS::Bedrock::PromptVersion PromptInputVariable": { + "Name": "The name of the variable." + }, + "AWS::Bedrock::PromptVersion PromptModelInferenceConfiguration": { + "MaxTokens": "The maximum number of tokens to return in the response.", + "StopSequences": "A list of strings that define sequences after which the model will stop generating.", + "Temperature": "Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.", + "TopK": "The number of most-likely candidates that the model considers for the next token during generation.", + "TopP": "The percentage of most-likely candidates that the model considers for the next token." + }, + "AWS::Bedrock::PromptVersion PromptTemplateConfiguration": { + "Text": "Contains configurations for the text in a message for a prompt." 
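To show how the prompt-variant pieces documented above fit together, here is a sketch of a single text variant. The variant name, model ID, inference values, and the `{{document}}` placeholder syntax are assumptions for illustration; the keys come from the `PromptVariant`, `PromptModelInferenceConfiguration`, and `TextPromptTemplateConfiguration` entries above:

```json
{
  "Name": "summarize-v1",
  "TemplateType": "TEXT",
  "ModelId": "anthropic.claude-3-haiku-20240307-v1:0",
  "InferenceConfiguration": {
    "Text": { "MaxTokens": 512, "Temperature": 0.2, "TopP": 0.9 }
  },
  "TemplateConfiguration": {
    "Text": {
      "Text": "Summarize the following document:\n\n{{document}}",
      "InputVariables": [ { "Name": "document" } ]
    }
  }
}
```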
+ }, + "AWS::Bedrock::PromptVersion PromptVariant": { + "InferenceConfiguration": "Contains inference configurations for the prompt variant.", + "ModelId": "The unique identifier of the model with which to run inference on the prompt.", + "Name": "The name of the prompt variant.", + "TemplateConfiguration": "Contains configurations for the prompt template.", + "TemplateType": "The type of prompt template to use." + }, + "AWS::Bedrock::PromptVersion TextPromptTemplateConfiguration": { + "InputVariables": "An array of the variables in the prompt template.", + "Text": "The message for the prompt." }, "AWS::BillingConductor::BillingGroup": { "AccountGrouping": "The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated billing family.", @@ -5448,7 +5787,7 @@ "AWS::CleanRooms::ConfiguredTable": { "AllowedColumns": "The columns within the underlying AWS Glue table that can be utilized within collaborations.", "AnalysisMethod": "The analysis method for the configured table. The only valid value is currently `DIRECT_QUERY`.", - "AnalysisRules": "The entire created analysis rule.", + "AnalysisRules": "The analysis rule that was created for the configured table.", "Description": "A description for the configured table.", "Name": "A name for the configured table.", "TableReference": "The AWS Glue table that this configured table represents.", @@ -5468,6 +5807,7 @@ "Type": "The type of analysis rule." }, "AWS::CleanRooms::ConfiguredTable AnalysisRuleAggregation": { + "AdditionalAnalyses": "An indicator as to whether additional analyses (such as AWS Clean Rooms ML) can be applied to the output of the direct query.\n\nThe `additionalAnalyses` parameter is currently supported for the list analysis rule ( `AnalysisRuleList` ) and the custom analysis rule ( `AnalysisRuleCustom` ).", "AggregateColumns": "The columns that query runners are allowed to use in aggregation queries.", "AllowedJoinOperators": "Which logical operators (if any) are to be used in an INNER JOIN match condition. Default is `AND` .", "DimensionColumns": "The columns that query runners are allowed to select, group by, or filter by.", @@ -5477,11 +5817,14 @@ "ScalarFunctions": "Set of scalar functions that are allowed to be used on dimension columns and the output of aggregation of metrics." }, "AWS::CleanRooms::ConfiguredTable AnalysisRuleCustom": { + "AdditionalAnalyses": "An indicator as to whether additional analyses (such as AWS Clean Rooms ML) can be applied to the output of the direct query.", "AllowedAnalyses": "The ARN of the analysis templates that are allowed by the custom analysis rule.", "AllowedAnalysisProviders": "The IDs of the AWS accounts that are allowed to query by the custom analysis rule. Required when `allowedAnalyses` is `ANY_QUERY` .", - "DifferentialPrivacy": "The differential privacy configuration." + "DifferentialPrivacy": "The differential privacy configuration.", + "DisallowedOutputColumns": "A list of columns that aren't allowed to be shown in the query output." }, "AWS::CleanRooms::ConfiguredTable AnalysisRuleList": { + "AdditionalAnalyses": "An indicator as to whether additional analyses (such as AWS Clean Rooms ML) can be applied to the output of the direct query.", "AllowedJoinOperators": "The logical operators (if any) that are to be used in an INNER JOIN match condition. 
Default is `AND` .", "JoinColumns": "Columns that can be used to join a configured table with the table of the member who can query and other members' configured tables.", "ListColumns": "Columns that can be listed in the output." @@ -5512,6 +5855,7 @@ "Value": "The value of the tag." }, "AWS::CleanRooms::ConfiguredTableAssociation": { + "ConfiguredTableAssociationAnalysisRules": "An analysis rule for a configured table association. This analysis rule specifies how data from the table can be used within its associated collaboration. In the console, the `ConfiguredTableAssociationAnalysisRule` is referred to as the *collaboration analysis rule* .", "ConfiguredTableIdentifier": "A unique identifier for the configured table to be associated to. Currently accepts a configured table ID.", "Description": "A description of the configured table association.", "MembershipIdentifier": "The unique ID for the membership this configured table association belongs to.", @@ -5519,10 +5863,80 @@ "RoleArn": "The service will assume this role to access catalog metadata and query the table.", "Tags": "An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource." }, + "AWS::CleanRooms::ConfiguredTableAssociation ConfiguredTableAssociationAnalysisRule": { + "Policy": "The policy of the configured table association analysis rule.", + "Type": "The type of the configured table association analysis rule." + }, + "AWS::CleanRooms::ConfiguredTableAssociation ConfiguredTableAssociationAnalysisRuleAggregation": { + "AllowedAdditionalAnalyses": "The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output.\n\nThe `allowedAdditionalAnalyses` parameter is currently supported for the list analysis rule ( `AnalysisRuleList` ) and the custom analysis rule ( `AnalysisRuleCustom` ).", + "AllowedResultReceivers": "The list of collaboration members who are allowed to receive results of queries run with this configured table." + }, + "AWS::CleanRooms::ConfiguredTableAssociation ConfiguredTableAssociationAnalysisRuleCustom": { + "AllowedAdditionalAnalyses": "The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output.", + "AllowedResultReceivers": "The list of collaboration members who are allowed to receive results of queries run with this configured table." + }, + "AWS::CleanRooms::ConfiguredTableAssociation ConfiguredTableAssociationAnalysisRuleList": { + "AllowedAdditionalAnalyses": "The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output.", + "AllowedResultReceivers": "The list of collaboration members who are allowed to receive results of queries run with this configured table." + }, + "AWS::CleanRooms::ConfiguredTableAssociation ConfiguredTableAssociationAnalysisRulePolicy": { + "V1": "The policy for the configured table association analysis rule." + }, + "AWS::CleanRooms::ConfiguredTableAssociation ConfiguredTableAssociationAnalysisRulePolicyV1": { + "Aggregation": "Analysis rule type that enables only aggregation queries on a configured table.", + "Custom": "Analysis rule type that enables the table owner to approve custom SQL queries on their configured tables. It supports differential privacy.", + "List": "Analysis rule type that enables only list queries on a configured table." 
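A configured table association analysis rule nests its policy under a versioned key, as the `Policy` and `V1` entries above describe. A hedged sketch of a custom rule follows; the `CUSTOM` type value, the account ID, and the wildcard entry are illustrative assumptions:

```json
{
  "ConfiguredTableAssociationAnalysisRules": [
    {
      "Type": "CUSTOM",
      "Policy": {
        "V1": {
          "Custom": {
            "AllowedResultReceivers": ["111122223333"],
            "AllowedAdditionalAnalyses": ["*"]
          }
        }
      }
    }
  ]
}
```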
+ }, "AWS::CleanRooms::ConfiguredTableAssociation Tag": { "Key": "The key of the tag.", "Value": "The value of the tag." }, + "AWS::CleanRooms::IdMappingTable": { + "Description": "The description of the ID mapping table.", + "InputReferenceConfig": "The input reference configuration for the ID mapping table.", + "KmsKeyArn": "The Amazon Resource Name (ARN) of the AWS KMS key.", + "MembershipIdentifier": "The unique identifier of the membership resource for the ID mapping table.", + "Name": "The name of the ID mapping table.", + "Tags": "An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource." + }, + "AWS::CleanRooms::IdMappingTable IdMappingTableInputReferenceConfig": { + "InputReferenceArn": "The Amazon Resource Name (ARN) of the referenced resource in AWS Entity Resolution . Valid values are ID mapping workflow ARNs.", + "ManageResourcePolicies": "When `TRUE` , AWS Clean Rooms manages permissions for the ID mapping table resource.\n\nWhen `FALSE` , the resource owner manages permissions for the ID mapping table resource." + }, + "AWS::CleanRooms::IdMappingTable IdMappingTableInputReferenceProperties": { + "IdMappingTableInputSource": "The input source of the ID mapping table." + }, + "AWS::CleanRooms::IdMappingTable IdMappingTableInputSource": { + "IdNamespaceAssociationId": "The unique identifier of the ID namespace association.", + "Type": "The type of the input source of the ID mapping table." + }, + "AWS::CleanRooms::IdMappingTable Tag": { + "Key": "The key of the tag.", + "Value": "The value of the tag." + }, + "AWS::CleanRooms::IdNamespaceAssociation": { + "Description": "The description of the ID namespace association.", + "IdMappingConfig": "The configuration settings for the ID mapping table.", + "InputReferenceConfig": "The input reference configuration for the ID namespace association.", + "MembershipIdentifier": "The unique identifier of the membership that contains the ID namespace association.", + "Name": "The name of this ID namespace association.", + "Tags": "" + }, + "AWS::CleanRooms::IdNamespaceAssociation IdMappingConfig": { + "AllowUseAsDimensionColumn": "An indicator as to whether you can use your column as a dimension column in the ID mapping table ( `TRUE` ) or not ( `FALSE` ).\n\nDefault is `FALSE` ." + }, + "AWS::CleanRooms::IdNamespaceAssociation IdNamespaceAssociationInputReferenceConfig": { + "InputReferenceArn": "The Amazon Resource Name (ARN) of the AWS Entity Resolution resource that is being associated to the collaboration. Valid resource ARNs are from the ID namespaces that you own.", + "ManageResourcePolicies": "When `TRUE` , AWS Clean Rooms manages permissions for the ID namespace association resource.\n\nWhen `FALSE` , the resource owner manages permissions for the ID namespace association resource." + }, + "AWS::CleanRooms::IdNamespaceAssociation IdNamespaceAssociationInputReferenceProperties": { + "IdMappingWorkflowsSupported": "Defines how ID mapping workflows are supported for this ID namespace association.", + "IdNamespaceType": "The ID namespace type for this ID namespace association." + }, + "AWS::CleanRooms::IdNamespaceAssociation Tag": { + "Key": "The key of the tag.", + "Value": "The value of the tag." 
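Drawing only on the property descriptions above, an `AWS::CleanRooms::IdNamespaceAssociation` declaration might be sketched like this; the membership identifier and the AWS Entity Resolution ARN are hypothetical placeholders:

```json
{
  "MyIdNamespaceAssociation": {
    "Type": "AWS::CleanRooms::IdNamespaceAssociation",
    "Properties": {
      "MembershipIdentifier": "11111111-2222-3333-4444-555555555555",
      "Name": "customer-id-namespace",
      "InputReferenceConfig": {
        "InputReferenceArn": "arn:aws:entityresolution:us-east-1:111122223333:idnamespace/customers",
        "ManageResourcePolicies": true
      },
      "IdMappingConfig": {
        "AllowUseAsDimensionColumn": false
      }
    }
  }
}
```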
+ }, "AWS::CleanRooms::Membership": { "CollaborationIdentifier": "The unique ID for the associated collaboration.", "DefaultResultConfiguration": "The default protected query result configuration as specified by the member who can receive results.", @@ -5534,7 +5948,7 @@ "QueryCompute": "The payment responsibilities accepted by the collaboration member for query compute costs." }, "AWS::CleanRooms::Membership MembershipProtectedQueryOutputConfiguration": { - "S3": "Required configuration for a protected query with an `S3` output type." + "S3": "Required configuration for a protected query with an `s3` output type." }, "AWS::CleanRooms::Membership MembershipProtectedQueryResultConfiguration": { "OutputConfiguration": "Configuration for protected query results.", @@ -5555,7 +5969,7 @@ "AWS::CleanRooms::PrivacyBudgetTemplate": { "AutoRefresh": "How often the privacy budget refreshes.\n\n> If you plan to regularly bring new data into the collaboration, use `CALENDAR_MONTH` to automatically get a new privacy budget for the collaboration every calendar month. Choosing this option allows arbitrary amounts of information to be revealed about rows of the data when repeatedly queried across refreshes. Avoid choosing this if the same rows will be repeatedly queried between privacy budget refreshes.", "MembershipIdentifier": "The identifier for a membership resource.", - "Parameters": "Specifies the epislon and noise parameters for the privacy budget template.", + "Parameters": "Specifies the epsilon and noise parameters for the privacy budget template.", "PrivacyBudgetType": "Specifies the type of the privacy budget template.", "Tags": "" }, @@ -5742,7 +6156,7 @@ "RetainStacksOnAccountRemoval": "If set to `true` , stack resources are retained when an account is removed from a target organization or OU. If set to `false` , stack resources are deleted. Specify only if `Enabled` is set to `True` ." }, "AWS::CloudFormation::StackSet DeploymentTargets": { - "AccountFilterType": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSets deploys to the accounts specified in `Accounts` parameter.\n- `DIFFERENCE` : StackSets excludes the accounts specified in `Accounts` parameter. This enables user to avoid certain accounts within an OU such as suspended accounts.\n- `UNION` : StackSets includes additional accounts deployment targets.\n\nThis is the default value if `AccountFilterType` is not provided. This enables user to update an entire OU and individual accounts from a different OU in one request, which used to be two separate requests.\n- `NONE` : Deploys to all the accounts in specified organizational units (OU).", + "AccountFilterType": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSet deploys to the accounts specified in the `Accounts` parameter.\n- `DIFFERENCE` : StackSet deploys to the OU, excluding the accounts specified in the `Accounts` parameter.\n- `UNION` StackSet deploys to the OU, and the accounts specified in the `Accounts` parameter. 
`UNION` is not supported for create operations when using StackSet as a resource.", "Accounts": "The names of one or more AWS accounts for which you want to deploy stack set updates.\n\n*Pattern* : `^[0-9]{12}$`", "AccountsUrl": "Returns the value of the `AccountsUrl` property.", "OrganizationalUnitIds": "The organization root ID or organizational unit (OU) IDs to which StackSets deploys.\n\n*Pattern* : `^(ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}|r-[a-z0-9]{4,32})$`" @@ -5850,16 +6264,16 @@ "Value": "The request header value." }, "AWS::CloudFront::ContinuousDeploymentPolicy SingleHeaderPolicyConfig": { - "Header": "", - "Value": "" + "Header": "The name of the HTTP header that CloudFront uses to configure for the single header policy.", + "Value": "Specifies the value to assign to the header for a single header policy." }, "AWS::CloudFront::ContinuousDeploymentPolicy SingleWeightConfig": { "SessionStickinessConfig": "Session stickiness provides the ability to define multiple requests from a single viewer as a single session. This prevents the potentially inconsistent experience of sending some of a given user's requests to your staging distribution, while others are sent to your primary distribution. Define the session duration using TTL values.", "Weight": "The percentage of traffic to send to a staging distribution, expressed as a decimal number between 0 and 0.15. For example, a value of 0.10 means 10% of traffic is sent to the staging distribution." }, "AWS::CloudFront::ContinuousDeploymentPolicy SingleWeightPolicyConfig": { - "SessionStickinessConfig": "", - "Weight": "" + "SessionStickinessConfig": "Enable session stickiness for the associated origin or cache settings.", + "Weight": "The percentage of requests that CloudFront will use to send to an associated origin or cache settings." }, "AWS::CloudFront::ContinuousDeploymentPolicy TrafficConfig": { "SingleHeaderConfig": "Determines which HTTP requests are sent to the staging distribution.", @@ -5933,12 +6347,12 @@ }, "AWS::CloudFront::Distribution DistributionConfig": { "Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", - "CNAMEs": "", + "CNAMEs": "An alias for the CloudFront distribution's domain name.\n\n> This property is legacy. We recommend that you use [Aliases](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-distributionconfig.html#cfn-cloudfront-distribution-distributionconfig-aliases) instead.", "CacheBehaviors": "A complex type that contains zero or more `CacheBehavior` elements.", "Comment": "A comment to describe the distribution. The comment cannot be longer than 128 characters.", "ContinuousDeploymentPolicyId": "The identifier of a continuous deployment policy. For more information, see `CreateContinuousDeploymentPolicy` .", "CustomErrorResponses": "A complex type that controls the following:\n\n- Whether CloudFront replaces HTTP status codes in the 4xx and 5xx range with custom error messages before returning the response to the viewer.\n- How long CloudFront caches HTTP status codes in the 4xx and 5xx range.\n\nFor more information about custom error pages, see [Customizing Error Responses](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) in the *Amazon CloudFront Developer Guide* .", - "CustomOrigin": "", + "CustomOrigin": "The user-defined HTTP server that serves as the origin for content that CloudFront distributes.\n\n> This property is legacy. 
We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", "DefaultCacheBehavior": "A complex type that describes the default cache behavior if you don't specify a `CacheBehavior` element or if files don't match any of the values of `PathPattern` in `CacheBehavior` elements. You must create exactly one default cache behavior.", "DefaultRootObject": "The object that you want CloudFront to request from your origin (for example, `index.html` ) when a viewer requests the root URL for your distribution ( `https://www.example.com` ) instead of an object in your distribution ( `https://www.example.com/product-description.html` ). Specifying a default root object avoids exposing the contents of your distribution.\n\nSpecify only the object name, for example, `index.html` . Don't add a `/` before the object name.\n\nIf you don't want to specify a default root object when you create a distribution, include an empty `DefaultRootObject` element.\n\nTo delete the default root object from an existing distribution, update the distribution configuration and include an empty `DefaultRootObject` element.\n\nTo replace the default root object, update the distribution configuration and specify the new object.\n\nFor more information about the default root object, see [Creating a Default Root Object](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/DefaultRootObject.html) in the *Amazon CloudFront Developer Guide* .", "Enabled": "From this field, you can enable or disable the selected distribution.", @@ -5949,7 +6363,7 @@ "Origins": "A complex type that contains information about origins for this distribution.\n\nSpecify a value for either the `Origins` or `OriginGroups` property.", "PriceClass": "The price class that corresponds with the maximum price that you want to pay for CloudFront service. If you specify `PriceClass_All` , CloudFront responds to requests for your objects from all CloudFront edge locations.\n\nIf you specify a price class other than `PriceClass_All` , CloudFront serves your objects from the CloudFront edge location that has the lowest latency among the edge locations in your price class. Viewers who are in or near regions that are excluded from your specified price class may encounter slower performance.\n\nFor more information about price classes, see [Choosing the Price Class for a CloudFront Distribution](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PriceClass.html) in the *Amazon CloudFront Developer Guide* . For information about CloudFront pricing, including how price classes (such as Price Class 100) map to CloudFront regions, see [Amazon CloudFront Pricing](https://docs.aws.amazon.com/cloudfront/pricing/) .", "Restrictions": "A complex type that identifies ways in which you want to restrict distribution of your content.", - "S3Origin": "", + "S3Origin": "The origin as an Amazon S3 bucket.\n\n> This property is legacy. We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", "Staging": "A Boolean that indicates whether this is a staging distribution. When this value is `true` , this is a staging distribution. 
When this value is `false` , this is not a staging distribution.", "ViewerCertificate": "A complex type that determines the distribution's SSL/TLS configuration for communicating with viewers.", "WebACLId": "A unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution. To specify a web ACL created using the latest version of AWS WAF , use the ACL ARN, for example `arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111` . To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example `a1b2c3d4-5678-90ab-cdef-EXAMPLE11111` .\n\nAWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to CloudFront, and lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, CloudFront responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). You can also configure CloudFront to return a custom error page when a request is blocked. For more information about AWS WAF , see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/what-is-aws-waf.html) ." @@ -5974,15 +6388,15 @@ "LambdaFunctionARN": "The ARN of the Lambda@Edge function. You must specify the ARN of a function version; you can't specify an alias or $LATEST." }, "AWS::CloudFront::Distribution LegacyCustomOrigin": { - "DNSName": "", - "HTTPPort": "", - "HTTPSPort": "", - "OriginProtocolPolicy": "", - "OriginSSLProtocols": "" + "DNSName": "The domain name assigned to your CloudFront distribution.", + "HTTPPort": "The HTTP port that CloudFront uses to connect to the origin. Specify the HTTP port that the origin listens on.", + "HTTPSPort": "The HTTPS port that CloudFront uses to connect to the origin. Specify the HTTPS port that the origin listens on.", + "OriginProtocolPolicy": "Specifies the protocol (HTTP or HTTPS) that CloudFront uses to connect to the origin.", + "OriginSSLProtocols": "The minimum SSL/TLS protocol version that CloudFront uses when communicating with your origin server over HTTPS.\n\nFor more information, see [Minimum Origin SSL Protocol](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginSSLProtocols) in the *Amazon CloudFront Developer Guide* ." }, "AWS::CloudFront::Distribution LegacyS3Origin": { - "DNSName": "", - "OriginAccessIdentity": "" + "DNSName": "The domain name assigned to your CloudFront distribution.", + "OriginAccessIdentity": "The CloudFront origin access identity to associate with the distribution. Use an origin access identity to configure the distribution so that end users can only access objects in an Amazon S3 bucket through CloudFront .\n\n> This property is legacy. We recommend that you use [OriginAccessControl](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudfront-originaccesscontrol.html) instead."
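Because `CNAMEs`, `CustomOrigin`, and `S3Origin` are all flagged as legacy above, a short sketch of the recommended replacement may help; this hypothetical `Origins` fragment for the modern `Origin` property uses a placeholder bucket domain name and origin access identity ID:

"Origins": [
  {
    "Id": "example-s3-origin",
    "DomainName": "example-bucket.s3.us-east-1.amazonaws.com",
    "S3OriginConfig": {
      "OriginAccessIdentity": "origin-access-identity/cloudfront/EXAMPLEID"
    }
  }
]

Roughly speaking, the legacy `LegacyS3Origin` fields map onto this structure: `DNSName` becomes the origin `DomainName`, and `OriginAccessIdentity` moves under `S3OriginConfig` (with origin access control now preferred over origin access identities, as noted above).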
}, "AWS::CloudFront::Distribution Logging": { "Bucket": "The Amazon S3 bucket to store the access logs in, for example, `myawslogbucket.s3.amazonaws.com` .", @@ -6302,7 +6716,7 @@ "AWS::CloudTrail::EventDataStore AdvancedFieldSelector": { "EndsWith": "An operator that includes events that match the last few characters of the event record field specified as the value of `Field` .", "Equals": "An operator that includes events that match the exact value of the event record field specified as the value of `Field` . This is the only valid operator that you can use with the `readOnly` , `eventCategory` , and `resources.type` fields.", - "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", + "Field": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::One::UKey`\n- `AWS::One::User`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::RUM::AppMonitor`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. 
For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", "NotEndsWith": "An operator that excludes events that match the last few characters of the event record field specified as the value of `Field` .", "NotEquals": "An operator that excludes events that match the exact value of the event record field specified as the value of `Field` .", "NotStartsWith": "An operator that excludes events that match the first few characters of the event record field specified as the value of `Field` .", @@ -6344,7 +6758,7 @@ "AWS::CloudTrail::Trail AdvancedFieldSelector": { "EndsWith": "An operator that includes events that match the last few characters of the event record field specified as the value of `Field` .", "Equals": "An operator that includes events that match the exact value of the event record field specified as the value of `Field` . This is the only valid operator that you can use with the `readOnly` , `eventCategory` , and `resources.type` fields.", - "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", + "Field": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::One::UKey`\n- `AWS::One::User`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::RUM::AppMonitor`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. 
For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", "NotEndsWith": "An operator that excludes events that match the last few characters of the event record field specified as the value of `Field` .", "NotEquals": "An operator that excludes events that match the exact value of the event record field specified as the value of `Field` .", "NotStartsWith": "An operator that excludes events that match the first few characters of the event record field specified as the value of `Field` .", @@ -7153,7 +7567,7 @@ "Name": "The name of a pipeline-level variable." }, "AWS::CodePipeline::Webhook": { - "Authentication": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", + "Authentication": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be masked in the response. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", "AuthenticationConfiguration": "Properties that configure the authentication applied to incoming webhook trigger requests. The required properties depend on the authentication type. For GITHUB_HMAC, only the `SecretToken` property must be set. For IP, only the `AllowedIPRange` property must be set to a valid CIDR range. 
For UNAUTHENTICATED, no properties can be set.", "Filters": "A list of rules applied to the body/payload sent in the POST request to a webhook URL. All defined rules must pass for the request to be accepted and the pipeline started.", "Name": "The name of the webhook.", @@ -7164,7 +7578,7 @@ }, "AWS::CodePipeline::Webhook WebhookAuthConfiguration": { "AllowedIPRange": "The property used to configure acceptance of webhooks in an IP address range. For IP, only the `AllowedIPRange` property must be set. This property must be set to a valid CIDR range.", - "SecretToken": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities." + "SecretToken": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be masked in the response." }, "AWS::CodePipeline::Webhook WebhookFilterRule": { "JsonPath": "A JsonPath expression that is applied to the body/payload of the webhook. The value selected by the JsonPath expression must match the value specified in the `MatchEquals` field. Otherwise, the request is ignored. For more information, see [Java JsonPath implementation](https://docs.aws.amazon.com/https://github.com/json-path/JsonPath) in GitHub.", @@ -10299,7 +10713,7 @@ }, "AWS::Deadline::Farm": { "Description": "A description of the farm that helps identify what the farm is used for.", - "DisplayName": "The display name of the farm.", + "DisplayName": "The display name of the farm.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "KmsKeyArn": "The ARN for the KMS key.", "Tags": "The tags to add to your farm. Each tag consists of a tag key and a tag value. Tag keys and values are both required, but tag values can be empty strings." }, @@ -10310,7 +10724,7 @@ "AWS::Deadline::Fleet": { "Configuration": "The configuration details for the fleet.", "Description": "A description that helps identify what the fleet is used for.", - "DisplayName": "The display name of the fleet summary to update.", + "DisplayName": "The display name of the fleet summary to update.\n\n> This field can store any content. 
Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "FarmId": "The farm ID.", "MaxWorkerCount": "The maximum number of workers specified in the fleet.", "MinWorkerCount": "The minimum number of workers in the fleet.", @@ -10408,7 +10822,7 @@ "ProductId": "The product ID." }, "AWS::Deadline::Monitor": { - "DisplayName": "The name of the monitor that displays on the Deadline Cloud console.", + "DisplayName": "The name of the monitor that displays on the Deadline Cloud console.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "IdentityCenterInstanceArn": "The Amazon Resource Name (ARN) of the IAM Identity Center instance responsible for authenticating monitor users.", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role for the monitor. Users of the monitor use this role to access Deadline Cloud resources.", "Subdomain": "The subdomain used for the monitor URL. The full URL of the monitor is subdomain.Region.deadlinecloud.amazonaws.com." @@ -10417,7 +10831,7 @@ "AllowedStorageProfileIds": "The identifiers of the storage profiles that this queue can use to share assets between workers using different operating systems.", "DefaultBudgetAction": "The default action taken on a queue summary if a budget wasn't configured.", "Description": "A description of the queue that helps identify what the queue is used for.", - "DisplayName": "The display name of the queue summary to update.", + "DisplayName": "The display name of the queue summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "FarmId": "The farm ID.", "JobAttachmentSettings": "The job attachment settings. These are the Amazon S3 bucket name and the Amazon S3 prefix.", "JobRunAsUser": "Identifies the user for a job.", @@ -10459,7 +10873,7 @@ "QueueId": "The queue ID." }, "AWS::Deadline::StorageProfile": { - "DisplayName": "The display name of the storage profile summary to update.", + "DisplayName": "The display name of the storage profile summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "FarmId": "The unique identifier of the farm that contains the storage profile.", "FileSystemLocations": "Operating system specific file system path to the storage location.", "OsFamily": "The operating system (OS) family." @@ -11148,7 +11562,7 @@ "Placement": "The location where the instance launched, if applicable.", "Priority": "The priority for the launch template override. The highest priority is launched first.\n\nIf the On-Demand `AllocationStrategy` is set to `prioritized` , EC2 Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity.\n\nIf the Spot `AllocationStrategy` is set to `capacity-optimized-prioritized` , EC2 Fleet uses priority on a best-effort basis to determine which launch template override to use in fulfilling Spot capacity, but optimizes for capacity first.\n\nValid values are whole numbers starting at `0` . The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority. 
You can set the same priority for different launch template overrides.", "SubnetId": "The IDs of the subnets in which to launch the instances. Separate multiple subnet IDs using commas (for example, `subnet-1234abcdeexample1, subnet-0987cdef6example2` ). A request of type `instant` can have only one subnet ID.", - "WeightedCapacity": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour." + "WeightedCapacity": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour." }, "AWS::EC2::EC2Fleet FleetLaunchTemplateSpecificationRequest": { "LaunchTemplateId": "The ID of the launch template.\n\nYou must specify the `LaunchTemplateId` or the `LaunchTemplateName` , but not both.", @@ -11342,7 +11756,7 @@ "AwsService": "Limits which service in AWS that the pool can be used in. \"ec2\", for example, allows users to use space for Elastic IP addresses and VPCs.", "Description": "The description of the IPAM pool.", "IpamScopeId": "The ID of the scope in which you would like to create the IPAM pool.", - "Locale": "The locale of the IPAM pool. In IPAM, the locale is the AWS Region where you want to make an IPAM pool available for allocations. Only resources in the same Region as the locale of the pool can get IP address allocations from the pool. You can only allocate a CIDR for a VPC, for example, from an IPAM pool that shares a locale with the VPC\u2019s Region. Note that once you choose a Locale for a pool, you cannot modify it. If you choose an AWS Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.", + "Locale": "The locale of the IPAM pool.\n\nThe locale for the pool should be one of the following:\n\n- An AWS Region where you want this IPAM pool to be available for allocations.\n- The network border group for an AWS Local Zone where you want this IPAM pool to be available for allocations ( [supported Local Zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#byoip-zone-avail) ). This option is only available for IPAM IPv4 pools in the public scope.\n\nIf you choose an AWS Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.", "ProvisionedCidrs": "Information about the CIDRs provisioned to an IPAM pool.", "PublicIpSource": "The IP address source for pools in the public scope. Only used for provisioning IP address CIDRs to pools in the public scope. Default is `BYOIP` . 
For more information, see [Create IPv6 pools](https://docs.aws.amazon.com//vpc/latest/ipam/intro-create-ipv6-pools.html) in the *Amazon VPC IPAM User Guide* . By default, you can add only one Amazon-provided IPv6 CIDR block to a top-level IPv6 pool. For information on increasing the default limit, see [Quotas for your IPAM](https://docs.aws.amazon.com//vpc/latest/ipam/quotas-ipam.html) in the *Amazon VPC IPAM User Guide* .", "PubliclyAdvertisable": "Determines if a pool is publicly advertisable. This option is not available for pools with AddressFamily set to `ipv4` .", @@ -12387,7 +12801,7 @@ "Priority": "The priority for the launch template override. The highest priority is launched first.\n\nIf `OnDemandAllocationStrategy` is set to `prioritized` , Spot Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity.\n\nIf the Spot `AllocationStrategy` is set to `capacityOptimizedPrioritized` , Spot Fleet uses priority on a best-effort basis to determine which launch template override to use in fulfilling Spot capacity, but optimizes for capacity first.\n\nValid values are whole numbers starting at `0` . The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority. You can set the same priority for different launch template overrides.", "SpotPrice": "The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.\n\n> If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.", "SubnetId": "The ID of the subnet in which to launch the instances.", - "WeightedCapacity": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour." + "WeightedCapacity": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowestPrice` and `priceCapacityOptimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour." }, "AWS::EC2::SpotFleet LoadBalancersConfig": { "ClassicLoadBalancersConfig": "The Classic Load Balancers.", @@ -12435,7 +12849,7 @@ "SubnetId": "The IDs of the subnets in which to launch the instances. 
To specify multiple subnets, separate them using commas; for example, \"subnet-1234abcdeexample1, subnet-0987cdef6example2\".\n\nIf you specify a network interface, you must specify any subnets as part of the network interface instead of using this parameter.", "TagSpecifications": "The tags to apply during creation.", "UserData": "The base64-encoded user data that instances use when starting up. User data is limited to 16 KB.", - "WeightedCapacity": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1." + "WeightedCapacity": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowestPrice` and `priceCapacityOptimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour." }, "AWS::EC2::SpotFleet SpotFleetMonitoring": { "Enabled": "Enables monitoring for the instance.\n\nDefault: `false`" @@ -12500,7 +12914,7 @@ "AvailabilityZone": "The Availability Zone of the subnet.\n\nIf you update this property, you must also update the `CidrBlock` property.", "AvailabilityZoneId": "The AZ ID of the subnet.", "CidrBlock": "The IPv4 CIDR block assigned to the subnet.\n\nIf you update this property, we create a new subnet, and then delete the existing one.", - "EnableDns64": "Indicates whether DNS queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses for IPv4-only destinations. For more information, see [DNS64 and NAT64](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-nat64-dns64) in the *Amazon Virtual Private Cloud User Guide* .", + "EnableDns64": "Indicates whether DNS queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses for IPv4-only destinations.\n\n> You must first configure a NAT gateway in a public subnet (separate from the subnet containing the IPv6-only workloads). For example, the subnet containing the NAT gateway should have a `0.0.0.0/0` route pointing to the internet gateway. For more information, see [Configure DNS64 and NAT64](https://docs.aws.amazon.com/vpc/latest/userguide/nat-gateway-nat64-dns64.html#nat-gateway-nat64-dns64-walkthrough) in the *Amazon Virtual Private Cloud User Guide* .", "EnableLniAtDeviceIndex": "Indicates the device position for local network interfaces in this subnet. 
For example, `1` indicates local network interfaces in this subnet are the secondary network interface (eth1).", "Ipv4IpamPoolId": "An IPv4 IPAM pool ID for the subnet.", "Ipv4NetmaskLength": "An IPv4 netmask length for the subnet.", @@ -12798,6 +13212,7 @@ }, "AWS::EC2::VPNConnection": { "CustomerGatewayId": "The ID of the customer gateway at your end of the VPN connection.", + "EnableAcceleration": "", "StaticRoutesOnly": "Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.\n\nIf you are creating a VPN connection for a device that does not support Border Gateway Protocol (BGP), you must specify `true` .", "Tags": "Any tags assigned to the VPN connection.", "TransitGatewayId": "The ID of the transit gateway associated with the VPN connection.\n\nYou must specify either `TransitGatewayId` or `VpnGatewayId` , but not both.", @@ -13031,7 +13446,7 @@ "Tags": "An array of key-value pairs to apply to this resource." }, "AWS::ECR::Repository EncryptionConfiguration": { - "EncryptionType": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", + "EncryptionType": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", "KmsKey": "If you use the `KMS` encryption type, specify the AWS KMS key to use for encryption. The alias, key ID, or full ARN of the AWS KMS key can be specified. 
The key must exist in the same Region as the repository. If no key is specified, the default AWS managed AWS KMS key for Amazon ECR will be used."
 },
 "AWS::ECR::Repository ImageScanningConfiguration": {
@@ -13046,17 +13461,18 @@
   "Value": "A `value` acts as a descriptor within a tag category (key)."
 },
 "AWS::ECR::RepositoryCreationTemplate": {
-  "AppliedFor": "",
-  "Description": "",
-  "EncryptionConfiguration": "The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest.\n\nBy default, when no encryption configuration is set or the `AES256` encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts your data at rest using an AES-256 encryption algorithm. This does not require any action on your part.\n\nFor more control over the encryption of the contents of your repository, you can use server-side encryption with AWS Key Management Service key stored in AWS Key Management Service ( AWS KMS ) to encrypt your images. For more information, see [Amazon ECR encryption at rest](https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) in the *Amazon Elastic Container Registry User Guide* .",
-  "ImageTagMutability": "",
-  "LifecyclePolicy": "",
-  "Prefix": "",
-  "RepositoryPolicy": "",
-  "ResourceTags": "The tags attached to the resource."
+  "AppliedFor": "A list of enumerable Strings representing the repository creation scenarios that this template will apply towards. The two supported scenarios are `PULL_THROUGH_CACHE` and `REPLICATION` .",
+  "CustomRoleArn": "The ARN of the role to be assumed by Amazon ECR. Amazon ECR will assume your supplied role when the customRoleArn is specified. When this field isn't specified, Amazon ECR will use the service-linked role for the repository creation template.",
+  "Description": "The description associated with the repository creation template.",
+  "EncryptionConfiguration": "The encryption configuration associated with the repository creation template.",
+  "ImageTagMutability": "The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten.",
+  "LifecyclePolicy": "The lifecycle policy to use for repositories created using the template.",
+  "Prefix": "The repository namespace prefix associated with the repository creation template.",
+  "RepositoryPolicy": "The repository policy to apply to repositories created using the template. A repository policy is a permissions policy associated with a repository to control access permissions.",
+  "ResourceTags": "The metadata to apply to the repository to help you categorize and organize. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters."
 },
 "AWS::ECR::RepositoryCreationTemplate EncryptionConfiguration": {
-  "EncryptionType": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. 
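Pulling the `AWS::ECR::RepositoryCreationTemplate` properties above into one place, a sketch with illustrative values (the prefix and description are assumptions, not required names):

```yaml
CacheRepoTemplate:
  Type: AWS::ECR::RepositoryCreationTemplate
  Properties:
    Prefix: ecr-public                # namespace this template applies to
    AppliedFor:
      - PULL_THROUGH_CACHE            # or REPLICATION
    Description: Defaults for cached upstream images
    ImageTagMutability: IMMUTABLE     # tags cannot be overwritten
    EncryptionConfiguration:
      EncryptionType: AES256          # S3-managed keys; KMS also supported
```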
For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", + "EncryptionType": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", "KmsKey": "If you use the `KMS` encryption type, specify the AWS KMS key to use for encryption. The alias, key ID, or full ARN of the AWS KMS key can be specified. The key must exist in the same Region as the repository. If no key is specified, the default AWS managed AWS KMS key for Amazon ECR will be used." }, "AWS::ECR::RepositoryCreationTemplate Tag": { @@ -13661,6 +14077,7 @@ "ResourcesVpcConfig": "The VPC configuration that's used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html) and [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) in the *Amazon EKS User Guide* . You must specify at least two subnets. You can specify up to five security groups, but we recommend that you use a dedicated security group for your cluster control plane.", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf. For more information, see [Amazon EKS Service IAM Role](https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html) in the **Amazon EKS User Guide** .", "Tags": "The metadata that you apply to the cluster to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. 
Cluster tags don't propagate to any other resources associated with the cluster.\n\n> You must have the `eks:TagResource` and `eks:UntagResource` permissions for your [IAM principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the AWS CloudFormation stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update.", + "UpgradePolicy": "This value indicates if extended support is enabled or disabled for the cluster.\n\n[Learn more about EKS Extended Support in the EKS User Guide.](https://docs.aws.amazon.com/eks/latest/userguide/extended-support-control.html)", "Version": "The desired Kubernetes version for your cluster. If you don't specify a value here, the default version available in Amazon EKS is used.\n\n> The default version might not be the latest version available." }, "AWS::EKS::Cluster AccessConfig": { @@ -13707,6 +14124,9 @@ "Key": "One part of a key-value pair that make up a tag. A `key` is a general label that acts like a category for more specific tag values.", "Value": "The optional part of a key-value pair that make up a tag. A `value` acts as a descriptor within a tag category (key)." }, + "AWS::EKS::Cluster UpgradePolicy": { + "SupportType": "" + }, "AWS::EKS::FargateProfile": { "ClusterName": "The name of your cluster.", "FargateProfileName": "The name of the Fargate profile.", @@ -14221,6 +14641,7 @@ "AutoStopConfiguration": "The configuration for an application to automatically stop after a certain amount of time being idle.", "ImageConfiguration": "The image configuration applied to all worker types.", "InitialCapacity": "The initial capacity of the application.", + "InteractiveConfiguration": "The interactive configuration object that enables the interactive use cases for an application.", "MaximumCapacity": "The maximum capacity of the application. This is cumulative across all workers at any given point in time during the lifespan of the application is created. No new resources will be created once any one of the defined limits is hit.", "MonitoringConfiguration": "A configuration specification to be used when provisioning an application. A configuration consists of a classification, properties, and optional nested configurations. A classification refers to an application-specific configuration file. Properties are the settings you want to change in that file.", "Name": "The name of the application.", @@ -14261,6 +14682,10 @@ "Key": "", "Value": "" }, + "AWS::EMRServerless::Application InteractiveConfiguration": { + "LivyEndpointEnabled": "Enables an Apache Livy endpoint that you can connect to and run interactive jobs.", + "StudioEnabled": "Enables you to connect an application to Amazon EMR Studio to run interactive workloads in a notebook." + }, "AWS::EMRServerless::Application LogTypeMapKeyValuePair": { "Key": "", "Value": "" @@ -14302,25 +14727,25 @@ }, "AWS::ElastiCache::CacheCluster": { "AZMode": "Specifies whether the nodes in this Memcached cluster are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region.\n\nThis parameter is only supported for Memcached clusters.\n\nIf the `AZMode` and `PreferredAvailabilityZones` are not specified, ElastiCache assumes `single-az` mode.", - "AutoMinorVersionUpgrade": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. 
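The `InteractiveConfiguration` entries above translate into a small application-level block; a sketch under assumed values (the name and release label are placeholders; pick a release available in your Region):

```yaml
InteractiveApp:
  Type: AWS::EMRServerless::Application
  Properties:
    Name: notebook-app                # placeholder
    Type: SPARK
    ReleaseLabel: emr-7.1.0           # assumed release label
    InteractiveConfiguration:
      StudioEnabled: true             # run notebooks from EMR Studio
      LivyEndpointEnabled: true       # expose an Apache Livy endpoint
```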
This parameter is disabled for previous versions.", - "CacheNodeType": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . 
\n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis configuration variables `appendonly` and `appendfsync` are not supported on Redis version 2.8.22 and later.", + "AutoMinorVersionUpgrade": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "CacheNodeType": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. 
If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . \n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis OSS configuration variables `appendonly` and `appendfsync` are not supported on Redis OSS version 2.8.22 and later.", "CacheParameterGroupName": "The name of the parameter group to associate with this cluster. If this argument is omitted, the default parameter group for the specified engine is used. 
You cannot use any parameter group which has `cluster-enabled='yes'` when creating a cluster.", "CacheSecurityGroupNames": "A list of security group names to associate with this cluster.\n\nUse this parameter only when you are creating a cluster outside of an Amazon Virtual Private Cloud (Amazon VPC).", "CacheSubnetGroupName": "The name of the subnet group to be used for the cluster.\n\nUse this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC).\n\n> If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see `[AWS::ElastiCache::SubnetGroup](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-subnetgroup.html) .`", "ClusterName": "A name for the cache cluster. If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the cache cluster. For more information, see [Name Type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) .\n\nThe name must contain 1 to 50 alphanumeric characters or hyphens. The name must start with a letter and cannot end with a hyphen or contain two consecutive hyphens.", "Engine": "The name of the cache engine to be used for this cluster.\n\nValid values for this parameter are: `memcached` | `redis`", "EngineVersion": "The version number of the cache engine to be used for this cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.\n\n*Important:* You can upgrade to a newer engine version (see [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement) ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.", - "IpDiscovery": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "IpDiscovery": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "LogDeliveryConfigurations": "Specifies the destination, format and type of the logs.", - "NetworkType": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "NetworkType": "Must be either `ipv4` | `ipv6` | `dual_stack` . 
IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "NotificationTopicArn": "The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.\n\n> The Amazon SNS topic owner must be the same as the cluster owner.", "NumCacheNodes": "The number of cache nodes that the cache cluster should have.\n\n> However, if the `PreferredAvailabilityZone` and `PreferredAvailabilityZones` properties were not previously specified and you don't specify any new values, an update requires [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "Port": "The port number on which each of the cache nodes accepts connections.", "PreferredAvailabilityZone": "The EC2 Availability Zone in which the cluster is created.\n\nAll nodes belonging to this cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use `PreferredAvailabilityZones` .\n\nDefault: System chosen Availability Zone.", "PreferredAvailabilityZones": "A list of the Availability Zones in which cache nodes are created. The order of the zones in the list is not important.\n\nThis option is only supported on Memcached.\n\n> If you are creating your cluster in an Amazon VPC (recommended) you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group.\n> \n> The number of Availability Zones listed must equal the value of `NumCacheNodes` . \n\nIf you want all the nodes in the same Availability Zone, use `PreferredAvailabilityZone` instead, or repeat the Availability Zone multiple times in the list.\n\nDefault: System chosen Availability Zones.", "PreferredMaintenanceWindow": "Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.\n\nValid values for `ddd` are:\n\n- `sun`\n- `mon`\n- `tue`\n- `wed`\n- `thu`\n- `fri`\n- `sat`\n\nExample: `sun:23:00-mon:01:30`", - "SnapshotArns": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", - "SnapshotName": "The name of a Redis snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", + "SnapshotArns": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . 
\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "SnapshotName": "The name of a Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", "SnapshotRetentionLimit": "The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set `SnapshotRetentionLimit` to 5, a snapshot taken today is retained for 5 days before being deleted.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nDefault: 0 (i.e., automatic backups are disabled for this cache cluster).", "SnapshotWindow": "The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).\n\nExample: `05:00-09:00`\n\nIf you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", "Tags": "A list of tags to be added to this resource.", @@ -14348,10 +14773,10 @@ "Value": "The tag's value. May be null." }, "AWS::ElastiCache::GlobalReplicationGroup": { - "AutomaticFailoverEnabled": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis (cluster mode enabled) replication groups.", + "AutomaticFailoverEnabled": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.", "CacheNodeType": "The cache node type of the Global datastore", "CacheParameterGroupName": "The name of the cache parameter group to use with the Global datastore. It must be compatible with the major engine version used by the Global datastore.", - "EngineVersion": "The Elasticache Redis engine version.", + "EngineVersion": "The Elasticache Redis OSS engine version.", "GlobalNodeGroupCount": "The number of node groups that comprise the Global Datastore.", "GlobalReplicationGroupDescription": "The optional description of the Global datastore", "GlobalReplicationGroupIdSuffix": "The suffix name of a Global Datastore. The suffix guarantees uniqueness of the Global Datastore name across multiple regions.", @@ -14369,7 +14794,7 @@ "ReshardingConfigurations": "A list of PreferredAvailabilityZones objects that specifies the configuration of a node group in the resharded cluster." }, "AWS::ElastiCache::GlobalReplicationGroup ReshardingConfiguration": { - "NodeGroupId": "Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "NodeGroupId": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "PreferredAvailabilityZones": "A list of preferred availability zones for the nodes in this cluster." }, "AWS::ElastiCache::ParameterGroup": { @@ -14383,28 +14808,28 @@ "Value": "The tag's value. May be null." }, "AWS::ElastiCache::ReplicationGroup": { - "AtRestEncryptionEnabled": "A flag that enables encryption at rest when set to `true` .\n\nYou cannot modify the value of `AtRestEncryptionEnabled` after the replication group is created. 
To enable encryption at rest on a replication group you must set `AtRestEncryptionEnabled` to `true` when you create the replication group.\n\n*Required:* Only available when creating a replication group in an Amazon VPC using redis version `3.2.6` or `4.x` onward.\n\nDefault: `false`", - "AuthToken": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", - "AutoMinorVersionUpgrade": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", - "AutomaticFailoverEnabled": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis (cluster mode enabled) replication groups.\n\nDefault: false", + "AtRestEncryptionEnabled": "A flag that enables encryption at rest when set to `true` .\n\nYou cannot modify the value of `AtRestEncryptionEnabled` after the replication group is created. To enable encryption at rest on a replication group you must set `AtRestEncryptionEnabled` to `true` when you create the replication group.\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`", + "AuthToken": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis OSS AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "AutoMinorVersionUpgrade": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. 
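The `AtRestEncryptionEnabled` , `AuthToken` , and transit-encryption descriptions above interlock: the HIPAA note requires transit encryption, an auth token, and a subnet group together. A hedged sketch (the subnet group and the Secrets Manager secret are assumptions; a literal password would also work but is not recommended):

```yaml
SecureReplicationGroup:
  Type: AWS::ElastiCache::ReplicationGroup
  Properties:
    ReplicationGroupDescription: Encrypted group with Redis AUTH
    Engine: redis
    CacheNodeType: cache.t4g.medium
    NumCacheClusters: 2                        # at least 2 when failover is enabled
    AutomaticFailoverEnabled: true
    CacheSubnetGroupName: !Ref MySubnetGroup   # assumed AWS::ElastiCache::SubnetGroup
    AtRestEncryptionEnabled: true              # can only be set at creation time
    TransitEncryptionEnabled: true             # prerequisite for AuthToken
    AuthToken: '{{resolve:secretsmanager:MyRedisAuth:SecretString:token}}'  # assumed secret
```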
This parameter is disabled for previous versions.", + "AutomaticFailoverEnabled": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.\n\nDefault: false", "CacheNodeType": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.12xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . \n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.12xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Amazon Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)", - "CacheParameterGroupName": "The name of the parameter group to associate with this replication group. 
If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", + "CacheParameterGroupName": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis OSS (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis OSS (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", "CacheSecurityGroupNames": "A list of cache security group names to associate with this replication group.", "CacheSubnetGroupName": "The name of the cache subnet group to be used for the replication group.\n\n> If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see [AWS::ElastiCache::SubnetGroup](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-subnetgroup.html) .", - "ClusterMode": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", + "ClusterMode": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", "DataTieringEnabled": "Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see [Data tiering](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html) .", "Engine": "The name of the cache engine to be used for the clusters in this replication group. The value must be set to `Redis` .", "EngineVersion": "The version number of the cache engine to be used for the clusters in this replication group. 
To view the supported cache engine versions, use the `DescribeCacheEngineVersions` operation.\n\n*Important:* You can upgrade to a newer engine version (see [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement) ) in the *ElastiCache User Guide* , but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.", "GlobalReplicationGroupId": "The name of the Global datastore", - "IpDiscovery": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "IpDiscovery": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "KmsKeyId": "The ID of the KMS key used to encrypt the disk on the cluster.", "LogDeliveryConfigurations": "Specifies the destination, format and type of the logs.", "MultiAZEnabled": "A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see [Minimizing Downtime: Multi-AZ](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html) .", - "NetworkType": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", - "NodeGroupConfiguration": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "NetworkType": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "NodeGroupConfiguration": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis OSS cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. 
When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "NotificationTopicArn": "The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.\n\n> The Amazon SNS topic owner must be the same as the cluster owner.", "NumCacheClusters": "The number of clusters this replication group initially has.\n\nThis parameter is not used if there is more than one node group (shard). You should use `ReplicasPerNodeGroup` instead.\n\nIf `AutomaticFailoverEnabled` is `true` , the value of this parameter must be at least 2. If `AutomaticFailoverEnabled` is `false` you can omit this parameter (it will default to 1), or you can explicitly set it to a value between 2 and 6.\n\nThe maximum permitted value for `NumCacheClusters` is 6 (1 primary plus 5 replicas).", - "NumNodeGroups": "An optional parameter that specifies the number of node groups (shards) for this Redis (cluster mode enabled) replication group. For Redis (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", + "NumNodeGroups": "An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", "Port": "The port number on which each member of the replication group accepts connections.", "PreferredCacheClusterAZs": "A list of EC2 Availability Zones in which the replication group's clusters are created. The order of the Availability Zones in the list is the order in which clusters are allocated. The primary cluster is created in the first AZ in the list.\n\nThis parameter is not used if there is more than one node group (shard). You should use `NodeGroupConfiguration` instead.\n\n> If you are creating your replication group in an Amazon VPC (recommended), you can only locate clusters in Availability Zones associated with the subnets in the selected subnet group.\n> \n> The number of Availability Zones listed must equal the value of `NumCacheClusters` . \n\nDefault: system chosen Availability Zones.", "PreferredMaintenanceWindow": "Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). 
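Both `NodeGroupConfiguration` and `NumNodeGroups` above defer to the stack-level `UseOnlineResharding` update policy, which is what turns a shard-count change into an online reshard rather than a replacement. A sketch of where that attribute sits (values illustrative; the parameter group name is an assumption):

```yaml
ShardedGroup:
  Type: AWS::ElastiCache::ReplicationGroup
  UpdatePolicy:
    UseOnlineResharding: true         # reshard in place on NumNodeGroups updates
  Properties:
    ReplicationGroupDescription: Cluster-mode enabled group
    Engine: redis
    CacheNodeType: cache.r6g.large
    NumNodeGroups: 2                  # the value you later scale up or down
    ReplicasPerNodeGroup: 1
    CacheParameterGroupName: default.redis7.cluster.on   # assumed default group
```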
The minimum maintenance window is a 60 minute period.\n\nValid values for `ddd` are:\n\n- `sun`\n- `mon`\n- `tue`\n- `wed`\n- `thu`\n- `fri`\n- `sat`\n\nExample: `sun:23:00-mon:01:30`", @@ -14413,14 +14838,14 @@ "ReplicationGroupDescription": "A user-created description for the replication group.", "ReplicationGroupId": "The replication group identifier. This parameter is stored as a lowercase string.\n\nConstraints:\n\n- A name must contain from 1 to 40 alphanumeric characters or hyphens.\n- The first character must be a letter.\n- A name cannot end with a hyphen or contain two consecutive hyphens.", "SecurityGroupIds": "One or more Amazon VPC security groups associated with this replication group.\n\nUse this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud (Amazon VPC).", - "SnapshotArns": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "SnapshotArns": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "SnapshotName": "The name of a snapshot from which to restore data into the new replication group. The snapshot status changes to `restoring` while the new replication group is being created.", "SnapshotRetentionLimit": "The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set `SnapshotRetentionLimit` to 5, a snapshot that was taken today is retained for 5 days before being deleted.\n\nDefault: 0 (i.e., automatic backups are disabled for this cluster).", "SnapshotWindow": "The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).\n\nExample: `05:00-09:00`\n\nIf you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.", - "SnapshottingClusterId": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups.", + "SnapshottingClusterId": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups.", "Tags": "A list of tags to be added to this resource. Tags are comma-separated key,value pairs (e.g. Key= `myKey` , Value= `myKeyValue` . You can include multiple tags as shown following: Key= `myKey` , Value= `myKeyValue` Key= `mySecondKey` , Value= `mySecondKeyValue` . 
Tags on replication groups will be replicated to all nodes.", - "TransitEncryptionEnabled": "A flag that enables in-transit encryption when set to `true` .\n\nYou cannot modify the value of `TransitEncryptionEnabled` after the cluster is created. To enable in-transit encryption on a cluster you must set `TransitEncryptionEnabled` to `true` when you create a cluster.\n\nThis parameter is valid only if the `Engine` parameter is `redis` , the `EngineVersion` parameter is `3.2.6` or `4.x` onward, and the cluster is being created in an Amazon VPC.\n\nIf you enable in-transit encryption, you must also specify a value for `CacheSubnetGroup` .\n\n*Required:* Only available when creating a replication group in an Amazon VPC using redis version `3.2.6` or `4.x` onward.\n\nDefault: `false`\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` .", - "TransitEncryptionMode": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", + "TransitEncryptionEnabled": "A flag that enables in-transit encryption when set to `true` .\n\nYou cannot modify the value of `TransitEncryptionEnabled` after the cluster is created. To enable in-transit encryption on a cluster you must set `TransitEncryptionEnabled` to `true` when you create a cluster.\n\nThis parameter is valid only if the `Engine` parameter is `redis` , the `EngineVersion` parameter is `3.2.6` or `4.x` onward, and the cluster is being created in an Amazon VPC.\n\nIf you enable in-transit encryption, you must also specify a value for `CacheSubnetGroup` .\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` .", + "TransitEncryptionMode": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis OSS clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", "UserGroupIds": "The ID of user group to associate with the replication group." 
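The two-step `TransitEncryptionMode` migration just described maps onto two successive stack updates; a minimal sketch of step one, after which a second update flips `preferred` to `required` :

```yaml
MigratingGroup:
  Type: AWS::ElastiCache::ReplicationGroup
  Properties:
    ReplicationGroupDescription: Migrating clients to TLS
    Engine: redis
    CacheNodeType: cache.t4g.medium
    NumCacheClusters: 2
    TransitEncryptionEnabled: true
    TransitEncryptionMode: preferred  # step 2: change to required once clients use TLS
```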
}, "AWS::ElastiCache::ReplicationGroup CloudWatchLogsDestinationDetails": { @@ -14440,7 +14865,7 @@ "LogType": "Valid value is either `slow-log` , which refers to [slow-log](https://docs.aws.amazon.com/https://redis.io/commands/slowlog) or `engine-log` ." }, "AWS::ElastiCache::ReplicationGroup NodeGroupConfiguration": { - "NodeGroupId": "Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "NodeGroupId": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "PrimaryAvailabilityZone": "The Availability Zone where the primary node of this node group (shard) is launched.", "ReplicaAvailabilityZones": "A list of Availability Zones to be used for the read replicas. The number of Availability Zones in this list must match the value of `ReplicaCount` or `ReplicasPerNodeGroup` if not specified.", "ReplicaCount": "The number of read replica nodes in this node group (shard).", @@ -14465,7 +14890,7 @@ }, "AWS::ElastiCache::ServerlessCache": { "CacheUsageLimits": "The cache usage limit for the serverless cache.", - "DailySnapshotTime": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis and Serverless Memcached only.", + "DailySnapshotTime": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis OSS and Serverless Memcached only.", "Description": "A description of the serverless cache.", "Endpoint": "Represents the information required for client programs to connect to a cache node. This value is read-only.", "Engine": "The engine the serverless cache is compatible with.", @@ -14476,10 +14901,10 @@ "SecurityGroupIds": "The IDs of the EC2 security groups associated with the serverless cache.", "ServerlessCacheName": "The unique identifier of the serverless cache.", "SnapshotArnsToRestore": "The ARN of the snapshot from which to restore data into the new cache.", - "SnapshotRetentionLimit": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis and Serverless Memcached only.", + "SnapshotRetentionLimit": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis OSS and Serverless Memcached only.", "SubnetIds": "If no subnet IDs are given and your VPC is in us-west-1, then ElastiCache will select 2 default subnets across AZs in your VPC. For all other Regions, if no subnet IDs are given then ElastiCache will select 3 default subnets across AZs in your default VPC.", "Tags": "A list of tags to be added to this resource.", - "UserGroupId": "The identifier of the user group associated with the serverless cache. Available for Redis only. Default is NULL." + "UserGroupId": "The identifier of the user group associated with the serverless cache. Available for Redis OSS only. Default is NULL." 
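Most of the serverless-cache properties above are optional; a hedged minimal sketch (the name, snapshot window, and limits are illustrative, and the `CacheUsageLimits` sub-fields follow the definitions that come next):

```yaml
AppServerlessCache:
  Type: AWS::ElastiCache::ServerlessCache
  Properties:
    ServerlessCacheName: app-cache    # illustrative
    Engine: redis
    DailySnapshotTime: '04:00'        # Redis OSS / serverless Memcached only
    SnapshotRetentionLimit: 5
    CacheUsageLimits:
      DataStorage:
        Maximum: 10                   # gigabytes
        Unit: GB
      ECPUPerSecond:
        Maximum: 5000
```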
}, "AWS::ElastiCache::ServerlessCache CacheUsageLimits": { "DataStorage": "The maximum data storage limit in the cache, expressed in Gigabytes.", @@ -15048,7 +15473,7 @@ }, "AWS::EntityResolution::IdMappingWorkflow": { "Description": "A description of the workflow.", - "IdMappingTechniques": "An object which defines the `idMappingType` and the `providerProperties` .", + "IdMappingTechniques": "An object which defines the ID mapping technique and any additional configurations.", "InputSourceConfig": "A list of `InputSource` objects, which have the fields `InputSourceARN` and `SchemaName` .", "OutputSourceConfig": "A list of `IdMappingWorkflowOutputSource` objects, each of which contains fields `OutputS3Path` and `Output` .", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to create resources on your behalf as part of workflow execution.", @@ -15056,21 +15481,20 @@ "WorkflowName": "The name of the workflow. There can't be multiple `IdMappingWorkflows` with the same name." }, "AWS::EntityResolution::IdMappingWorkflow IdMappingRuleBasedProperties": { - "AttributeMatchingModel": "", - "RecordMatchingModel": "", - "RuleDefinitionType": "", - "Rules": "" + "AttributeMatchingModel": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the `attributeMatchingModel` .\n\nIf you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A matches the value of the `BusinessEmail` field of Profile B, the two profiles are matched on the `Email` attribute type.\n\nIf you choose `ONE_TO_ONE` , the system can only match attributes if the sub-types are an exact match. For example, for the `Email` attribute type, the system will only consider it a match if the value of the `Email` field of Profile A matches the value of the `Email` field of Profile B.", + "RecordMatchingModel": "The type of matching record that is allowed to be used in an ID mapping workflow.\n\nIf the value is set to `ONE_SOURCE_TO_ONE_TARGET` , only one record in the source can be matched to the same record in the target.\n\nIf the value is set to `MANY_SOURCE_TO_ONE_TARGET` , multiple records in the source can be matched to one record in the target.", + "RuleDefinitionType": "The set of rules you can use in an ID mapping workflow. The limitations specified for the source or target to define the match rules must be compatible.", + "Rules": "The rules that can be used for ID mapping." }, "AWS::EntityResolution::IdMappingWorkflow IdMappingTechniques": { "IdMappingType": "The type of ID mapping.", - "NormalizationVersion": "", "ProviderProperties": "An object which defines any additional configurations required by the provider service.", - "RuleBasedProperties": "" + "RuleBasedProperties": "An object which defines any additional configurations required by rule-based matching." }, "AWS::EntityResolution::IdMappingWorkflow IdMappingWorkflowInputSource": { - "InputSourceARN": "An AWS Glue table ARN for the input source table.", + "InputSourceARN": "An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.", "SchemaArn": "The ARN (Amazon Resource Name) that AWS Entity Resolution generated for the `SchemaMapping` .", - "Type": "The type of ID namespace. 
There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve to." + "Type": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` which all `sourceIds` will resolve to." }, "AWS::EntityResolution::IdMappingWorkflow IdMappingWorkflowOutputSource": { "KMSArn": "Customer AWS KMS ARN for encryption at rest. If not provided, system will use an AWS Entity Resolution managed KMS key.", @@ -15085,8 +15509,8 @@ "ProviderServiceArn": "The ARN of the provider service." }, "AWS::EntityResolution::IdMappingWorkflow Rule": { - "MatchingKeys": "", - "RuleName": "" + "MatchingKeys": "A list of `MatchingKeys` . The `MatchingKeys` must have been defined in the `SchemaMapping` . Two records are considered to match according to this rule if all of the `MatchingKeys` match.", + "RuleName": "A name for the matching rule." }, "AWS::EntityResolution::IdMappingWorkflow Tag": { "Key": "The key of the tag.", @@ -15099,15 +15523,15 @@ "InputSourceConfig": "A list of `InputSource` objects, which have the fields `InputSourceARN` and `SchemaName` .", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to access the resources defined in this `IdNamespace` on your behalf as part of the workflow run.", "Tags": "The tags used to organize, track, or control access for this resource.", - "Type": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve to." + "Type": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` which all `sourceIds` will resolve to." }, "AWS::EntityResolution::IdNamespace IdNamespaceIdMappingWorkflowProperties": { "IdMappingType": "The type of ID mapping.", "ProviderProperties": "An object which defines any additional configurations required by the provider service.", - "RuleBasedProperties": "" + "RuleBasedProperties": "An object which defines any additional configurations required by rule-based matching." }, "AWS::EntityResolution::IdNamespace IdNamespaceInputSource": { - "InputSourceARN": "An AWS Glue table ARN for the input source table.", + "InputSourceARN": "An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.", "SchemaName": "The name of the schema." }, "AWS::EntityResolution::IdNamespace NamespaceProviderProperties": { @@ -15115,14 +15539,14 @@ "ProviderServiceArn": "The Amazon Resource Name (ARN) of the provider service." }, "AWS::EntityResolution::IdNamespace NamespaceRuleBasedProperties": { - "AttributeMatchingModel": "", - "RecordMatchingModels": "", - "RuleDefinitionTypes": "", - "Rules": "" + "AttributeMatchingModel": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the `attributeMatchingModel` .\n\nIf you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. 
For example, if the value of the `Email` field of Profile A matches the value of `BusinessEmail` field of Profile B, the two profiles are matched on the `Email` attribute type.\n\nIf you choose `ONE_TO_ONE` , the system can only match attributes if the sub-types are an exact match. For example, for the `Email` attribute type, the system will only consider it a match if the value of the `Email` field of Profile A matches the value of the `Email` field of Profile B.", + "RecordMatchingModels": "The type of matching record that is allowed to be used in an ID mapping workflow.\n\nIf the value is set to `ONE_SOURCE_TO_ONE_TARGET` , only one record in the source can be matched to the same record in the target.\n\nIf the value is set to `MANY_SOURCE_TO_ONE_TARGET` , multiple records in the source can be matched to one record in the target.", + "RuleDefinitionTypes": "The sets of rules you can use in an ID mapping workflow. The limitations specified for the source and target must be compatible.", + "Rules": "The rules for the ID namespace." }, "AWS::EntityResolution::IdNamespace Rule": { - "MatchingKeys": "", - "RuleName": "" + "MatchingKeys": "A list of `MatchingKeys` . The `MatchingKeys` must have been defined in the `SchemaMapping` . Two records are considered to match according to this rule if all of the `MatchingKeys` match.", + "RuleName": "A name for the matching rule." }, "AWS::EntityResolution::IdNamespace Tag": { "Key": "The key of the tag.", "Value": "The value of the tag." }, @@ -15170,8 +15594,8 @@ "RuleName": "A name for the matching rule." }, "AWS::EntityResolution::MatchingWorkflow RuleBasedProperties": { - "AttributeMatchingModel": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the AttributeMatchingModel. When choosing `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of `BusinessEmail` field of Profile B matches, the two profiles are matched on the `Email` type. When choosing `ONE_TO_ONE` ,the system can only match if the sub-types are exact matches. For example, only when the value of the `Email` field of Profile A and the value of the `Email` field of Profile B matches, the two profiles are matched on the `Email` type.", - "MatchPurpose": "", + "AttributeMatchingModel": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the `attributeMatchingModel` .\n\nIf you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of `BusinessEmail` field of Profile B match, the two profiles are matched on the `Email` attribute type.\n\nIf you choose `ONE_TO_ONE` , the system can only match attributes if the sub-types are an exact match.
For example, for the `Email` attribute type, the system will only consider it a match if the value of the `Email` field of Profile A matches the value of the `Email` field of Profile B.", + "MatchPurpose": "An indicator of whether to generate IDs and index the data or not.\n\nIf you choose `IDENTIFIER_GENERATION` , the process generates IDs and indexes the data.\n\nIf you choose `INDEXING` , the process indexes the data without generating IDs.", "Rules": "A list of `Rule` objects, each of which have fields `RuleName` and `MatchingKeys` ." }, "AWS::EntityResolution::MatchingWorkflow Tag": { @@ -15195,8 +15619,8 @@ "AWS::EntityResolution::SchemaMapping SchemaInputAttribute": { "FieldName": "A string containing the field name.", "GroupName": "A string that instructs AWS Entity Resolution to combine several columns into a unified column with the identical attribute type.\n\nFor example, when working with columns such as `first_name` , `middle_name` , and `last_name` , assigning them a common `groupName` will prompt AWS Entity Resolution to concatenate them into a single value.", - "Hashed": "", - "MatchKey": "A key that allows grouping of multiple input attributes into a unified matching group. For example, consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . By assigning a `matchKey` called `address` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group. If no `matchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.", + "Hashed": "Indicates if the column values are hashed in the schema input. If the value is set to `TRUE` , the column values are hashed. If the value is set to `FALSE` , the column values are cleartext.", + "MatchKey": "A key that allows grouping of multiple input attributes into a unified matching group.\n\nFor example, consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . By assigning a `matchKey` called `address` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group.\n\nIf no `matchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.", "SubType": "The subtype of the attribute, selected from a list of values.", "Type": "The type of the attribute, selected from a list of values." }, @@ -16363,7 +16787,7 @@ "FleetType": "Indicates whether to use On-Demand or Spot instances for this fleet. By default, this property is set to `ON_DEMAND` . Learn more about when to use [On-Demand versus Spot Instances](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-ec2-instances.html#gamelift-ec2-instances-spot) . This fleet property can't be changed after the fleet is created.", "InstanceRoleARN": "A unique identifier for an IAM role with access permissions to other AWS services. Any application that runs on an instance in the fleet--including install scripts, server processes, and other processes--can use these permissions to interact with AWS resources that you own or have access to. For more information about using the role with your game server builds, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . 
This attribute is used with fleets where `ComputeType` is \"EC2\" or \"Container\".", "InstanceRoleCredentialsProvider": "Indicates that fleet instances maintain a shared credentials file for the IAM role defined in `InstanceRoleArn` . Shared credentials allow applications that are deployed with the game server executable to communicate with other AWS resources. This property is used only when the game server is integrated with the server SDK version 5.x. For more information about using shared credentials, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is \"EC2\" or \"Container\".", - "Locations": "A set of remote locations to deploy additional instances to and manage as part of the fleet. This parameter can only be used when creating fleets in AWS Regions that support multiple locations. You can add any Amazon GameLift-supported AWS Region as a remote location, in the form of an AWS Region code, such as `us-west-2` or Local Zone code. To create a fleet with instances in the home Region only, don't set this parameter.\n\nWhen using this parameter, Amazon GameLift requires you to include your home location in the request.", + "Locations": "A set of remote locations to deploy additional instances to and manage as a multi-location fleet. Use this parameter when creating a fleet in AWS Regions that support multiple locations. You can add any AWS Region or Local Zone that's supported by Amazon GameLift. Provide a list of one or more AWS Region codes, such as `us-west-2` , or Local Zone names. When using this parameter, Amazon GameLift requires you to include your home location in the request. For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "MaxSize": "The maximum number of instances that are allowed in the specified fleet location. If this parameter is not set, the default is 1.", "MetricGroups": "The name of an AWS CloudWatch metric group to add this fleet to. A metric group is used to aggregate the metrics for multiple fleets. You can specify an existing metric group name or set a new name to create a new metric group. A fleet can be included in only one metric group at a time.", "MinSize": "The minimum number of instances that are allowed in the specified fleet location. If this parameter is not set, the default is 0.", @@ -16407,7 +16831,7 @@ "MinSize": "The minimum number of instances that are allowed in the specified fleet location. If this parameter is not set, the default is 0." }, "AWS::GameLift::Fleet LocationConfiguration": { - "Location": "An AWS Region code, such as `us-west-2` .", + "Location": "An AWS Region code, such as `us-west-2` . For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "LocationCapacity": "Current resource capacity settings for managed EC2 fleets and container fleets. 
For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)" }, "AWS::GameLift::Fleet ResourceCreationLimitPolicy": { @@ -16506,7 +16930,7 @@ }, "AWS::GameLift::Location": { "LocationName": "A descriptive name for the custom location.", - "Tags": "A list of labels to assign to the new matchmaking configuration resource. Tags are developer-defined key-value pairs. Tagging AWS resources are useful for resource management, access management and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Rareference* ." + "Tags": "A list of labels to assign to the new resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management, and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference* ." }, "AWS::GameLift::Location Tag": { "Key": "The key for a developer-defined key value pair for tagging an AWS resource.", "Value": "The value for a developer-defined key value pair for tagging an AWS resource." }, @@ -16582,6 +17006,7 @@ "Tags": "Add tags for a cross-account attachment.\n\nFor more information, see [Tagging in AWS Global Accelerator](https://docs.aws.amazon.com/global-accelerator/latest/dg/tagging-in-global-accelerator.html) in the *AWS Global Accelerator Developer Guide* ." }, "AWS::GlobalAccelerator::CrossAccountAttachment Resource": { + "Cidr": "An IP address range, in CIDR format, that is specified as a resource. The address must be provisioned and advertised in AWS Global Accelerator by following the bring your own IP address (BYOIP) process for Global Accelerator.\n\nFor more information, see [Bring your own IP addresses (BYOIP)](https://docs.aws.amazon.com/global-accelerator/latest/dg/using-byoip.html) in the AWS Global Accelerator Developer Guide.", "EndpointId": "The endpoint ID for the endpoint that is specified as a AWS resource.\n\nAn endpoint ID for the cross-account feature is the ARN of an AWS resource, such as a Network Load Balancer, that Global Accelerator supports as an endpoint for an accelerator.", "Region": "The AWS Region where a shared endpoint resource is located." }, @@ -18025,7 +18450,7 @@ "AWS::IAM::OIDCProvider": { "ClientIdList": "A list of client IDs (also known as audiences) that are associated with the specified IAM OIDC provider resource object. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .", "Tags": "A list of tags that are attached to the specified IAM OIDC provider. The returned list of tags is sorted by tag key. For more information about tagging, see [Tagging IAM resources](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) in the *IAM User Guide* .", - "ThumbprintList": "A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object.
For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .", + "ThumbprintList": "A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .\n\nThis property is optional. If it is not included, IAM will retrieve and use the top intermediate certificate authority (CA) thumbprint of the OpenID Connect identity provider server certificate.", "Url": "The URL that the IAM OIDC provider resource object is associated with. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) ." }, "AWS::IAM::OIDCProvider Tag": { @@ -20348,7 +20773,7 @@ "AssetModelDescription": "A description for the asset model.", "AssetModelExternalId": "The external ID of the asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", "AssetModelHierarchies": "The hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see [Asset hierarchies](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 10 hierarchies per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", - "AssetModelName": "A unique, friendly name for the asset model.", + "AssetModelName": "A unique name for the asset model.", "AssetModelProperties": "The property definitions of the asset model. For more information, see [Asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-properties.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 200 properties per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", "AssetModelType": "The type of asset model.\n\n- *ASSET_MODEL* \u2013 (default) An asset model that you can use to create assets. Can't be included as a component in another asset model.\n- *COMPONENT_MODEL* \u2013 A reusable component that you can include in the composite models of other asset models. You can't create assets directly from this type of asset model.", "Tags": "A list of key-value pairs that contain metadata for the asset. For more information, see [Tagging your AWS IoT SiteWise resources](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/tag-resources.html) in the *AWS IoT SiteWise User Guide* ." @@ -20439,7 +20864,7 @@ }, "AWS::IoTSiteWise::Gateway": { "GatewayCapabilitySummaries": "A list of gateway capability summaries that each contain a namespace and status. Each gateway capability defines data sources for the gateway. 
To retrieve a capability configuration's definition, use [DescribeGatewayCapabilityConfiguration](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_DescribeGatewayCapabilityConfiguration.html) .", - "GatewayName": "A unique, friendly name for the gateway.", + "GatewayName": "A unique name for the gateway.", "GatewayPlatform": "The gateway's platform. You can only specify one platform in a gateway.", "Tags": "A list of key-value pairs that contain metadata for the gateway. For more information, see [Tagging your AWS IoT SiteWise resources](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/tag-resources.html) in the *AWS IoT SiteWise User Guide* ." }, @@ -21904,6 +22329,7 @@ "ElasticsearchDestinationConfiguration": "An Amazon ES destination for the delivery stream.\n\nConditional. You must specify only one destination configuration.\n\nIf you change the delivery stream destination from an Amazon ES destination to an Amazon S3 or Amazon Redshift destination, update requires [some interruptions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-some-interrupt) .", "ExtendedS3DestinationConfiguration": "An Amazon S3 destination for the delivery stream.\n\nConditional. You must specify only one destination configuration.\n\nIf you change the delivery stream destination from an Amazon Extended S3 destination to an Amazon ES destination, update requires [some interruptions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-some-interrupt) .", "HttpEndpointDestinationConfiguration": "Enables configuring Kinesis Firehose to deliver data to any HTTP endpoint destination. You can specify only one destination.", + "IcebergDestinationConfiguration": "Specifies the destination configuration settings for Apache Iceberg Tables.\n\nAmazon Data Firehose is in preview release and is subject to change.", "KinesisStreamSourceConfiguration": "When a Kinesis stream is used as the source for the delivery stream, a [KinesisStreamSourceConfiguration](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-kinesisstreamsourceconfiguration.html) containing the Kinesis stream ARN and the role ARN for the source stream.", "MSKSourceConfiguration": "The configuration for the Amazon MSK cluster to be used as the source for a delivery stream.", "RedshiftDestinationConfiguration": "An Amazon Redshift destination for the delivery stream.\n\nConditional. You must specify only one destination configuration.\n\nIf you change the delivery stream destination from an Amazon Redshift destination to an Amazon ES destination, update requires [some interruptions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-some-interrupt) .", @@ -21962,6 +22388,9 @@ "IntervalInSeconds": "The length of time, in seconds, that Kinesis Data Firehose buffers incoming data before delivering it to the destination. For valid values, see the `IntervalInSeconds` content for the [BufferingHints](https://docs.aws.amazon.com/firehose/latest/APIReference/API_BufferingHints.html) data type in the *Amazon Kinesis Data Firehose API Reference* .", "SizeInMBs": "The size of the buffer, in MBs, that Kinesis Data Firehose uses for incoming data before delivering it to the destination.
For valid values, see the `SizeInMBs` content for the [BufferingHints](https://docs.aws.amazon.com/firehose/latest/APIReference/API_BufferingHints.html) data type in the *Amazon Kinesis Data Firehose API Reference* ." }, + "AWS::KinesisFirehose::DeliveryStream CatalogConfiguration": { + "CatalogArn": "Specifies the Glue catalog ARN identifier of the destination Apache Iceberg Tables. You must specify the ARN in the format `arn:aws:glue:region:account-id:catalog` .\n\nAmazon Data Firehose is in preview release and is subject to change." + }, "AWS::KinesisFirehose::DeliveryStream CloudWatchLoggingOptions": { "Enabled": "Indicates whether CloudWatch Logs logging is enabled.", "LogGroupName": "The name of the CloudWatch Logs log group that contains the log stream that Kinesis Data Firehose will use.\n\nConditional. If you enable logging, you must specify this property.", @@ -21986,6 +22415,12 @@ "HiveJsonSerDe": "The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.", "OpenXJsonSerDe": "The OpenX SerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe." }, + "AWS::KinesisFirehose::DeliveryStream DestinationTableConfiguration": { + "DestinationDatabaseName": "The name of the Apache Iceberg database.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "DestinationTableName": "Specifies the name of the Apache Iceberg Table.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "S3ErrorOutputPrefix": "The table-specific S3 error output prefix. All the errors that occurred while delivering to this table will be prefixed with this value in the S3 destination.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "UniqueKeys": "A list of unique keys for a given Apache Iceberg table. Firehose will use these for running Create, Update, or Delete operations on the given Iceberg table.\n\nAmazon Data Firehose is in preview release and is subject to change." + }, "AWS::KinesisFirehose::DeliveryStream DocumentIdOptions": { "DefaultDocumentIdFormat": "When the `FIREHOSE_DEFAULT` option is chosen, Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs.\n\nWhen the `NO_DOCUMENT_ID` option is chosen, Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume less resources in the Amazon OpenSearch Service domain, resulting in improved performance."
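To show how the two new Iceberg-related types above plug into the `IcebergDestinationConfiguration` covered below, a short hypothetical YAML fragment; the ARN, database, table, and key names are placeholders, and the descriptions note this feature is in preview and subject to change:

    CatalogConfiguration:
      CatalogArn: arn:aws:glue:us-east-1:111122223333:catalog   # must follow arn:aws:glue:region:account-id:catalog
    DestinationTableConfigurationList:
      - DestinationDatabaseName: example_db                     # placeholder Apache Iceberg database
        DestinationTableName: example_table                     # placeholder Apache Iceberg table
        S3ErrorOutputPrefix: iceberg-errors/                    # table-specific error prefix in the S3 destination
        UniqueKeys:                                             # keys Firehose uses for Update/Delete operations
          - event_id                                            # placeholder column name

If no table-specific entry appears in the list, Firehose falls back to insert-only writes, per the `DestinationTableConfigurationList` description below.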
}, @@ -22065,6 +22500,17 @@ "CommonAttributes": "Describes the metadata sent to the HTTP endpoint destination.", "ContentEncoding": "Kinesis Data Firehose uses the content encoding to compress the body of a request before sending the request to the destination. For more information, see Content-Encoding in MDN Web Docs, the official Mozilla documentation." }, + "AWS::KinesisFirehose::DeliveryStream IcebergDestinationConfiguration": { + "BufferingHints": "", + "CatalogConfiguration": "Configuration describing where the destination Apache Iceberg Tables are persisted.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "CloudWatchLoggingOptions": "", + "DestinationTableConfigurationList": "Provides a list of `DestinationTableConfigurations` which Firehose uses to deliver data to Apache Iceberg Tables. Firehose will write data with insert if a table-specific configuration is not provided here.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "ProcessingConfiguration": "", + "RetryOptions": "", + "RoleARN": "The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling Apache Iceberg Tables.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "S3Configuration": "", + "s3BackupMode": "Describes how Firehose will back up records. Currently, S3 backup only supports `FailedDataOnly` for preview.\n\nAmazon Data Firehose is in preview release and is subject to change." + }, "AWS::KinesisFirehose::DeliveryStream InputFormatConfiguration": { "Deserializer": "Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. If both are non-null, the server rejects the request." }, @@ -22167,8 +22613,13 @@ "OrcSerDe": "A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see [Apache ORC](https://docs.aws.amazon.com/https://orc.apache.org/docs/) .", "ParquetSerDe": "A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see [Apache Parquet](https://docs.aws.amazon.com/https://parquet.apache.org/documentation/latest/) ." }, + "AWS::KinesisFirehose::DeliveryStream SnowflakeBufferingHints": { + "IntervalInSeconds": "Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 0.", + "SizeInMBs": "Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 1." + }, "AWS::KinesisFirehose::DeliveryStream SnowflakeDestinationConfiguration": { "AccountUrl": "URL for accessing your Snowflake account. This URL must include your [account identifier](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/admin-account-identifier) . Note that the protocol (https://) and port number are optional.", + "BufferingHints": "Describes the buffering to perform before delivering data to the Snowflake destination. If you do not specify any value, Firehose uses the default values.", "CloudWatchLoggingOptions": "", "ContentColumnName": "The name of the record content column", "DataLoadingOption": "Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.", "KeyPassphrase": "Passphrase to decrypt the private key when the key is encrypted.
For information, see [Using Key Pair Authentication & Key Rotation](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) .", "MetaDataColumnName": "The name of the record metadata column", "PrivateKey": "The private key used to encrypt your Snowflake client. For information, see [Using Key Pair Authentication & Key Rotation](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) .", - "ProcessingConfiguration": "", + "ProcessingConfiguration": "Specifies configuration for Snowflake.", "RetryOptions": "The time period where Firehose will retry sending data to the chosen HTTP endpoint.", "RoleARN": "The Amazon Resource Name (ARN) of the Snowflake role", "S3BackupMode": "Choose an S3 backup mode", @@ -24142,7 +24593,7 @@ }, "AWS::MWAA::Environment": { "AirflowConfigurationOptions": "A list of key-value pairs containing the Airflow configuration options for your environment. For example, `core.default_timezone: utc` . To learn more, see [Apache Airflow configuration options](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html) .", - "AirflowVersion": "The version of Apache Airflow to use for the environment. If no value is specified, defaults to the latest version.\n\nIf you specify a newer version number for an existing environment, the version update requires some service interruption before taking effect.\n\n*Allowed Values* : `1.10.12` | `2.0.2` | `2.2.2` | `2.4.3` | `2.5.1` | `2.6.3` | `2.7.2` (latest)", + "AirflowVersion": "The version of Apache Airflow to use for the environment. If no value is specified, defaults to the latest version.\n\nIf you specify a newer version number for an existing environment, the version update requires some service interruption before taking effect.\n\n*Allowed Values* : `1.10.12` | `2.0.2` | `2.2.2` | `2.4.3` | `2.5.1` | `2.6.3` | `2.7.2` | `2.8.1` | `2.9.2` (latest)", "DagS3Path": "The relative path to the DAGs folder on your Amazon S3 bucket. For example, `dags` . To learn more, see [Adding or updating DAGs](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-folder.html) .", "EndpointManagement": "Defines whether the VPC endpoints configured for the environment are created, and managed, by the customer or by Amazon MWAA. If set to `SERVICE` , Amazon MWAA will create and manage the required VPC endpoints in your VPC. If set to `CUSTOMER` , you must create, and manage, the VPC endpoints in your VPC.", "EnvironmentClass": "The environment class type. Valid values: `mw1.small` , `mw1.medium` , `mw1.large` . To learn more, see [Amazon MWAA environment class](https://docs.aws.amazon.com/mwaa/latest/userguide/environment-class.html) .", @@ -24521,6 +24972,7 @@ "MediaStreamOutputConfigurations": "The definition for each media stream that is associated with the output.", "MinLatency": "The minimum latency in milliseconds for SRT-based streams. In streams that use the SRT protocol, this value that you set on your MediaConnect source or output represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender\u2019s minimum latency and the receiver\u2019s minimum latency.", "Name": "The name of the output. 
This value must be unique within the current flow.", + "OutputStatus": "", "Port": "The port to use when MediaConnect distributes content to the output.", "Protocol": "The protocol to use for the output.", "RemoteId": "The identifier that is assigned to the Zixi receiver. This parameter applies only to outputs that use Zixi pull.", @@ -26039,6 +26491,7 @@ "ChannelGroupName": "The name of the channel group associated with the channel configuration.", "ChannelName": "The name of the channel.", "Description": "The description of the channel.", + "InputType": "", "Tags": "The tags associated with the channel." }, "AWS::MediaPackageV2::Channel IngestEndpoint": { @@ -26069,6 +26522,7 @@ "ContainerType": "The container type associated with the origin endpoint configuration.", "DashManifests": "A DASH manifest configuration.", "Description": "The description associated with the origin endpoint.", + "ForceEndpointErrorConfiguration": "", "HlsManifests": "The HLS manfiests associated with the origin endpoint configuration.", "LowLatencyHlsManifests": "The low-latency HLS (LL-HLS) manifests associated with the origin endpoint.", "OriginEndpointName": "The name of the origin endpoint associated with the origin endpoint configuration.", @@ -26113,6 +26567,9 @@ "Start": "Optionally specify the start time for all of your manifest egress requests. When you include start time, note that you cannot use start time query parameters for this manifest's endpoint URL.", "TimeDelaySeconds": "Optionally specify the time delay for all of your manifest egress requests. Enter a value that is smaller than your endpoint's startover window. When you include time delay, note that you cannot use time delay query parameters for this manifest's endpoint URL." }, + "AWS::MediaPackageV2::OriginEndpoint ForceEndpointErrorConfiguration": { + "EndpointErrorConditions": "" + }, "AWS::MediaPackageV2::OriginEndpoint HlsManifestConfiguration": { "ChildManifestName": "The name of the child manifest associated with the HLS manifest configuration.", "FilterConfiguration": "", @@ -26630,7 +27087,7 @@ "LoggingConfiguration": "Defines how AWS Network Firewall performs logging for a `Firewall` ." }, "AWS::NetworkFirewall::LoggingConfiguration LogDestinationConfig": { - "LogDestination": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` . The following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . 
The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", + "LogDestination": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` .\n\nThe following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", "LogDestinationType": "The type of storage destination to send these logs to. You can send logs to an Amazon S3 bucket, a CloudWatch log group, or a Firehose delivery stream.", "LogType": "The type of log to send. Alert logs report traffic that matches a stateful rule with an action setting that sends an alert log message. Flow logs are standard network traffic flow logs." }, @@ -27140,6 +27597,10 @@ "Key": "The tag key. Tag keys must be unique for the pipeline to which they are attached.", "Value": "The value assigned to the corresponding tag key. Tag values can be null and don't have to be unique in a tag set. For example, you can have a key value pair in a tag set of `project : Trinity` and `cost-center : Trinity`" }, + "AWS::OSIS::Pipeline VpcAttachmentOptions": { + "AttachToVpc": "Whether a VPC is attached to the pipeline.", + "CidrBlock": "The CIDR block to be reserved for OpenSearch Ingestion to create elastic network interfaces (ENIs)." + }, "AWS::OSIS::Pipeline VpcEndpoint": { "VpcEndpointId": "The unique identifier of the endpoint.", "VpcId": "The ID for your VPC. AWS PrivateLink generates this value when you create a VPC.", @@ -27148,6 +27609,7 @@ "AWS::OSIS::Pipeline VpcOptions": { "SecurityGroupIds": "A list of security groups associated with the VPC endpoint.", "SubnetIds": "A list of subnet IDs associated with the VPC endpoint.", + "VpcAttachmentOptions": "Options for attaching a VPC to a pipeline.", "VpcEndpointManagement": "Defines whether you or Amazon OpenSearch Ingestion service create and manage the VPC endpoint configured for the pipeline." }, "AWS::Oam::Link": { @@ -28937,7 +29399,7 @@ "SaslScram512Auth": "The ARN of the Secrets Manager secret." }, "AWS::Pipes::Pipe SelfManagedKafkaAccessConfigurationVpc": { - "SecurityGroup": "Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.", + "SecurityGroup": "Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups.", "Subnets": "Specifies the subnets associated with the stream. These subnets must all be in the same VPC. You can specify as many as 16 subnets." 
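A brief, hypothetical sketch of the new VPC attachment options inside an `AWS::OSIS::Pipeline` `VpcOptions` block; the subnet, security group, and CIDR values are placeholders:

    VpcOptions:
      SubnetIds:
        - subnet-0abc1234def567890      # placeholder subnet for the VPC endpoint
      SecurityGroupIds:
        - sg-0abc1234def567890          # placeholder security group for the VPC endpoint
      VpcEndpointManagement: CUSTOMER   # you, rather than the service, create and manage the endpoint
      VpcAttachmentOptions:
        AttachToVpc: true               # attach the pipeline to the VPC
        CidrBlock: 10.0.8.0/24          # reserved for OpenSearch Ingestion to create ENIs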
}, "AWS::Pipes::Pipe SingleMeasureMapping": { @@ -37774,7 +38236,7 @@ "AssociatedRoles": "Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the DB cluster. IAM roles that are associated with a DB cluster grant permission for the DB cluster to access other Amazon Web Services on your behalf.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "AutoMinorVersionUpgrade": "Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically.\n\nValid for Cluster Type: Multi-AZ DB clusters only", "AvailabilityZones": "A list of Availability Zones (AZs) where instances in the DB cluster can be created. For information on AWS Regions and Availability Zones, see [Choosing the Regions and Availability Zones](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters only", - "BacktrackWindow": "The target backtrack window, in seconds. To disable backtracking, set this value to 0.\n\n> Currently, Backtrack is only supported for Aurora MySQL DB clusters. \n\nDefault: 0\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).\n\nValid for: Aurora MySQL DB clusters only", + "BacktrackWindow": "The target backtrack window, in seconds. To disable backtracking, set this value to `0` .\n\nValid for Cluster Type: Aurora MySQL DB clusters only\n\nDefault: `0`\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).", "BackupRetentionPeriod": "The number of days for which automated backups are retained.\n\nDefault: 1\n\nConstraints:\n\n- Must be a value from 1 to 35\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "CopyTagsToSnapshot": "A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "DBClusterIdentifier": "The DB cluster identifier. This parameter is stored as a lowercase string.\n\nConstraints:\n\n- Must contain from 1 to 63 letters, numbers, or hyphens.\n- First character must be a letter.\n- Can't end with a hyphen or contain two consecutive hyphens.\n\nExample: `my-cluster1`\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", @@ -37812,7 +38274,7 @@ "Port": "The port number on which the DB instances in the DB cluster accept connections.\n\nDefault:\n\n- When `EngineMode` is `provisioned` , `3306` (for both Aurora MySQL and Aurora PostgreSQL)\n- When `EngineMode` is `serverless` :\n\n- `3306` when `Engine` is `aurora` or `aurora-mysql`\n- `5432` when `Engine` is `aurora-postgresql`\n\n> The `No interruption` on update behavior only applies to DB clusters. If you are updating a DB instance, see [Port](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html#cfn-rds-dbinstance-port) for the AWS::RDS::DBInstance resource. \n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "PreferredBackupWindow": "The daily time range during which automated backups are created. 
For more information, see [Backup Window](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.Backups.BackupWindow) in the *Amazon Aurora User Guide.*\n\nConstraints:\n\n- Must be in the format `hh24:mi-hh24:mi` .\n- Must be in Universal Coordinated Time (UTC).\n- Must not conflict with the preferred maintenance window.\n- Must be at least 30 minutes.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "PreferredMaintenanceWindow": "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n\nFormat: `ddd:hh24:mi-ddd:hh24:mi`\n\nThe default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. To see the time blocks available, see [Adjusting the Preferred DB Cluster Maintenance Window](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) in the *Amazon Aurora User Guide.*\n\nValid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.\n\nConstraints: Minimum 30-minute window.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", - "PubliclyAccessible": "Specifies whether the DB cluster is publicly accessible.\n\nWhen the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.\n\nWhen the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nDefault: The default behavior varies depending on whether `DBSubnetGroupName` is specified.\n\nIf `DBSubnetGroupName` isn't specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the default VPC in the target Region doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.\n\nIf `DBSubnetGroupName` is specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the subnets are part of a VPC that doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.", + "PubliclyAccessible": "Specifies whether the DB cluster is publicly accessible.\n\nWhen the DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB cluster, the endpoint resolves to the private IP address. Access to the DB cluster is ultimately controlled by the security group it uses. 
That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.\n\nWhen the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nDefault: The default behavior varies depending on whether `DBSubnetGroupName` is specified.\n\nIf `DBSubnetGroupName` isn't specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the default VPC in the target Region doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.\n\nIf `DBSubnetGroupName` is specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the subnets are part of a VPC that doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.", "ReadEndpoint": "This data type represents the information you need to connect to an Amazon RDS DB instance. This data type is used as a response element in the following actions:\n\n- `CreateDBInstance`\n- `DescribeDBInstances`\n- `DeleteDBInstance`\n\nFor the data structure that represents Amazon Aurora DB cluster endpoints, see `DBClusterEndpoint` .", "ReplicationSourceIdentifier": "The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read replica.\n\nValid for: Aurora DB clusters only", "RestoreToTime": "The date and time to restore the DB cluster to.\n\nValid Values: Value must be a time in Universal Coordinated Time (UTC) format\n\nConstraints:\n\n- Must be before the latest restorable time for the DB instance\n- Must be specified if `UseLatestRestorableTime` parameter isn't provided\n- Can't be specified if the `UseLatestRestorableTime` parameter is enabled\n- Can't be specified if the `RestoreType` parameter is `copy-on-write`\n\nThis property must be used with `SourceDBClusterIdentifier` property. The resulting cluster will have the identifier that matches the value of the `DBclusterIdentifier` property.\n\nExample: `2015-03-07T23:45:00Z`\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", @@ -37824,7 +38286,7 @@ "SourceRegion": "The AWS Region which contains the source DB cluster when replicating a DB cluster. For example, `us-east-1` .\n\nValid for: Aurora DB clusters only", "StorageEncrypted": "Indicates whether the DB cluster is encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption.\n\nIf you specify the `SourceDBClusterIdentifier` property, don't specify this property. The value is inherited from the source DB cluster, and if the DB cluster is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify the `SnapshotIdentifier` and the specified snapshot is encrypted, don't specify this property. The value is inherited from the snapshot, and the specified `KmsKeyId` property is used.\n\nIf you specify the `SnapshotIdentifier` and the specified snapshot isn't encrypted, you can use this property to specify that the restored DB cluster is encrypted. Specify the `KmsKeyId` property for the KMS key to use for encryption. 
If you don't want the restored DB cluster to be encrypted, then don't set this property or set it to `false` .\n\n> If you specify both the `StorageEncrypted` and `SnapshotIdentifier` properties without specifying the `KmsKeyId` property, then the restored DB cluster inherits the encryption settings from the DB snapshot that provide. \n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "StorageType": "The storage type to associate with the DB cluster.\n\nFor information on storage types for Aurora DB clusters, see [Storage configurations for Amazon Aurora DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type) . For information on storage types for Multi-AZ DB clusters, see [Settings for creating Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings) .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nWhen specified for a Multi-AZ DB cluster, a value for the `Iops` parameter is required.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values:\n\n- Aurora DB clusters - `aurora | aurora-iopt1`\n- Multi-AZ DB clusters - `io1 | io2 | gp3`\n\nDefault:\n\n- Aurora DB clusters - `aurora`\n- Multi-AZ DB clusters - `io1`\n\n> When you create an Aurora DB cluster with the storage type set to `aurora-iopt1` , the storage type is returned in the response. The storage type isn't returned when you set it to `aurora` .", - "Tags": "An optional array of key-value pairs to apply to this DB cluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", + "Tags": "Tags to assign to the DB cluster.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", "UseLatestRestorableTime": "A value that indicates whether to restore the DB cluster to the latest restorable backup time. By default, the DB cluster is not restored to the latest restorable backup time.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "VpcSecurityGroupIds": "A list of EC2 VPC security groups to associate with this DB cluster.\n\nIf you plan to update the resource, don't specify VPC security groups in a shared VPC.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" }, @@ -37838,7 +38300,7 @@ }, "AWS::RDS::DBCluster MasterUserSecret": { "KmsKeyId": "The AWS KMS key identifier that is used to encrypt the secret.", - "SecretArn": "The Amazon Resource Name (ARN) of the secret." + "SecretArn": "The Amazon Resource Name (ARN) of the secret. This parameter is a return value that you can retrieve using the `Fn::GetAtt` intrinsic function. For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html#aws-resource-rds-dbcluster-return-values) ." }, "AWS::RDS::DBCluster ReadEndpoint": { "Address": "The host address of the reader endpoint." @@ -37860,11 +38322,11 @@ "Value": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\")." 
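Because the `SecretArn` entry above is documented as a `Fn::GetAtt` return value, a minimal usage sketch follows; the cluster's logical ID is an assumed placeholder:

    Outputs:
      ClusterSecretArn:
        Description: ARN of the DB cluster's managed master user secret
        Value: !GetAtt ExampleDBCluster.MasterUserSecret.SecretArn   # return value described above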
}, "AWS::RDS::DBClusterParameterGroup": { - "DBClusterParameterGroupName": "The name of the DB cluster parameter group.\n\nConstraints:\n\n- Must not match the name of an existing DB cluster parameter group.\n\nIf you don't specify a value for `DBClusterParameterGroupName` property, a name is automatically created for the DB cluster parameter group.\n\n> This value is stored as a lowercase string.", - "Description": "A friendly description for this DB cluster parameter group.", - "Family": "The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a DB engine and engine version compatible with that DB cluster parameter group family.\n\n> The DB cluster parameter group family can't be changed when updating a DB cluster parameter group. \n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBClusterParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBClusterParameterGroup.html)` .", + "DBClusterParameterGroupName": "The name of the DB cluster parameter group.\n\nConstraints:\n\n- Must not match the name of an existing DB cluster parameter group.\n\n> This value is stored as a lowercase string.", + "Description": "The description for the DB cluster parameter group.", + "Family": "The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.\n\n*Aurora MySQL*\n\nExample: `aurora-mysql5.7` , `aurora-mysql8.0`\n\n*Aurora PostgreSQL*\n\nExample: `aurora-postgresql14`\n\n*RDS for MySQL*\n\nExample: `mysql8.0`\n\n*RDS for PostgreSQL*\n\nExample: `postgres13`\n\nTo list all of the available parameter group families for a DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine `\n\nFor example, to list all of the available parameter group families for the Aurora PostgreSQL DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine aurora-postgresql`\n\n> The output contains duplicates. \n\nThe following are the valid DB engine values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `mysql`\n- `postgres`", "Parameters": "Provides a list of parameters for the DB cluster parameter group.", - "Tags": "An optional array of key-value pairs to apply to this DB cluster parameter group." + "Tags": "Tags to assign to the DB cluster parameter group." }, "AWS::RDS::DBClusterParameterGroup Tag": { "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", @@ -37876,7 +38338,7 @@ "AssociatedRoles": "The AWS Identity and Access Management (IAM) roles associated with the DB instance.\n\n*Amazon Aurora*\n\nNot applicable. 
The associated roles are managed by the DB cluster.", "AutoMinorVersionUpgrade": "A value that indicates whether minor engine upgrades are applied automatically to the DB instance during the maintenance window. By default, minor engine upgrades are applied automatically.", "AutomaticBackupReplicationKmsKeyId": "The AWS KMS key identifier for encryption of the replicated automated backups. The KMS key ID is the Amazon Resource Name (ARN) for the KMS encryption key in the destination AWS Region , for example, `arn:aws:kms:us-east-1:123456789012:key/AKIAIOSFODNN7EXAMPLE` .", - "AutomaticBackupReplicationRegion": "", + "AutomaticBackupReplicationRegion": "The AWS Region associated with the automated backup.", "AvailabilityZone": "The Availability Zone (AZ) where the database will be created. For information on AWS Regions and Availability Zones, see [Regions and Availability Zones](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html) .\n\nFor Amazon Aurora, each Aurora DB cluster hosts copies of its storage in three separate Availability Zones. Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one.\n\nDefault: A random, system-chosen Availability Zone in the endpoint's AWS Region .\n\nConstraints:\n\n- The `AvailabilityZone` parameter can't be specified if the DB instance is a Multi-AZ deployment.\n- The specified Availability Zone must be in the same AWS Region as the current endpoint.\n\nExample: `us-east-1d`", "BackupRetentionPeriod": "The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.\n\n*Amazon Aurora*\n\nNot applicable. The retention period for automated backups is managed by the DB cluster.\n\nDefault: 1\n\nConstraints:\n\n- Must be a value from 0 to 35\n- Can't be set to 0 if the DB instance is a source to read replicas", "CACertificateIdentifier": "The identifier of the CA certificate for this DB instance.\n\nFor more information, see [Using SSL/TLS to encrypt a connection to a DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html) in the *Amazon RDS User Guide* and [Using SSL/TLS to encrypt a connection to a DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL.html) in the *Amazon Aurora User Guide* .", @@ -37885,19 +38347,19 @@ "CharacterSetName": "For supported engines, indicates that the DB instance should be associated with the specified character set.\n\n*Amazon Aurora*\n\nNot applicable. The character set is managed by the DB cluster. For more information, see [AWS::RDS::DBCluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html) .", "CopyTagsToSnapshot": "Specifies whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.\n\nThis setting doesn't apply to Amazon Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. 
Setting this value for an Aurora DB instance has no effect on the DB cluster setting.", "CustomIAMInstanceProfile": "The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance.\n\nThis setting is required for RDS Custom.\n\nConstraints:\n\n- The profile must exist in your account.\n- The profile must have an IAM role that Amazon EC2 has permissions to assume.\n- The instance profile name and the associated IAM role name must start with the prefix `AWSRDSCustom` .\n\nFor the list of permissions required for the IAM role, see [Configure IAM and your VPC](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/custom-setup-orcl.html#custom-setup-orcl.iam-vpc) in the *Amazon RDS User Guide* .", - "DBClusterIdentifier": "The identifier of the DB cluster that the instance will belong to.", + "DBClusterIdentifier": "The identifier of the DB cluster that this DB instance will belong to.\n\nThis setting doesn't apply to RDS Custom DB instances.", "DBClusterSnapshotIdentifier": "The identifier for the Multi-AZ DB cluster snapshot to restore from.\n\nFor more information on Multi-AZ DB clusters, see [Multi-AZ DB cluster deployments](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Must match the identifier of an existing Multi-AZ DB cluster snapshot.\n- Can't be specified when `DBSnapshotIdentifier` is specified.\n- Must be specified when `DBSnapshotIdentifier` isn't specified.\n- If you are restoring from a shared manual Multi-AZ DB cluster snapshot, the `DBClusterSnapshotIdentifier` must be the ARN of the shared snapshot.\n- Can't be the identifier of an Aurora DB cluster snapshot.", "DBInstanceClass": "The compute and memory capacity of the DB instance, for example `db.m5.large` . Not all DB instance classes are available in all AWS Regions , or for all database engines. For the full list of DB instance classes, and availability for your engine, see [DB instance classes](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the *Amazon RDS User Guide* or [Aurora DB instance classes](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html) in the *Amazon Aurora User Guide* .", "DBInstanceIdentifier": "A name for the DB instance. If you specify a name, AWS CloudFormation converts it to lowercase. If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the DB instance. For more information, see [Name Type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) .\n\nFor information about constraints that apply to DB instance identifiers, see [Naming constraints in Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Limits.html#RDS_Limits.Constraints) in the *Amazon RDS User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.", "DBName": "The meaning of this parameter differs according to the database engine you use.\n\n> If you specify the `[DBSnapshotIdentifier](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html#cfn-rds-dbinstance-dbsnapshotidentifier)` property, this property only applies to RDS for Oracle. \n\n*Amazon Aurora*\n\nNot applicable. 
The database name is managed by the DB cluster.\n\n*Db2*\n\nThe name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.\n\nConstraints:\n\n- Must contain 1 to 64 letters or numbers.\n- Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).\n- Can't be a word reserved by the specified database engine.\n\n*MySQL*\n\nThe name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.\n\nConstraints:\n\n- Must contain 1 to 64 letters or numbers.\n- Can't be a word reserved by the specified database engine\n\n*MariaDB*\n\nThe name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.\n\nConstraints:\n\n- Must contain 1 to 64 letters or numbers.\n- Can't be a word reserved by the specified database engine\n\n*PostgreSQL*\n\nThe name of the database to create when the DB instance is created. If this parameter is not specified, the default `postgres` database is created in the DB instance.\n\nConstraints:\n\n- Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).\n- Must contain 1 to 63 characters.\n- Can't be a word reserved by the specified database engine\n\n*Oracle*\n\nThe Oracle System ID (SID) of the created DB instance. If you specify `null` , the default value `ORCL` is used. You can't specify the string NULL, or any other reserved word, for `DBName` .\n\nDefault: `ORCL`\n\nConstraints:\n\n- Can't be longer than 8 characters\n\n*SQL Server*\n\nNot applicable. Must be null.", "DBParameterGroupName": "The name of an existing DB parameter group or a reference to an [AWS::RDS::DBParameterGroup](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-dbparametergroup.html) resource created in the template.\n\nTo list all of the available DB parameter group names, use the following command:\n\n`aws rds describe-db-parameter-groups --query \"DBParameterGroups[].DBParameterGroupName\" --output text`\n\n> If any of the data members of the referenced parameter group are changed during an update, the DB instance might need to be restarted, which causes some interruption. If the parameter group contains static parameters, whether they were changed or not, an update triggers a reboot. \n\nIf you don't specify a value for `DBParameterGroupName` property, the default DB parameter group for the specified engine and engine version is used.", "DBSecurityGroups": "A list of the DB security groups to assign to the DB instance. The list can include both the name of existing DB security groups or references to AWS::RDS::DBSecurityGroup resources created in the template.\n\nIf you set DBSecurityGroups, you must not set VPCSecurityGroups, and vice versa. Also, note that the DBSecurityGroups property exists only for backwards compatibility with older regions and is no longer recommended for providing security information to an RDS DB instance. 
Instead, use VPCSecurityGroups.\n\n> If you specify this property, AWS CloudFormation sends only the following properties (if specified) to Amazon RDS during create operations:\n> \n> - `AllocatedStorage`\n> - `AutoMinorVersionUpgrade`\n> - `AvailabilityZone`\n> - `BackupRetentionPeriod`\n> - `CharacterSetName`\n> - `DBInstanceClass`\n> - `DBName`\n> - `DBParameterGroupName`\n> - `DBSecurityGroups`\n> - `DBSubnetGroupName`\n> - `Engine`\n> - `EngineVersion`\n> - `Iops`\n> - `LicenseModel`\n> - `MasterUsername`\n> - `MasterUserPassword`\n> - `MultiAZ`\n> - `OptionGroupName`\n> - `PreferredBackupWindow`\n> - `PreferredMaintenanceWindow`\n> \n> All other properties are ignored. Specify a virtual private cloud (VPC) security group if you want to submit other properties, such as `StorageType` , `StorageEncrypted` , or `KmsKeyId` . If you're already using the `DBSecurityGroups` property, you can't use these other properties by updating your DB instance to use a VPC security group. You must recreate the DB instance.", - "DBSnapshotIdentifier": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n\nBy specifying this property, you can create a DB instance from the specified DB snapshot. If the `DBSnapshotIdentifier` property is an empty string or the `AWS::RDS::DBInstance` declaration has no `DBSnapshotIdentifier` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n\nSome DB instance properties aren't valid when you restore from a snapshot, such as the `MasterUsername` and `MasterUserPassword` properties. For information about the properties that you can specify, see the `RestoreDBInstanceFromDBSnapshot` action in the *Amazon RDS API Reference* .\n\nAfter you restore a DB instance with a `DBSnapshotIdentifier` property, you must specify the same `DBSnapshotIdentifier` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the `DBSnapshotIdentifier` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified `DBSnapshotIdentifier` property, and the original DB instance is deleted.\n\nIf you specify the `DBSnapshotIdentifier` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n\n- `CharacterSetName`\n- `DBClusterIdentifier`\n- `DBName`\n- `DeleteAutomatedBackups`\n- `EnablePerformanceInsights`\n- `KmsKeyId`\n- `MasterUsername`\n- `MasterUserPassword`\n- `PerformanceInsightsKMSKeyId`\n- `PerformanceInsightsRetentionPeriod`\n- `PromotionTier`\n- `SourceDBInstanceIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an encrypted snapshot)\n- `Timezone`\n\n*Amazon Aurora*\n\nNot applicable. Snapshot restore is managed by the DB cluster.", - "DBSubnetGroupName": "A DB subnet group to associate with the DB instance. 
If you update this value, the new subnet group must be a subnet group in a new VPC.\n\nIf there's no DB subnet group, then the DB instance isn't a VPC DB instance.\n\nFor more information about using Amazon RDS in a VPC, see [Using Amazon RDS with Amazon Virtual Private Cloud (VPC)](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. The DB subnet group is managed by the DB cluster. If specified, the setting must match the DB cluster setting.", + "DBSnapshotIdentifier": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n\nBy specifying this property, you can create a DB instance from the specified DB snapshot. If the `DBSnapshotIdentifier` property is an empty string or the `AWS::RDS::DBInstance` declaration has no `DBSnapshotIdentifier` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n\nSome DB instance properties aren't valid when you restore from a snapshot, such as the `MasterUsername` and `MasterUserPassword` properties. For information about the properties that you can specify, see the `RestoreDBInstanceFromDBSnapshot` action in the *Amazon RDS API Reference* .\n\nAfter you restore a DB instance with a `DBSnapshotIdentifier` property, you must specify the same `DBSnapshotIdentifier` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the `DBSnapshotIdentifier` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified `DBSnapshotIdentifier` property, and the original DB instance is deleted.\n\nIf you specify the `DBSnapshotIdentifier` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n\n- `CharacterSetName`\n- `DBClusterIdentifier`\n- `DBName`\n- `DeleteAutomatedBackups`\n- `KmsKeyId`\n- `MasterUsername`\n- `MasterUserPassword`\n- `PerformanceInsightsKMSKeyId`\n- `PerformanceInsightsRetentionPeriod`\n- `PromotionTier`\n- `SourceDBInstanceIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an encrypted snapshot)\n- `Timezone`\n\n*Amazon Aurora*\n\nNot applicable. Snapshot restore is managed by the DB cluster.", + "DBSubnetGroupName": "A DB subnet group to associate with the DB instance. If you update this value, the new subnet group must be a subnet group in a new VPC.\n\nIf there's no DB subnet group, then the DB instance isn't a VPC DB instance.\n\nFor more information about using Amazon RDS in a VPC, see [Amazon VPC and Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\nThis setting doesn't apply to Amazon Aurora DB instances. The DB subnet group is managed by the DB cluster. 
If specified, the setting must match the DB cluster setting.", "DBSystemId": "The Oracle system identifier (SID), which is the name of the Oracle database instance that manages your database files. In this context, the term \"Oracle database instance\" refers exclusively to the system global area (SGA) and Oracle background processes. If you don't specify a SID, the value defaults to `RDSCDB` . The Oracle SID is also the name of your CDB.", "DedicatedLogVolume": "Indicates whether the DB instance has a dedicated log volume (DLV) enabled.", "DeleteAutomatedBackups": "A value that indicates whether to remove automated backups immediately after the DB instance is deleted. This parameter isn't case-sensitive. The default is to remove automated backups immediately after the DB instance is deleted.\n\n*Amazon Aurora*\n\nNot applicable. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. Manual DB cluster snapshots of the DB cluster are not deleted.", - "DeletionProtection": "A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. For more information, see [Deleting a DB Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html) .\n\n*Amazon Aurora*\n\nNot applicable. You can enable or disable deletion protection for the DB cluster. For more information, see `CreateDBCluster` . DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.", + "DeletionProtection": "Specifies whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see [Deleting a DB Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html) .\n\nThis setting doesn't apply to Amazon Aurora DB instances. You can enable or disable deletion protection for the DB cluster. For more information, see `CreateDBCluster` . DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.", "Domain": "The Active Directory directory ID to create the DB instance in. Currently, only Db2, MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.\n\nFor more information, see [Kerberos Authentication](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html) in the *Amazon RDS User Guide* .", "DomainAuthSecretArn": "The ARN for the Secrets Manager secret with the credentials for the user joining the domain.\n\nExample: `arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456`", "DomainDnsIps": "The IPv4 DNS IP addresses of your primary and secondary Active Directory domain controllers.\n\nConstraints:\n\n- Two IP addresses must be provided. 
If there isn't a secondary domain controller, use the IP address of the primary domain controller for both entries in the list.\n\nExample: `123.124.125.126,234.235.236.237`", @@ -37919,15 +38381,15 @@ "MasterUserSecret": "The secret managed by RDS in AWS Secrets Manager for the master user password.\n\nFor more information, see [Password management with AWS Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the *Amazon RDS User Guide.*", "MasterUsername": "The master user name for the DB instance.\n\n> If you specify the `SourceDBInstanceIdentifier` or `DBSnapshotIdentifier` property, don't specify this property. The value is inherited from the source DB instance or snapshot.\n> \n> When migrating a self-managed Db2 database, we recommend that you use the same master username as your self-managed Db2 instance name. \n\n*Amazon Aurora*\n\nNot applicable. The name for the master user is managed by the DB cluster.\n\n*RDS for Db2*\n\nConstraints:\n\n- Must be 1 to 16 letters or numbers.\n- First character must be a letter.\n- Can't be a reserved word for the chosen database engine.\n\n*RDS for MariaDB*\n\nConstraints:\n\n- Must be 1 to 16 letters or numbers.\n- Can't be a reserved word for the chosen database engine.\n\n*RDS for Microsoft SQL Server*\n\nConstraints:\n\n- Must be 1 to 128 letters or numbers.\n- First character must be a letter.\n- Can't be a reserved word for the chosen database engine.\n\n*RDS for MySQL*\n\nConstraints:\n\n- Must be 1 to 16 letters or numbers.\n- First character must be a letter.\n- Can't be a reserved word for the chosen database engine.\n\n*RDS for Oracle*\n\nConstraints:\n\n- Must be 1 to 30 letters or numbers.\n- First character must be a letter.\n- Can't be a reserved word for the chosen database engine.\n\n*RDS for PostgreSQL*\n\nConstraints:\n\n- Must be 1 to 63 letters or numbers.\n- First character must be a letter.\n- Can't be a reserved word for the chosen database engine.", "MaxAllocatedStorage": "The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.\n\nFor more information about this setting, including limitations that apply to it, see [Managing capacity automatically with Amazon RDS storage autoscaling](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIOPS.StorageTypes.html#USER_PIOPS.Autoscaling) in the *Amazon RDS User Guide* .\n\nThis setting doesn't apply to the following DB instances:\n\n- Amazon Aurora (Storage is managed by the DB cluster.)\n- RDS Custom", - "MonitoringInterval": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify 0. The default is 0.\n\nIf `MonitoringRoleArn` is specified, then you must set `MonitoringInterval` to a value other than 0.\n\nThis setting doesn't apply to RDS Custom.\n\nValid Values: `0, 1, 5, 10, 15, 30, 60`", + "MonitoringInterval": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify `0` .\n\nIf `MonitoringRoleArn` is specified, then you must set `MonitoringInterval` to a value other than `0` .\n\nThis setting doesn't apply to RDS Custom DB instances.\n\nValid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60`\n\nDefault: `0`", "MonitoringRoleArn": "The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. 
For example, `arn:aws:iam::123456789012:role/emaccess` . For information on creating a monitoring role, see [Setting Up and Enabling Enhanced Monitoring](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling) in the *Amazon RDS User Guide* .\n\nIf `MonitoringInterval` is set to a value other than `0` , then you must supply a `MonitoringRoleArn` value.\n\nThis setting doesn't apply to RDS Custom DB instances.",
-    "MultiAZ": "Specifies whether the database instance is a Multi-AZ DB instance deployment. You can't set the `AvailabilityZone` parameter if the `MultiAZ` parameter is set to true.\n\nFor more information, see [Multi-AZ deployments for high availability](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZ.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. Amazon Aurora storage is replicated across all of the Availability Zones and doesn't require the `MultiAZ` option to be set.",
+    "MultiAZ": "Specifies whether the DB instance is a Multi-AZ deployment. You can't set the `AvailabilityZone` parameter if the DB instance is a Multi-AZ deployment.\n\nThis setting doesn't apply to the following DB instances:\n\n- Amazon Aurora (DB instance Availability Zones (AZs) are managed by the DB cluster.)\n- RDS Custom",
"NcharCharacterSetName": "The name of the NCHAR character set for the Oracle DB instance.\n\nThis setting doesn't apply to RDS Custom DB instances.",
"NetworkType": "The network type of the DB instance.\n\nValid values:\n\n- `IPV4`\n- `DUAL`\n\nThe network type is determined by the `DBSubnetGroup` specified for the DB instance. A `DBSubnetGroup` can support only the IPv4 protocol or the IPv4 and IPv6 protocols ( `DUAL` ).\n\nFor more information, see [Working with a DB instance in a VPC](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) in the *Amazon RDS User Guide.*",
"OptionGroupName": "Indicates that the DB instance should be associated with the specified option group.\n\nPermanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group. Also, that option group can't be removed from a DB instance once it is associated with a DB instance.",
"PerformanceInsightsKMSKeyId": "The AWS KMS key identifier for encryption of Performance Insights data.\n\nThe KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.\n\nIf you do not specify a value for `PerformanceInsightsKMSKeyId` , then Amazon RDS uses your default KMS key. There is a default KMS key for your AWS account. Your AWS account has a different default KMS key for each AWS Region.\n\nFor information about enabling Performance Insights, see [EnablePerformanceInsights](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html#cfn-rds-dbinstance-enableperformanceinsights) .",
"PerformanceInsightsRetentionPeriod": "The number of days to retain Performance Insights data.\n\nThis setting doesn't apply to RDS Custom DB instances.\n\nValid Values:\n\n- `7`\n- *month* * 31, where *month* is a number of months from 1-23. Examples: `93` (3 months * 31), `341` (11 months * 31), `589` (19 months * 31)\n- `731`\n\nDefault: `7` days\n\nIf you specify a retention period that isn't valid, such as `94` , Amazon RDS returns an error.",
-    "Port": "The port number on which the database accepts connections.\n\n*Amazon Aurora*\n\nNot applicable.
The port number is managed by the DB cluster.\n\n*Db2*\n\nDefault value: `50000`", + "Port": "The port number on which the database accepts connections.\n\nThis setting doesn't apply to Aurora DB instances. The port number is managed by the cluster.\n\nValid Values: `1150-65535`\n\nDefault:\n\n- RDS for Db2 - `50000`\n- RDS for MariaDB - `3306`\n- RDS for Microsoft SQL Server - `1433`\n- RDS for MySQL - `3306`\n- RDS for Oracle - `1521`\n- RDS for PostgreSQL - `5432`\n\nConstraints:\n\n- For RDS for Microsoft SQL Server, the value can't be `1234` , `1434` , `3260` , `3343` , `3389` , `47001` , or `49152-49156` .", "PreferredBackupWindow": "The daily time range during which automated backups are created if automated backups are enabled, using the `BackupRetentionPeriod` parameter. For more information, see [Backup Window](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html#USER_WorkingWithAutomatedBackups.BackupWindow) in the *Amazon RDS User Guide.*\n\nConstraints:\n\n- Must be in the format `hh24:mi-hh24:mi` .\n- Must be in Universal Coordinated Time (UTC).\n- Must not conflict with the preferred maintenance window.\n- Must be at least 30 minutes.\n\n*Amazon Aurora*\n\nNot applicable. The daily time range for creating automated backups is managed by the DB cluster.", "PreferredMaintenanceWindow": "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n\nFormat: `ddd:hh24:mi-ddd:hh24:mi`\n\nThe default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. To see the time blocks available, see [Adjusting the Preferred DB Instance Maintenance Window](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow) in the *Amazon RDS User Guide.*\n\n> This property applies when AWS CloudFormation initially creates the DB instance. If you use AWS CloudFormation to update the DB instance, those updates are applied immediately. \n\nConstraints: Minimum 30-minute window.", "ProcessorFeatures": "The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.\n\nThis setting doesn't apply to Amazon Aurora or RDS Custom DB instances.", @@ -37943,7 +38405,7 @@ "StorageEncrypted": "A value that indicates whether the DB instance is encrypted. By default, it isn't encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption.\n\nIf you specify the `SourceDBInstanceIdentifier` or `SourceDbiResourceId` property, don't specify this property. The value is inherited from the source DB instance, and if the DB instance is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify the `SourceDBInstanceAutomatedBackupsArn` property, don't specify this property. The value is inherited from the source DB instance automated backup.\n\nIf you specify `DBSnapshotIdentifier` property, don't specify this property. The value is inherited from the snapshot.\n\n*Amazon Aurora*\n\nNot applicable. The encryption for DB instances is managed by the DB cluster.", "StorageThroughput": "Specifies the storage throughput value for the DB instance. 
This setting applies only to the `gp3` storage type.\n\nThis setting doesn't apply to RDS Custom or Amazon Aurora.", "StorageType": "The storage type to associate with the DB instance.\n\nIf you specify `io1` , `io2` , or `gp3` , you must also include a value for the `Iops` parameter.\n\nThis setting doesn't apply to Amazon Aurora DB instances. Storage is managed by the DB cluster.\n\nValid Values: `gp2 | gp3 | io1 | io2 | standard`\n\nDefault: `io1` , if the `Iops` parameter is specified. Otherwise, `gp2` .", - "Tags": "An optional array of key-value pairs to apply to this DB instance.", + "Tags": "Tags to assign to the DB instance.", "Timezone": "The time zone of the DB instance. The time zone parameter is currently supported only by [RDS for Db2](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/db2-time-zone) and [RDS for SQL Server](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone) .", "UseDefaultProcessorFeatures": "Specifies whether the DB instance class of the DB instance uses its default processor features.\n\nThis setting doesn't apply to RDS Custom DB instances.", "UseLatestRestorableTime": "Specifies whether the DB instance is restored from the latest backup time. By default, the DB instance isn't restored from the latest backup time.\n\nConstraints:\n\n- Can't be specified if the `RestoreTime` parameter is provided.", @@ -37964,7 +38426,7 @@ }, "AWS::RDS::DBInstance MasterUserSecret": { "KmsKeyId": "The AWS KMS key identifier that is used to encrypt the secret.", - "SecretArn": "The Amazon Resource Name (ARN) of the secret." + "SecretArn": "The Amazon Resource Name (ARN) of the secret. This parameter is a return value that you can retrieve using the `Fn::GetAtt` intrinsic function. For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html#aws-resource-rds-dbinstance-return-values) ." }, "AWS::RDS::DBInstance ProcessorFeature": { "Name": "The name of the processor feature. Valid names are `coreCount` and `threadsPerCore` .", @@ -37977,9 +38439,9 @@ "AWS::RDS::DBParameterGroup": { "DBParameterGroupName": "The name of the DB parameter group.\n\nConstraints:\n\n- Must be 1 to 255 letters, numbers, or hyphens.\n- First character must be a letter\n- Can't end with a hyphen or contain two consecutive hyphens\n\nIf you don't specify a value for `DBParameterGroupName` property, a name is automatically created for the DB parameter group.\n\n> This value is stored as a lowercase string.", "Description": "Provides the customer-specified description for this DB parameter group.", - "Family": "The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a DB engine and engine version compatible with that DB parameter group family.\n\n> The DB parameter group family can't be changed when updating a DB parameter group. \n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBParameterGroup.html)` .", - "Parameters": "An array of parameter names and values for the parameter update. At least one parameter name and value must be supplied. 
Subsequent arguments are optional.\n\nRDS for Db2 requires you to bring your own Db2 license. You must enter your IBM customer ID ( `rds.ibm_customer_id` ) and site number ( `rds.ibm_site_id` ) before starting a Db2 instance.\n\nFor more information about DB parameters and DB parameter groups for Amazon RDS DB engines, see [Working with DB Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* .\n\nFor more information about DB cluster and DB instance parameters and parameter groups for Amazon Aurora DB engines, see [Working with DB Parameter Groups and DB Cluster Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", - "Tags": "An optional array of key-value pairs to apply to this DB parameter group.\n\n> Currently, this is the only property that supports drift detection." + "Family": "The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.\n\nTo list all of the available parameter group families for a DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine `\n\nFor example, to list all of the available parameter group families for the MySQL DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine mysql`\n\n> The output contains duplicates. \n\nThe following are the valid DB engine values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `db2-ae`\n- `db2-se`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", + "Parameters": "An array of parameter names and values for the parameter update. You must specify at least one parameter name and value.\n\nFor more information about parameter groups, see [Working with parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* , or [Working with parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", + "Tags": "Tags to assign to the DB parameter group." }, "AWS::RDS::DBParameterGroup Tag": { "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", @@ -37989,7 +38451,7 @@ "Auth": "The authorization mechanism that the proxy uses.", "DBProxyName": "The identifier for the proxy. This name must be unique for all proxies owned by your AWS account in the specified AWS Region . 
An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens.", "DebugLogging": "Specifies whether the proxy includes detailed information about SQL statements in its logs. This information helps you to debug issues involving SQL behavior or the performance and scalability of the proxy connections. The debug information includes the text of SQL statements that you submit through the proxy. Thus, only enable this setting when needed for debugging, and only when you have security measures in place to safeguard any sensitive information that appears in the logs.", - "EngineFamily": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL` . For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL` . For RDS for Microsoft SQL Server, specify `SQLSERVER` .\n\n*Valid Values* : `MYSQL` | `POSTGRESQL` | `SQLSERVER`", + "EngineFamily": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL` . For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL` . For RDS for Microsoft SQL Server, specify `SQLSERVER` .", "IdleClientTimeout": "The number of seconds that a connection to the proxy can be inactive before the proxy disconnects it. You can set this value higher or lower than the connection timeout limit for the associated database.", "RequireTLS": "Specifies whether Transport Layer Security (TLS) encryption is required for connections to the proxy. By enabling this setting, you can enforce encrypted TLS connections to the proxy.", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role that the proxy uses to access secrets in AWS Secrets Manager.", @@ -37998,30 +38460,30 @@ "VpcSubnetIds": "One or more VPC subnet IDs to associate with the new proxy." }, "AWS::RDS::DBProxy AuthFormat": { - "AuthScheme": "The type of authentication that the proxy uses for connections from the proxy to the underlying database.\n\nValid Values: `SECRETS`", + "AuthScheme": "The type of authentication that the proxy uses for connections from the proxy to the underlying database.", "ClientPasswordAuthType": "Specifies the details of authentication used by a proxy to log in as a specific database user.", "Description": "A user-specified description about the authentication used by a proxy to log in as a specific database user.", - "IAMAuth": "Whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. The `ENABLED` value is valid only for proxies with RDS for Microsoft SQL Server.\n\nValid Values: `ENABLED | DISABLED | REQUIRED`", + "IAMAuth": "A value that indicates whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. The `ENABLED` value is valid only for proxies with RDS for Microsoft SQL Server.", "SecretArn": "The Amazon Resource Name (ARN) representing the secret that the proxy uses to authenticate to the RDS DB instance or Aurora DB cluster. These secrets are stored within Amazon Secrets Manager." 
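A minimal sketch of how these `AuthFormat` properties fit together in a proxy declaration; the role, secret, and subnet references ( `ExampleProxyRole` , `ExampleDbSecret` , `SubnetA` , `SubnetB` ) are hypothetical and would need to be defined elsewhere in the stack:

```json
{
  "ExampleProxy": {
    "Type": "AWS::RDS::DBProxy",
    "Properties": {
      "DBProxyName": "example-proxy",
      "EngineFamily": "MYSQL",
      "RoleArn": { "Fn::GetAtt": ["ExampleProxyRole", "Arn"] },
      "RequireTLS": true,
      "Auth": [
        {
          "AuthScheme": "SECRETS",
          "IAMAuth": "DISABLED",
          "SecretArn": { "Ref": "ExampleDbSecret" }
        }
      ],
      "VpcSubnetIds": [{ "Ref": "SubnetA" }, { "Ref": "SubnetB" }]
    }
  }
}
```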
},
"AWS::RDS::DBProxy TagFormat": {
-    "Key": "A key is the required name of the tag. The string value can be 1-128 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").",
-    "Value": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").",
+    "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").",
+    "Value": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\")."
},
"AWS::RDS::DBProxyEndpoint": {
"DBProxyEndpointName": "The name of the DB proxy endpoint to create.",
"DBProxyName": "The name of the DB proxy associated with the DB proxy endpoint that you create.",
"Tags": "An optional set of key-value pairs to associate arbitrary data of your choosing with the proxy.",
-    "TargetRole": "A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.\n\nValid Values: `READ_WRITE | READ_ONLY`",
+    "TargetRole": "A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.",
"VpcSecurityGroupIds": "The VPC security group IDs for the DB proxy endpoint that you create. You can specify a different set of security group IDs than for the original DB proxy. The default is the default security group for the VPC.",
"VpcSubnetIds": "The VPC subnet IDs for the DB proxy endpoint that you create. You can specify a different set of subnet IDs than for the original DB proxy."
},
"AWS::RDS::DBProxyEndpoint TagFormat": {
-    "Key": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").",
-    "Value": "Metadata assigned to a DB instance consisting of a key-value pair."
+    "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").",
+    "Value": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\")."
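And a corresponding sketch of a read-only endpoint for that proxy, using the tag shape the `TagFormat` entries above describe; all names here ( `ExampleProxyEndpoint` , `example-proxy-ro` , `SubnetA` , `SubnetB` ) are invented for illustration:

```json
{
  "ExampleProxyEndpoint": {
    "Type": "AWS::RDS::DBProxyEndpoint",
    "Properties": {
      "DBProxyEndpointName": "example-proxy-ro",
      "DBProxyName": "example-proxy",
      "TargetRole": "READ_ONLY",
      "VpcSubnetIds": [{ "Ref": "SubnetA" }, { "Ref": "SubnetB" }],
      "Tags": [{ "Key": "environment", "Value": "test" }]
    }
  }
}
```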
}, "AWS::RDS::DBProxyTargetGroup": { - "ConnectionPoolConfigurationInfo": "Settings that control the size and behavior of the connection pool associated with a `DBProxyTargetGroup` .", + "ConnectionPoolConfigurationInfo": "Displays the settings that control the size and behavior of the connection pool associated with a `DBProxyTarget` .", "DBClusterIdentifiers": "One or more DB cluster identifiers.", "DBInstanceIdentifiers": "One or more DB instance identifiers.", "DBProxyName": "The identifier of the `DBProxy` that is associated with the `DBProxyTargetGroup` .", @@ -38036,9 +38498,9 @@ }, "AWS::RDS::DBSecurityGroup": { "DBSecurityGroupIngress": "Ingress rules to be applied to the DB security group.", - "EC2VpcId": "The identifier of an Amazon VPC. This property indicates the VPC that this DB security group belongs to.\n\n> The `EC2VpcId` property is for backward compatibility with older regions, and is no longer recommended for providing security information to an RDS DB instance.", + "EC2VpcId": "The identifier of an Amazon virtual private cloud (VPC). This property indicates the VPC that this DB security group belongs to.\n\n> This property is included for backwards compatibility and is no longer recommended for providing security information to an RDS DB instance.", "GroupDescription": "Provides the description of the DB security group.", - "Tags": "An optional array of key-value pairs to apply to this DB security group." + "Tags": "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n\nFor more information, see [Tagging Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide* or [Tagging Amazon Aurora and Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html) in the *Amazon Aurora User Guide* ." }, "AWS::RDS::DBSecurityGroup Ingress": { "CIDRIP": "The IP range to authorize.", @@ -38059,9 +38521,9 @@ }, "AWS::RDS::DBSubnetGroup": { "DBSubnetGroupDescription": "The description for the DB subnet group.", - "DBSubnetGroupName": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints: Must contain no more than 255 lowercase alphanumeric characters or hyphens. Must not be \"Default\".\n\nExample: `mysubnetgroup`", + "DBSubnetGroupName": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints:\n\n- Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens.\n- Must not be default.\n- First character must be a letter.\n\nExample: `mydbsubnetgroup`", "SubnetIds": "The EC2 Subnet IDs for the DB subnet group.", - "Tags": "An optional array of key-value pairs to apply to this DB subnet group." + "Tags": "Tags to assign to the DB subnet group." }, "AWS::RDS::DBSubnetGroup Tag": { "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", @@ -38071,8 +38533,8 @@ "Enabled": "Specifies whether to activate the subscription. If the event notification subscription isn't activated, the subscription is created but not active.", "EventCategories": "A list of event categories for a particular source type ( `SourceType` ) that you want to subscribe to. 
You can see a list of the categories for a given source type in the \"Amazon RDS event categories and event messages\" section of the [*Amazon RDS User Guide*](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.Messages.html) or the [*Amazon Aurora User Guide*](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Events.Messages.html) . You can also see this list by using the `DescribeEventCategories` operation.", "SnsTopicArn": "The Amazon Resource Name (ARN) of the SNS topic created for event notification. SNS automatically creates the ARN when you create a topic and subscribe to it.\n\n> RDS doesn't support FIFO (first in, first out) topics. For more information, see [Message ordering and deduplication (FIFO topics)](https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html) in the *Amazon Simple Notification Service Developer Guide* .", - "SourceIds": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If a `SourceIds` value is supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.", - "SourceType": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, set this parameter to `db-instance` . If this value isn't specified, all events are returned.\n\nValid values: `db-instance` | `db-cluster` | `db-parameter-group` | `db-security-group` | `db-snapshot` | `db-cluster-snapshot`", + "SourceIds": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If `SourceIds` are supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.\n- If the source type is an RDS Proxy, a `DBProxyName` value must be supplied.", + "SourceType": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to `db-instance` . For RDS Proxy events, specify `db-proxy` . 
If this value isn't specified, all events are returned.\n\nValid Values: `db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy | zero-etl | custom-engine-version | blue-green-deployment`", "SubscriptionName": "The name of the subscription.\n\nConstraints: The name must be less than 255 characters.", "Tags": "An optional array of key-value pairs to apply to this subscription." }, @@ -38096,7 +38558,7 @@ "IntegrationName": "The name of the integration.", "KMSKeyId": "The AWS Key Management System ( AWS KMS) key identifier for the key to use to encrypt the integration. If you don't specify an encryption key, RDS uses a default AWS owned key.", "SourceArn": "The Amazon Resource Name (ARN) of the database to use as the source for replication.", - "Tags": "A list of tags. For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide.* .", + "Tags": "An optional array of key-value pairs to apply to this integration.", "TargetArn": "The ARN of the Redshift data warehouse to use as the target for replication." }, "AWS::RDS::Integration Tag": { @@ -38106,18 +38568,18 @@ "AWS::RDS::OptionGroup": { "EngineName": "Specifies the name of the engine that this option group should be associated with.\n\nValid Values:\n\n- `mariadb`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", "MajorEngineVersion": "Specifies the major version of the engine that this option group should be associated with.", - "OptionConfigurations": "A list of options and the settings for each option.", + "OptionConfigurations": "A list of all available options for an option group.", "OptionGroupDescription": "The description of the option group.", "OptionGroupName": "The name of the option group to be created.\n\nConstraints:\n\n- Must be 1 to 255 letters, numbers, or hyphens\n- First character must be a letter\n- Can't end with a hyphen or contain two consecutive hyphens\n\nExample: `myoptiongroup`\n\nIf you don't specify a value for `OptionGroupName` property, a name is automatically created for the option group.\n\n> This value is stored as a lowercase string.", - "Tags": "An optional array of key-value pairs to apply to this option group." + "Tags": "Tags to assign to the option group." }, "AWS::RDS::OptionGroup OptionConfiguration": { - "DBSecurityGroupMemberships": "A list of DBSecurityGroupMembership name strings used for this option.", + "DBSecurityGroupMemberships": "A list of DB security groups used for this option.", "OptionName": "The configuration of options to include in a group.", "OptionSettings": "The option settings to include in an option group.", "OptionVersion": "The version for the option.", "Port": "The optional port for the option.", - "VpcSecurityGroupMemberships": "A list of VpcSecurityGroupMembership name strings used for this option." + "VpcSecurityGroupMemberships": "A list of VPC security group names used for this option." }, "AWS::RDS::OptionGroup OptionSetting": { "Name": "The name of the option that has settings that you can set.", @@ -39281,7 +39743,7 @@ "PublicAccessBlockConfiguration": "Configuration that defines how Amazon S3 handles public access.", "ReplicationConfiguration": "Configuration for replicating objects in an S3 bucket. 
To enable replication, you must also enable versioning by using the `VersioningConfiguration` property.\n\nAmazon S3 can store replicated objects in a single destination bucket or multiple destination buckets. The destination bucket or buckets must already exist.",
"Tags": "An arbitrary set of tags (key-value pairs) for this S3 bucket.",
-    "VersioningConfiguration": "Enables multiple versions of all objects in this bucket. You might enable versioning to prevent objects from being deleted or overwritten by mistake or to archive objects so that you can retrieve previous versions of them.",
+    "VersioningConfiguration": "Enables multiple versions of all objects in this bucket. You might enable versioning to prevent objects from being deleted or overwritten by mistake or to archive objects so that you can retrieve previous versions of them.\n\n> When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. We recommend that you wait for 15 minutes after enabling versioning before issuing write operations ( `PUT` or `DELETE` ) on objects in the bucket.",
"WebsiteConfiguration": "Information used to configure the bucket as a static website. For more information, see [Hosting Websites on Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) ."
},
"AWS::S3::Bucket AbortIncompleteMultipartUpload": {
@@ -40514,7 +40976,7 @@
"OperatingSystem": "Defines the operating system the patch baseline applies to. The default value is `WINDOWS` .",
"PatchGroups": "The name of the patch group to be registered with the patch baseline.",
"RejectedPatches": "A list of explicitly rejected patches for the baseline.\n\nFor information about accepted formats for lists of approved patches and rejected patches, see [About package name formats for approved and rejected patch lists](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) in the *AWS Systems Manager User Guide* .",
-    "RejectedPatchesAction": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *BLOCK* : Packages in the *Rejected patches* list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the *Rejected patches* list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as *InstalledRejected* .",
+    "RejectedPatchesAction": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- **ALLOW_AS_DEPENDENCY** - *Linux and macOS* : A package in the rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `INSTALLED_OTHER` . This is the default action if no option is specified.\n\n*Windows Server* : Windows Server doesn't support the concept of package dependencies. If a package is in the rejected patches list and is already installed on the node, its status is reported as `INSTALLED_OTHER` . Any package not already installed on the node is skipped.
This is the default action if no option is specified.\n- **BLOCK** - *All OSs* : Packages in the rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as `INSTALLED_REJECTED` .", "Sources": "Information about the patches to use to update the managed nodes, including target operating systems and source repositories. Applies to Linux managed nodes only.", "Tags": "Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a patch baseline to identify the severity level of patches it specifies and the operating system family it applies to." }, @@ -40827,6 +41289,7 @@ }, "AWS::SageMaker::App ResourceSpec": { "InstanceType": "The instance type that the image version runs on.\n\n> *JupyterServer apps* only support the `system` value.\n> \n> For *KernelGateway apps* , the `system` value is translated to `ml.t3.medium` . KernelGateway apps also support all other values for available instance types.", + "LifecycleConfigArn": "The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource.", "SageMakerImageArn": "The ARN of the SageMaker image that the image version belongs to.", "SageMakerImageVersionArn": "The ARN of the image version created on the instance." }, @@ -41085,11 +41548,13 @@ "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the lifecycle configurations attached to the user profile or domain. To remove a lifecycle config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::Domain JupyterServerAppSettings": { - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app.", + "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the JupyterServerApp. If you use this parameter, the `DefaultResourceSpec` parameter is also required.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::Domain KernelGatewayAppSettings": { "CustomImages": "A list of custom SageMaker images that are configured to run as a KernelGateway app.", - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. 
The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", + "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the user profile or domain.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::Domain RSessionAppSettings": { "CustomImages": "A list of custom SageMaker images that are configured to run as an RSession app.", @@ -41471,6 +41936,20 @@ "Key": "The tag key. Tag keys must be unique per resource.", "Value": "The tag value." }, + "AWS::SageMaker::MlflowTrackingServer": { + "ArtifactStoreUri": "", + "AutomaticModelRegistration": "", + "MlflowVersion": "", + "RoleArn": "", + "Tags": "", + "TrackingServerName": "", + "TrackingServerSize": "", + "WeeklyMaintenanceWindowStart": "" + }, + "AWS::SageMaker::MlflowTrackingServer Tag": { + "Key": "The tag key. Tag keys must be unique per resource.", + "Value": "The tag value." + }, "AWS::SageMaker::Model": { "Containers": "Specifies the containers in the inference pipeline.", "EnableNetworkIsolation": "Isolates the model container. No inbound or outbound network calls can be made to or from the model container.", @@ -41493,6 +41972,9 @@ "ModelPackageName": "The name or Amazon Resource Name (ARN) of the model package to use to create the model.", "MultiModelConfig": "Specifies additional configuration for multi-model endpoints." }, + "AWS::SageMaker::Model HubAccessConfig": { + "HubContentArn": "" + }, "AWS::SageMaker::Model ImageConfig": { "RepositoryAccessMode": "Set this to one of the following values:\n\n- `Platform` - The model image is hosted in Amazon ECR.\n- `Vpc` - The model image is hosted in a private Docker registry in your VPC.", "RepositoryAuthConfig": "(Optional) Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified `Vpc` as the value for the `RepositoryAccessMode` field, and the private Docker registry where the model image is hosted requires authentication." }, @@ -41514,6 +41996,7 @@ }, "AWS::SageMaker::Model S3DataSource": { "CompressionType": "", + "HubAccessConfig": "", "ModelAccessConfig": "", "S3DataType": "If you choose `S3Prefix` , `S3Uri` identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix for model training.\n\nIf you choose `ManifestFile` , `S3Uri` identifies an object that is a manifest file containing a list of object keys that you want SageMaker to use for model training.\n\nIf you choose `AugmentedManifestFile` , S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. `AugmentedManifestFile` can only be used if the Channel's input mode is `Pipe` .", "S3Uri": "Depending on the value specified for the `S3DataType` , identifies either a key name prefix or a manifest. For example:\n\n- A key name prefix might look like this: `s3://bucketname/exampleprefix/`\n- A manifest might look like this: `s3://bucketname/example.manifest`\n\nA manifest is an S3 object which is a JSON file consisting of an array of elements. The first element is a prefix which is followed by one or more suffixes. SageMaker appends the suffix elements to the prefix to get a full set of `S3Uri` . 
Note that the prefix must be a valid non-empty `S3Uri` that precludes users from specifying a manifest whose individual `S3Uri` is sourced from different S3 buckets.\n\nThe following code example shows a valid manifest format:\n\n`[ {\"prefix\": \"s3://customer_bucket/some/prefix/\"},`\n\n`\"relative/path/to/custdata-1\",`\n\n`\"relative/path/custdata-2\",`\n\n`...`\n\n`\"relative/path/custdata-N\"`\n\n`]`\n\nThis JSON is equivalent to the following `S3Uri` list:\n\n`s3://customer_bucket/some/prefix/relative/path/to/custdata-1`\n\n`s3://customer_bucket/some/prefix/relative/path/custdata-2`\n\n`...`\n\n`s3://customer_bucket/some/prefix/relative/path/custdata-N`\n\nThe complete set of `S3Uri` in this manifest is the input data for the channel for this data source. The object that each `S3Uri` points to must be readable by the IAM role that SageMaker uses to perform tasks on your behalf.\n\nYour input bucket must be located in the same AWS Region as your training job." }, @@ -42409,17 +42892,20 @@ "EbsVolumeSizeInGb": "The size of an EBS storage volume for a space." }, "AWS::SageMaker::Space JupyterServerAppSettings": { - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app. If you use the `LifecycleConfigArns` parameter, then this parameter is also required." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app. If you use the `LifecycleConfigArns` parameter, then this parameter is also required.", + "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the JupyterServerApp. If you use this parameter, the `DefaultResourceSpec` parameter is also required.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::Space KernelGatewayAppSettings": { "CustomImages": "A list of custom SageMaker images that are configured to run as a KernelGateway app.", - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", + "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the user profile or domain.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::Space OwnershipSettings": { "OwnerUserProfileName": "The user profile who is the owner of the space." }, "AWS::SageMaker::Space ResourceSpec": { "InstanceType": "The instance type that the image version runs on.\n\n> *JupyterServer apps* only support the `system` value.\n> \n> For *KernelGateway apps* , the `system` value is translated to `ml.t3.medium` . 
KernelGateway apps also support all other values for available instance types.", + "LifecycleConfigArn": "The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource.", "SageMakerImageArn": "The ARN of the SageMaker image that the image version belongs to.", "SageMakerImageVersionArn": "The ARN of the image version created on the instance." }, @@ -42449,6 +42935,16 @@ "Key": "The tag key. Tag keys must be unique per resource.", "Value": "The tag value." }, + "AWS::SageMaker::StudioLifecycleConfig": { + "StudioLifecycleConfigAppType": "The App type to which the Lifecycle Configuration is attached.", + "StudioLifecycleConfigContent": "", + "StudioLifecycleConfigName": "The name of the Amazon SageMaker Studio Lifecycle Configuration.", + "Tags": "" + }, + "AWS::SageMaker::StudioLifecycleConfig Tag": { + "Key": "The tag key. Tag keys must be unique per resource.", + "Value": "The tag value." + }, "AWS::SageMaker::UserProfile": { "DomainId": "The domain ID.", "SingleSignOnUserIdentifier": "A specifier for the type of value specified in SingleSignOnUserValue. Currently, the only supported value is \"UserName\". If the Domain's AuthMode is IAM Identity Center , this field is required. If the Domain's AuthMode is not IAM Identity Center , this field cannot be specified.", @@ -42495,11 +42991,13 @@ "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the lifecycle configurations attached to the user profile or domain. To remove a lifecycle config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::UserProfile JupyterServerAppSettings": { - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app.", + "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the JupyterServerApp. If you use this parameter, the `DefaultResourceSpec` parameter is also required.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::UserProfile KernelGatewayAppSettings": { "CustomImages": "A list of custom SageMaker images that are configured to run as a KernelGateway app.", - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", + "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the user profile or domain.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list."
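The SageMaker entries above describe the same `LifecycleConfigArns` contract in several places: the ARNs attach lifecycle configurations to an app, `DefaultResourceSpec` is required alongside them, and removal is done by setting the property to an empty list. A minimal, hypothetical CloudFormation sketch of that pattern for a user profile's KernelGateway app follows; the domain ID, user profile name, account ID, and lifecycle-config ARN are placeholder values, not values taken from this schema:

```json
{
  "Resources": {
    "ExampleUserProfile": {
      "Type": "AWS::SageMaker::UserProfile",
      "Properties": {
        "DomainId": "d-exampledomain",
        "UserProfileName": "example-user",
        "UserSettings": {
          "KernelGatewayAppSettings": {
            "DefaultResourceSpec": {
              "InstanceType": "ml.t3.medium",
              "LifecycleConfigArn": "arn:aws:sagemaker:us-east-1:111122223333:studio-lifecycle-config/example-config"
            },
            "LifecycleConfigArns": [
              "arn:aws:sagemaker:us-east-1:111122223333:studio-lifecycle-config/example-config"
            ]
          }
        }
      }
    }
  }
}
```

To detach the lifecycle configuration in a later update, keep the property but set `"LifecycleConfigArns": []`, per the removal note in the descriptions above.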
}, "AWS::SageMaker::UserProfile RStudioServerProAppSettings": { "AccessStatus": "Indicates whether the current user has access to the `RStudioServerPro` app.", @@ -42507,6 +43005,7 @@ }, "AWS::SageMaker::UserProfile ResourceSpec": { "InstanceType": "The instance type that the image version runs on.\n\n> *JupyterServer apps* only support the `system` value.\n> \n> For *KernelGateway apps* , the `system` value is translated to `ml.t3.medium` . KernelGateway apps also support all other values for available instance types.", + "LifecycleConfigArn": "The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource.", "SageMakerImageArn": "The ARN of the SageMaker image that the image version belongs to.", "SageMakerImageVersionArn": "The ARN of the image version created on the instance." }, @@ -42870,8 +43369,8 @@ "AdminAccountId": "The AWS account identifier of the account to designate as the Security Hub administrator account." }, "AWS::SecurityHub::FindingAggregator": { - "RegionLinkingMode": "Indicates whether to aggregate findings from all of the available Regions in the current partition. Also determines whether to automatically aggregate findings from new Regions as Security Hub supports them and you opt into them.\n\nThe selected option also determines how to use the Regions provided in the Regions list.\n\nThe options are as follows:\n\n- `ALL_REGIONS` - Indicates to aggregate findings from all of the Regions where Security Hub is enabled. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them.\n- `ALL_REGIONS_EXCEPT_SPECIFIED` - Indicates to aggregate findings from all of the Regions where Security Hub is enabled, except for the Regions listed in the `Regions` parameter. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them.\n- `SPECIFIED_REGIONS` - Indicates to aggregate findings only from the Regions listed in the `Regions` parameter. Security Hub does not automatically aggregate findings from new Regions.", - "Regions": "If `RegionLinkingMode` is `ALL_REGIONS_EXCEPT_SPECIFIED` , then this is a space-separated list of Regions that do not aggregate findings to the aggregation Region.\n\nIf `RegionLinkingMode` is `SPECIFIED_REGIONS` , then this is a space-separated list of Regions that do aggregate findings to the aggregation Region." + "RegionLinkingMode": "Indicates whether to aggregate findings from all of the available Regions in the current partition. Also determines whether to automatically aggregate findings from new Regions as Security Hub supports them and you opt into them.\n\nThe selected option also determines how to use the Regions provided in the Regions list.\n\nThe options are as follows:\n\n- `ALL_REGIONS` - Aggregates findings from all of the Regions where Security Hub is enabled. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them.\n- `ALL_REGIONS_EXCEPT_SPECIFIED` - Aggregates findings from all of the Regions where Security Hub is enabled, except for the Regions listed in the `Regions` parameter. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them.\n- `SPECIFIED_REGIONS` - Aggregates findings only from the Regions listed in the `Regions` parameter. 
Security Hub does not automatically aggregate findings from new Regions.\n- `NO_REGIONS` - Aggregates no data because no Regions are selected as linked Regions.", + "Regions": "If `RegionLinkingMode` is `ALL_REGIONS_EXCEPT_SPECIFIED` , then this is a space-separated list of Regions that do not aggregate findings to the aggregation Region.\n\nIf `RegionLinkingMode` is `SPECIFIED_REGIONS` , then this is a space-separated list of Regions that do aggregate findings to the aggregation Region.\n\nAn `InvalidInputException` error results if you populate this field while `RegionLinkingMode` is `NO_REGIONS` ." }, "AWS::SecurityHub::Hub": { "AutoEnableControls": "Whether to automatically enable new controls when they are added to standards that are enabled.\n\nBy default, this is set to `true` , and new controls are enabled automatically. To not automatically enable new controls, set this to `false` .", @@ -43491,14 +43990,14 @@ "ObjectKey": "The key name of an object in Amazon S3. For more information about Amazon S3 objects and object keys, see [Uploading, downloading, and working with objects in Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/uploading-downloading-objects.html) in the *Amazon Simple Storage Service User Guide* ." }, "AWS::StepFunctions::Activity": { - "EncryptionConfiguration": "", + "EncryptionConfiguration": "Encryption configuration for the activity.\n\nActivity configuration is immutable, and resource names must be unique. To set customer managed keys for encryption, you must create a *new Activity* . If you attempt to change the configuration in your CFN template for an existing activity, you will receive an `ActivityAlreadyExists` exception.\n\nTo update your activity to include customer managed keys, set a new activity name within your AWS CloudFormation template.", "Name": "The name of the activity.\n\nA name must *not* contain:\n\n- white space\n- brackets `< > { } [ ]`\n- wildcard characters `? *`\n- special characters `\" # % \\ ^ | ~ ` $ & , ; : /`\n- control characters ( `U+0000-001F` , `U+007F-009F` )\n\nTo enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _.", "Tags": "The list of tags to add to a resource.\n\nTags may only contain Unicode letters, digits, white space, or these symbols: `_ . : / = + - @` ." }, "AWS::StepFunctions::Activity EncryptionConfiguration": { - "KmsDataKeyReusePeriodSeconds": "", - "KmsKeyId": "", - "Type": "" + "KmsDataKeyReusePeriodSeconds": "Maximum duration that Step Functions will reuse data keys. When the period expires, Step Functions will call `GenerateDataKey` . Only applies to customer managed keys.", + "KmsKeyId": "An alias, alias ARN, key ID, or key ARN of a symmetric encryption AWS KMS key to encrypt data. To specify an AWS KMS key in a different AWS account, you must use the key ARN or alias ARN.", + "Type": "Encryption option for an activity." }, "AWS::StepFunctions::Activity TagsEntry": { "Key": "The `key` for a key-value pair in a tag entry.", "Value": "The `value` for a key-value pair in a tag entry." }, @@ -43509,7 +44008,7 @@ "DefinitionS3Location": "The name of the S3 bucket where the state machine definition is stored. The state machine definition must be a JSON or YAML file.", "DefinitionString": "The Amazon States Language definition of the state machine. The state machine definition must be in JSON. 
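The Step Functions activity entries above fill in the three `EncryptionConfiguration` fields (`Type`, `KmsKeyId`, `KmsDataKeyReusePeriodSeconds`). A minimal, hypothetical sketch of an activity that uses a customer managed key follows; the `CUSTOMER_MANAGED_KMS_KEY` type string, key ARN, and reuse period are illustrative assumptions, not values stated in this diff:

```json
{
  "Resources": {
    "ExampleActivity": {
      "Type": "AWS::StepFunctions::Activity",
      "Properties": {
        "Name": "example-activity",
        "EncryptionConfiguration": {
          "Type": "CUSTOMER_MANAGED_KMS_KEY",
          "KmsKeyId": "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
          "KmsDataKeyReusePeriodSeconds": 300
        }
      }
    }
  }
}
```

Because activity configuration is immutable, as the description above notes, switching an existing activity to a customer managed key means creating an activity with a new name rather than updating it in place.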
See [Amazon States Language](https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html) .", "DefinitionSubstitutions": "A map (string to string) that specifies the mappings for placeholder variables in the state machine definition. This enables the customer to inject values obtained at runtime, for example from intrinsic functions, in the state machine definition. Variables can be template parameter names, resource logical IDs, resource attributes, or a variable in a key-value map.\n\nSubstitutions must follow the syntax: `${key_name}` or `${variable_1,variable_2,...}` .", - "EncryptionConfiguration": "", + "EncryptionConfiguration": "Encryption configuration for the state machine.", "LoggingConfiguration": "Defines what execution history events are logged and where they are logged.\n\n> By default, the `level` is set to `OFF` . For more information see [Log Levels](https://docs.aws.amazon.com/step-functions/latest/dg/cloudwatch-log-level.html) in the AWS Step Functions User Guide.", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role to use for this state machine.", "StateMachineName": "The name of the state machine.\n\nA name must *not* contain:\n\n- white space\n- brackets `< > { } [ ]`\n- wildcard characters `? *`\n- special characters `\" # % \\ ^ | ~ ` $ & , ; : /`\n- control characters ( `U+0000-001F` , `U+007F-009F` )\n\n> If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.", @@ -43521,9 +44020,9 @@ "LogGroupArn": "The ARN of the CloudWatch log group to which you want your logs emitted. The ARN must end with `:*`" }, "AWS::StepFunctions::StateMachine EncryptionConfiguration": { - "KmsDataKeyReusePeriodSeconds": "", - "KmsKeyId": "", - "Type": "" + "KmsDataKeyReusePeriodSeconds": "Maximum duration that Step Functions will reuse data keys. When the period expires, Step Functions will call `GenerateDataKey` . Only applies to customer managed keys.", + "KmsKeyId": "An alias, alias ARN, key ID, or key ARN of a symmetric encryption AWS KMS key to encrypt data. To specify an AWS KMS key in a different AWS account, you must use the key ARN or alias ARN.", + "Type": "Encryption option for a state machine." }, "AWS::StepFunctions::StateMachine LogDestination": { "CloudWatchLogsLogGroup": "An object describing a CloudWatch log group. For more information, see [AWS::Logs::LogGroup](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-loggroup.html) in the AWS CloudFormation User Guide." }, @@ -43552,7 +44051,7 @@ "RoutingConfiguration": "The routing configuration of an alias. Routing configuration splits [StartExecution](https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html) requests between one or two versions of the same state machine.\n\nUse `RoutingConfiguration` if you want to explicitly set the alias [weights](https://docs.aws.amazon.com/step-functions/latest/apireference/API_RoutingConfigurationListItem.html#StepFunctions-Type-RoutingConfigurationListItem-weight) . Weight is the percentage of traffic you want to route to a state machine version.\n\n> `RoutingConfiguration` and `DeploymentPreference` are mutually exclusive properties. You must define only one of these properties." }, "AWS::StepFunctions::StateMachineAlias DeploymentPreference": { - "Alarms": "A list of Amazon CloudWatch alarms to be monitored during the deployment. 
The deployment fails and rolls back if any of these alarms go into the `ALARM` state.", + "Alarms": "A list of Amazon CloudWatch alarm names to be monitored during the deployment. The deployment fails and rolls back if any of these alarms go into the `ALARM` state.\n\n> Amazon CloudWatch considers nonexistent alarms to have an `OK` state. If you provide an invalid alarm name or provide the ARN of an alarm instead of its name, your deployment may not roll back correctly.", "Interval": "The time in minutes between each traffic shifting increment.", "Percentage": "The percentage of traffic to shift to the new version in each increment.", "StateMachineVersionArn": "The Amazon Resource Name (ARN) of the [`AWS::StepFunctions::StateMachineVersion`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachineversion.html) resource that will be the final version to which the alias points when the traffic shifting is complete.\n\nWhile performing gradual deployments, you can only provide a single state machine version ARN. To explicitly set version weights in a CloudFormation template, use `RoutingConfiguration` instead.", @@ -45291,7 +45790,7 @@ "DesiredSoftwareSetId": "The ID of the software set to apply.", "DesktopArn": "The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Web, or AppStream 2.0.", "DesktopEndpoint": "The URL for the identity provider login (only for environments that use AppStream 2.0).", - "DeviceCreationTags": "\"The tag keys and optional values for the newly created devices for this environment.\"", + "DeviceCreationTags": "The tag keys and optional values for the newly created devices for this environment.", "KmsKeyArn": "The Amazon Resource Name (ARN) of the AWS Key Management Service key used to encrypt the environment.", "MaintenanceWindow": "A specification for a time window to apply software updates.", "Name": "The name of the environment.", @@ -45394,6 +45893,7 @@ "CookieSynchronizationConfiguration": "The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.", "CopyAllowed": "Specifies whether the user can copy text from the streaming session to the local device.", "CustomerManagedKey": "The customer managed key used to encrypt sensitive information in the user settings.", + "DeepLinkAllowed": "Specifies whether the user can use deep links that open automatically when connecting to a session.", "DisconnectTimeoutInMinutes": "The amount of time that a streaming session remains active after users disconnect.", "DownloadAllowed": "Specifies whether the user can download files from the streaming session to the local device.", "IdleDisconnectTimeoutInMinutes": "The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the disconnect timeout interval begins.", diff --git a/schema_source/cloudformation.schema.json b/schema_source/cloudformation.schema.json index 2eb1dfd67..3bc8a4533 100644 --- a/schema_source/cloudformation.schema.json +++ b/schema_source/cloudformation.schema.json @@ -26156,7 +26156,7 @@ "type": "object" }, "BackupVaultName": { - "markdownDescription": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. 
They consist of lowercase letters, numbers, and hyphens.", + "markdownDescription": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created.", "title": "BackupVaultName", "type": "string" }, @@ -33226,7 +33226,7 @@ "items": { "$ref": "#/definitions/AWS::CleanRooms::ConfiguredTable.AnalysisRule" }, - "markdownDescription": "The entire created analysis rule.", + "markdownDescription": "The analysis rule that was created for the configured table.", "title": "AnalysisRules", "type": "array" }, @@ -33790,7 +33790,7 @@ "properties": { "S3": { "$ref": "#/definitions/AWS::CleanRooms::Membership.ProtectedQueryS3OutputConfiguration", - "markdownDescription": "Required configuration for a protected query with an `S3` output type.", + "markdownDescription": "Required configuration for a protected query with an `s3` output type.", "title": "S3" } }, @@ -33904,7 +33904,7 @@ }, "Parameters": { "$ref": "#/definitions/AWS::CleanRooms::PrivacyBudgetTemplate.Parameters", - "markdownDescription": "Specifies the epislon and noise parameters for the privacy budget template.", + "markdownDescription": "Specifies the epsilon and noise parameters for the privacy budget template.", "title": "Parameters" }, "PrivacyBudgetType": { @@ -35460,7 +35460,7 @@ "additionalProperties": false, "properties": { "AccountFilterType": { - "markdownDescription": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSets deploys to the accounts specified in `Accounts` parameter.\n- `DIFFERENCE` : StackSets excludes the accounts specified in `Accounts` parameter. This enables user to avoid certain accounts within an OU such as suspended accounts.\n- `UNION` : StackSets includes additional accounts deployment targets.\n\nThis is the default value if `AccountFilterType` is not provided. This enables user to update an entire OU and individual accounts from a different OU in one request, which used to be two separate requests.\n- `NONE` : Deploys to all the accounts in specified organizational units (OU).", + "markdownDescription": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSet deploys to the accounts specified in the `Accounts` parameter.\n- `DIFFERENCE` : StackSet deploys to the OU, excluding the accounts specified in the `Accounts` parameter.\n- `UNION` : StackSet deploys to the OU, and the accounts specified in the `Accounts` parameter. 
`UNION` is not supported for create operations when using StackSet as a resource.", "title": "AccountFilterType", "type": "string" }, @@ -36294,12 +36294,12 @@ "additionalProperties": false, "properties": { "Header": { - "markdownDescription": "", + "markdownDescription": "The name of the HTTP header that CloudFront uses for the single header policy.", "title": "Header", "type": "string" }, "Value": { - "markdownDescription": "", + "markdownDescription": "Specifies the value to assign to the header for a single header policy.", "title": "Value", "type": "string" } }, @@ -36334,11 +36334,11 @@ "properties": { "SessionStickinessConfig": { "$ref": "#/definitions/AWS::CloudFront::ContinuousDeploymentPolicy.SessionStickinessConfig", - "markdownDescription": "", + "markdownDescription": "Enable session stickiness for the associated origin or cache settings.", "title": "SessionStickinessConfig" }, "Weight": { - "markdownDescription": "", + "markdownDescription": "The percentage of requests that CloudFront sends to an associated origin or cache settings.", "title": "Weight", "type": "number" } }, @@ -36807,7 +36807,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "An alias for the CloudFront distribution's domain name.\n\n> This property is legacy. We recommend that you use [Aliases](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-distributionconfig.html#cfn-cloudfront-distribution-distributionconfig-aliases) instead.", "title": "CNAMEs", "type": "array" }, @@ -36839,7 +36839,7 @@ }, "CustomOrigin": { "$ref": "#/definitions/AWS::CloudFront::Distribution.LegacyCustomOrigin", - "markdownDescription": "", + "markdownDescription": "The user-defined HTTP server that serves as the origin for content that CloudFront distributes.\n\n> This property is legacy. We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", "title": "CustomOrigin" }, @@ -36897,7 +36897,7 @@ }, "S3Origin": { "$ref": "#/definitions/AWS::CloudFront::Distribution.LegacyS3Origin", - "markdownDescription": "", + "markdownDescription": "The origin as an Amazon S3 bucket.\n\n> This property is legacy. We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", "title": "S3Origin" }, "Staging": { @@ -37020,22 +37020,22 @@ "additionalProperties": false, "properties": { "DNSName": { - "markdownDescription": "", + "markdownDescription": "The domain name assigned to your CloudFront distribution.", "title": "DNSName", "type": "string" }, "HTTPPort": { - "markdownDescription": "", + "markdownDescription": "The HTTP port that CloudFront uses to connect to the origin. Specify the HTTP port that the origin listens on.", "title": "HTTPPort", "type": "number" }, "HTTPSPort": { - "markdownDescription": "", + "markdownDescription": "The HTTPS port that CloudFront uses to connect to the origin. 
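The `Header` and `Value` entries above configure the single-header routing mode of a CloudFront continuous deployment policy. A minimal, hypothetical `TrafficConfig` fragment using them follows; the `SingleHeader` type string and the header name and value are illustrative assumptions, not values from this diff:

```json
{
  "TrafficConfig": {
    "Type": "SingleHeader",
    "SingleHeaderConfig": {
      "Header": "aws-cf-cd-example",
      "Value": "staging"
    }
  }
}
```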
Specify the HTTPS port that the origin listens on.", "title": "HTTPSPort", "type": "number" }, "OriginProtocolPolicy": { - "markdownDescription": "", + "markdownDescription": "Specifies the protocol (HTTP or HTTPS) that CloudFront uses to connect to the origin.", "title": "OriginProtocolPolicy", "type": "string" }, @@ -37043,7 +37043,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "The minimum SSL/TLS protocol version that CloudFront uses when communicating with your origin server over HTTPS.\n\nFor more information, see [Minimum Origin SSL Protocol](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginSSLProtocols) in the *Amazon CloudFront Developer Guide* .", "title": "OriginSSLProtocols", "type": "array" } @@ -37059,12 +37059,12 @@ "additionalProperties": false, "properties": { "DNSName": { - "markdownDescription": "", + "markdownDescription": "The domain name assigned to your CloudFront distribution.", "title": "DNSName", "type": "string" }, "OriginAccessIdentity": { - "markdownDescription": "", + "markdownDescription": "The CloudFront origin access identity to associate with the distribution. Use an origin access identity to configure the distribution so that end users can only access objects in an Amazon S3 bucket through CloudFront .\n\n> This property is legacy. We recommend that you use [OriginAccessControl](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudfront-originaccesscontrol.html) instead.", "title": "OriginAccessIdentity", "type": "string" } @@ -39277,7 +39277,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . 
You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. 
For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::One::UKey`\n- `AWS::One::User`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::RUM::AppMonitor`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", "title": "Field", "type": "string" }, @@ -39600,7 +39600,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::One::UKey`\n- `AWS::One::User`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::RUM::AppMonitor`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. 
For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", "title": "Field", "type": "string" }, @@ -44503,7 +44503,7 @@ "additionalProperties": false, "properties": { "Authentication": { - "markdownDescription": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", + "markdownDescription": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be masked in the response. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", "title": "Authentication", "type": "string" }, @@ -44586,7 +44586,7 @@ "type": "string" }, "SecretToken": { - "markdownDescription": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. 
Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities.", + "markdownDescription": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be masked in the response.", "title": "SecretToken", "type": "string" } @@ -64622,7 +64622,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name of the farm.", + "markdownDescription": "The display name of the farm.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -64704,7 +64704,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name of the fleet summary to update.", + "markdownDescription": "The display name of the fleet summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -65351,7 +65351,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name of the queue summary to update.", + "markdownDescription": "The display name of the queue summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -65700,7 +65700,7 @@ "additionalProperties": false, "properties": { "DisplayName": { - "markdownDescription": "The display name of the storage profile summary to update.", + "markdownDescription": "The display name of the storage profile summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -69794,7 +69794,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", + "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. 
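As a worked example of the rounding rule just described (the instance type and IDs below are placeholders): with a total target of 10 units and a weight of 4 units per instance, 10 / 4 = 2.5, so Amazon EC2 rounds up and launches 3 instances.

```yaml
# Minimal sketch of weighted capacity in an EC2 Fleet (IDs are placeholders).
ComputeFleet:
  Type: AWS::EC2::EC2Fleet
  Properties:
    TargetCapacitySpecification:
      TotalTargetCapacity: 10            # measured in the same units as WeightedCapacity
      DefaultTargetCapacityType: spot
    LaunchTemplateConfigs:
      - LaunchTemplateSpecification:
          LaunchTemplateId: lt-0123456789abcdef0
          Version: "1"
        Overrides:
          - InstanceType: m5.4xlarge
            WeightedCapacity: 4          # 10 / 4 = 2.5 -> rounded up to 3 instances
```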
If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -71163,7 +71163,7 @@ "type": "string" }, "Locale": { - "markdownDescription": "The locale of the IPAM pool. In IPAM, the locale is the AWS Region where you want to make an IPAM pool available for allocations. Only resources in the same Region as the locale of the pool can get IP address allocations from the pool. You can only allocate a CIDR for a VPC, for example, from an IPAM pool that shares a locale with the VPC\u2019s Region. Note that once you choose a Locale for a pool, you cannot modify it. If you choose an AWS Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.", + "markdownDescription": "The locale of the IPAM pool.\n\nThe locale for the pool should be one of the following:\n\n- An AWS Region where you want this IPAM pool to be available for allocations.\n- The network border group for an AWS Local Zone where you want this IPAM pool to be available for allocations ( [supported Local Zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#byoip-zone-avail) ). This option is only available for IPAM IPv4 pools in the public scope.\n\nIf you choose an AWS Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.", "title": "Locale", "type": "string" }, @@ -77649,7 +77649,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", + "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowestPrice` and `priceCapacityOptimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -77872,7 +77872,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type. 
These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.", + "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowestPrice` and `priceCapacityOptimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -78197,7 +78197,7 @@ "type": "string" }, "EnableDns64": { - "markdownDescription": "Indicates whether DNS queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses for IPv4-only destinations. For more information, see [DNS64 and NAT64](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-nat64-dns64) in the *Amazon Virtual Private Cloud User Guide* .", + "markdownDescription": "Indicates whether DNS queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses for IPv4-only destinations.\n\n> You must first configure a NAT gateway in a public subnet (separate from the subnet containing the IPv6-only workloads). For example, the subnet containing the NAT gateway should have a `0.0.0.0/0` route pointing to the internet gateway. For more information, see [Configure DNS64 and NAT64](https://docs.aws.amazon.com/vpc/latest/userguide/nat-gateway-nat64-dns64.html#nat-gateway-nat64-dns64-walkthrough) in the *Amazon Virtual Private Cloud User Guide* .", "title": "EnableDns64", "type": "boolean" }, @@ -82778,7 +82778,7 @@ "additionalProperties": false, "properties": { "EncryptionType": { - "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. 
For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", + "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", "title": "EncryptionType", "type": "string" }, @@ -82859,37 +82859,37 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "A list of enumerable strings representing the repository creation scenarios this template applies to. The two supported scenarios are PULL_THROUGH_CACHE and REPLICATION.", "title": "AppliedFor", "type": "array" }, "Description": { - "markdownDescription": "", + "markdownDescription": "The description associated with the repository creation template.", "title": "Description", "type": "string" }, "EncryptionConfiguration": { "$ref": "#/definitions/AWS::ECR::RepositoryCreationTemplate.EncryptionConfiguration", - "markdownDescription": "The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest.\n\nBy default, when no encryption configuration is set or the `AES256` encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts your data at rest using an AES-256 encryption algorithm. This does not require any action on your part.\n\nFor more control over the encryption of the contents of your repository, you can use server-side encryption with AWS Key Management Service key stored in AWS Key Management Service ( AWS KMS ) to encrypt your images. For more information, see [Amazon ECR encryption at rest](https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) in the *Amazon Elastic Container Registry User Guide* .", + "markdownDescription": "The encryption configuration associated with the repository creation template.", "title": "EncryptionConfiguration" }, "ImageTagMutability": { - "markdownDescription": "", + "markdownDescription": "The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten.
If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten.", "title": "ImageTagMutability", "type": "string" }, "LifecyclePolicy": { - "markdownDescription": "", + "markdownDescription": "The lifecycle policy to use for repositories created using the template.", "title": "LifecyclePolicy", "type": "string" }, "Prefix": { - "markdownDescription": "", + "markdownDescription": "The repository namespace prefix associated with the repository creation template.", "title": "Prefix", "type": "string" }, "RepositoryPolicy": { - "markdownDescription": "", + "markdownDescription": "The repository policy to apply to repositories created using the template. A repository policy is a permissions policy associated with a repository to control access permissions.", "title": "RepositoryPolicy", "type": "string" }, @@ -82897,7 +82897,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The tags attached to the resource.", + "markdownDescription": "The metadata to apply to the repository to help you categorize and organize. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.", "title": "ResourceTags", "type": "array" } @@ -82933,7 +82933,7 @@ "additionalProperties": false, "properties": { "EncryptionType": { - "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", + "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm.
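Pulling the repository creation template properties described above together, a minimal sketch (the prefix, description, and logical ID are placeholders):

```yaml
# Minimal sketch of an ECR repository creation template (values are placeholders).
CacheRepoTemplate:
  Type: AWS::ECR::RepositoryCreationTemplate
  Properties:
    Prefix: ecr-public                  # repository namespace prefix
    Description: Template for pull-through cache repositories
    AppliedFor:
      - PULL_THROUGH_CACHE              # or REPLICATION
    ImageTagMutability: IMMUTABLE       # tags cannot be overwritten once pushed
    EncryptionConfiguration:
      EncryptionType: AES256            # default S3-managed encryption
```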
For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", "title": "EncryptionType", "type": "string" }, @@ -90340,12 +90340,12 @@ "type": "string" }, "AutoMinorVersionUpgrade": { - "markdownDescription": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "markdownDescription": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", "title": "AutoMinorVersionUpgrade", "type": "boolean" }, "CacheNodeType": { - "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . 
\n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis configuration variables `appendonly` and `appendfsync` are not supported on Redis version 2.8.22 and later.", + "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . 
\n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis OSS configuration variables `appendonly` and `appendfsync` are not supported on Redis OSS version 2.8.22 and later.", "title": "CacheNodeType", "type": "string" }, @@ -90383,7 +90383,7 @@ "type": "string" }, "IpDiscovery": { - "markdownDescription": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "IpDiscovery", "type": "string" }, @@ -90396,7 +90396,7 @@ "type": "array" }, "NetworkType": { - "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "NetworkType", "type": "string" }, @@ -90437,12 +90437,12 @@ "items": { "type": "string" }, - "markdownDescription": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "markdownDescription": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). 
The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "title": "SnapshotArns", "type": "array" }, "SnapshotName": { - "markdownDescription": "The name of a Redis snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", + "markdownDescription": "The name of a Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", "title": "SnapshotName", "type": "string" }, @@ -90618,7 +90618,7 @@ "additionalProperties": false, "properties": { "AutomaticFailoverEnabled": { - "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis (cluster mode enabled) replication groups.", + "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.", "title": "AutomaticFailoverEnabled", "type": "boolean" }, @@ -90633,7 +90633,7 @@ "type": "string" }, "EngineVersion": { - "markdownDescription": "The Elasticache Redis engine version.", + "markdownDescription": "The ElastiCache Redis OSS engine version.", "title": "EngineVersion", "type": "string" }, @@ -90744,7 +90744,7 @@ "additionalProperties": false, "properties": { "NodeGroupId": { - "markdownDescription": "Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "markdownDescription": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "title": "NodeGroupId", "type": "string" }, @@ -90887,22 +90887,22 @@ "additionalProperties": false, "properties": { "AtRestEncryptionEnabled": { - "markdownDescription": "A flag that enables encryption at rest when set to `true` .\n\nYou cannot modify the value of `AtRestEncryptionEnabled` after the replication group is created.
To enable encryption at rest on a replication group you must set `AtRestEncryptionEnabled` to `true` when you create the replication group.\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`", "title": "AtRestEncryptionEnabled", "type": "boolean" }, "AuthToken": { - "markdownDescription": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "markdownDescription": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis OSS AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "title": "AuthToken", "type": "string" }, "AutoMinorVersionUpgrade": { - "markdownDescription": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "markdownDescription": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. 
This parameter is disabled for previous versions.", "title": "AutoMinorVersionUpgrade", "type": "boolean" }, "AutomaticFailoverEnabled": { - "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis (cluster mode enabled) replication groups.\n\nDefault: false", + "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.\n\nDefault: false", "title": "AutomaticFailoverEnabled", "type": "boolean" }, @@ -90912,7 +90912,7 @@ "type": "string" }, "CacheParameterGroupName": { - "markdownDescription": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", + "markdownDescription": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis OSS (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis OSS (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", "title": "CacheParameterGroupName", "type": "string" }, @@ -90930,7 +90930,7 @@ "type": "string" }, "ClusterMode": { - "markdownDescription": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", + "markdownDescription": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", "title": "ClusterMode", "type": "string" }, @@ -90955,7 +90955,7 @@ "type": "string" }, "IpDiscovery": { - "markdownDescription": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . 
IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "IpDiscovery", "type": "string" }, @@ -90978,7 +90978,7 @@ "type": "boolean" }, "NetworkType": { - "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "NetworkType", "type": "string" }, @@ -90986,7 +90986,7 @@ "items": { "$ref": "#/definitions/AWS::ElastiCache::ReplicationGroup.NodeGroupConfiguration" }, - "markdownDescription": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "markdownDescription": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis OSS cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "title": "NodeGroupConfiguration", "type": "array" }, @@ -91001,7 +91001,7 @@ "type": "number" }, "NumNodeGroups": { - "markdownDescription": "An optional parameter that specifies the number of node groups (shards) for this Redis (cluster mode enabled) replication group. For Redis (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. 
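A minimal sketch of the update behavior just described, with placeholder sizing values: when the `UseOnlineResharding` update policy is `true`, `NumNodeGroups` can be changed in place instead of forcing replacement.

```yaml
# Minimal sketch: cluster-mode-enabled group whose shard count can be
# updated without replacement (sizing values are placeholders).
ShardedReplicationGroup:
  Type: AWS::ElastiCache::ReplicationGroup
  UpdatePolicy:
    UseOnlineResharding: true           # allows in-place NumNodeGroups updates
  Properties:
    ReplicationGroupDescription: Cluster mode enabled example
    Engine: redis
    CacheNodeType: cache.r6g.large
    NumNodeGroups: 3                    # shards
    ReplicasPerNodeGroup: 2
    AutomaticFailoverEnabled: true      # required for cluster mode enabled
```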
When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", + "markdownDescription": "An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", "title": "NumNodeGroups", "type": "number" }, @@ -91055,7 +91055,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "markdownDescription": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "title": "SnapshotArns", "type": "array" }, @@ -91075,7 +91075,7 @@ "type": "string" }, "SnapshottingClusterId": { - "markdownDescription": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups.", + "markdownDescription": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups.", "title": "SnapshottingClusterId", "type": "string" }, @@ -91088,12 +91088,12 @@ "type": "array" }, "TransitEncryptionEnabled": { - "markdownDescription": "A flag that enables in-transit encryption when set to `true` .\n\nYou cannot modify the value of `TransitEncryptionEnabled` after the cluster is created. 
To enable in-transit encryption on a cluster you must set `TransitEncryptionEnabled` to `true` when you create a cluster.\n\nThis parameter is valid only if the `Engine` parameter is `redis` , the `EngineVersion` parameter is `3.2.6` or `4.x` onward, and the cluster is being created in an Amazon VPC.\n\nIf you enable in-transit encryption, you must also specify a value for `CacheSubnetGroup` .\n\n*Required:* Only available when creating a replication group in an Amazon VPC using redis version `3.2.6` or `4.x` onward.\n\nDefault: `false`\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` .", + "markdownDescription": "A flag that enables in-transit encryption when set to `true` .\n\nYou cannot modify the value of `TransitEncryptionEnabled` after the cluster is created. To enable in-transit encryption on a cluster you must set `TransitEncryptionEnabled` to `true` when you create a cluster.\n\nThis parameter is valid only if the `Engine` parameter is `redis` , the `EngineVersion` parameter is `3.2.6` or `4.x` onward, and the cluster is being created in an Amazon VPC.\n\nIf you enable in-transit encryption, you must also specify a value for `CacheSubnetGroup` .\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` .", "title": "TransitEncryptionEnabled", "type": "boolean" }, "TransitEncryptionMode": { - "markdownDescription": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", + "markdownDescription": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. 
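A minimal sketch of that two-step migration, assuming placeholder names: start with `preferred` so both encrypted and unencrypted clients can connect, then update the same property to `required` once every client uses TLS.

```yaml
# Minimal sketch: step one of the in-transit encryption migration.
MigratingReplicationGroup:
  Type: AWS::ElastiCache::ReplicationGroup
  Properties:
    ReplicationGroupDescription: Transit encryption migration example
    Engine: redis
    CacheNodeType: cache.t4g.medium
    TransitEncryptionEnabled: true
    TransitEncryptionMode: preferred    # later updated to required, with no replacement
```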
Once you migrate all your Redis OSS clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", "title": "TransitEncryptionMode", "type": "string" }, @@ -91212,7 +91212,7 @@ "additionalProperties": false, "properties": { "NodeGroupId": { - "markdownDescription": "Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "markdownDescription": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "title": "NodeGroupId", "type": "string" }, @@ -91436,7 +91436,7 @@ "title": "CacheUsageLimits" }, "DailySnapshotTime": { - "markdownDescription": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis and Serverless Memcached only.", + "markdownDescription": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis OSS and Serverless Memcached only.", "title": "DailySnapshotTime", "type": "string" }, @@ -91497,7 +91497,7 @@ "type": "array" }, "SnapshotRetentionLimit": { - "markdownDescription": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis and Serverless Memcached only.", + "markdownDescription": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis OSS and Serverless Memcached only.", "title": "SnapshotRetentionLimit", "type": "number" }, @@ -91518,7 +91518,7 @@ "type": "array" }, "UserGroupId": { - "markdownDescription": "The identifier of the user group associated with the serverless cache. Available for Redis only. Default is NULL.", + "markdownDescription": "The identifier of the user group associated with the serverless cache. Available for Redis OSS only. Default is NULL.", "title": "UserGroupId", "type": "string" } @@ -95049,7 +95049,7 @@ }, "IdMappingTechniques": { "$ref": "#/definitions/AWS::EntityResolution::IdMappingWorkflow.IdMappingTechniques", - "markdownDescription": "An object which defines the `idMappingType` and the `providerProperties` .", + "markdownDescription": "An object which defines the ID mapping technique and any additional configurations.", "title": "IdMappingTechniques" }, "InputSourceConfig": { @@ -95136,7 +95136,7 @@ "additionalProperties": false, "properties": { "InputSourceARN": { - "markdownDescription": "An AWS Glue table ARN for the input source table.", + "markdownDescription": "An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.", "title": "InputSourceARN", "type": "string" }, @@ -95146,7 +95146,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve to.", + "markdownDescription": "The type of ID namespace. 
There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` which all `sourceIds` will resolve to.", "title": "Type", "type": "string" } @@ -95294,7 +95294,7 @@ "type": "array" }, "Type": { - "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve to.", + "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` which all `sourceIds` will resolve to.", "title": "Type", "type": "string" } @@ -95349,7 +95349,7 @@ "additionalProperties": false, "properties": { "InputSourceARN": { - "markdownDescription": "An AWS Glue table ARN for the input source table.", + "markdownDescription": "An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.", "title": "InputSourceARN", "type": "string" }, @@ -95668,7 +95668,7 @@ "additionalProperties": false, "properties": { "AttributeMatchingModel": { - "markdownDescription": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the AttributeMatchingModel. When choosing `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of `BusinessEmail` field of Profile B matches, the two profiles are matched on the `Email` type. When choosing `ONE_TO_ONE` ,the system can only match if the sub-types are exact matches. For example, only when the value of the `Email` field of Profile A and the value of the `Email` field of Profile B matches, the two profiles are matched on the `Email` type.", + "markdownDescription": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the `attributeMatchingModel` .\n\nIf you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of `BusinessEmail` field of Profile B matches, the two profiles are matched on the `Email` attribute type.\n\nIf you choose `ONE_TO_ONE` , the system can only match attributes if the sub-types are an exact match. For example, for the `Email` attribute type, the system will only consider it a match if the value of the `Email` field of Profile A matches the value of the `Email` field of Profile B.", "title": "AttributeMatchingModel", "type": "string" }, @@ -95889,7 +95889,7 @@ "type": "string" }, "MatchKey": { - "markdownDescription": "A key that allows grouping of multiple input attributes into a unified matching group. For example, consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . By assigning a `matchKey` called `address` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group. 
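A minimal sketch of the `matchKey` grouping just described, reusing the hypothetical `business_address` and `shipping_address` columns from the example above (the schema name is a placeholder):

```yaml
# Minimal sketch: two address columns grouped under one matchKey
# (schema and field names are placeholders).
CustomerSchemaMapping:
  Type: AWS::EntityResolution::SchemaMapping
  Properties:
    SchemaName: customer-schema
    MappedInputFields:
      - FieldName: business_address
        Type: ADDRESS
        MatchKey: address               # both columns match as one "address" group
      - FieldName: shipping_address
        Type: ADDRESS
        MatchKey: address
```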
If no `matchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.", "title": "MatchKey", "type": "string" }, @@ -103339,7 +103339,7 @@ "items": { "$ref": "#/definitions/AWS::GameLift::Fleet.LocationConfiguration" }, - "markdownDescription": "A set of remote locations to deploy additional instances to and manage as part of the fleet. This parameter can only be used when creating fleets in AWS Regions that support multiple locations. You can add any Amazon GameLift-supported AWS Region as a remote location, in the form of an AWS Region code, such as `us-west-2` or Local Zone code. To create a fleet with instances in the home Region only, don't set this parameter.\n\nWhen using this parameter, Amazon GameLift requires you to include your home location in the request.", + "markdownDescription": "A set of remote locations to deploy additional instances to and manage as a multi-location fleet. Use this parameter when creating a fleet in AWS Regions that support multiple locations. You can add any AWS Region or Local Zone that's supported by Amazon GameLift. Provide a list of one or more AWS Region codes, such as `us-west-2` , or Local Zone names. When using this parameter, Amazon GameLift requires you to include your home location in the request. For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "title": "Locations", "type": "array" }, @@ -103585,7 +103585,7 @@ "additionalProperties": false, "properties": { "Location": { - "markdownDescription": "An AWS Region code, such as `us-west-2` .", + "markdownDescription": "An AWS Region code, such as `us-west-2` . For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "title": "Location", "type": "string" }, @@ -104178,7 +104178,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of labels to assign to the new matchmaking configuration resource. Tags are developer-defined key-value pairs. Tagging AWS resources are useful for resource management, access management and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Rareference* .", + "markdownDescription": "A list of labels to assign to the new resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management, and cost allocation.
For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference* .", "title": "Tags", "type": "array" } @@ -114591,7 +114591,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .", + "markdownDescription": "A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .\n\nThis property is optional. If it is not included, IAM will retrieve and use the top intermediate certificate authority (CA) thumbprint of the OpenID Connect identity provider server certificate.", "title": "ThumbprintList", "type": "array" }, @@ -129666,7 +129666,7 @@ "type": "array" }, "AssetModelName": { - "markdownDescription": "A unique, friendly name for the asset model.", + "markdownDescription": "A unique name for the asset model.", "title": "AssetModelName", "type": "string" }, @@ -130201,7 +130201,7 @@ "type": "array" }, "GatewayName": { - "markdownDescription": "A unique, friendly name for the gateway.", + "markdownDescription": "A unique name for the gateway.", "title": "GatewayName", "type": "string" }, @@ -140464,7 +140464,7 @@ }, "ProcessingConfiguration": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.ProcessingConfiguration", - "markdownDescription": "", + "markdownDescription": "Specifies configuration for Snowflake.", "title": "ProcessingConfiguration" }, "RetryOptions": { @@ -153407,7 +153407,7 @@ "type": "object" }, "AirflowVersion": { - "markdownDescription": "The version of Apache Airflow to use for the environment. If no value is specified, defaults to the latest version.\n\nIf you specify a newer version number for an existing environment, the version update requires some service interruption before taking effect.\n\n*Allowed Values* : `1.10.12` | `2.0.2` | `2.2.2` | `2.4.3` | `2.5.1` | `2.6.3` | `2.7.2` (latest)", + "markdownDescription": "The version of Apache Airflow to use for the environment. If no value is specified, defaults to the latest version.\n\nIf you specify a newer version number for an existing environment, the version update requires some service interruption before taking effect.\n\n*Allowed Values* : `1.10.12` | `2.0.2` | `2.2.2` | `2.4.3` | `2.5.1` | `2.6.3` | `2.7.2` | `2.8.1` | `2.9.2` (latest)", "title": "AirflowVersion", "type": "string" }, @@ -167328,7 +167328,7 @@ "properties": { "LogDestination": { "additionalProperties": true, - "markdownDescription": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` . The following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` .
The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", + "markdownDescription": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` .\n\nThe following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -182758,7 +182758,7 @@ "items": { "type": "string" }, - "markdownDescription": "Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.", + "markdownDescription": "Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups.", "title": "SecurityGroup", "type": "array" }, @@ -224578,7 +224578,7 @@ "type": "array" }, "BacktrackWindow": { - "markdownDescription": "The target backtrack window, in seconds. To disable backtracking, set this value to 0.\n\n> Currently, Backtrack is only supported for Aurora MySQL DB clusters. \n\nDefault: 0\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).\n\nValid for: Aurora MySQL DB clusters only", + "markdownDescription": "The target backtrack window, in seconds. To disable backtracking, set this value to `0` .\n\nValid for Cluster Type: Aurora MySQL DB clusters only\n\nDefault: `0`\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).", "title": "BacktrackWindow", "type": "number" }, @@ -224761,7 +224761,7 @@ "type": "string" }, "PubliclyAccessible": { - "markdownDescription": "Specifies whether the DB cluster is publicly accessible.\n\nWhen the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. 
That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.\n\nWhen the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nDefault: The default behavior varies depending on whether `DBSubnetGroupName` is specified.\n\nIf `DBSubnetGroupName` isn't specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the default VPC in the target Region doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.\n\nIf `DBSubnetGroupName` is specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the subnets are part of a VPC that doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.", + "markdownDescription": "Specifies whether the DB cluster is publicly accessible.\n\nWhen the DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB cluster, the endpoint resolves to the private IP address. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.\n\nWhen the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nDefault: The default behavior varies depending on whether `DBSubnetGroupName` is specified.\n\nIf `DBSubnetGroupName` isn't specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the default VPC in the target Region doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.\n\nIf `DBSubnetGroupName` is specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the subnets are part of a VPC that doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.", "title": "PubliclyAccessible", "type": "boolean" }, @@ -224819,7 +224819,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB cluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", + "markdownDescription": "Tags to assign to the DB cluster.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", "title": "Tags", "type": "array" }, @@ -224903,7 +224903,7 @@ "type": "string" }, "SecretArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the secret.", + "markdownDescription": "The Amazon Resource Name (ARN) of the secret. This parameter is a return value that you can retrieve using the `Fn::GetAtt` intrinsic function. 
For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html#aws-resource-rds-dbcluster-return-values) .", "title": "SecretArn", "type": "string" } @@ -225009,17 +225009,17 @@ "additionalProperties": false, "properties": { "DBClusterParameterGroupName": { - "markdownDescription": "The name of the DB cluster parameter group.\n\nConstraints:\n\n- Must not match the name of an existing DB cluster parameter group.\n\nIf you don't specify a value for `DBClusterParameterGroupName` property, a name is automatically created for the DB cluster parameter group.\n\n> This value is stored as a lowercase string.", + "markdownDescription": "The name of the DB cluster parameter group.\n\nConstraints:\n\n- Must not match the name of an existing DB cluster parameter group.\n\n> This value is stored as a lowercase string.", "title": "DBClusterParameterGroupName", "type": "string" }, "Description": { - "markdownDescription": "A friendly description for this DB cluster parameter group.", + "markdownDescription": "The description for the DB cluster parameter group.", "title": "Description", "type": "string" }, "Family": { - "markdownDescription": "The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a DB engine and engine version compatible with that DB cluster parameter group family.\n\n> The DB cluster parameter group family can't be changed when updating a DB cluster parameter group. \n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBClusterParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBClusterParameterGroup.html)` .", + "markdownDescription": "The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.\n\n*Aurora MySQL*\n\nExample: `aurora-mysql5.7` , `aurora-mysql8.0`\n\n*Aurora PostgreSQL*\n\nExample: `aurora-postgresql14`\n\n*RDS for MySQL*\n\nExample: `mysql8.0`\n\n*RDS for PostgreSQL*\n\nExample: `postgres13`\n\nTo list all of the available parameter group families for a DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine `\n\nFor example, to list all of the available parameter group families for the Aurora PostgreSQL DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine aurora-postgresql`\n\n> The output contains duplicates. 
\n\nThe following are the valid DB engine values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `mysql`\n- `postgres`", "title": "Family", "type": "string" }, @@ -225032,7 +225032,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB cluster parameter group.", + "markdownDescription": "Tags to assign to the DB cluster parameter group.", "title": "Tags", "type": "array" } @@ -225129,7 +225129,7 @@ "type": "string" }, "AutomaticBackupReplicationRegion": { - "markdownDescription": "", + "markdownDescription": "The AWS Region associated with the automated backup.", "title": "AutomaticBackupReplicationRegion", "type": "string" }, @@ -225174,7 +225174,7 @@ "type": "string" }, "DBClusterIdentifier": { - "markdownDescription": "The identifier of the DB cluster that the instance will belong to.", + "markdownDescription": "The identifier of the DB cluster that this DB instance will belong to.\n\nThis setting doesn't apply to RDS Custom DB instances.", "title": "DBClusterIdentifier", "type": "string" }, @@ -225212,12 +225212,12 @@ "type": "array" }, "DBSnapshotIdentifier": { - "markdownDescription": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n\nBy specifying this property, you can create a DB instance from the specified DB snapshot. If the `DBSnapshotIdentifier` property is an empty string or the `AWS::RDS::DBInstance` declaration has no `DBSnapshotIdentifier` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n\nSome DB instance properties aren't valid when you restore from a snapshot, such as the `MasterUsername` and `MasterUserPassword` properties. For information about the properties that you can specify, see the `RestoreDBInstanceFromDBSnapshot` action in the *Amazon RDS API Reference* .\n\nAfter you restore a DB instance with a `DBSnapshotIdentifier` property, you must specify the same `DBSnapshotIdentifier` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the `DBSnapshotIdentifier` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified `DBSnapshotIdentifier` property, and the original DB instance is deleted.\n\nIf you specify the `DBSnapshotIdentifier` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n\n- `CharacterSetName`\n- `DBClusterIdentifier`\n- `DBName`\n- `DeleteAutomatedBackups`\n- `EnablePerformanceInsights`\n- `KmsKeyId`\n- `MasterUsername`\n- `MasterUserPassword`\n- `PerformanceInsightsKMSKeyId`\n- `PerformanceInsightsRetentionPeriod`\n- `PromotionTier`\n- `SourceDBInstanceIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an encrypted snapshot)\n- `Timezone`\n\n*Amazon Aurora*\n\nNot applicable. 
Snapshot restore is managed by the DB cluster.", + "markdownDescription": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n\nBy specifying this property, you can create a DB instance from the specified DB snapshot. If the `DBSnapshotIdentifier` property is an empty string or the `AWS::RDS::DBInstance` declaration has no `DBSnapshotIdentifier` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n\nSome DB instance properties aren't valid when you restore from a snapshot, such as the `MasterUsername` and `MasterUserPassword` properties. For information about the properties that you can specify, see the `RestoreDBInstanceFromDBSnapshot` action in the *Amazon RDS API Reference* .\n\nAfter you restore a DB instance with a `DBSnapshotIdentifier` property, you must specify the same `DBSnapshotIdentifier` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the `DBSnapshotIdentifier` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified `DBSnapshotIdentifier` property, and the original DB instance is deleted.\n\nIf you specify the `DBSnapshotIdentifier` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n\n- `CharacterSetName`\n- `DBClusterIdentifier`\n- `DBName`\n- `DeleteAutomatedBackups`\n- `KmsKeyId`\n- `MasterUsername`\n- `MasterUserPassword`\n- `PerformanceInsightsKMSKeyId`\n- `PerformanceInsightsRetentionPeriod`\n- `PromotionTier`\n- `SourceDBInstanceIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an encrypted snapshot)\n- `Timezone`\n\n*Amazon Aurora*\n\nNot applicable. Snapshot restore is managed by the DB cluster.", "title": "DBSnapshotIdentifier", "type": "string" }, "DBSubnetGroupName": { - "markdownDescription": "A DB subnet group to associate with the DB instance. If you update this value, the new subnet group must be a subnet group in a new VPC.\n\nIf there's no DB subnet group, then the DB instance isn't a VPC DB instance.\n\nFor more information about using Amazon RDS in a VPC, see [Using Amazon RDS with Amazon Virtual Private Cloud (VPC)](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. The DB subnet group is managed by the DB cluster. If specified, the setting must match the DB cluster setting.", + "markdownDescription": "A DB subnet group to associate with the DB instance. 
If you update this value, the new subnet group must be a subnet group in a new VPC.\n\nIf there's no DB subnet group, then the DB instance isn't a VPC DB instance.\n\nFor more information about using Amazon RDS in a VPC, see [Amazon VPC and Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\nThis setting doesn't apply to Amazon Aurora DB instances. The DB subnet group is managed by the DB cluster. If specified, the setting must match the DB cluster setting.", "title": "DBSubnetGroupName", "type": "string" }, @@ -225232,7 +225232,7 @@ "type": "boolean" }, "DeletionProtection": { - "markdownDescription": "A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. For more information, see [Deleting a DB Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html) .\n\n*Amazon Aurora*\n\nNot applicable. You can enable or disable deletion protection for the DB cluster. For more information, see `CreateDBCluster` . DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.", + "markdownDescription": "Specifies whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see [Deleting a DB Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html) .\n\nThis setting doesn't apply to Amazon Aurora DB instances. You can enable or disable deletion protection for the DB cluster. For more information, see `CreateDBCluster` . DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.", "title": "DeletionProtection", "type": "boolean" }, @@ -225343,7 +225343,7 @@ "type": "number" }, "MonitoringInterval": { - "markdownDescription": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify 0. The default is 0.\n\nIf `MonitoringRoleArn` is specified, then you must set `MonitoringInterval` to a value other than 0.\n\nThis setting doesn't apply to RDS Custom.\n\nValid Values: `0, 1, 5, 10, 15, 30, 60`", + "markdownDescription": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify `0` .\n\nIf `MonitoringRoleArn` is specified, then you must set `MonitoringInterval` to a value other than `0` .\n\nThis setting doesn't apply to RDS Custom DB instances.\n\nValid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60`\n\nDefault: `0`", "title": "MonitoringInterval", "type": "number" }, @@ -225353,7 +225353,7 @@ "type": "string" }, "MultiAZ": { - "markdownDescription": "Specifies whether the database instance is a Multi-AZ DB instance deployment. You can't set the `AvailabilityZone` parameter if the `MultiAZ` parameter is set to true.\n\nFor more information, see [Multi-AZ deployments for high availability](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZ.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. 
Amazon Aurora storage is replicated across all of the Availability Zones and doesn't require the `MultiAZ` option to be set.", + "markdownDescription": "Specifies whether the DB instance is a Multi-AZ deployment. You can't set the `AvailabilityZone` parameter if the DB instance is a Multi-AZ deployment.\n\nThis setting doesn't apply to the following DB instances:\n\n- Amazon Aurora (DB instance Availability Zones (AZs) are managed by the DB cluster.)\n- RDS Custom", "title": "MultiAZ", "type": "boolean" }, @@ -225383,7 +225383,7 @@ "type": "number" }, "Port": { - "markdownDescription": "The port number on which the database accepts connections.\n\n*Amazon Aurora*\n\nNot applicable. The port number is managed by the DB cluster.\n\n*Db2*\n\nDefault value: `50000`", + "markdownDescription": "The port number on which the database accepts connections.\n\nThis setting doesn't apply to Aurora DB instances. The port number is managed by the cluster.\n\nValid Values: `1150-65535`\n\nDefault:\n\n- RDS for Db2 - `50000`\n- RDS for MariaDB - `3306`\n- RDS for Microsoft SQL Server - `1433`\n- RDS for MySQL - `3306`\n- RDS for Oracle - `1521`\n- RDS for PostgreSQL - `5432`\n\nConstraints:\n\n- For RDS for Microsoft SQL Server, the value can't be `1234` , `1434` , `3260` , `3343` , `3389` , `47001` , or `49152-49156` .", "title": "Port", "type": "string" }, @@ -225469,7 +225469,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB instance.", + "markdownDescription": "Tags to assign to the DB instance.", "title": "Tags", "type": "array" }, @@ -225585,7 +225585,7 @@ "type": "string" }, "SecretArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the secret.", + "markdownDescription": "The Amazon Resource Name (ARN) of the secret. This parameter is a return value that you can retrieve using the `Fn::GetAtt` intrinsic function. For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html#aws-resource-rds-dbinstance-return-values) .", "title": "SecretArn", "type": "string" } @@ -225654,12 +225654,12 @@ "type": "string" }, "Family": { - "markdownDescription": "The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a DB engine and engine version compatible with that DB parameter group family.\n\n> The DB parameter group family can't be changed when updating a DB parameter group. \n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBParameterGroup.html)` .", + "markdownDescription": "The DB parameter group family name. 
A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.\n\nTo list all of the available parameter group families for a DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine `\n\nFor example, to list all of the available parameter group families for the MySQL DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine mysql`\n\n> The output contains duplicates. \n\nThe following are the valid DB engine values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `db2-ae`\n- `db2-se`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", "title": "Family", "type": "string" }, "Parameters": { - "markdownDescription": "An array of parameter names and values for the parameter update. At least one parameter name and value must be supplied. Subsequent arguments are optional.\n\nRDS for Db2 requires you to bring your own Db2 license. You must enter your IBM customer ID ( `rds.ibm_customer_id` ) and site number ( `rds.ibm_site_id` ) before starting a Db2 instance.\n\nFor more information about DB parameters and DB parameter groups for Amazon RDS DB engines, see [Working with DB Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* .\n\nFor more information about DB cluster and DB instance parameters and parameter groups for Amazon Aurora DB engines, see [Working with DB Parameter Groups and DB Cluster Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", + "markdownDescription": "An array of parameter names and values for the parameter update. You must specify at least one parameter name and value.\n\nFor more information about parameter groups, see [Working with parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* , or [Working with parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", "title": "Parameters", "type": "object" }, @@ -225667,7 +225667,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB parameter group.\n\n> Currently, this is the only property that supports drift detection.", + "markdownDescription": "Tags to assign to the DB parameter group.", "title": "Tags", "type": "array" } @@ -225753,7 +225753,7 @@ "type": "boolean" }, "EngineFamily": { - "markdownDescription": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. 
For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL` . For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL` . For RDS for Microsoft SQL Server, specify `SQLSERVER` .\n\n*Valid Values* : `MYSQL` | `POSTGRESQL` | `SQLSERVER`",
+      "markdownDescription": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL` . For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL` . For RDS for Microsoft SQL Server, specify `SQLSERVER` .",
      "title": "EngineFamily",
      "type": "string"
     },
@@ -225831,7 +225831,7 @@
    "additionalProperties": false,
    "properties": {
     "AuthScheme": {
-      "markdownDescription": "The type of authentication that the proxy uses for connections from the proxy to the underlying database.\n\nValid Values: `SECRETS`",
+      "markdownDescription": "The type of authentication that the proxy uses for connections from the proxy to the underlying database.",
      "title": "AuthScheme",
      "type": "string"
     },
@@ -225846,7 +225846,7 @@
      "type": "string"
     },
     "IAMAuth": {
-      "markdownDescription": "Whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. The `ENABLED` value is valid only for proxies with RDS for Microsoft SQL Server.\n\nValid Values: `ENABLED | DISABLED | REQUIRED`",
+      "markdownDescription": "A value that indicates whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. The `ENABLED` value is valid only for proxies with RDS for Microsoft SQL Server.",
      "title": "IAMAuth",
      "type": "string"
     },
@@ -225862,12 +225862,12 @@
    "additionalProperties": false,
    "properties": {
     "Key": {
-      "markdownDescription": "A key is the required name of the tag. The string value can be 1-128 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").",
+      "markdownDescription": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").",
      "title": "Key",
      "type": "string"
     },
     "Value": {
-      "markdownDescription": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").",
+      "markdownDescription": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").",
      "title": "Value",
      "type": "string"
     }
    },
@@ -225928,7 +225928,7 @@
      "type": "array"
     },
     "TargetRole": {
-      "markdownDescription": "A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.\n\nValid Values: `READ_WRITE | READ_ONLY`",
+      "markdownDescription": "A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.",
      "title": "TargetRole",
      "type": "string"
     },
@@ -225981,12 +225981,12 @@
    "additionalProperties": false,
    "properties": {
     "Key": {
-      "markdownDescription": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").",
+      "markdownDescription": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").",
      "title": "Key",
      "type": "string"
     },
     "Value": {
-      "markdownDescription": "Metadata assigned to a DB instance consisting of a key-value pair.",
+      "markdownDescription": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").",
      "title": "Value",
      "type": "string"
     }
    },
@@ -226030,7 +226030,7 @@
    "properties": {
     "ConnectionPoolConfigurationInfo": {
      "$ref": "#/definitions/AWS::RDS::DBProxyTargetGroup.ConnectionPoolConfigurationInfoFormat",
-      "markdownDescription": "Settings that control the size and behavior of the connection pool associated with a `DBProxyTargetGroup` .",
+      "markdownDescription": "Displays the settings that control the size and behavior of the connection pool associated with a `DBProxyTarget` .",
      "title": "ConnectionPoolConfigurationInfo"
    },
    "DBClusterIdentifiers": {
@@ -226165,7 +226165,7 @@
     "type": "array"
    },
    "EC2VpcId": {
-     "markdownDescription": "The identifier of an Amazon VPC. This property indicates the VPC that this DB security group belongs to.\n\n> The `EC2VpcId` property is for backward compatibility with older regions, and is no longer recommended for providing security information to an RDS DB instance.",
+     "markdownDescription": "The identifier of an Amazon virtual private cloud (VPC).
This property indicates the VPC that this DB security group belongs to.\n\n> This property is included for backwards compatibility and is no longer recommended for providing security information to an RDS DB instance.", "title": "EC2VpcId", "type": "string" }, @@ -226178,7 +226178,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB security group.", + "markdownDescription": "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n\nFor more information, see [Tagging Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide* or [Tagging Amazon Aurora and Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html) in the *Amazon Aurora User Guide* .", "title": "Tags", "type": "array" } @@ -226364,7 +226364,7 @@ "type": "string" }, "DBSubnetGroupName": { - "markdownDescription": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints: Must contain no more than 255 lowercase alphanumeric characters or hyphens. Must not be \"Default\".\n\nExample: `mysubnetgroup`", + "markdownDescription": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints:\n\n- Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens.\n- Must not be default.\n- First character must be a letter.\n\nExample: `mydbsubnetgroup`", "title": "DBSubnetGroupName", "type": "string" }, @@ -226380,7 +226380,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB subnet group.", + "markdownDescription": "Tags to assign to the DB subnet group.", "title": "Tags", "type": "array" } @@ -226469,12 +226469,12 @@ "items": { "type": "string" }, - "markdownDescription": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If a `SourceIds` value is supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.", + "markdownDescription": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. 
It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If `SourceIds` are supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.\n- If the source type is an RDS Proxy, a `DBProxyName` value must be supplied.", "title": "SourceIds", "type": "array" }, "SourceType": { - "markdownDescription": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, set this parameter to `db-instance` . If this value isn't specified, all events are returned.\n\nValid values: `db-instance` | `db-cluster` | `db-parameter-group` | `db-security-group` | `db-snapshot` | `db-cluster-snapshot`", + "markdownDescription": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to `db-instance` . For RDS Proxy events, specify `db-proxy` . If this value isn't specified, all events are returned.\n\nValid Values: `db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy | zero-etl | custom-engine-version | blue-green-deployment`", "title": "SourceType", "type": "string" }, @@ -226681,7 +226681,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of tags. For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide.* .", + "markdownDescription": "An optional array of key-value pairs to apply to this integration.", "title": "Tags", "type": "array" }, @@ -226767,7 +226767,7 @@ "items": { "$ref": "#/definitions/AWS::RDS::OptionGroup.OptionConfiguration" }, - "markdownDescription": "A list of options and the settings for each option.", + "markdownDescription": "A list of all available options for an option group.", "title": "OptionConfigurations", "type": "array" }, @@ -226785,7 +226785,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this option group.", + "markdownDescription": "Tags to assign to the option group.", "title": "Tags", "type": "array" } @@ -226825,7 +226825,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of DBSecurityGroupMembership name strings used for this option.", + "markdownDescription": "A list of DB security groups used for this option.", "title": "DBSecurityGroupMemberships", "type": "array" }, @@ -226856,7 +226856,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of VpcSecurityGroupMembership name strings used for this option.", + "markdownDescription": "A list of VPC security group names used for this option.", "title": "VpcSecurityGroupMemberships", "type": "array" } @@ -235610,7 +235610,7 @@ }, "VersioningConfiguration": { "$ref": "#/definitions/AWS::S3::Bucket.VersioningConfiguration", - "markdownDescription": "Enables multiple versions of all objects in this bucket. 
You might enable versioning to prevent objects from being deleted or overwritten by mistake or to archive objects so that you can retrieve previous versions of them.",
+        "markdownDescription": "Enables multiple versions of all objects in this bucket. You might enable versioning to prevent objects from being deleted or overwritten by mistake or to archive objects so that you can retrieve previous versions of them.\n\n> When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. We recommend that you wait for 15 minutes after enabling versioning before issuing write operations ( `PUT` or `DELETE` ) on objects in the bucket.",
        "title": "VersioningConfiguration"
       },
       "WebsiteConfiguration": {
@@ -242311,7 +242311,7 @@
       "type": "array"
      },
      "RejectedPatchesAction": {
-       "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *BLOCK* : Packages in the *Rejected patches* list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the *Rejected patches* list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as *InstalledRejected* .",
+       "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- **ALLOW_AS_DEPENDENCY** - *Linux and macOS* : A package in the rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `INSTALLED_OTHER` . This is the default action if no option is specified.\n\n*Windows Server* : Windows Server doesn't support the concept of package dependencies. If a package is in the rejected patches list and already installed on the node, its status is reported as `INSTALLED_OTHER` . Any package not already installed on the node is skipped. This is the default action if no option is specified.\n- **BLOCK** - *All OSs* : Packages in the rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as `INSTALLED_REJECTED` .",
      "title": "RejectedPatchesAction",
      "type": "string"
     },
@@ -261018,7 +261018,7 @@
     "items": {
      "type": "string"
     },
-     "markdownDescription": "A list of Amazon CloudWatch alarms to be monitored during the deployment. The deployment fails and rolls back if any of these alarms go into the `ALARM` state.",
+     "markdownDescription": "A list of Amazon CloudWatch alarm names to be monitored during the deployment. The deployment fails and rolls back if any of these alarms go into the `ALARM` state.\n\n> Amazon CloudWatch considers nonexistent alarms to have an `OK` state.
If you provide an invalid alarm name or provide the ARN of an alarm instead of its name, your deployment may not roll back correctly.", "title": "Alarms", "type": "array" }, From 249dc325e33b1ef44505a508c0893c7af56347bc Mon Sep 17 00:00:00 2001 From: CORBIERE Sebastien Date: Mon, 12 Aug 2024 11:54:29 -0700 Subject: [PATCH 2/7] fix: Add the property UseAliasAsEventTarget to AWS::Serverless::StateMachine and make associated events use the state machine alias as a target. (#3627) --- DEVELOPMENT_GUIDE.md | 4 +- bin/add_transform_test.py | 13 +- .../aws_serverless_statemachine.py | 1 + samtranslator/model/sam_resources.py | 3 + .../model/stepfunctions/generators.py | 16 +- samtranslator/schema/schema.json | 4 + schema_source/sam.schema.json | 4 + .../test_state_machine_generator.py | 31 ++ .../state_machine_with_events_and_alias.yaml | 45 ++ .../state_machine_with_events_and_alias.json | 427 ++++++++++++++++++ .../state_machine_with_events_and_alias.json | 427 ++++++++++++++++++ .../state_machine_with_events_and_alias.json | 419 +++++++++++++++++ 12 files changed, 1388 insertions(+), 6 deletions(-) create mode 100644 tests/translator/input/state_machine_with_events_and_alias.yaml create mode 100644 tests/translator/output/aws-cn/state_machine_with_events_and_alias.json create mode 100644 tests/translator/output/aws-us-gov/state_machine_with_events_and_alias.json create mode 100644 tests/translator/output/state_machine_with_events_and_alias.json diff --git a/DEVELOPMENT_GUIDE.md b/DEVELOPMENT_GUIDE.md index 2a89debf6..10fcec056 100644 --- a/DEVELOPMENT_GUIDE.md +++ b/DEVELOPMENT_GUIDE.md @@ -59,10 +59,10 @@ We format our code using [Black](https://github.com/python/black) and verify the during PR checks. Black will be installed automatically with `make init`. 
After installing, you can run our formatting through our Makefile by `make format` or integrating Black directly in your favorite IDE (instructions -can be found [here](https://black.readthedocs.io/en/stable/editor_integration.html)) +can be found [here](https://black.readthedocs.io/en/stable/integrations/editors.html)) ##### (Workaround) Integrating Black directly in your favorite IDE -Since black is installed in virtualenv, when you follow [this instruction](https://black.readthedocs.io/en/stable/editor_integration.html), `which black` might give you this +Since black is installed in virtualenv, when you follow [this instruction](https://black.readthedocs.io/en/stable/integrations/editors.html), `which black` might give you this ```bash (sam38) $ where black diff --git a/bin/add_transform_test.py b/bin/add_transform_test.py index 99139b74e..ec85b53c9 100755 --- a/bin/add_transform_test.py +++ b/bin/add_transform_test.py @@ -68,8 +68,13 @@ def get_input_file_path() -> Path: def copy_input_file_to_transform_test_dir(input_file_path: Path, transform_test_input_path: Path) -> None: - shutil.copyfile(input_file_path, transform_test_input_path) - print(f"Transform Test input file generated {transform_test_input_path}") + try: + shutil.copyfile(input_file_path, transform_test_input_path) + print(f"Transform Test input file generated {transform_test_input_path}") + except shutil.SameFileError: + print(f"Source and destination are the same file: {input_file_path}") + except Exception as e: + raise e def verify_input_template(input_file_path: Path) -> None: @@ -99,7 +104,9 @@ def main() -> None: verify_input_template(input_file_path) transform_test_input_path = TRANSFORM_TEST_DIR / "input" / (file_basename + ".yaml") - copy_input_file_to_transform_test_dir(input_file_path, transform_test_input_path) + # check if the template is not already in the transform test input dir + if input_file_path != transform_test_input_path: + copy_input_file_to_transform_test_dir(input_file_path, transform_test_input_path) generate_transform_test_output_files(transform_test_input_path, file_basename) diff --git a/samtranslator/internal/schema_source/aws_serverless_statemachine.py b/samtranslator/internal/schema_source/aws_serverless_statemachine.py index 9e6a549e9..355064767 100644 --- a/samtranslator/internal/schema_source/aws_serverless_statemachine.py +++ b/samtranslator/internal/schema_source/aws_serverless_statemachine.py @@ -174,6 +174,7 @@ class Properties(BaseModel): Type: Optional[PassThroughProp] = properties("Type") AutoPublishAlias: Optional[PassThroughProp] DeploymentPreference: Optional[PassThroughProp] + UseAliasAsEventTarget: Optional[bool] class Resource(ResourceAttributes): diff --git a/samtranslator/model/sam_resources.py b/samtranslator/model/sam_resources.py index fb018e208..1592ee8ba 100644 --- a/samtranslator/model/sam_resources.py +++ b/samtranslator/model/sam_resources.py @@ -1778,6 +1778,7 @@ class SamStateMachine(SamResourceMacro): "PermissionsBoundary": PropertyType(False, IS_STR), "AutoPublishAlias": PassThroughProperty(False), "DeploymentPreference": MutatedPassThroughProperty(False), + "UseAliasAsEventTarget": Property(False, IS_BOOL), } Definition: Optional[Dict[str, Any]] @@ -1796,6 +1797,7 @@ class SamStateMachine(SamResourceMacro): PermissionsBoundary: Optional[Intrinsicable[str]] AutoPublishAlias: Optional[PassThrough] DeploymentPreference: Optional[PassThrough] + UseAliasAsEventTarget: Optional[bool] event_resolver = ResourceTypeResolver( samtranslator.model.stepfunctions.events, 
@@ -1834,6 +1836,7 @@ def to_cloudformation(self, **kwargs): # type: ignore[no-untyped-def] get_managed_policy_map=get_managed_policy_map, auto_publish_alias=self.AutoPublishAlias, deployment_preference=self.DeploymentPreference, + use_alias_as_event_target=self.UseAliasAsEventTarget, ) generated_resources = state_machine_generator.to_cloudformation() diff --git a/samtranslator/model/stepfunctions/generators.py b/samtranslator/model/stepfunctions/generators.py index f69499a7f..e5cf744be 100644 --- a/samtranslator/model/stepfunctions/generators.py +++ b/samtranslator/model/stepfunctions/generators.py @@ -54,6 +54,7 @@ def __init__( # type: ignore[no-untyped-def] # noqa: PLR0913 get_managed_policy_map=None, auto_publish_alias=None, deployment_preference=None, + use_alias_as_event_target=None, ): """ Constructs an State Machine Generator class that generates a State Machine resource @@ -81,6 +82,7 @@ def __init__( # type: ignore[no-untyped-def] # noqa: PLR0913 :param passthrough_resource_attributes: Attributes such as `Condition` that are added to derived resources :param auto_publish_alias: Name of the state machine alias to automatically create and update :deployment_preference: Settings to enable gradual state machine deployments + :param use_alias_as_event_target: Whether to use the state machine alias as the event target """ self.logical_id = logical_id self.depends_on = depends_on @@ -110,6 +112,7 @@ def __init__( # type: ignore[no-untyped-def] # noqa: PLR0913 self.get_managed_policy_map = get_managed_policy_map self.auto_publish_alias = auto_publish_alias self.deployment_preference = deployment_preference + self.use_alias_as_event_target = use_alias_as_event_target @cw_timer(prefix="Generator", name="StateMachine") def to_cloudformation(self): # type: ignore[no-untyped-def] @@ -300,6 +303,8 @@ def _construct_alias(self, version: StepFunctionsStateMachineVersion) -> StepFun deployment_preference["StateMachineVersionArn"] = state_machine_version_arn state_machine_alias.DeploymentPreference = deployment_preference + self.state_machine_alias = state_machine_alias + return state_machine_alias def _generate_managed_traffic_shifting_resources( @@ -310,6 +315,10 @@ def _generate_managed_traffic_shifting_resources( :returns: a list containing the state machine's version and alias resources :rtype: list """ + if not self.auto_publish_alias and self.use_alias_as_event_target: + raise InvalidResourceException( + self.logical_id, "'UseAliasAsEventTarget' requires 'AutoPublishAlias' property to be specified." + ) if not self.auto_publish_alias and not self.deployment_preference: return [] if not self.auto_publish_alias and self.deployment_preference: @@ -341,7 +350,12 @@ def _generate_event_resources(self) -> List[Dict[str, Any]]: kwargs[name] = resource except (TypeError, AttributeError) as e: raise InvalidEventException(logical_id, str(e)) from e - resources += eventsource.to_cloudformation(resource=self.state_machine, **kwargs) + target_resource = ( + (self.state_machine_alias or self.state_machine) + if self.use_alias_as_event_target + else self.state_machine + ) + resources += eventsource.to_cloudformation(resource=target_resource, **kwargs) return resources diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index 648fb27b8..5176e0d2d 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -281000,6 +281000,10 @@ ], "markdownDescription": "The type of the state machine\\. 
\n*Valid values*: `STANDARD` or `EXPRESS` \n*Type*: String \n*Required*: No \n*Default*: `STANDARD` \n*AWS CloudFormation compatibility*: This property is passed directly to the [`StateMachineType`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html#cfn-stepfunctions-statemachine-statemachinetype) property of an `AWS::StepFunctions::StateMachine` resource\\.", "title": "Type" + }, + "UseAliasAsEventTarget": { + "title": "Usealiasaseventtarget", + "type": "boolean" } }, "title": "Properties", diff --git a/schema_source/sam.schema.json b/schema_source/sam.schema.json index 2a9899e1b..b06b877ac 100644 --- a/schema_source/sam.schema.json +++ b/schema_source/sam.schema.json @@ -8020,6 +8020,10 @@ ], "markdownDescription": "The type of the state machine\\. \n*Valid values*: `STANDARD` or `EXPRESS` \n*Type*: String \n*Required*: No \n*Default*: `STANDARD` \n*AWS CloudFormation compatibility*: This property is passed directly to the [`StateMachineType`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html#cfn-stepfunctions-statemachine-statemachinetype) property of an `AWS::StepFunctions::StateMachine` resource\\.", "title": "Type" + }, + "UseAliasAsEventTarget": { + "title": "Usealiasaseventtarget", + "type": "boolean" } }, "title": "Properties", diff --git a/tests/model/stepfunctions/test_state_machine_generator.py b/tests/model/stepfunctions/test_state_machine_generator.py index a4f801c81..968708152 100644 --- a/tests/model/stepfunctions/test_state_machine_generator.py +++ b/tests/model/stepfunctions/test_state_machine_generator.py @@ -148,6 +148,37 @@ def test_state_machine_with_unsupported_event_source(self): with self.assertRaises(InvalidEventException): StateMachineGenerator(**self.kwargs).to_cloudformation() + def test_state_machine_with_alias_as_event_source_target(self): + self.kwargs["definition_uri"] = "s3://mybucket/myASLfile" + self.kwargs["role"] = "my-test-role-arn" + self.kwargs["use_alias_as_event_target"] = True + self.kwargs["auto_publish_alias"] = "live" + event_resolver = Mock() + event_resolver.resolve_resource_type = Mock(return_value=CloudWatchEvent) + self.kwargs["event_resolver"] = event_resolver + self.kwargs["events"] = { + "CWEEvent": {"Type": "CloudWatchEvent", "Properties": {"Pattern": {"detail": {"state": ["terminated"]}}}} + } + self.kwargs["event_resources"] = {"CWEEvent": {}} + state_machine_generator = StateMachineGenerator(**self.kwargs) + state_machine_generator._generate_managed_traffic_shifting_resources() + generated_event_resources = state_machine_generator._generate_event_resources() + self.assertEqual(generated_event_resources[0].Targets[0]["Arn"], {"Ref": "StateMachineIdAliaslive"}) + + def test_state_machine_with_alias_as_event_source_target_requires_alias(self): + self.kwargs["definition_uri"] = "s3://mybucket/myASLfile" + self.kwargs["role"] = "my-test-role-arn" + self.kwargs["use_alias_as_event_target"] = True + self.kwargs["deployment_preference"] = {"Type": "ALL_AT_ONCE"} + # Missing property + # self.kwargs["auto_publish_alias"] = "live" + with self.assertRaises(InvalidResourceException) as error: + StateMachineGenerator(**self.kwargs).to_cloudformation() + self.assertEqual( + error.exception.message, + "Resource with id [StateMachineId] is invalid. 
'UseAliasAsEventTarget' requires 'AutoPublishAlias' property to be specified.", + ) + def test_state_machine_with_managed_traffic_shifting_properties(self): self.kwargs["definition_uri"] = "s3://mybucket/myASLfile" self.kwargs["role"] = "my-test-role-arn" diff --git a/tests/translator/input/state_machine_with_events_and_alias.yaml b/tests/translator/input/state_machine_with_events_and_alias.yaml new file mode 100644 index 000000000..1b954c122 --- /dev/null +++ b/tests/translator/input/state_machine_with_events_and_alias.yaml @@ -0,0 +1,45 @@ +Transform: AWS::Serverless-2016-10-31 +Resources: + MyStateMachine: + Type: AWS::Serverless::StateMachine + Properties: + Type: STANDARD + Definition: + StartAt: HelloWorld + States: + HelloWorld: + Type: Pass + Result: 1 + End: true + Role: !Sub "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/doesNotExist" + AutoPublishAlias: test + UseAliasAsEventTarget: true + Events: + CWEvent: + Type: CloudWatchEvent + Properties: + Pattern: + detail: + state: + - terminated + EBEvent: + Type: EventBridgeRule + Properties: + Pattern: + source: [aws.tag] + ApiEvent: + Type: Api + Properties: + Path: /path + Method: get + CWSchedule: + Type: Schedule + Properties: + Schedule: rate(1 minute) + Name: TestSchedule + Description: test schedule + Enabled: false + ScheduleEvent: + Type: ScheduleV2 + Properties: + ScheduleExpression: rate(1 minute) diff --git a/tests/translator/output/aws-cn/state_machine_with_events_and_alias.json b/tests/translator/output/aws-cn/state_machine_with_events_and_alias.json new file mode 100644 index 000000000..60f4052a5 --- /dev/null +++ b/tests/translator/output/aws-cn/state_machine_with_events_and_alias.json @@ -0,0 +1,427 @@ +{ + "Resources": { + "MyStateMachine": { + "Properties": { + "DefinitionString": { + "Fn::Join": [ + "\n", + [ + "{", + " \"StartAt\": \"HelloWorld\",", + " \"States\": {", + " \"HelloWorld\": {", + " \"End\": true,", + " \"Result\": 1,", + " \"Type\": \"Pass\"", + " }", + " }", + "}" + ] + ] + }, + "RoleArn": { + "Fn::Sub": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/doesNotExist" + }, + "StateMachineType": "STANDARD", + "Tags": [ + { + "Key": "stateMachine:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::StepFunctions::StateMachine" + }, + "MyStateMachineAliastest": { + "Properties": { + "DeploymentPreference": { + "StateMachineVersionArn": { + "Ref": "MyStateMachineVersion" + }, + "Type": "ALL_AT_ONCE" + }, + "Name": "test" + }, + "Type": "AWS::StepFunctions::StateMachineAlias" + }, + "MyStateMachineApiEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "apigateway.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineApiEventRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineCWEvent": { + "Properties": { + "EventPattern": { + "detail": { + "state": [ + "terminated" + ] + } + }, + "Targets": [ + { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "Id": "MyStateMachineCWEventStepFunctionsTarget", + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineCWEventRole", + "Arn" + ] + } + } + ] + }, + "Type": "AWS::Events::Rule" + }, + "MyStateMachineCWEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + 
"Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "events.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineCWEventRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineCWSchedule": { + "Properties": { + "Description": "test schedule", + "Name": "TestSchedule", + "ScheduleExpression": "rate(1 minute)", + "State": "DISABLED", + "Targets": [ + { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "Id": "MyStateMachineCWScheduleStepFunctionsTarget", + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineCWScheduleRole", + "Arn" + ] + } + } + ] + }, + "Type": "AWS::Events::Rule" + }, + "MyStateMachineCWScheduleRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "events.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineCWScheduleRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineEBEvent": { + "Properties": { + "EventPattern": { + "source": [ + "aws.tag" + ] + }, + "Targets": [ + { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "Id": "MyStateMachineEBEventStepFunctionsTarget", + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineEBEventRole", + "Arn" + ] + } + } + ] + }, + "Type": "AWS::Events::Rule" + }, + "MyStateMachineEBEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "events.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineEBEventRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineScheduleEvent": { + "Properties": { + "FlexibleTimeWindow": { + "Mode": "OFF" + }, + "Name": "MyStateMachineScheduleEvent", + "ScheduleExpression": "rate(1 minute)", + "Target": { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineScheduleEventRole", + "Arn" + ] + } + } + }, + "Type": "AWS::Scheduler::Schedule" + }, + "MyStateMachineScheduleEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "scheduler.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineScheduleEventStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineVersion": { + "DeletionPolicy": "Retain", + "Properties": { + "StateMachineArn": { + "Ref": "MyStateMachine" + }, + "StateMachineRevisionId": { + "Fn::GetAtt": [ + 
"MyStateMachine", + "StateMachineRevisionId" + ] + } + }, + "Type": "AWS::StepFunctions::StateMachineVersion", + "UpdateReplacePolicy": "Retain" + }, + "ServerlessRestApi": { + "Properties": { + "Body": { + "info": { + "title": { + "Ref": "AWS::StackName" + }, + "version": "1.0" + }, + "paths": { + "/path": { + "get": { + "responses": { + "200": { + "description": "OK" + }, + "400": { + "description": "Bad Request" + } + }, + "x-amazon-apigateway-integration": { + "credentials": { + "Fn::GetAtt": [ + "MyStateMachineApiEventRole", + "Arn" + ] + }, + "httpMethod": "POST", + "requestTemplates": { + "application/json": { + "Fn::Sub": "{\"input\": \"$util.escapeJavaScript($input.json('$'))\", \"stateMachineArn\": \"${MyStateMachineAliastest}\"}" + } + }, + "responses": { + "200": { + "statusCode": "200" + }, + "400": { + "statusCode": "400" + } + }, + "type": "aws", + "uri": { + "Fn::Sub": "arn:${AWS::Partition}:apigateway:${AWS::Region}:states:action/StartExecution" + } + } + } + } + }, + "swagger": "2.0" + }, + "EndpointConfiguration": { + "Types": [ + "REGIONAL" + ] + }, + "Parameters": { + "endpointConfigurationTypes": "REGIONAL" + } + }, + "Type": "AWS::ApiGateway::RestApi" + }, + "ServerlessRestApiDeploymente6166edbc7": { + "Properties": { + "Description": "RestApi deployment id: e6166edbc7b05836f53278af31642807c36e76b3", + "RestApiId": { + "Ref": "ServerlessRestApi" + }, + "StageName": "Stage" + }, + "Type": "AWS::ApiGateway::Deployment" + }, + "ServerlessRestApiProdStage": { + "Properties": { + "DeploymentId": { + "Ref": "ServerlessRestApiDeploymente6166edbc7" + }, + "RestApiId": { + "Ref": "ServerlessRestApi" + }, + "StageName": "Prod" + }, + "Type": "AWS::ApiGateway::Stage" + } + } +} diff --git a/tests/translator/output/aws-us-gov/state_machine_with_events_and_alias.json b/tests/translator/output/aws-us-gov/state_machine_with_events_and_alias.json new file mode 100644 index 000000000..60f4052a5 --- /dev/null +++ b/tests/translator/output/aws-us-gov/state_machine_with_events_and_alias.json @@ -0,0 +1,427 @@ +{ + "Resources": { + "MyStateMachine": { + "Properties": { + "DefinitionString": { + "Fn::Join": [ + "\n", + [ + "{", + " \"StartAt\": \"HelloWorld\",", + " \"States\": {", + " \"HelloWorld\": {", + " \"End\": true,", + " \"Result\": 1,", + " \"Type\": \"Pass\"", + " }", + " }", + "}" + ] + ] + }, + "RoleArn": { + "Fn::Sub": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/doesNotExist" + }, + "StateMachineType": "STANDARD", + "Tags": [ + { + "Key": "stateMachine:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::StepFunctions::StateMachine" + }, + "MyStateMachineAliastest": { + "Properties": { + "DeploymentPreference": { + "StateMachineVersionArn": { + "Ref": "MyStateMachineVersion" + }, + "Type": "ALL_AT_ONCE" + }, + "Name": "test" + }, + "Type": "AWS::StepFunctions::StateMachineAlias" + }, + "MyStateMachineApiEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "apigateway.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineApiEventRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineCWEvent": { + "Properties": { + "EventPattern": { + "detail": { + "state": [ + "terminated" + ] + } + 
}, + "Targets": [ + { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "Id": "MyStateMachineCWEventStepFunctionsTarget", + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineCWEventRole", + "Arn" + ] + } + } + ] + }, + "Type": "AWS::Events::Rule" + }, + "MyStateMachineCWEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "events.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineCWEventRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineCWSchedule": { + "Properties": { + "Description": "test schedule", + "Name": "TestSchedule", + "ScheduleExpression": "rate(1 minute)", + "State": "DISABLED", + "Targets": [ + { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "Id": "MyStateMachineCWScheduleStepFunctionsTarget", + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineCWScheduleRole", + "Arn" + ] + } + } + ] + }, + "Type": "AWS::Events::Rule" + }, + "MyStateMachineCWScheduleRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "events.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineCWScheduleRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineEBEvent": { + "Properties": { + "EventPattern": { + "source": [ + "aws.tag" + ] + }, + "Targets": [ + { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "Id": "MyStateMachineEBEventStepFunctionsTarget", + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineEBEventRole", + "Arn" + ] + } + } + ] + }, + "Type": "AWS::Events::Rule" + }, + "MyStateMachineEBEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "events.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineEBEventRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineScheduleEvent": { + "Properties": { + "FlexibleTimeWindow": { + "Mode": "OFF" + }, + "Name": "MyStateMachineScheduleEvent", + "ScheduleExpression": "rate(1 minute)", + "Target": { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineScheduleEventRole", + "Arn" + ] + } + } + }, + "Type": "AWS::Scheduler::Schedule" + }, + "MyStateMachineScheduleEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "scheduler.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": 
"MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineScheduleEventStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineVersion": { + "DeletionPolicy": "Retain", + "Properties": { + "StateMachineArn": { + "Ref": "MyStateMachine" + }, + "StateMachineRevisionId": { + "Fn::GetAtt": [ + "MyStateMachine", + "StateMachineRevisionId" + ] + } + }, + "Type": "AWS::StepFunctions::StateMachineVersion", + "UpdateReplacePolicy": "Retain" + }, + "ServerlessRestApi": { + "Properties": { + "Body": { + "info": { + "title": { + "Ref": "AWS::StackName" + }, + "version": "1.0" + }, + "paths": { + "/path": { + "get": { + "responses": { + "200": { + "description": "OK" + }, + "400": { + "description": "Bad Request" + } + }, + "x-amazon-apigateway-integration": { + "credentials": { + "Fn::GetAtt": [ + "MyStateMachineApiEventRole", + "Arn" + ] + }, + "httpMethod": "POST", + "requestTemplates": { + "application/json": { + "Fn::Sub": "{\"input\": \"$util.escapeJavaScript($input.json('$'))\", \"stateMachineArn\": \"${MyStateMachineAliastest}\"}" + } + }, + "responses": { + "200": { + "statusCode": "200" + }, + "400": { + "statusCode": "400" + } + }, + "type": "aws", + "uri": { + "Fn::Sub": "arn:${AWS::Partition}:apigateway:${AWS::Region}:states:action/StartExecution" + } + } + } + } + }, + "swagger": "2.0" + }, + "EndpointConfiguration": { + "Types": [ + "REGIONAL" + ] + }, + "Parameters": { + "endpointConfigurationTypes": "REGIONAL" + } + }, + "Type": "AWS::ApiGateway::RestApi" + }, + "ServerlessRestApiDeploymente6166edbc7": { + "Properties": { + "Description": "RestApi deployment id: e6166edbc7b05836f53278af31642807c36e76b3", + "RestApiId": { + "Ref": "ServerlessRestApi" + }, + "StageName": "Stage" + }, + "Type": "AWS::ApiGateway::Deployment" + }, + "ServerlessRestApiProdStage": { + "Properties": { + "DeploymentId": { + "Ref": "ServerlessRestApiDeploymente6166edbc7" + }, + "RestApiId": { + "Ref": "ServerlessRestApi" + }, + "StageName": "Prod" + }, + "Type": "AWS::ApiGateway::Stage" + } + } +} diff --git a/tests/translator/output/state_machine_with_events_and_alias.json b/tests/translator/output/state_machine_with_events_and_alias.json new file mode 100644 index 000000000..1f4567f89 --- /dev/null +++ b/tests/translator/output/state_machine_with_events_and_alias.json @@ -0,0 +1,419 @@ +{ + "Resources": { + "MyStateMachine": { + "Properties": { + "DefinitionString": { + "Fn::Join": [ + "\n", + [ + "{", + " \"StartAt\": \"HelloWorld\",", + " \"States\": {", + " \"HelloWorld\": {", + " \"End\": true,", + " \"Result\": 1,", + " \"Type\": \"Pass\"", + " }", + " }", + "}" + ] + ] + }, + "RoleArn": { + "Fn::Sub": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/doesNotExist" + }, + "StateMachineType": "STANDARD", + "Tags": [ + { + "Key": "stateMachine:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::StepFunctions::StateMachine" + }, + "MyStateMachineAliastest": { + "Properties": { + "DeploymentPreference": { + "StateMachineVersionArn": { + "Ref": "MyStateMachineVersion" + }, + "Type": "ALL_AT_ONCE" + }, + "Name": "test" + }, + "Type": "AWS::StepFunctions::StateMachineAlias" + }, + "MyStateMachineApiEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "apigateway.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": 
"Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineApiEventRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineCWEvent": { + "Properties": { + "EventPattern": { + "detail": { + "state": [ + "terminated" + ] + } + }, + "Targets": [ + { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "Id": "MyStateMachineCWEventStepFunctionsTarget", + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineCWEventRole", + "Arn" + ] + } + } + ] + }, + "Type": "AWS::Events::Rule" + }, + "MyStateMachineCWEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "events.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineCWEventRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineCWSchedule": { + "Properties": { + "Description": "test schedule", + "Name": "TestSchedule", + "ScheduleExpression": "rate(1 minute)", + "State": "DISABLED", + "Targets": [ + { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "Id": "MyStateMachineCWScheduleStepFunctionsTarget", + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineCWScheduleRole", + "Arn" + ] + } + } + ] + }, + "Type": "AWS::Events::Rule" + }, + "MyStateMachineCWScheduleRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "events.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineCWScheduleRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineEBEvent": { + "Properties": { + "EventPattern": { + "source": [ + "aws.tag" + ] + }, + "Targets": [ + { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "Id": "MyStateMachineEBEventStepFunctionsTarget", + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineEBEventRole", + "Arn" + ] + } + } + ] + }, + "Type": "AWS::Events::Rule" + }, + "MyStateMachineEBEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "events.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineEBEventRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineScheduleEvent": { + "Properties": { + "FlexibleTimeWindow": { + "Mode": "OFF" + }, + "Name": "MyStateMachineScheduleEvent", + "ScheduleExpression": "rate(1 minute)", + "Target": { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineScheduleEventRole", + "Arn" + ] + } + } + }, + "Type": "AWS::Scheduler::Schedule" + }, + "MyStateMachineScheduleEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ 
+ "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "scheduler.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineScheduleEventStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineVersion": { + "DeletionPolicy": "Retain", + "Properties": { + "StateMachineArn": { + "Ref": "MyStateMachine" + }, + "StateMachineRevisionId": { + "Fn::GetAtt": [ + "MyStateMachine", + "StateMachineRevisionId" + ] + } + }, + "Type": "AWS::StepFunctions::StateMachineVersion", + "UpdateReplacePolicy": "Retain" + }, + "ServerlessRestApi": { + "Properties": { + "Body": { + "info": { + "title": { + "Ref": "AWS::StackName" + }, + "version": "1.0" + }, + "paths": { + "/path": { + "get": { + "responses": { + "200": { + "description": "OK" + }, + "400": { + "description": "Bad Request" + } + }, + "x-amazon-apigateway-integration": { + "credentials": { + "Fn::GetAtt": [ + "MyStateMachineApiEventRole", + "Arn" + ] + }, + "httpMethod": "POST", + "requestTemplates": { + "application/json": { + "Fn::Sub": "{\"input\": \"$util.escapeJavaScript($input.json('$'))\", \"stateMachineArn\": \"${MyStateMachineAliastest}\"}" + } + }, + "responses": { + "200": { + "statusCode": "200" + }, + "400": { + "statusCode": "400" + } + }, + "type": "aws", + "uri": { + "Fn::Sub": "arn:${AWS::Partition}:apigateway:${AWS::Region}:states:action/StartExecution" + } + } + } + } + }, + "swagger": "2.0" + } + }, + "Type": "AWS::ApiGateway::RestApi" + }, + "ServerlessRestApiDeploymente6166edbc7": { + "Properties": { + "Description": "RestApi deployment id: e6166edbc7b05836f53278af31642807c36e76b3", + "RestApiId": { + "Ref": "ServerlessRestApi" + }, + "StageName": "Stage" + }, + "Type": "AWS::ApiGateway::Deployment" + }, + "ServerlessRestApiProdStage": { + "Properties": { + "DeploymentId": { + "Ref": "ServerlessRestApiDeploymente6166edbc7" + }, + "RestApiId": { + "Ref": "ServerlessRestApi" + }, + "StageName": "Prod" + }, + "Type": "AWS::ApiGateway::Stage" + } + } +} From 2aef9554890e5fd183439397a8cdfafd4b50a137 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 14 Aug 2024 16:52:39 +0000 Subject: [PATCH 3/7] chore(schema): update (#3635) Co-authored-by: github-actions --- samtranslator/schema/schema.json | 72 ++++----- schema_source/cloudformation-docs.json | 193 +++++++++++++++++------ schema_source/cloudformation.schema.json | 72 ++++----- 3 files changed, 216 insertions(+), 121 deletions(-) diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index 5176e0d2d..f1612a9dd 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -22542,7 +22542,7 @@ "additionalProperties": false, "properties": { "ServiceName": { - "markdownDescription": "The name of the AWS service .", + "markdownDescription": "The name of the AWS-service .", "title": "ServiceName", "type": "string" } @@ -22657,7 +22657,7 @@ "items": { "$ref": "#/definitions/AWS::AuditManager::Assessment.AWSService" }, - "markdownDescription": "The AWS services that are included in the scope of the assessment.\n\n> This API parameter is no longer supported. If you use this parameter to specify one or more AWS services , Audit Manager ignores this input. 
Instead, the value for `awsServices` will show as empty.", + "markdownDescription": "The AWS services that are included in the scope of the assessment.\n\n> This API parameter is no longer supported. If you use this parameter to specify one or more AWS-services , Audit Manager ignores this input. Instead, the value for `awsServices` will show as empty.", "title": "AwsServices", "type": "array" } @@ -32773,7 +32773,7 @@ "type": "string" }, "SlackChannelId": { - "markdownDescription": "The ID of the Slack channel.\n\nTo get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, `ABCBBLZZZ` .", + "markdownDescription": "The ID of the Slack channel.\n\nTo get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the character string at the end of the URL. For example, `ABCBBLZZZ` .", "title": "SlackChannelId", "type": "string" }, @@ -39682,7 +39682,7 @@ "items": { "type": "string" }, - "markdownDescription": "An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type.\n\n- To log data events for all objects in all S3 buckets in your AWS account , specify the prefix as `arn:aws:s3` .\n\n> This also enables logging of data event activity performed by any user or role in your AWS account , even if that activity is performed on a bucket that belongs to another AWS account .\n- To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as `arn:aws:s3:::DOC-EXAMPLE-BUCKET1/` . The trail logs data events for all objects in this S3 bucket.\n- To log data events for specific objects, specify the S3 bucket and object prefix such as `arn:aws:s3:::DOC-EXAMPLE-BUCKET1/example-images` . The trail logs data events for objects in this S3 bucket that match the prefix.\n- To log data events for all Lambda functions in your AWS account , specify the prefix as `arn:aws:lambda` .\n\n> This also enables logging of `Invoke` activity performed by any user or role in your AWS account , even if that activity is performed on a function that belongs to another AWS account .\n- To log data events for a specific Lambda function, specify the function ARN.\n\n> Lambda function ARNs are exact. For example, if you specify a function ARN *arn:aws:lambda:us-west-2:111111111111:function:helloworld* , data events will only be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld* . They will not be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld2* .\n- To log data events for all DynamoDB tables in your AWS account , specify the prefix as `arn:aws:dynamodb` .", + "markdownDescription": "An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type.\n\n- To log data events for all objects in all S3 buckets in your AWS account , specify the prefix as `arn:aws:s3` .\n\n> This also enables logging of data event activity performed by any user or role in your AWS account , even if that activity is performed on a bucket that belongs to another AWS account .\n- To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as `arn:aws:s3:::amzn-s3-demo-bucket1/` . The trail logs data events for all objects in this S3 bucket.\n- To log data events for specific objects, specify the S3 bucket and object prefix such as `arn:aws:s3:::amzn-s3-demo-bucket1/example-images` . 
The trail logs data events for objects in this S3 bucket that match the prefix.\n- To log data events for all Lambda functions in your AWS account , specify the prefix as `arn:aws:lambda` .\n\n> This also enables logging of `Invoke` activity performed by any user or role in your AWS account , even if that activity is performed on a function that belongs to another AWS account .\n- To log data events for a specific Lambda function, specify the function ARN.\n\n> Lambda function ARNs are exact. For example, if you specify a function ARN *arn:aws:lambda:us-west-2:111111111111:function:helloworld* , data events will only be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld* . They will not be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld2* .\n- To log data events for all DynamoDB tables in your AWS account , specify the prefix as `arn:aws:dynamodb` .", "title": "Values", "type": "array" } @@ -44531,7 +44531,7 @@ "additionalProperties": false, "properties": { "Authentication": { - "markdownDescription": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be masked in the response. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", + "markdownDescription": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be redacted in the response. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", "title": "Authentication", "type": "string" }, @@ -44614,7 +44614,7 @@ "type": "string" }, "SecretToken": { - "markdownDescription": "The property used to configure GitHub authentication. 
For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be masked in the response.", + "markdownDescription": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be redacted in the response.", "title": "SecretToken", "type": "string" } @@ -45694,12 +45694,12 @@ "items": { "$ref": "#/definitions/AWS::Cognito::LogDeliveryConfiguration.LogConfiguration" }, - "markdownDescription": "The detailed activity logging destination of a user pool.", + "markdownDescription": "A logging destination of a user pool. User pools can have multiple logging destinations for message-delivery and user-activity logs.", "title": "LogConfigurations", "type": "array" }, "UserPoolId": { - "markdownDescription": "The ID of the user pool where you configured detailed activity logging.", + "markdownDescription": "The ID of the user pool where you configured logging.", "title": "UserPoolId", "type": "string" } @@ -45746,16 +45746,16 @@ "properties": { "CloudWatchLogsConfiguration": { "$ref": "#/definitions/AWS::Cognito::LogDeliveryConfiguration.CloudWatchLogsConfiguration", - "markdownDescription": "The CloudWatch logging destination of a user pool detailed activity logging configuration.", + "markdownDescription": "Configuration for the CloudWatch log group destination of user pool detailed activity logging, or of user activity log export with advanced security features.", "title": "CloudWatchLogsConfiguration" }, "EventSource": { - "markdownDescription": "The source of events that your user pool sends for detailed activity logging.", + "markdownDescription": "The source of events that your user pool sends for logging. To send error-level logs about user notification activity, set to `userNotification` . To send info-level logs about advanced security features user activity, set to `userAuthEvents` .", "title": "EventSource", "type": "string" }, "LogLevel": { - "markdownDescription": "The `errorlevel` selection of logs that a user pool sends for detailed activity logging.", + "markdownDescription": "The `errorlevel` selection of logs that a user pool sends for detailed activity logging. 
To send `userNotification` activity with [information about message delivery](https://docs.aws.amazon.com/cognito/latest/developerguide/tracking-quotas-and-usage-in-cloud-watch-logs.html) , choose `ERROR` with `CloudWatchLogsConfiguration` . To send `userAuthEvents` activity with user logs from advanced security features, choose `INFO` with one of `CloudWatchLogsConfiguration` , `FirehoseConfiguration` , or `S3Configuration` .", "title": "LogLevel", "type": "string" } @@ -46366,7 +46366,7 @@ "additionalProperties": false, "properties": { "AdvancedSecurityMode": { - "markdownDescription": "The operating mode of advanced security features in your user pool.", + "markdownDescription": "The operating mode of advanced security features for standard authentication types in your user pool, including username-password and secure remote password (SRP) authentication.", "title": "AdvancedSecurityMode", "type": "string" } @@ -62354,7 +62354,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster.", + "markdownDescription": "The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your HDFS cluster.", "title": "AgentArns", "type": "array" }, @@ -62531,7 +62531,7 @@ }, "OnPremConfig": { "$ref": "#/definitions/AWS::DataSync::LocationNFS.OnPremConfig", - "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that want to connect to your NFS file server.\n\nYou can specify more than one agent. For more information, see [Using multiple agents for transfers](https://docs.aws.amazon.com/datasync/latest/userguide/multiple-agents.html) .", + "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect to your NFS file server.\n\nYou can specify more than one agent. For more information, see [Using multiple DataSync agents](https://docs.aws.amazon.com/datasync/latest/userguide/do-i-need-datasync-agent.html#multiple-agents) .", "title": "OnPremConfig" }, "ServerHostname": { @@ -62597,7 +62597,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Amazon Resource Names (ARNs) of the agents connecting to a transfer location.", + "markdownDescription": "The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your NFS file server.\n\nYou can specify more than one agent. For more information, see [Using multiple DataSync agents](https://docs.aws.amazon.com/datasync/latest/userguide/do-i-need-datasync-agent.html#multiple-agents) .", "title": "AgentArns", "type": "array" } @@ -62651,7 +62651,7 @@ "items": { "type": "string" }, - "markdownDescription": "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can securely connect with your location.", + "markdownDescription": "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can connect with your object storage system.", "title": "AgentArns", "type": "array" }, @@ -73217,7 +73217,7 @@ "title": "IamInstanceProfile" }, "ImageId": { - "markdownDescription": "The ID of the AMI. 
Alternatively, you can specify a Systems Manager parameter, which will resolve to an AMI ID on launch.\n\nValid formats:\n\n- `ami-17characters00000`\n- `resolve:ssm:parameter-name`\n- `resolve:ssm:parameter-name:version-number`\n- `resolve:ssm:parameter-name:label`\n\nFor more information, see [Use a Systems Manager parameter to find an AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html#using-systems-manager-parameter-to-find-AMI) in the *Amazon Elastic Compute Cloud User Guide* .", + "markdownDescription": "The ID of the AMI. Alternatively, you can specify a Systems Manager parameter, which will resolve to an AMI ID on launch.\n\nValid formats:\n\n- `ami-0ac394d6a3example`\n- `resolve:ssm:parameter-name`\n- `resolve:ssm:parameter-name:version-number`\n- `resolve:ssm:parameter-name:label`\n\nFor more information, see [Use a Systems Manager parameter to find an AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html#using-systems-manager-parameter-to-find-AMI) in the *Amazon Elastic Compute Cloud User Guide* .", "title": "ImageId", "type": "string" }, @@ -75055,7 +75055,7 @@ "type": "string" }, "destinationPrefixListId": { - "markdownDescription": "The prefix of the AWS service .", + "markdownDescription": "The prefix of the AWS-service .", "title": "destinationPrefixListId", "type": "string" }, @@ -79231,8 +79231,6 @@ "type": "string" }, "SecurityGroupReferencingSupport": { - "markdownDescription": "Enables you to reference a security group across VPCs attached to a transit gateway (TGW). Use this option to simplify security group management and control of instance-to-instance traffic across VPCs that are connected by transit gateway. You can also use this option to migrate from VPC peering (which was the only option that supported security group referencing) to transit gateways (which now also support security group referencing). This option is disabled by default and there are no additional costs to use this feature.\n\nFor important information about this feature, see [Create a transit gateway](https://docs.aws.amazon.com/vpc/latest/tgw/tgw-transit-gateways.html#create-tgw) in the *AWS Transit Gateway Guide* .", - "title": "SecurityGroupReferencingSupport", "type": "string" } }, @@ -143631,7 +143629,7 @@ "type": "string" }, "Principal": { - "markdownDescription": "The AWS service or AWS account that invokes the function. If you specify a service, use `SourceArn` or `SourceAccount` to limit who can invoke the function through that service.", + "markdownDescription": "The AWS-service or AWS account that invokes the function. If you specify a service, use `SourceArn` or `SourceAccount` to limit who can invoke the function through that service.", "title": "Principal", "type": "string" }, @@ -143641,12 +143639,12 @@ "type": "string" }, "SourceAccount": { - "markdownDescription": "For AWS service , the ID of the AWS account that owns the resource. Use this together with `SourceArn` to ensure that the specified account owns the resource. It is possible for an Amazon S3 bucket to be deleted by its owner and recreated by another account.", + "markdownDescription": "For AWS-service , the ID of the AWS account that owns the resource. Use this together with `SourceArn` to ensure that the specified account owns the resource. 
It is possible for an Amazon S3 bucket to be deleted by its owner and recreated by another account.", "title": "SourceAccount", "type": "string" }, "SourceArn": { - "markdownDescription": "For AWS services , the ARN of the AWS resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic.\n\nNote that Lambda configures the comparison using the `StringLike` operator.", + "markdownDescription": "For AWS-services , the ARN of the AWS resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic.\n\nNote that Lambda configures the comparison using the `StringLike` operator.", "title": "SourceArn", "type": "string" } @@ -161769,8 +161767,6 @@ "additionalProperties": false, "properties": { "ChannelId": { - "markdownDescription": "The unique ID of the channel.", - "title": "ChannelId", "type": "string" }, "MultiplexId": { @@ -167392,7 +167388,7 @@ "type": "string" }, "LogType": { - "markdownDescription": "The type of log to send. Alert logs report traffic that matches a stateful rule with an action setting that sends an alert log message. Flow logs are standard network traffic flow logs.", + "markdownDescription": "The type of log to record. You can record the following types of logs from your Network Firewall stateful engine.\n\n- `ALERT` - Logs for traffic that matches your stateful rules and that have an action that sends an alert. A stateful rule sends alerts for the rule actions DROP, ALERT, and REJECT. For more information, see the `StatefulRule` property.\n- `FLOW` - Standard network traffic flow logs. The stateful rules engine records flow logs for all network traffic that it receives. Each flow log record captures the network flow for a specific standard stateless rule group.\n- `TLS` - Logs for events that are related to TLS inspection. For more information, see [Inspecting SSL/TLS traffic with TLS inspection configurations](https://docs.aws.amazon.com/network-firewall/latest/developerguide/tls-inspection-configurations.html) in the *Network Firewall Developer Guide* .", "title": "LogType", "type": "string" } @@ -182742,7 +182738,7 @@ "type": "string" }, "OutputFormat": { - "markdownDescription": "The format EventBridge uses for the log records.\n\n- `json` : JSON\n- `plain` : Plain text\n- `w3c` : [W3C extended logging file format](https://docs.aws.amazon.com/https://www.w3.org/TR/WD-logfile)", + "markdownDescription": "The format EventBridge uses for the log records.\n\nEventBridge currently only supports `json` formatting.", "title": "OutputFormat", "type": "string" }, @@ -225470,7 +225466,7 @@ "type": "string" }, "RestoreTime": { - "markdownDescription": "The date and time to restore from.\n\nConstraints:\n\n- Must be a time in Universal Coordinated Time (UTC) format.\n- Must be before the latest restorable time for the DB instance.\n- Can't be specified if the `UseLatestRestorableTime` parameter is enabled.\n\nExample: `2009-09-07T23:45:00Z`", + "markdownDescription": "The date and time to restore from. This parameter applies to point-in-time recovery. 
For more information, see [Restoring a DB instance to a specified time](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIT.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Must be a time in Universal Coordinated Time (UTC) format.\n- Must be before the latest restorable time for the DB instance.\n- Can't be specified if the `UseLatestRestorableTime` parameter is enabled.\n\nExample: `2009-09-07T23:45:00Z`",
          "title": "RestoreTime",
          "type": "string"
        },
@@ -225533,7 +225529,7 @@
          "type": "boolean"
        },
        "UseLatestRestorableTime": {
-          "markdownDescription": "Specifies whether the DB instance is restored from the latest backup time. By default, the DB instance isn't restored from the latest backup time.\n\nConstraints:\n\n- Can't be specified if the `RestoreTime` parameter is provided.",
+          "markdownDescription": "Specifies whether the DB instance is restored from the latest backup time. By default, the DB instance isn't restored from the latest backup time. This parameter applies to point-in-time recovery. For more information, see [Restoring a DB instance to a specified time](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIT.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Can't be specified if the `RestoreTime` parameter is provided.",
          "title": "UseLatestRestorableTime",
          "type": "boolean"
        },
@@ -235499,7 +235495,7 @@
          "type": "boolean"
        },
        "RestrictPublicBuckets": {
-          "markdownDescription": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to `TRUE` restricts access to this bucket to only AWS service principals and authorized users within this account if the bucket has a public policy.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.",
+          "markdownDescription": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to `TRUE` restricts access to this bucket to only AWS-service principals and authorized users within this account if the bucket has a public policy.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.",
          "title": "RestrictPublicBuckets",
          "type": "boolean"
        }
@@ -236373,7 +236369,7 @@
          "type": "boolean"
        },
        "RestrictPublicBuckets": {
-          "markdownDescription": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to `TRUE` restricts access to this bucket to only AWS service principals and authorized users within this account if the bucket has a public policy.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.",
+          "markdownDescription": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. 
Setting this element to `TRUE` restricts access to this bucket to only AWS-service principals and authorized users within this account if the bucket has a public policy.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.", "title": "RestrictPublicBuckets", "type": "boolean" } @@ -237206,7 +237202,7 @@ "type": "boolean" }, "RestrictPublicBuckets": { - "markdownDescription": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to `TRUE` restricts access to this bucket to only AWS service principals and authorized users within this account if the bucket has a public policy.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.", + "markdownDescription": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to `TRUE` restricts access to this bucket to only AWS-service principals and authorized users within this account if the bucket has a public policy.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.", "title": "RestrictPublicBuckets", "type": "boolean" } @@ -238354,7 +238350,7 @@ "type": "boolean" }, "RestrictPublicBuckets": { - "markdownDescription": "Specifies whether Amazon S3 should restrict public bucket policies for buckets in this account. Setting this element to `TRUE` restricts access to buckets with public policies to only AWS service principals and authorized users within this account.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.\n\nThis property is not supported for Amazon S3 on Outposts.", + "markdownDescription": "Specifies whether Amazon S3 should restrict public bucket policies for buckets in this account. Setting this element to `TRUE` restricts access to buckets with public policies to only AWS-service principals and authorized users within this account.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.\n\nThis property is not supported for Amazon S3 on Outposts.", "title": "RestrictPublicBuckets", "type": "boolean" } @@ -242489,12 +242485,12 @@ "additionalProperties": false, "properties": { "ApproveAfterDays": { - "markdownDescription": "The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of `7` means that patches are approved seven days after they are released.\n\nYou must specify a value for `ApproveAfterDays` .\n\nException: Not supported on Debian Server or Ubuntu Server.", + "markdownDescription": "The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. 
For example, a value of `7` means that patches are approved seven days after they are released.\n\n> This parameter is marked as not required, but your request must include a value for either `ApproveAfterDays` or `ApproveUntilDate` . \n\nNot supported for Debian Server or Ubuntu Server.", "title": "ApproveAfterDays", "type": "number" }, "ApproveUntilDate": { - "markdownDescription": "The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically. Not supported on Debian Server or Ubuntu Server.\n\nEnter dates in the format `YYYY-MM-DD` . For example, `2021-12-31` .", + "markdownDescription": "The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically.\n\nEnter dates in the format `YYYY-MM-DD` . For example, `2021-12-31` .\n\n> This parameter is marked as not required, but your request must include a value for either `ApproveUntilDate` or `ApproveAfterDays` . \n\nNot supported for Debian Server or Ubuntu Server.", "title": "ApproveUntilDate", "type": "string" }, @@ -255367,7 +255363,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.StringFilter" }, - "markdownDescription": "The identifier for the given resource type. For AWS resources that are identified by Amazon Resource Names (ARNs), this is the ARN. For AWS resources that lack ARNs, this is the identifier as defined by the AWS service that created the resource. For non- AWS resources, this is a unique identifier that is associated with the resource.\n\nArray Members: Minimum number of 1 item. Maximum number of 100 items.", + "markdownDescription": "The identifier for the given resource type. For AWS resources that are identified by Amazon Resource Names (ARNs), this is the ARN. For AWS resources that lack ARNs, this is the identifier as defined by the AWS-service that created the resource. For non- AWS resources, this is a unique identifier that is associated with the resource.\n\nArray Members: Minimum number of 1 item. Maximum number of 100 items.", "title": "ResourceId", "type": "array" }, @@ -255922,7 +255918,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" }, - "markdownDescription": "The unique identifier of a control across standards. Values for this field typically consist of an AWS service and a number, such as APIGateway.5.", + "markdownDescription": "The unique identifier of a control across standards. Values for this field typically consist of an AWS-service and a number, such as APIGateway.5.", "title": "ComplianceSecurityControlId", "type": "array" }, @@ -257287,7 +257283,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityLake::Subscriber.Source" }, - "markdownDescription": "Amazon Security Lake supports log and event collection for natively supported AWS services . For more information, see the [Amazon Security Lake User Guide](https://docs.aws.amazon.com//security-lake/latest/userguide/source-management.html) .", + "markdownDescription": "Amazon Security Lake supports log and event collection for natively supported AWS-services . 
For more information, see the [Amazon Security Lake User Guide](https://docs.aws.amazon.com//security-lake/latest/userguide/source-management.html) .", "title": "Sources", "type": "array" }, diff --git a/schema_source/cloudformation-docs.json b/schema_source/cloudformation-docs.json index 735064c16..821ec121e 100644 --- a/schema_source/cloudformation-docs.json +++ b/schema_source/cloudformation-docs.json @@ -3737,7 +3737,7 @@ "Name": "The name of the AWS account ." }, "AWS::AuditManager::Assessment AWSService": { - "ServiceName": "The name of the AWS service ." + "ServiceName": "The name of the AWS-service ." }, "AWS::AuditManager::Assessment AssessmentReportsDestination": { "Destination": "The destination bucket where Audit Manager stores assessment reports.", @@ -3762,7 +3762,7 @@ }, "AWS::AuditManager::Assessment Scope": { "AwsAccounts": "The AWS accounts that are included in the scope of the assessment.", - "AwsServices": "The AWS services that are included in the scope of the assessment.\n\n> This API parameter is no longer supported. If you use this parameter to specify one or more AWS services , Audit Manager ignores this input. Instead, the value for `awsServices` will show as empty." + "AwsServices": "The AWS services that are included in the scope of the assessment.\n\n> This API parameter is no longer supported. If you use this parameter to specify one or more AWS-services , Audit Manager ignores this input. Instead, the value for `awsServices` will show as empty." }, "AWS::AuditManager::Assessment Tag": { "Key": "One part of a key-value pair that make up a tag. A `key` is a general label that acts like a category for more specific tag values.", @@ -5717,7 +5717,7 @@ "GuardrailPolicies": "The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.", "IamRoleArn": "The ARN of the IAM role that defines the permissions for AWS Chatbot .\n\nThis is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see [IAM Policies for AWS Chatbot](https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html) .", "LoggingLevel": "Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.\n\nLogging levels include `ERROR` , `INFO` , or `NONE` .", - "SlackChannelId": "The ID of the Slack channel.\n\nTo get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, `ABCBBLZZZ` .", + "SlackChannelId": "The ID of the Slack channel.\n\nTo get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the character string at the end of the URL. For example, `ABCBBLZZZ` .", "SlackWorkspaceId": "The ID of the Slack workspace authorized with AWS Chatbot .\n\nTo get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. 
For more details, see steps 1-4 in [Setting Up AWS Chatbot with Slack](https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro) in the *AWS Chatbot User Guide* .", "SnsTopicArns": "The ARNs of the SNS topics that deliver notifications to AWS Chatbot .", "Tags": "The tags to add to the configuration.", @@ -6766,7 +6766,7 @@ }, "AWS::CloudTrail::Trail DataResource": { "Type": "The resource type in which you want to log data events. You can specify the following *basic* event selector resource types:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n\nAdditional resource types are available through *advanced* event selectors. For more information about these additional resource types, see [AdvancedFieldSelector](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_AdvancedFieldSelector.html) .", - "Values": "An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type.\n\n- To log data events for all objects in all S3 buckets in your AWS account , specify the prefix as `arn:aws:s3` .\n\n> This also enables logging of data event activity performed by any user or role in your AWS account , even if that activity is performed on a bucket that belongs to another AWS account .\n- To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as `arn:aws:s3:::DOC-EXAMPLE-BUCKET1/` . The trail logs data events for all objects in this S3 bucket.\n- To log data events for specific objects, specify the S3 bucket and object prefix such as `arn:aws:s3:::DOC-EXAMPLE-BUCKET1/example-images` . The trail logs data events for objects in this S3 bucket that match the prefix.\n- To log data events for all Lambda functions in your AWS account , specify the prefix as `arn:aws:lambda` .\n\n> This also enables logging of `Invoke` activity performed by any user or role in your AWS account , even if that activity is performed on a function that belongs to another AWS account .\n- To log data events for a specific Lambda function, specify the function ARN.\n\n> Lambda function ARNs are exact. For example, if you specify a function ARN *arn:aws:lambda:us-west-2:111111111111:function:helloworld* , data events will only be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld* . They will not be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld2* .\n- To log data events for all DynamoDB tables in your AWS account , specify the prefix as `arn:aws:dynamodb` ." + "Values": "An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type.\n\n- To log data events for all objects in all S3 buckets in your AWS account , specify the prefix as `arn:aws:s3` .\n\n> This also enables logging of data event activity performed by any user or role in your AWS account , even if that activity is performed on a bucket that belongs to another AWS account .\n- To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as `arn:aws:s3:::amzn-s3-demo-bucket1/` . The trail logs data events for all objects in this S3 bucket.\n- To log data events for specific objects, specify the S3 bucket and object prefix such as `arn:aws:s3:::amzn-s3-demo-bucket1/example-images` . 
The trail logs data events for objects in this S3 bucket that match the prefix.\n- To log data events for all Lambda functions in your AWS account , specify the prefix as `arn:aws:lambda` .\n\n> This also enables logging of `Invoke` activity performed by any user or role in your AWS account , even if that activity is performed on a function that belongs to another AWS account .\n- To log data events for a specific Lambda function, specify the function ARN.\n\n> Lambda function ARNs are exact. For example, if you specify a function ARN *arn:aws:lambda:us-west-2:111111111111:function:helloworld* , data events will only be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld* . They will not be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld2* .\n- To log data events for all DynamoDB tables in your AWS account , specify the prefix as `arn:aws:dynamodb` ." }, "AWS::CloudTrail::Trail EventSelector": { "DataResources": "CloudTrail supports data event logging for Amazon S3 objects in standard S3 buckets, AWS Lambda functions, and Amazon DynamoDB tables with basic event selectors. You can specify up to 250 resources for an individual event selector, but the total number of data resources cannot exceed 250 across all event selectors in a trail. This limit does not apply if you configure resource logging for all data events.\n\nFor more information, see [Data Events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html) and [Limits in AWS CloudTrail](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/WhatIsCloudTrail-Limits.html) in the *AWS CloudTrail User Guide* .\n\n> To log data events for all other resource types including objects stored in [directory buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) , you must use [AdvancedEventSelectors](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_AdvancedEventSelector.html) . You must also use `AdvancedEventSelectors` if you want to filter on the `eventName` field.", @@ -7499,15 +7499,23 @@ "ArtifactStore": "Represents information about the S3 bucket where artifacts are stored for the pipeline.\n\n> You must include either `artifactStore` or `artifactStores` in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use `artifactStores` .", "Region": "The action declaration's AWS Region, such as us-east-1." }, + "AWS::CodePipeline::Pipeline BeforeEntryConditions": { + "Conditions": "The conditions that are configured as entry conditions." + }, "AWS::CodePipeline::Pipeline BlockerDeclaration": { "Name": "Reserved for future use.", "Type": "Reserved for future use." }, + "AWS::CodePipeline::Pipeline Condition": { + "Result": "The action to be done when the condition is met. For example, rolling back an execution for a failure condition.", + "Rules": "The rules that make up the condition." + }, "AWS::CodePipeline::Pipeline EncryptionKey": { "Id": "The ID used to identify the key. For an AWS KMS key, you can use the key ID, the key ARN, or the alias ARN.\n\n> Aliases are recognized only in the account that created the AWS KMS key. For cross-account actions, you can only use the key ID or key ARN to identify the key. Cross-account actions involve using the role from the other account (AccountB), so specifying the key ID will use the key from the other account (AccountB).", "Type": "The type of encryption key, such as an AWS KMS key. 
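As a concrete illustration of the basic event selector resource types and the ARN-prefix matching rules described above, the following is a minimal sketch of an `AWS::CloudTrail::Trail` that logs S3 data events for one object prefix. The bucket names reuse the documentation placeholders; `TrailBucket` is a hypothetical logging bucket assumed to be defined elsewhere in the same template.

```json
{
  "AuditTrail": {
    "Type": "AWS::CloudTrail::Trail",
    "Properties": {
      "IsLogging": true,
      "S3BucketName": { "Ref": "TrailBucket" },
      "EventSelectors": [
        {
          "IncludeManagementEvents": true,
          "ReadWriteType": "All",
          "DataResources": [
            {
              "Type": "AWS::S3::Object",
              "Values": ["arn:aws:s3:::amzn-s3-demo-bucket1/example-images"]
            }
          ]
        }
      ]
    }
  }
}
```

Swapping the `Values` entry for the bare prefix `arn:aws:s3` would, per the text above, log data events for all objects in all S3 buckets in the account.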
When creating or updating a pipeline, the value must be set to 'KMS'." }, "AWS::CodePipeline::Pipeline FailureConditions": { + "Conditions": "The conditions that are configured as failure conditions.", "Result": "The specified result for when the failure conditions are met, such as rolling back the stage." }, "AWS::CodePipeline::Pipeline GitBranchFilterCriteria": { @@ -7547,16 +7555,35 @@ "GitConfiguration": "Provides the filter criteria and the source stage for the repository event that starts the pipeline, such as Git tags.", "ProviderType": "The source provider for the event, such as connections configured for a repository with Git tags, for the specified trigger configuration." }, + "AWS::CodePipeline::Pipeline RuleDeclaration": { + "Configuration": "The action configuration fields for the rule.", + "InputArtifacts": "The input artifacts fields for the rule, such as specifying an input file for the rule.", + "Name": "The name of the rule that is created for the condition, such as CheckAllResults.", + "Region": "The Region for the condition associated with the rule.", + "RoleArn": "The pipeline role ARN associated with the rule.", + "RuleTypeId": "The ID for the rule type, which is made up of the combined values for category, owner, provider, and version." + }, + "AWS::CodePipeline::Pipeline RuleTypeId": { + "Category": "A category defines what kind of rule can be run in the stage, and constrains the provider type for the rule. The valid category is `Rule` .", + "Owner": "The creator of the rule being called. The valid value for the `Owner` field in the rule category is `AWS` .", + "Provider": "The rule provider, such as the `DeploymentWindow` rule.", + "Version": "A string that describes the rule version." + }, "AWS::CodePipeline::Pipeline StageDeclaration": { "Actions": "The actions included in a stage.", + "BeforeEntry": "The method to use when a stage allows entry. For example, configuring this field for conditions will allow entry to the stage when the conditions are met.", "Blockers": "Reserved for future use.", "Name": "The name of the stage.", - "OnFailure": "The method to use when a stage has not completed successfully. For example, configuring this field for rollback will roll back a failed stage automatically to the last successful pipeline execution in the stage." + "OnFailure": "The method to use when a stage has not completed successfully. For example, configuring this field for rollback will roll back a failed stage automatically to the last successful pipeline execution in the stage.", + "OnSuccess": "The method to use when a stage has succeeded. For example, configuring this field for conditions will allow the stage to succeed when the conditions are met." }, "AWS::CodePipeline::Pipeline StageTransition": { "Reason": "The reason given to the user that a stage is disabled, such as waiting for manual approval or manual tests. This message is displayed in the pipeline console UI.", "StageName": "The name of the stage where you want to disable the inbound or outbound transition of artifacts." }, + "AWS::CodePipeline::Pipeline SuccessConditions": { + "Conditions": "The conditions that are success conditions." + }, "AWS::CodePipeline::Pipeline Tag": { "Key": "The tag's key.", "Value": "The tag's value." @@ -7567,7 +7594,7 @@ "Name": "The name of a pipeline-level variable." 
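The new stage-level condition properties documented above (`BeforeEntry`, `OnSuccess`, `OnFailure`, `Condition`, `RuleDeclaration`, and `RuleTypeId`) compose roughly as in the following hedged sketch of a stage declaration. The `Category`/`Owner`/`Provider` values come from the descriptions above; the `Configuration` keys (`Cron`, `TimeZone`) are assumptions for the `DeploymentWindow` rule provider, and the manual-approval action is just a placeholder so the stage is well formed.

```json
{
  "Name": "Deploy",
  "Actions": [
    {
      "Name": "Approve",
      "ActionTypeId": { "Category": "Approval", "Owner": "AWS", "Provider": "Manual", "Version": "1" }
    }
  ],
  "BeforeEntry": {
    "Conditions": [
      {
        "Result": "FAIL",
        "Rules": [
          {
            "Name": "CheckDeploymentWindow",
            "RuleTypeId": { "Category": "Rule", "Owner": "AWS", "Provider": "DeploymentWindow", "Version": "1" },
            "Configuration": { "Cron": "0 9 * * ? *", "TimeZone": "UTC" }
          }
        ]
      }
    ]
  },
  "OnFailure": { "Result": "ROLLBACK" }
}
```

The `Result` values shown are plausible settings given the descriptions above: the entry condition fails the stage entry when the rule is not met, while `OnFailure` rolls a failed stage back to the last successful execution.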
}, "AWS::CodePipeline::Webhook": { - "Authentication": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be masked in the response. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", + "Authentication": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be redacted in the response. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", "AuthenticationConfiguration": "Properties that configure the authentication applied to incoming webhook trigger requests. The required properties depend on the authentication type. For GITHUB_HMAC, only the `SecretToken` property must be set. For IP, only the `AllowedIPRange` property must be set to a valid CIDR range. For UNAUTHENTICATED, no properties can be set.", "Filters": "A list of rules applied to the body/payload sent in the POST request to a webhook URL. All defined rules must pass for the request to be accepted and the pipeline started.", "Name": "The name of the webhook.", @@ -7578,7 +7605,7 @@ }, "AWS::CodePipeline::Webhook WebhookAuthConfiguration": { "AllowedIPRange": "The property used to configure acceptance of webhooks in an IP address range. For IP, only the `AllowedIPRange` property must be set. This property must be set to a valid CIDR range.", - "SecretToken": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. 
The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be masked in the response." + "SecretToken": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be redacted in the response." }, "AWS::CodePipeline::Webhook WebhookFilterRule": { "JsonPath": "A JsonPath expression that is applied to the body/payload of the webhook. The value selected by the JsonPath expression must match the value specified in the `MatchEquals` field. Otherwise, the request is ignored. For more information, see [Java JsonPath implementation](https://docs.aws.amazon.com/https://github.com/json-path/JsonPath) in GitHub.", @@ -7703,16 +7730,24 @@ "Rules": "The rules. You can specify up to 25 rules per identity provider." }, "AWS::Cognito::LogDeliveryConfiguration": { - "LogConfigurations": "The detailed activity logging destination of a user pool.", - "UserPoolId": "The ID of the user pool where you configured detailed activity logging." + "LogConfigurations": "A logging destination of a user pool. User pools can have multiple logging destinations for message-delivery and user-activity logs.", + "UserPoolId": "The ID of the user pool where you configured logging." }, "AWS::Cognito::LogDeliveryConfiguration CloudWatchLogsConfiguration": { "LogGroupArn": "The Amazon Resource Name (arn) of a CloudWatch Logs log group where your user pool sends logs. The log group must not be encrypted with AWS Key Management Service and must be in the same AWS account as your user pool.\n\nTo send logs to log groups with a resource policy of a size greater than 5120 characters, configure a log group with a path that starts with `/aws/vendedlogs` . For more information, see [Enabling logging from certain AWS services](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) ." }, + "AWS::Cognito::LogDeliveryConfiguration FirehoseConfiguration": { + "StreamArn": "The ARN of an Amazon Data Firehose stream that's the destination for advanced security features log export." + }, "AWS::Cognito::LogDeliveryConfiguration LogConfiguration": { - "CloudWatchLogsConfiguration": "The CloudWatch logging destination of a user pool detailed activity logging configuration.", - "EventSource": "The source of events that your user pool sends for detailed activity logging.", - "LogLevel": "The `errorlevel` selection of logs that a user pool sends for detailed activity logging." 
+ "CloudWatchLogsConfiguration": "Configuration for the CloudWatch log group destination of user pool detailed activity logging, or of user activity log export with advanced security features.", + "EventSource": "The source of events that your user pool sends for logging. To send error-level logs about user notification activity, set to `userNotification` . To send info-level logs about advanced security features user activity, set to `userAuthEvents` .", + "FirehoseConfiguration": "Configuration for the Amazon Data Firehose stream destination of user activity log export with advanced security features.", + "LogLevel": "The `errorlevel` selection of logs that a user pool sends for detailed activity logging. To send `userNotification` activity with [information about message delivery](https://docs.aws.amazon.com/cognito/latest/developerguide/tracking-quotas-and-usage-in-cloud-watch-logs.html) , choose `ERROR` with `CloudWatchLogsConfiguration` . To send `userAuthEvents` activity with user logs from advanced security features, choose `INFO` with one of `CloudWatchLogsConfiguration` , `FirehoseConfiguration` , or `S3Configuration` .", + "S3Configuration": "Configuration for the Amazon S3 bucket destination of user activity log export with advanced security features." + }, + "AWS::Cognito::LogDeliveryConfiguration S3Configuration": { + "BucketArn": "The ARN of an Amazon S3 bucket that's the destination for advanced security features log export." }, "AWS::Cognito::UserPool": { "AccountRecoverySetting": "Use this setting to define which verified available method a user can use to recover their password when they call `ForgotPassword` . It allows you to define a preferred method when a user has more than one method available. With this setting, SMS does not qualify for a valid password recovery mechanism if the user also has SMS MFA enabled. In the absence of this setting, Cognito uses the legacy behavior to determine the recovery method where SMS is preferred over email.", @@ -7748,6 +7783,9 @@ "InviteMessageTemplate": "The message template to be used for the welcome message to new users.\n\nSee also [Customizing User Invitation Messages](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-message-customizations.html#cognito-user-pool-settings-user-invitation-message-customization) .", "UnusedAccountValidityDays": "The user account expiration limit, in days, after which a new account that hasn't signed in is no longer usable. To reset the account after that time limit, you must call `AdminCreateUser` again, specifying `\"RESEND\"` for the `MessageAction` parameter. The default value for this parameter is 7.\n\n> If you set a value for `TemporaryPasswordValidityDays` in `PasswordPolicy` , that value will be used, and `UnusedAccountValidityDays` will be no longer be an available parameter for that user pool." }, + "AWS::Cognito::UserPool AdvancedSecurityAdditionalFlows": { + "CustomAuthMode": "" + }, "AWS::Cognito::UserPool CustomEmailSender": { "LambdaArn": "The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Cognito triggers to send email notifications to users.", "LambdaVersion": "The Lambda version represents the signature of the \"request\" attribute in the \"event\" information that Amazon Cognito passes to your custom email sender AWS Lambda function. The only supported value is `V1_0` ." @@ -7794,6 +7832,7 @@ }, "AWS::Cognito::UserPool PasswordPolicy": { "MinimumLength": "The minimum length of the password in the policy that you have set. 
This value can't be less than 6.", + "PasswordHistorySize": "The number of previous passwords that you want Amazon Cognito to restrict each user from reusing. Users can't set a password that matches any of `n` previous passwords, where `n` is the value of `PasswordHistorySize` .\n\nPassword history isn't enforced and isn't displayed in [DescribeUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_DescribeUserPool.html) responses when you set this value to `0` or don't provide it. To activate this setting, [advanced security features](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-advanced-security.html) must be active in your user pool.", "RequireLowercase": "In the password policy that you have set, refers to whether you have required users to use at least one lowercase letter in their password.", "RequireNumbers": "In the password policy that you have set, refers to whether you have required users to use at least one number in their password.", "RequireSymbols": "In the password policy that you have set, refers to whether you have required users to use at least one symbol in their password.", @@ -7833,7 +7872,8 @@ "AttributesRequireVerificationBeforeUpdate": "Requires that your user verifies their email address, phone number, or both before Amazon Cognito updates the value of that attribute. When you update a user attribute that has this option activated, Amazon Cognito sends a verification message to the new phone number or email address. Amazon Cognito doesn\u2019t change the value of the attribute until your user responds to the verification message and confirms the new value.\n\nYou can verify an updated email address or phone number with a [VerifyUserAttribute](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerifyUserAttribute.html) API request. You can also call the [AdminUpdateUserAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminUpdateUserAttributes.html) API and set `email_verified` or `phone_number_verified` to true.\n\nWhen `AttributesRequireVerificationBeforeUpdate` is false, your user pool doesn't require that your users verify attribute changes before Amazon Cognito updates them. In a user pool where `AttributesRequireVerificationBeforeUpdate` is false, API operations that change attribute values can immediately update a user\u2019s `email` or `phone_number` attribute." }, "AWS::Cognito::UserPool UserPoolAddOns": { - "AdvancedSecurityMode": "The operating mode of advanced security features in your user pool." + "AdvancedSecurityAdditionalFlows": "", + "AdvancedSecurityMode": "The operating mode of advanced security features for standard authentication types in your user pool, including username-password and secure remote password (SRP) authentication." }, "AWS::Cognito::UserPool UsernameConfiguration": { "CaseSensitive": "Specifies whether user name case sensitivity will be applied for all users in the user pool through Amazon Cognito APIs. For most use cases, set case sensitivity to `False` (case insensitive) as a best practice. When usernames and email addresses are case insensitive, users can sign in as the same user when they enter a different capitalization of their user name.\n\nValid values include:\n\n- **True** - Enables case sensitivity for all username input. When this option is set to `True` , users must sign in using the exact capitalization of their given username, such as \u201cUserName\u201d. 
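Since `PasswordHistorySize` only takes effect when advanced security features are active in the user pool, a sketch of a user pool that enables both together may help; the pool name and numeric choices are illustrative placeholders.

```json
{
  "UserPool": {
    "Type": "AWS::Cognito::UserPool",
    "Properties": {
      "UserPoolName": "example-pool",
      "Policies": {
        "PasswordPolicy": {
          "MinimumLength": 12,
          "RequireLowercase": true,
          "RequireNumbers": true,
          "RequireSymbols": true,
          "RequireUppercase": true,
          "PasswordHistorySize": 3
        }
      },
      "UserPoolAddOns": { "AdvancedSecurityMode": "ENFORCED" }
    }
  }
}
```

With `PasswordHistorySize` set to 3, a user cannot reuse any of their three previous passwords; setting it to 0 or omitting it disables history enforcement, as noted above.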
This is the default value.\n- **False** - Enables case insensitivity for all username input. For example, when this option is set to `False` , users can sign in using `username` , `USERNAME` , or `UserName` . This option also enables both `preferred_username` and `email` alias to be case insensitive, in addition to the `username` attribute." @@ -10338,7 +10378,7 @@ "Value": "The value for an AWS resource tag." }, "AWS::DataSync::LocationHDFS": { - "AgentArns": "The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster.", + "AgentArns": "The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your HDFS cluster.", "AuthenticationType": "", "BlockSize": "The size of data blocks to write into the HDFS cluster. The block size must be a multiple of 512 bytes. The default block size is 128 mebibytes (MiB).", "KerberosKeytab": "The Kerberos key table (keytab) that contains mappings between the defined Kerberos principal and the encrypted keys. Provide the base64-encoded file text. If `KERBEROS` is specified for `AuthType` , this value is required.", @@ -10366,7 +10406,7 @@ }, "AWS::DataSync::LocationNFS": { "MountOptions": "Specifies the options that DataSync can use to mount your NFS file server.", - "OnPremConfig": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that want to connect to your NFS file server.\n\nYou can specify more than one agent. For more information, see [Using multiple agents for transfers](https://docs.aws.amazon.com/datasync/latest/userguide/multiple-agents.html) .", + "OnPremConfig": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect to your NFS file server.\n\nYou can specify more than one agent. For more information, see [Using multiple DataSync agents](https://docs.aws.amazon.com/datasync/latest/userguide/do-i-need-datasync-agent.html#multiple-agents) .", "ServerHostname": "Specifies the Domain Name System (DNS) name or IP version 4 address of the NFS file server that your DataSync agent connects to.", "Subdirectory": "Specifies the export path in your NFS file server that you want DataSync to mount.\n\nThis path (or a subdirectory of the path) is where DataSync transfers data to or from. For information on configuring an export for DataSync, see [Accessing NFS file servers](https://docs.aws.amazon.com/datasync/latest/userguide/create-nfs-location.html#accessing-nfs) .", "Tags": "Specifies labels that help you categorize, filter, and search for your AWS resources. We recommend creating at least a name tag for your location." @@ -10375,7 +10415,7 @@ "Version": "Specifies the NFS version that you want DataSync to use when mounting your NFS share. If the server refuses to use the version specified, the task fails.\n\nYou can specify the following options:\n\n- `AUTOMATIC` (default): DataSync chooses NFS version 4.1.\n- `NFS3` : Stateless protocol version that allows for asynchronous writes on the server.\n- `NFSv4_0` : Stateful, firewall-friendly protocol version that supports delegations and pseudo file systems.\n- `NFSv4_1` : Stateful protocol version that supports sessions, directory delegations, and parallel data processing. NFS version 4.1 also includes all features available in version 4.0.\n\n> DataSync currently only supports NFS version 3 with Amazon FSx for NetApp ONTAP locations." }, "AWS::DataSync::LocationNFS OnPremConfig": { - "AgentArns": "The Amazon Resource Names (ARNs) of the agents connecting to a transfer location." 
+ "AgentArns": "The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your NFS file server.\n\nYou can specify more than one agent. For more information, see [Using multiple DataSync agents](https://docs.aws.amazon.com/datasync/latest/userguide/do-i-need-datasync-agent.html#multiple-agents) ." }, "AWS::DataSync::LocationNFS Tag": { "Key": "The key for an AWS resource tag.", @@ -10383,7 +10423,7 @@ }, "AWS::DataSync::LocationObjectStorage": { "AccessKey": "Specifies the access key (for example, a user name) if credentials are required to authenticate with the object storage server.", - "AgentArns": "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can securely connect with your location.", + "AgentArns": "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can connect with your object storage system.", "BucketName": "Specifies the name of the object storage bucket involved in the transfer.", "SecretKey": "Specifies the secret key (for example, a password) if credentials are required to authenticate with the object storage server.", "ServerCertificate": "Specifies a certificate chain for DataSync to authenticate with your object storage system if the system uses a private or self-signed certificate authority (CA). You must specify a single `.pem` file with a full certificate chain (for example, `file:///home/user/.ssh/object_storage_certificates.pem` ).\n\nThe certificate chain might include:\n\n- The object storage system's certificate\n- All intermediate certificates (if there are any)\n- The root certificate of the signing CA\n\nYou can concatenate your certificates into a `.pem` file (which can be up to 32768 bytes before base64 encoding). The following example `cat` command creates an `object_storage_certificates.pem` file that includes three certificates:\n\n`cat object_server_certificate.pem intermediate_certificate.pem ca_root_certificate.pem > object_storage_certificates.pem`\n\nTo use this parameter, configure `ServerProtocol` to `HTTPS` .", @@ -12100,7 +12140,7 @@ "EnclaveOptions": "Indicates whether the instance is enabled for AWS Nitro Enclaves. For more information, see [What is AWS Nitro Enclaves?](https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) in the *AWS Nitro Enclaves User Guide* .\n\nYou can't enable AWS Nitro Enclaves and hibernation on the same instance.", "HibernationOptions": "Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the [hibernation prerequisites](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html) . For more information, see [Hibernate your Amazon EC2 instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) in the *Amazon EC2 User Guide* .", "IamInstanceProfile": "The name or Amazon Resource Name (ARN) of an IAM instance profile.", - "ImageId": "The ID of the AMI. Alternatively, you can specify a Systems Manager parameter, which will resolve to an AMI ID on launch.\n\nValid formats:\n\n- `ami-17characters00000`\n- `resolve:ssm:parameter-name`\n- `resolve:ssm:parameter-name:version-number`\n- `resolve:ssm:parameter-name:label`\n\nFor more information, see [Use a Systems Manager parameter to find an AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html#using-systems-manager-parameter-to-find-AMI) in the *Amazon Elastic Compute Cloud User Guide* .", + "ImageId": "The ID of the AMI. 
Alternatively, you can specify a Systems Manager parameter, which will resolve to an AMI ID on launch.\n\nValid formats:\n\n- `ami-0ac394d6a3example`\n- `resolve:ssm:parameter-name`\n- `resolve:ssm:parameter-name:version-number`\n- `resolve:ssm:parameter-name:label`\n\nFor more information, see [Use a Systems Manager parameter to find an AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html#using-systems-manager-parameter-to-find-AMI) in the *Amazon Elastic Compute Cloud User Guide* .", "InstanceInitiatedShutdownBehavior": "Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).\n\nDefault: `stop`", "InstanceMarketOptions": "The market (purchasing) option for the instances.", "InstanceRequirements": "The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance types with these attributes.\n\nYou must specify `VCpuCount` and `MemoryMiB` . All other attributes are optional. Any unspecified optional attribute is set to its default.\n\nWhen you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.\n\nTo limit the list of instance types from which Amazon EC2 can identify matching instance types, you can use one of the following parameters, but not both in the same request:\n\n- `AllowedInstanceTypes` - The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes.\n- `ExcludedInstanceTypes` - The instance types to exclude from the list, even if they match your specified attributes.\n\n> If you specify `InstanceRequirements` , you can't specify `InstanceType` .\n> \n> Attribute-based instance type selection is only supported when using Auto Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan to use the launch template in the [launch instance wizard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-instance-wizard.html) , or with the [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) API or [AWS::EC2::Instance](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-instance.html) AWS CloudFormation resource, you can't specify `InstanceRequirements` . 
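The `resolve:ssm` AMI format and attribute-based instance type selection described above combine in a launch template roughly as sketched below. The SSM public parameter path is an assumed example of the `resolve:ssm:parameter-name` format, and the vCPU/memory bounds are arbitrary; per the text, `VCpuCount` and `MemoryMiB` are the required attributes.

```json
{
  "FleetTemplate": {
    "Type": "AWS::EC2::LaunchTemplate",
    "Properties": {
      "LaunchTemplateName": "fleet-template",
      "LaunchTemplateData": {
        "ImageId": "resolve:ssm:/aws/service/ami-amazon-linux-latest/al2023-ami-kernel-default-x86_64",
        "InstanceRequirements": {
          "VCpuCount": { "Min": 2, "Max": 8 },
          "MemoryMiB": { "Min": 4096 },
          "ExcludedInstanceTypes": ["t2.*"]
        }
      }
    }
  }
}
```

Note the caveat above: a template carrying `InstanceRequirements` is only usable with Auto Scaling groups, EC2 Fleet, and Spot Fleet, not with `RunInstances` or `AWS::EC2::Instance`.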
\n\nFor more information, see [Attribute-based instance type selection for EC2 Fleet](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html) , [Attribute-based instance type selection for Spot Fleet](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-attribute-based-instance-type-selection.html) , and [Spot placement score](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-placement-score.html) in the *Amazon EC2 User Guide* .", @@ -12397,7 +12437,7 @@ "TransitGatewayId": "The ID of a transit gateway.", "VpcPeeringConnectionId": "The ID of a VPC peering connection.", "destinationCidr": "The destination IPv4 address, in CIDR notation.", - "destinationPrefixListId": "The prefix of the AWS service .", + "destinationPrefixListId": "The prefix of the AWS service.", "egressOnlyInternetGatewayId": "The ID of an egress-only internet gateway.", "gatewayId": "The ID of the gateway, such as an internet gateway or virtual private gateway.", "instanceId": "The ID of the instance, such as a NAT instance." }, @@ -13035,8 +13075,7 @@ "AWS::EC2::TransitGatewayAttachment Options": { "ApplianceModeSupport": "Enable or disable appliance mode support. The default is `disable` .", "DnsSupport": "Enable or disable DNS support. The default is `disable` .", - "Ipv6Support": "Enable or disable IPv6 support. The default is `disable` .", - "SecurityGroupReferencingSupport": "Enables you to reference a security group across VPCs attached to a transit gateway (TGW). Use this option to simplify security group management and control of instance-to-instance traffic across VPCs that are connected by transit gateway. You can also use this option to migrate from VPC peering (which was the only option that supported security group referencing) to transit gateways (which now also support security group referencing). This option is disabled by default and there are no additional costs to use this feature.\n\nFor important information about this feature, see [Create a transit gateway](https://docs.aws.amazon.com/vpc/latest/tgw/tgw-transit-gateways.html#create-tgw) in the *AWS Transit Gateway Guide* ." + "Ipv6Support": "Enable or disable IPv6 support. The default is `disable` ." }, "AWS::EC2::TransitGatewayAttachment Tag": { "Key": "The tag key.", "Value": "The tag value." }, @@ -18341,7 +18380,7 @@ "Message": "Issue message that specifies the reason. For information about potential troubleshooting steps, see [Troubleshooting Malware Protection for S3 status issues](https://docs.aws.amazon.com/guardduty/latest/ug/troubleshoot-s3-malware-protection-status-errors.html) in the *GuardDuty User Guide* ." }, "AWS::GuardDuty::MalwareProtectionPlan CFNTagging": { - "Status": "Indicates whether or not you chose GuardDuty to add a predefined tag to the scanned S3 object." + "Status": "Indicates whether or not you chose GuardDuty to add a predefined tag to the scanned S3 object.\n\nPotential values include `ENABLED` and `DISABLED` . These values are case-sensitive." }, "AWS::GuardDuty::MalwareProtectionPlan S3Bucket": { "BucketName": "Name of the S3 bucket.", @@ -22524,6 +22563,7 @@ "AWS::KinesisFirehose::DeliveryStream MSKSourceConfiguration": { "AuthenticationConfiguration": "The authentication configuration of the Amazon MSK cluster.", "MSKClusterARN": "The ARN of the Amazon MSK cluster.", + "ReadFromTimestamp": "The start date and time in UTC for the offset position within your MSK topic from where Firehose begins to read.
By default, this is set to the timestamp when Firehose becomes Active.\n\nIf you want to create a Firehose stream with the earliest start position from the SDK or CLI, you need to set the `ReadFromTimestamp` parameter to Epoch (1970-01-01T00:00:00Z).", "TopicName": "The topic name within the Amazon MSK cluster." }, "AWS::KinesisFirehose::DeliveryStream OpenXJsonSerDe": { @@ -23020,6 +23060,7 @@ "S3Bucket": "An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account .", "S3Key": "The Amazon S3 key of the deployment package.", "S3ObjectVersion": "For versioned objects, the version of the deployment package object to use.", + "SourceKMSKeyArn": "", "ZipFile": "(Node.js and Python) The source code of your Lambda function. If you include your function source inline with this parameter, AWS CloudFormation places it in a file named `index` and zips it to create a [deployment package](https://docs.aws.amazon.com/lambda/latest/dg/gettingstarted-package.html) . This zip file cannot exceed 4MB. For the `Handler` property, the first part of the handler identifier must be `index` . For example, `index.handler` .\n\nFor JSON, you must escape quotes and special characters such as newline ( `\\n` ) with a backslash.\n\nIf you specify a function that interacts with an AWS CloudFormation custom resource, you don't have to write your own functions to send responses to the custom resource that invoked the function. AWS CloudFormation provides a response module ( [cfn-response](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-lambda-function-code-cfnresponsemodule.html) ) that simplifies sending responses. See [Using AWS Lambda with AWS CloudFormation](https://docs.aws.amazon.com/lambda/latest/dg/services-cloudformation.html) for details." }, "AWS::Lambda::Function DeadLetterConfig": { @@ -23093,10 +23134,10 @@ "EventSourceToken": "For Alexa Smart Home functions, a token that the invoker must supply.", "FunctionName": "The name or ARN of the Lambda function, version, or alias.\n\n**Name formats** - *Function name* \u2013 `my-function` (name-only), `my-function:v1` (with alias).\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:my-function` .\n- *Partial ARN* \u2013 `123456789012:function:my-function` .\n\nYou can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", "FunctionUrlAuthType": "The type of authentication that your function URL uses. Set to `AWS_IAM` if you want to restrict access to authenticated users only. Set to `NONE` if you want to bypass IAM authentication to create a public endpoint. For more information, see [Security and auth model for Lambda function URLs](https://docs.aws.amazon.com/lambda/latest/dg/urls-auth.html) .", - "Principal": "The AWS service or AWS account that invokes the function. If you specify a service, use `SourceArn` or `SourceAccount` to limit who can invoke the function through that service.", + "Principal": "The AWS service or AWS account that invokes the function. If you specify a service, use `SourceArn` or `SourceAccount` to limit who can invoke the function through that service.", "PrincipalOrgID": "The identifier for your organization in AWS Organizations . Use this to grant permissions to all the AWS accounts under this organization.", - "SourceAccount": "For AWS service , the ID of the AWS account that owns the resource.
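To tie the `MSKSourceConfiguration` entries above together, here is a hedged sketch of a Firehose stream reading an MSK topic from the earliest position via the Epoch timestamp trick. The cluster, role, and bucket logical IDs plus the topic name are hypothetical; the `Connectivity` value is an assumption for the authentication block.

```json
{
  "MskSourcedStream": {
    "Type": "AWS::KinesisFirehose::DeliveryStream",
    "Properties": {
      "DeliveryStreamType": "MSKAsSource",
      "MSKSourceConfiguration": {
        "MSKClusterARN": { "Ref": "MskClusterArn" },
        "TopicName": "clickstream-events",
        "ReadFromTimestamp": "1970-01-01T00:00:00Z",
        "AuthenticationConfiguration": {
          "Connectivity": "PRIVATE",
          "RoleARN": { "Fn::GetAtt": ["FirehoseMskRole", "Arn"] }
        }
      },
      "ExtendedS3DestinationConfiguration": {
        "BucketARN": { "Fn::GetAtt": ["DestinationBucket", "Arn"] },
        "RoleARN": { "Fn::GetAtt": ["FirehoseDeliveryRole", "Arn"] }
      }
    }
  }
}
```

Omitting `ReadFromTimestamp` would instead start reading from the point at which the stream becomes Active, as described above.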
Use this together with `SourceArn` to ensure that the specified account owns the resource. It is possible for an Amazon S3 bucket to be deleted by its owner and recreated by another account.", - "SourceArn": "For AWS services , the ARN of the AWS resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic.\n\nNote that Lambda configures the comparison using the `StringLike` operator." + "SourceAccount": "For AWS services, the ID of the AWS account that owns the resource. Use this together with `SourceArn` to ensure that the specified account owns the resource. It is possible for an Amazon S3 bucket to be deleted by its owner and recreated by another account.", + "SourceArn": "For AWS services, the ARN of the AWS resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic.\n\nNote that Lambda configures the comparison using the `StringLike` operator." }, "AWS::Lambda::Url": { "AuthType": "The type of authentication that your function URL uses. Set to `AWS_IAM` if you want to restrict access to authenticated users only. Set to `NONE` if you want to bypass IAM authentication to create a public endpoint. For more information, see [Security and auth model for Lambda function URLs](https://docs.aws.amazon.com/lambda/latest/dg/urls-auth.html) .", @@ -26176,7 +26217,6 @@ "Value": "" }, "AWS::MediaLive::Multiplexprogram": { - "ChannelId": "The unique ID of the channel.", "MultiplexId": "The unique id of the multiplex.", "MultiplexProgramSettings": "Multiplex Program settings configuration.", "PacketIdentifiersMap": "", @@ -27089,7 +27129,7 @@ "AWS::NetworkFirewall::LoggingConfiguration LogDestinationConfig": { "LogDestination": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` .\n\nThe following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", "LogDestinationType": "The type of storage destination to send these logs to. You can send logs to an Amazon S3 bucket, a CloudWatch log group, or a Firehose delivery stream.", - "LogType": "The type of log to send. Alert logs report traffic that matches a stateful rule with an action setting that sends an alert log message. Flow logs are standard network traffic flow logs." + "LogType": "The type of log to record. You can record the following types of logs from your Network Firewall stateful engine.\n\n- `ALERT` - Logs for traffic that matches your stateful rules and that have an action that sends an alert. A stateful rule sends alerts for the rule actions DROP, ALERT, and REJECT. For more information, see the `StatefulRule` property.\n- `FLOW` - Standard network traffic flow logs. The stateful rules engine records flow logs for all network traffic that it receives.
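The `Principal`/`SourceAccount`/`SourceArn` trio above is easiest to see in a concrete `AWS::Lambda::Permission`; the sketch below lets S3 invoke a hypothetical `ProcessorFunction` only for a specific bucket owned by the current account (the bucket name is the documentation placeholder).

```json
{
  "AllowS3Invoke": {
    "Type": "AWS::Lambda::Permission",
    "Properties": {
      "FunctionName": { "Ref": "ProcessorFunction" },
      "Action": "lambda:InvokeFunction",
      "Principal": "s3.amazonaws.com",
      "SourceAccount": { "Ref": "AWS::AccountId" },
      "SourceArn": "arn:aws:s3:::amzn-s3-demo-bucket1"
    }
  }
}
```

Pinning both `SourceArn` and `SourceAccount` guards against the delete-and-recreate scenario described above, where a bucket of the same name ends up owned by a different account.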
Each flow log record captures the network flow for a specific standard stateless rule group.\n- `TLS` - Logs for events that are related to TLS inspection. For more information, see [Inspecting SSL/TLS traffic with TLS inspection configurations](https://docs.aws.amazon.com/network-firewall/latest/developerguide/tls-inspection-configurations.html) in the *Network Firewall Developer Guide* ." }, "AWS::NetworkFirewall::LoggingConfiguration LoggingConfiguration": { "LogDestinationConfigs": "Defines the logging destinations for the logs for a firewall. Network Firewall generates logs for stateful rule groups." @@ -27246,7 +27286,9 @@ "AWS::NetworkManager::ConnectAttachment": { "CoreNetworkId": "The ID of the core network where the Connect attachment is located.", "EdgeLocation": "The Region where the edge is located.", + "NetworkFunctionGroupName": "The name of the network function group.", "Options": "Options for connecting an attachment.", + "ProposedNetworkFunctionGroupChange": "Describes proposed changes to a network function group.", "ProposedSegmentChange": "Describes a proposed segment change. In some cases, the segment change must first be evaluated and accepted.", "Tags": "The tags associated with the Connect attachment.", "TransportAttachmentId": "The ID of the transport attachment." @@ -27254,6 +27296,11 @@ "AWS::NetworkManager::ConnectAttachment ConnectAttachmentOptions": { "Protocol": "The protocol used for the attachment connection." }, + "AWS::NetworkManager::ConnectAttachment ProposedNetworkFunctionGroupChange": { + "AttachmentPolicyRuleNumber": "The proposed new attachment policy rule number for the network function group.", + "NetworkFunctionGroupName": "The proposed name change for the network function group name.", + "Tags": "The list of proposed changes to the key-value tags associated with the network function group." + }, "AWS::NetworkManager::ConnectAttachment ProposedSegmentChange": { "AttachmentPolicyRuleNumber": "The rule number in the policy document that applies to this change.", "SegmentName": "The name of the segment to change.", @@ -27303,11 +27350,20 @@ "EdgeLocation": "The Region where a core network edge is located.", "InsideCidrBlocks": "The inside IP addresses used for core network edges." }, + "AWS::NetworkManager::CoreNetwork CoreNetworkNetworkFunctionGroup": { + "EdgeLocations": "The core network edge locations.", + "Name": "The name of the network function group.", + "Segments": "The segments associated with the network function group." + }, "AWS::NetworkManager::CoreNetwork CoreNetworkSegment": { "EdgeLocations": "The Regions where the edges are located.", "Name": "The name of a core network segment.", "SharedSegments": "The shared segments of a core network." }, + "AWS::NetworkManager::CoreNetwork Segments": { + "SendTo": "", + "SendVia": "" + }, "AWS::NetworkManager::CoreNetwork Tag": { "Key": "The tag key.\n\nConstraints: Maximum length of 128 characters.", "Value": "The tag value.\n\nConstraints: Maximum length of 256 characters." @@ -27392,10 +27448,17 @@ }, "AWS::NetworkManager::SiteToSiteVpnAttachment": { "CoreNetworkId": "", + "NetworkFunctionGroupName": "The name of the network function group.", + "ProposedNetworkFunctionGroupChange": "Describes proposed changes to a network function group.", "ProposedSegmentChange": "Describes a proposed segment change. 
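The `LogDestinationConfig` shapes above compose into an `AWS::NetworkFirewall::LoggingConfiguration` roughly as sketched below, sending `ALERT` logs to S3 and `FLOW` logs to CloudWatch Logs; the bucket, prefix, and log group names reuse the documentation examples, and `Firewall` is a hypothetical `AWS::NetworkFirewall::Firewall` in the same template.

```json
{
  "FirewallLogging": {
    "Type": "AWS::NetworkFirewall::LoggingConfiguration",
    "Properties": {
      "FirewallArn": { "Ref": "Firewall" },
      "LoggingConfiguration": {
        "LogDestinationConfigs": [
          {
            "LogType": "ALERT",
            "LogDestinationType": "S3",
            "LogDestination": { "bucketName": "DOC-EXAMPLE-BUCKET", "prefix": "alerts" }
          },
          {
            "LogType": "FLOW",
            "LogDestinationType": "CloudWatchLogs",
            "LogDestination": { "logGroup": "alert-log-group" }
          }
        ]
      }
    }
  }
}
```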
In some cases, the segment change must first be evaluated and accepted.", "Tags": "The tags associated with the Site-to-Site VPN attachment.", "VpnConnectionArn": "The ARN of the site-to-site VPN attachment." }, + "AWS::NetworkManager::SiteToSiteVpnAttachment ProposedNetworkFunctionGroupChange": { + "AttachmentPolicyRuleNumber": "The proposed new attachment policy rule number for the network function group.", + "NetworkFunctionGroupName": "The proposed name change for the network function group name.", + "Tags": "The list of proposed changes to the key-value tags associated with the network function group." + }, "AWS::NetworkManager::SiteToSiteVpnAttachment ProposedSegmentChange": { "AttachmentPolicyRuleNumber": "The rule number in the policy document that applies to this change.", "SegmentName": "The name of the segment to change.", @@ -27419,11 +27482,18 @@ "TransitGatewayArn": "The Amazon Resource Name (ARN) of the transit gateway." }, "AWS::NetworkManager::TransitGatewayRouteTableAttachment": { + "NetworkFunctionGroupName": "The name of the network function group.", "PeeringId": "The ID of the transit gateway peering.", + "ProposedNetworkFunctionGroupChange": "Describes proposed changes to a network function group.", "ProposedSegmentChange": "This property is read-only. Values can't be assigned to it.", "Tags": "The list of key-value pairs associated with the transit gateway route table attachment.", "TransitGatewayRouteTableArn": "The ARN of the transit gateway attachment route table. For example, `\"TransitGatewayRouteTableArn\": \"arn:aws:ec2:us-west-2:123456789012:transit-gateway-route-table/tgw-rtb-9876543210123456\"` ." }, + "AWS::NetworkManager::TransitGatewayRouteTableAttachment ProposedNetworkFunctionGroupChange": { + "AttachmentPolicyRuleNumber": "The proposed new attachment policy rule number for the network function group.", + "NetworkFunctionGroupName": "The proposed name change for the network function group name.", + "Tags": "The list of proposed changes to the key-value tags associated with the network function group." + }, "AWS::NetworkManager::TransitGatewayRouteTableAttachment ProposedSegmentChange": { "AttachmentPolicyRuleNumber": "The rule number in the policy document that applies to this change.", "SegmentName": "The name of the segment to change.", @@ -27436,11 +27506,17 @@ "AWS::NetworkManager::VpcAttachment": { "CoreNetworkId": "The core network ID.", "Options": "Options for creating the VPC attachment.", + "ProposedNetworkFunctionGroupChange": "Describes proposed changes to a network function group.", "ProposedSegmentChange": "Describes a proposed segment change. In some cases, the segment change must first be evaluated and accepted.", "SubnetArns": "The subnet ARNs.", "Tags": "The tags associated with the VPC attachment.", "VpcArn": "The ARN of the VPC attachment." }, + "AWS::NetworkManager::VpcAttachment ProposedNetworkFunctionGroupChange": { + "AttachmentPolicyRuleNumber": "The proposed new attachment policy rule number for the network function group.", + "NetworkFunctionGroupName": "The proposed name change for the network function group name.", + "Tags": "The list of proposed changes to the key-value tags associated with the network function group." 
+ }, "AWS::NetworkManager::VpcAttachment ProposedSegmentChange": { "AttachmentPolicyRuleNumber": "The rule number in the policy document that applies to this change.", "SegmentName": "The name of the segment to change.", @@ -29385,7 +29461,7 @@ "AWS::Pipes::Pipe S3LogDestination": { "BucketName": "The name of the Amazon S3 bucket to which EventBridge delivers the log records for the pipe.", "BucketOwner": "The AWS account that owns the Amazon S3 bucket to which EventBridge delivers the log records for the pipe.", - "OutputFormat": "The format EventBridge uses for the log records.\n\n- `json` : JSON\n- `plain` : Plain text\n- `w3c` : [W3C extended logging file format](https://docs.aws.amazon.com/https://www.w3.org/TR/WD-logfile)", + "OutputFormat": "The format EventBridge uses for the log records.\n\nEventBridge currently only supports `json` formatting.", "Prefix": "The prefix text with which to begin Amazon S3 log object names.\n\nFor more information, see [Organizing objects using prefixes](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html) in the *Amazon Simple Storage Service User Guide* ." }, "AWS::Pipes::Pipe SageMakerPipelineParameter": { @@ -38396,7 +38472,7 @@ "PromotionTier": "The order of priority in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see [Fault Tolerance for an Aurora DB Cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.AuroraHighAvailability.html#Aurora.Managing.FaultTolerance) in the *Amazon Aurora User Guide* .\n\nThis setting doesn't apply to RDS Custom DB instances.\n\nDefault: `1`\n\nValid Values: `0 - 15`", "PubliclyAccessible": "Indicates whether the DB instance is an internet-facing instance. If you specify true, AWS CloudFormation creates an instance with a publicly resolvable DNS name, which resolves to a public IP address. If you specify false, AWS CloudFormation creates an internal instance with a DNS name that resolves to a private IP address.\n\nThe default behavior value depends on your VPC setup and the database subnet group. For more information, see the `PubliclyAccessible` parameter in the [CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html) in the *Amazon RDS API Reference* .", "ReplicaMode": "The open mode of an Oracle read replica. For more information, see [Working with Oracle Read Replicas for Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-read-replicas.html) in the *Amazon RDS User Guide* .\n\nThis setting is only supported in RDS for Oracle.\n\nDefault: `open-read-only`\n\nValid Values: `open-read-only` or `mounted`", - "RestoreTime": "The date and time to restore from.\n\nConstraints:\n\n- Must be a time in Universal Coordinated Time (UTC) format.\n- Must be before the latest restorable time for the DB instance.\n- Can't be specified if the `UseLatestRestorableTime` parameter is enabled.\n\nExample: `2009-09-07T23:45:00Z`", + "RestoreTime": "The date and time to restore from. This parameter applies to point-in-time recovery. 
For more information, see [Restoring a DB instance to a specified time](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIT.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Must be a time in Universal Coordinated Time (UTC) format.\n- Must be before the latest restorable time for the DB instance.\n- Can't be specified if the `UseLatestRestorableTime` parameter is enabled.\n\nExample: `2009-09-07T23:45:00Z`", "SourceDBClusterIdentifier": "The identifier of the Multi-AZ DB cluster that will act as the source for the read replica. Each DB cluster can have up to 15 read replicas.\n\nConstraints:\n\n- Must be the identifier of an existing Multi-AZ DB cluster.\n- Can't be specified if the `SourceDBInstanceIdentifier` parameter is also specified.\n- The specified DB cluster must have automatic backups enabled, that is, its backup retention period must be greater than 0.\n- The source DB cluster must be in the same AWS Region as the read replica. Cross-Region replication isn't supported.", "SourceDBInstanceAutomatedBackupsArn": "The Amazon Resource Name (ARN) of the replicated automated backups from which to restore, for example, `arn:aws:rds:us-east-1:123456789012:auto-backup:ab-L2IJCEXJP7XQ7HOJ4SIEXAMPLE` .\n\nThis setting doesn't apply to RDS Custom.", "SourceDBInstanceIdentifier": "If you want to create a read replica DB instance, specify the ID of the source DB instance. Each DB instance can have a limited number of read replicas. For more information, see [Working with Read Replicas](https://docs.aws.amazon.com/AmazonRDS/latest/DeveloperGuide/USER_ReadRepl.html) in the *Amazon RDS User Guide* .\n\nFor information about constraints that apply to DB instance identifiers, see [Naming constraints in Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Limits.html#RDS_Limits.Constraints) in the *Amazon RDS User Guide* .\n\nThe `SourceDBInstanceIdentifier` property determines whether a DB instance is a read replica. If you remove the `SourceDBInstanceIdentifier` property from your template and then update your stack, AWS CloudFormation promotes the read replica to a standalone DB instance.\n\nIf you specify the `UseLatestRestorableTime` or `RestoreTime` properties in conjunction with the `SourceDBInstanceIdentifier` property, RDS restores the DB instance to the requested point in time, thereby creating a new DB instance.\n\n> - If you specify a source DB instance that uses VPC security groups, we recommend that you specify the `VPCSecurityGroups` property. If you don't specify the property, the read replica inherits the value of the `VPCSecurityGroups` property from the source DB when you create the replica. However, if you update the stack, AWS CloudFormation reverts the replica's `VPCSecurityGroups` property to the default value because it's not defined in the stack's template. This change might cause unexpected issues.\n> - Read replicas don't support deletion policies. AWS CloudFormation ignores any deletion policy that's associated with a read replica.\n> - If you specify `SourceDBInstanceIdentifier` , don't specify the `DBSnapshotIdentifier` property. You can't create a read replica from a snapshot.\n> - Don't set the `BackupRetentionPeriod` , `DBName` , `MasterUsername` , `MasterUserPassword` , and `PreferredBackupWindow` properties.
The database attributes are inherited from the source DB instance, and backups are disabled for read replicas.\n> - If the source DB instance is in a different region than the read replica, specify the source region in `SourceRegion` , and specify an ARN for a valid DB instance in `SourceDBInstanceIdentifier` . For more information, see [Constructing a Amazon RDS Amazon Resource Name (ARN)](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN) in the *Amazon RDS User Guide* .\n> - For DB instances in Amazon Aurora clusters, don't specify this property. Amazon RDS automatically assigns writer and reader DB instances.", @@ -38408,7 +38484,7 @@ "Tags": "Tags to assign to the DB instance.", "Timezone": "The time zone of the DB instance. The time zone parameter is currently supported only by [RDS for Db2](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/db2-time-zone) and [RDS for SQL Server](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone) .", "UseDefaultProcessorFeatures": "Specifies whether the DB instance class of the DB instance uses its default processor features.\n\nThis setting doesn't apply to RDS Custom DB instances.", - "UseLatestRestorableTime": "Specifies whether the DB instance is restored from the latest backup time. By default, the DB instance isn't restored from the latest backup time.\n\nConstraints:\n\n- Can't be specified if the `RestoreTime` parameter is provided.", + "UseLatestRestorableTime": "Specifies whether the DB instance is restored from the latest backup time. By default, the DB instance isn't restored from the latest backup time. This parameter applies to point-in-time recovery. For more information, see [Restoring a DB instance to a specified time](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIT.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Can't be specified if the `RestoreTime` parameter is provided.", "VPCSecurityGroups": "A list of the VPC security group IDs to assign to the DB instance. The list can include both the physical IDs of existing VPC security groups and references to [AWS::EC2::SecurityGroup](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group.html) resources created in the template.\n\nIf you plan to update the resource, don't specify VPC security groups in a shared VPC.\n\nIf you set `VPCSecurityGroups` , you must not set [`DBSecurityGroups`](https://docs.aws.amazon.com//AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html#cfn-rds-dbinstance-dbsecuritygroups) , and vice versa.\n\n> You can migrate a DB instance in your stack from an RDS DB security group to a VPC security group, but keep the following in mind:\n> \n> - You can't revert to using an RDS security group after you establish a VPC security group membership.\n> - When you migrate your DB instance to VPC security groups, if your stack update rolls back because the DB instance update fails or because an update fails in another AWS CloudFormation resource, the rollback fails because it can't revert to an RDS security group.\n> - To use the properties that are available when you use a VPC security group, you must recreate the DB instance.
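Putting the point-in-time recovery pieces above together: per the `SourceDBInstanceIdentifier` notes, specifying `RestoreTime` (or `UseLatestRestorableTime`) alongside a source instance restores a new DB instance to that point in time. A minimal sketch follows; the source identifier and instance class are placeholders, and the timestamp reuses the documentation example.

```json
{
  "RestoredInstance": {
    "Type": "AWS::RDS::DBInstance",
    "Properties": {
      "SourceDBInstanceIdentifier": "mydbinstance",
      "RestoreTime": "2009-09-07T23:45:00Z",
      "DBInstanceClass": "db.t3.micro"
    }
  }
}
```

To restore to the latest restorable time instead, drop `RestoreTime` and set `UseLatestRestorableTime` to `true`; the two properties are mutually exclusive, as the constraints above state.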
If you don't, AWS CloudFormation submits only the property values that are listed in the [`DBSecurityGroups`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html#cfn-rds-dbinstance-dbsecuritygroups) property. \n\nTo avoid this situation, migrate your DB instance to using VPC security groups only when that is the only change in your stack template.\n\n*Amazon Aurora*\n\nNot applicable. The associated list of EC2 VPC security groups is managed by the DB cluster. If specified, the setting must match the DB cluster setting." }, "AWS::RDS::DBInstance CertificateDetails": { @@ -38690,6 +38766,8 @@ }, "AWS::Redshift::Cluster LoggingProperties": { "BucketName": "The name of an existing S3 bucket where the log files are to be stored.\n\nConstraints:\n\n- Must be in the same region as the cluster\n- The cluster must have read bucket and put object permissions", + "LogDestinationType": "", + "LogExports": "", "S3KeyPrefix": "The prefix applied to the log file names.\n\nConstraints:\n\n- Cannot exceed 512 characters\n- Cannot contain spaces ( ), double quotes (\"), single quotes ('), a backslash (\\), or control characters. The hexadecimal codes for invalid characters are:\n\n- x00 to x20\n- x22\n- x27\n- x5c\n- x7f or larger" }, "AWS::Redshift::Cluster Tag": { @@ -39215,6 +39293,7 @@ "Value": "The tag value." }, "AWS::RolesAnywhere::Profile": { + "AcceptRoleSessionName": "Used to determine if a custom role session name will be accepted in a temporary credential request.", "AttributeMappings": "A mapping applied to the authenticating end-entity certificate.", "DurationSeconds": "The number of seconds vended session credentials will be valid for.", "Enabled": "The enabled status of the resource.", @@ -39648,6 +39727,7 @@ "ResourceId": "The ID of the Amazon VPC that is associated with the query logging configuration." }, "AWS::Route53Resolver::ResolverRule": { + "DelegationRecord": "", "DomainName": "DNS queries for this domain name are forwarded to the IP addresses that are specified in `TargetIps` . If a query matches multiple Resolver rules (example.com and www.example.com), the query is routed using the Resolver rule that contains the most specific domain name (www.example.com).", "Name": "The name for the Resolver rule, which you specified when you created the Resolver rule.", "ResolverEndpointId": "The ID of the endpoint that the rule is associated with.", @@ -39719,7 +39799,7 @@ "BlockPublicAcls": "Specifies whether Amazon S3 should block public access control lists (ACLs) for this bucket and objects in this bucket. Setting this element to `TRUE` causes the following behavior:\n\n- PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is public.\n- PUT Object calls fail if the request includes a public ACL.\n- PUT Bucket calls fail if the request includes a public ACL.\n\nEnabling this setting doesn't affect existing policies or ACLs.", "BlockPublicPolicy": "Specifies whether Amazon S3 should block public bucket policies for this bucket. Setting this element to `TRUE` causes Amazon S3 to reject calls to PUT Bucket policy if the specified bucket policy allows public access.\n\nEnabling this setting doesn't affect existing bucket policies.", "IgnorePublicAcls": "Specifies whether Amazon S3 should ignore public ACLs for this bucket and objects in this bucket. 
Setting this element to `TRUE` causes Amazon S3 to ignore all public ACLs on this bucket and objects in this bucket.\n\nEnabling this setting doesn't affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set.", - "RestrictPublicBuckets": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to `TRUE` restricts access to this bucket to only AWS service principals and authorized users within this account if the bucket has a public policy.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked." + "RestrictPublicBuckets": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to `TRUE` restricts access to this bucket to only AWS service principals and authorized users within this account if the bucket has a public policy.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked." }, "AWS::S3::AccessPoint VpcConfiguration": { "VpcId": "If this field is specified, the access point will only allow connections from the specified VPC ID." }, @@ -39880,7 +39960,7 @@ "BlockPublicAcls": "Specifies whether Amazon S3 should block public access control lists (ACLs) for this bucket and objects in this bucket. Setting this element to `TRUE` causes the following behavior:\n\n- PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is public.\n- PUT Object calls fail if the request includes a public ACL.\n- PUT Bucket calls fail if the request includes a public ACL.\n\nEnabling this setting doesn't affect existing policies or ACLs.", "BlockPublicPolicy": "Specifies whether Amazon S3 should block public bucket policies for this bucket. Setting this element to `TRUE` causes Amazon S3 to reject calls to PUT Bucket policy if the specified bucket policy allows public access.\n\nEnabling this setting doesn't affect existing bucket policies.", "IgnorePublicAcls": "Specifies whether Amazon S3 should ignore public ACLs for this bucket and objects in this bucket. Setting this element to `TRUE` causes Amazon S3 to ignore all public ACLs on this bucket and objects in this bucket.\n\nEnabling this setting doesn't affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set.", - "RestrictPublicBuckets": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to `TRUE` restricts access to this bucket to only AWS service principals and authorized users within this account if the bucket has a public policy.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked." + "RestrictPublicBuckets": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. 
Setting this element to `TRUE` restricts access to this bucket to only AWS service principals and authorized users within this account if the bucket has a public policy.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked." }, "AWS::S3::Bucket QueueConfiguration": { "Event": "The Amazon S3 bucket event about which you want to publish messages to Amazon SQS. For more information, see [Supported Event Types](https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the *Amazon S3 User Guide* .", @@ -40035,7 +40115,7 @@ "BlockPublicAcls": "Specifies whether Amazon S3 should block public access control lists (ACLs) for this bucket and objects in this bucket. Setting this element to `TRUE` causes the following behavior:\n\n- PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is public.\n- PUT Object calls fail if the request includes a public ACL.\n- PUT Bucket calls fail if the request includes a public ACL.\n\nEnabling this setting doesn't affect existing policies or ACLs.", "BlockPublicPolicy": "Specifies whether Amazon S3 should block public bucket policies for this bucket. Setting this element to `TRUE` causes Amazon S3 to reject calls to PUT Bucket policy if the specified bucket policy allows public access.\n\nEnabling this setting doesn't affect existing bucket policies.", "IgnorePublicAcls": "Specifies whether Amazon S3 should ignore public ACLs for this bucket and objects in this bucket. Setting this element to `TRUE` causes Amazon S3 to ignore all public ACLs on this bucket and objects in this bucket.\n\nEnabling this setting doesn't affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set.", - "RestrictPublicBuckets": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to `TRUE` restricts access to this bucket to only AWS service principals and authorized users within this account if the bucket has a public policy.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked." + "RestrictPublicBuckets": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to `TRUE` restricts access to this bucket to only AWS service principals and authorized users within this account if the bucket has a public policy.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked." }, "AWS::S3::MultiRegionAccessPoint Region": { "Bucket": "The name of the associated bucket for the Region.", @@ -40215,7 +40295,7 @@ "BlockPublicAcls": "Specifies whether Amazon S3 should block public access control lists (ACLs) for buckets in this account. 
Setting this element to `TRUE` causes the following behavior:\n\n- `PutBucketAcl` and `PutObjectAcl` calls fail if the specified ACL is public.\n- PUT Object calls fail if the request includes a public ACL.\n- PUT Bucket calls fail if the request includes a public ACL.\n\nEnabling this setting doesn't affect existing policies or ACLs.\n\nThis property is not supported for Amazon S3 on Outposts.", "BlockPublicPolicy": "Specifies whether Amazon S3 should block public bucket policies for buckets in this account. Setting this element to `TRUE` causes Amazon S3 to reject calls to PUT Bucket policy if the specified bucket policy allows public access.\n\nEnabling this setting doesn't affect existing bucket policies.\n\nThis property is not supported for Amazon S3 on Outposts.", "IgnorePublicAcls": "Specifies whether Amazon S3 should ignore public ACLs for buckets in this account. Setting this element to `TRUE` causes Amazon S3 to ignore all public ACLs on buckets in this account and any objects that they contain.\n\nEnabling this setting doesn't affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set.\n\nThis property is not supported for Amazon S3 on Outposts.", - "RestrictPublicBuckets": "Specifies whether Amazon S3 should restrict public bucket policies for buckets in this account. Setting this element to `TRUE` restricts access to buckets with public policies to only AWS service principals and authorized users within this account.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.\n\nThis property is not supported for Amazon S3 on Outposts." + "RestrictPublicBuckets": "Specifies whether Amazon S3 should restrict public bucket policies for buckets in this account. Setting this element to `TRUE` restricts access to buckets with public policies to only AWS service principals and authorized users within this account.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.\n\nThis property is not supported for Amazon S3 on Outposts." }, "AWS::S3ObjectLambda::AccessPoint TransformationConfiguration": { "Actions": "A container for the action of an Object Lambda Access Point configuration. Valid inputs are `GetObject` , `HeadObject` , `ListObjects` , and `ListObjectsV2` .", @@ -40993,8 +41073,8 @@ "Products": "The specific operating system versions a patch repository applies to, such as \"Ubuntu16.04\", \"RedhatEnterpriseLinux7.2\" or \"Suse12.7\". For lists of supported product values, see [PatchFilter](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_PatchFilter.html) in the *AWS Systems Manager API Reference* ." }, "AWS::SSM::PatchBaseline Rule": { - "ApproveAfterDays": "The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of `7` means that patches are approved seven days after they are released.\n\nYou must specify a value for `ApproveAfterDays` .\n\nException: Not supported on Debian Server or Ubuntu Server.", - "ApproveUntilDate": "The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically. 
Not supported on Debian Server or Ubuntu Server.\n\nEnter dates in the format `YYYY-MM-DD` . For example, `2021-12-31` .", + "ApproveAfterDays": "The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of `7` means that patches are approved seven days after they are released.\n\n> This parameter is marked as not required, but your request must include a value for either `ApproveAfterDays` or `ApproveUntilDate` . \n\nNot supported for Debian Server or Ubuntu Server.", + "ApproveUntilDate": "The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically.\n\nEnter dates in the format `YYYY-MM-DD` . For example, `2021-12-31` .\n\n> This parameter is marked as not required, but your request must include a value for either `ApproveUntilDate` or `ApproveAfterDays` . \n\nNot supported for Debian Server or Ubuntu Server.", "ComplianceLevel": "A compliance severity level for all approved patches in a patch baseline. Valid compliance severity levels include the following: `UNSPECIFIED` , `CRITICAL` , `HIGH` , `MEDIUM` , `LOW` , and `INFORMATIONAL` .", "EnableNonSecurity": "For managed nodes identified by the approval rule filters, enables a patch baseline to apply non-security updates available in the specified repository. The default value is `false` . Applies to Linux managed nodes only.", "PatchFilterGroup": "The patch filter group that defines the criteria for the rule." }, @@ -42374,6 +42454,7 @@ "LastModifiedTime": "The last time the model package was modified.", "MetadataProperties": "Metadata properties of the tracking entity, trial, or trial component.", "ModelApprovalStatus": "The approval status of the model. This can be one of the following values.\n\n- `APPROVED` - The model is approved\n- `REJECTED` - The model is rejected.\n- `PENDING_MANUAL_APPROVAL` - The model is waiting for manual approval.", + "ModelCard": "An Amazon SageMaker Model Card.", "ModelMetrics": "Metrics for the model.", "ModelPackageDescription": "The description of the model package.", "ModelPackageGroupName": "The model group to which the model belongs.", @@ -42381,8 +42462,10 @@ "ModelPackageStatusDetails": "Specifies the validation and image scan statuses of the model package.", "ModelPackageVersion": "The version number of a versioned model.", "SamplePayloadUrl": "The Amazon Simple Storage Service path where the sample payload is stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix).", + "SecurityConfig": "", "SkipModelValidation": "Indicates if you want to skip model validation.", "SourceAlgorithmSpecification": "A list of algorithms that were used to create a model package.", + "SourceUri": "The URI of the source for the model package.", "Tags": "A list of the tags associated with the model package. For more information, see [Tagging AWS resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference Guide* .", "Task": "The machine learning task your model package accomplishes. Common machine learning tasks include object detection and image classification.", "ValidationSpecification": "Specifies batch transform jobs that SageMaker runs to validate your model package." }, @@ -42453,10 +42536,20 @@ "ContentType": "The metric source content type.", "S3Uri": "The S3 URI for the metrics source." 
}, + "AWS::SageMaker::ModelPackage ModelAccessConfig": { + "AcceptEula": "Specifies agreement to the model end-user license agreement (EULA). The `AcceptEula` value must be explicitly defined as `True` in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model." + }, + "AWS::SageMaker::ModelPackage ModelCard": { + "ModelCardContent": "", + "ModelCardStatus": "The approval status of the model card within your organization. Different organizations might have different criteria for model card review and approval.\n\n- `Draft` : The model card is a work in progress.\n- `PendingReview` : The model card is pending review.\n- `Approved` : The model card is approved.\n- `Archived` : The model card is archived. No more updates should be made to the model card, but it can still be exported." + }, "AWS::SageMaker::ModelPackage ModelDataQuality": { "Constraints": "Data quality constraints for a model.", "Statistics": "Data quality statistics for a model." }, + "AWS::SageMaker::ModelPackage ModelDataSource": { + "S3DataSource": "Specifies the S3 location of ML model data to deploy." + }, "AWS::SageMaker::ModelPackage ModelInput": { "DataInputConfig": "The input configuration object for the model." }, @@ -42473,6 +42566,7 @@ "FrameworkVersion": "The framework version of the Model Package Container Image.", "Image": "The Amazon EC2 Container Registry (Amazon ECR) path where inference code is stored.\n\nIf you are using your own custom algorithm instead of an algorithm provided by SageMaker, the inference code must meet SageMaker requirements. SageMaker supports both `registry/repository[:tag]` and `registry/repository[@digest]` image path formats. For more information, see [Using Your Own Algorithms with Amazon SageMaker](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html) .", "ImageDigest": "An MD5 hash of the training algorithm that identifies the Docker image used for training.", + "ModelDataSource": "Specifies the location of ML model data to deploy during endpoint creation.", "ModelDataUrl": "The Amazon S3 path where the model artifacts, which result from model training, are stored. This path must point to a single `gzip` compressed tar archive ( `.tar.gz` suffix).\n\n> The model artifacts must be in an S3 bucket that is in the same region as the model package.", "ModelInput": "A structure with Model Input details.", "NearestModelName": "The name of a pre-trained machine learning benchmarked by Amazon SageMaker Inference Recommender model that matches your model. You can find a list of benchmarked models by calling `ListModelMetadata` ." @@ -42493,6 +42587,15 @@ "S3DataType": "If you choose `S3Prefix` , `S3Uri` identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix for model training.\n\nIf you choose `ManifestFile` , `S3Uri` identifies an object that is a manifest file containing a list of object keys that you want SageMaker to use for model training.\n\nIf you choose `AugmentedManifestFile` , S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. `AugmentedManifestFile` can only be used if the Channel's input mode is `Pipe` .", "S3Uri": "Depending on the value specified for the `S3DataType` , identifies either a key name prefix or a manifest. 
For example:\n\n- A key name prefix might look like this: `s3://bucketname/exampleprefix/`\n- A manifest might look like this: `s3://bucketname/example.manifest`\n\nA manifest is an S3 object which is a JSON file consisting of an array of elements. The first element is a prefix which is followed by one or more suffixes. SageMaker appends the suffix elements to the prefix to get a full set of `S3Uri` . Note that the prefix must be a valid non-empty `S3Uri` that precludes users from specifying a manifest whose individual `S3Uri` is sourced from different S3 buckets.\n\nThe following code example shows a valid manifest format:\n\n`[ {\"prefix\": \"s3://customer_bucket/some/prefix/\"},`\n\n`\"relative/path/to/custdata-1\",`\n\n`\"relative/path/custdata-2\",`\n\n`...`\n\n`\"relative/path/custdata-N\"`\n\n`]`\n\nThis JSON is equivalent to the following `S3Uri` list:\n\n`s3://customer_bucket/some/prefix/relative/path/to/custdata-1`\n\n`s3://customer_bucket/some/prefix/relative/path/custdata-2`\n\n`...`\n\n`s3://customer_bucket/some/prefix/relative/path/custdata-N`\n\nThe complete set of `S3Uri` in this manifest is the input data for the channel for this data source. The object that each `S3Uri` points to must be readable by the IAM role that SageMaker uses to perform tasks on your behalf.\n\nYour input bucket must be located in the same AWS region as your training job." }, + "AWS::SageMaker::ModelPackage S3ModelDataSource": { + "CompressionType": "Specifies how the ML model data is prepared.\n\nIf you choose `Gzip` and choose `S3Object` as the value of `S3DataType` , `S3Uri` identifies an object that is a gzip-compressed TAR archive. SageMaker will attempt to decompress and untar the object during model deployment.\n\nIf you choose `None` and choose `S3Object` as the value of `S3DataType` , `S3Uri` identifies an object that represents an uncompressed ML model to deploy.\n\nIf you choose `None` and choose `S3Prefix` as the value of `S3DataType` , `S3Uri` identifies a key name prefix, under which all objects represent the uncompressed ML model to deploy.\n\nIf you choose `None` , then SageMaker will follow the rules below when creating model data files under the /opt/ml/model directory for use by your inference code:\n\n- If you choose `S3Object` as the value of `S3DataType` , then SageMaker will split the key of the S3 object referenced by `S3Uri` by slash (/), and use the last part as the filename of the file holding the content of the S3 object.\n- If you choose `S3Prefix` as the value of `S3DataType` , then for each S3 object under the key name prefix referenced by `S3Uri` , SageMaker will trim its key by the prefix, and use the remainder as the path (relative to `/opt/ml/model` ) of the file holding the content of the S3 object. SageMaker will split the remainder by slash (/), using intermediate parts as directory names and the last part as the filename of the file holding the content of the S3 object.\n- Do not use any of the following as file names or directory names:\n\n- An empty or blank string\n- A string which contains null bytes\n- A string longer than 255 bytes\n- A single dot ( `.` )\n- A double dot ( `..` )\n- Ambiguous file names will result in model deployment failure. 
For example, if your uncompressed ML model consists of two S3 objects `s3://mybucket/model/weights` and `s3://mybucket/model/weights/part1` and you specify `s3://mybucket/model/` as the value of `S3Uri` and `S3Prefix` as the value of `S3DataType` , then it will result in a name clash between `/opt/ml/model/weights` (a regular file) and `/opt/ml/model/weights/` (a directory).\n- Do not organize the model artifacts in [S3 console using folders](https://docs.aws.amazon.com//AmazonS3/latest/userguide/using-folders.html) . When you create a folder in S3 console, S3 creates a 0-byte object with a key set to the folder name you provide. The key of the 0-byte object ends with a slash (/) which violates SageMaker restrictions on model artifact file names, leading to model deployment failure.", + "ModelAccessConfig": "Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `ModelAccessConfig` . You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.", + "S3DataType": "Specifies the type of ML model data to deploy.\n\nIf you choose `S3Prefix` , `S3Uri` identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix as part of the ML model data to deploy. A valid key name prefix identified by `S3Uri` always ends with a forward slash (/).\n\nIf you choose `S3Object` , `S3Uri` identifies an object that is the ML model data to deploy.", + "S3Uri": "Specifies the S3 path of ML model data to deploy." + }, + "AWS::SageMaker::ModelPackage SecurityConfig": { + "KmsKeyId": "" + }, "AWS::SageMaker::ModelPackage SourceAlgorithm": { "AlgorithmName": "The name of an algorithm that was used to create the model package. The algorithm must be either an algorithm resource in your SageMaker account or an algorithm in AWS Marketplace that you are subscribed to.", "ModelDataUrl": "The Amazon S3 path where the model artifacts, which result from model training, are stored. This path must point to a single `gzip` compressed tar archive ( `.tar.gz` suffix).\n\n> The model artifacts must be in an S3 bucket that is in the same AWS region as the algorithm." }, @@ -43275,7 +43378,7 @@ "RelatedFindingsId": "The product-generated identifier for a related finding.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "RelatedFindingsProductArn": "The ARN for the product that generated a related finding.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "ResourceDetailsOther": "Custom fields and values about the resource that a finding pertains to.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", - "ResourceId": "The identifier for the given resource type. For AWS resources that are identified by Amazon Resource Names (ARNs), this is the ARN. For AWS resources that lack ARNs, this is the identifier as defined by the AWS service that created the resource. For non- AWS resources, this is a unique identifier that is associated with the resource.\n\nArray Members: Minimum number of 1 item. Maximum number of 100 items.", + "ResourceId": "The identifier for the given resource type. For AWS resources that are identified by Amazon Resource Names (ARNs), this is the ARN. For AWS resources that lack ARNs, this is the identifier as defined by the AWS service that created the resource. 
For non-AWS resources, this is a unique identifier that is associated with the resource.\n\nArray Members: Minimum number of 1 item. Maximum number of 100 items.", "ResourcePartition": "The partition in which the resource that the finding pertains to is located. A partition is a group of AWS Regions . Each AWS account is scoped to one partition.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "ResourceRegion": "The AWS Region where the resource that a finding pertains to is located.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "ResourceTags": "A list of AWS tags associated with a resource at the time the finding was processed.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", @@ -43349,20 +43452,20 @@ "StringList": "A control parameter that is a list of strings." }, "AWS::SecurityHub::ConfigurationPolicy Policy": { - "SecurityHub": "The AWS service that the configuration policy applies to." + "SecurityHub": "The AWS service that the configuration policy applies to." }, "AWS::SecurityHub::ConfigurationPolicy SecurityControlCustomParameter": { "Parameters": "An object that specifies parameter values for a control in a configuration policy.", "SecurityControlId": "The ID of the security control." }, "AWS::SecurityHub::ConfigurationPolicy SecurityControlsConfiguration": { - "DisabledSecurityControlIdentifiers": "A list of security controls that are disabled in the configuration policy. Security Hub enables all other controls (including newly released controls) other than the listed controls.", - "EnabledSecurityControlIdentifiers": "A list of security controls that are enabled in the configuration policy. Security Hub disables all other controls (including newly released controls) other than the listed controls.", + "DisabledSecurityControlIdentifiers": "A list of security controls that are disabled in the configuration policy.\n\nProvide only one of `EnabledSecurityControlIdentifiers` or `DisabledSecurityControlIdentifiers` .\n\nIf you provide `DisabledSecurityControlIdentifiers` , Security Hub enables all other controls not in the list, and enables [AutoEnableControls](https://docs.aws.amazon.com/securityhub/1.0/APIReference/API_UpdateSecurityHubConfiguration.html#securityhub-UpdateSecurityHubConfiguration-request-AutoEnableControls) .", + "EnabledSecurityControlIdentifiers": "A list of security controls that are enabled in the configuration policy.\n\nProvide only one of `EnabledSecurityControlIdentifiers` or `DisabledSecurityControlIdentifiers` .\n\nIf you provide `EnabledSecurityControlIdentifiers` , Security Hub disables all other controls not in the list, and disables [AutoEnableControls](https://docs.aws.amazon.com/securityhub/1.0/APIReference/API_UpdateSecurityHubConfiguration.html#securityhub-UpdateSecurityHubConfiguration-request-AutoEnableControls) .", "SecurityControlCustomParameters": "A list of security controls and control parameter values that are included in a configuration policy." }, "AWS::SecurityHub::ConfigurationPolicy SecurityHubPolicy": { - "EnabledStandardIdentifiers": "A list that defines which security standards are enabled in the configuration policy.", - "SecurityControlsConfiguration": "An object that defines which security controls are enabled in the configuration policy. 
The enablement status of a control is aligned across all of the enabled standards in an account.", + "EnabledStandardIdentifiers": "A list that defines which security standards are enabled in the configuration policy.\n\nThis property is required only if `ServiceEnabled` is set to `true` in your configuration policy.", + "SecurityControlsConfiguration": "An object that defines which security controls are enabled in the configuration policy. The enablement status of a control is aligned across all of the enabled standards in an account.\n\nThis property is required only if `ServiceEnabled` is set to `true` in your configuration policy.", "ServiceEnabled": "Indicates whether Security Hub is enabled in the policy." }, "AWS::SecurityHub::DelegatedAdmin": { @@ -43388,7 +43491,7 @@ "AwsAccountName": "The name of the AWS account in which a finding is generated.", "CompanyName": "The name of the findings provider (company) that owns the solution (product) that generates findings.", "ComplianceAssociatedStandardsId": "The unique identifier of a standard in which a control is enabled. This field consists of the resource portion of the Amazon Resource Name (ARN) returned for a standard in the [DescribeStandards](https://docs.aws.amazon.com/securityhub/1.0/APIReference/API_DescribeStandards.html) API response.", "ComplianceSecurityControlId": "The unique identifier of a control across standards. Values for this field typically consist of an AWS service and a number, such as APIGateway.5.", "ComplianceSecurityControlParametersName": "The name of a security control parameter.", "ComplianceSecurityControlParametersValue": "The current value of a security control parameter.", "ComplianceStatus": "Exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard, such as CIS AWS Foundations. Contains security standard-related finding details.", @@ -43538,7 +43641,7 @@ "LastUpdateReason": "The most recent reason for updating the customizable properties of a security control. This differs from the `UpdateReason` field of the [`BatchUpdateStandardsControlAssociations`](https://docs.aws.amazon.com/securityhub/1.0/APIReference/API_BatchUpdateStandardsControlAssociations.html) API, which tracks the reason for updating the enablement status of a control. This field accepts alphanumeric characters in addition to white spaces, dashes, and underscores.", "Parameters": "An object that identifies the name of a control parameter, its current value, and whether it has been customized.", "SecurityControlArn": "The Amazon Resource Name (ARN) for a security control across standards, such as `arn:aws:securityhub:eu-central-1:123456789012:security-control/S3.1` . This parameter doesn't mention a specific standard.", "SecurityControlId": "The unique identifier of a security control across standards. Values for this field typically consist of an AWS service name and a number, such as APIGateway.3." 
}, "AWS::SecurityHub::SecurityControl ParameterConfiguration": { "Value": "The current value of a control parameter.", @@ -43600,7 +43703,7 @@ "AWS::SecurityLake::Subscriber": { "AccessTypes": "You can choose to notify subscribers of new objects with an Amazon Simple Queue Service (Amazon SQS) queue or through messaging to an HTTPS endpoint provided by the subscriber.\n\nSubscribers can consume data by directly querying AWS Lake Formation tables in your Amazon S3 bucket through services like Amazon Athena. This subscription type is defined as `LAKEFORMATION` .", "DataLakeArn": "The Amazon Resource Name (ARN) used to create the data lake.", - "Sources": "Amazon Security Lake supports log and event collection for natively supported AWS services . For more information, see the [Amazon Security Lake User Guide](https://docs.aws.amazon.com//security-lake/latest/userguide/source-management.html) .", + "Sources": "Amazon Security Lake supports log and event collection for natively supported AWS services . For more information, see the [Amazon Security Lake User Guide](https://docs.aws.amazon.com//security-lake/latest/userguide/source-management.html) .", "SubscriberDescription": "The subscriber descriptions for a subscriber account. The description for a subscriber includes `subscriberName` , `accountID` , `externalID` , and `subscriberId` .", "SubscriberIdentity": "The AWS identity used to access your data.", "SubscriberName": "The name of your Amazon Security Lake subscriber account.", diff --git a/schema_source/cloudformation.schema.json b/schema_source/cloudformation.schema.json index 3bc8a4533..a49e19255 100644 --- a/schema_source/cloudformation.schema.json +++ b/schema_source/cloudformation.schema.json @@ -22514,7 +22514,7 @@ "additionalProperties": false, "properties": { "ServiceName": { - "markdownDescription": "The name of the AWS service .", + "markdownDescription": "The name of the AWS service .", "title": "ServiceName", "type": "string" } @@ -22629,7 +22629,7 @@ "items": { "$ref": "#/definitions/AWS::AuditManager::Assessment.AWSService" }, - "markdownDescription": "The AWS services that are included in the scope of the assessment.\n\n> This API parameter is no longer supported. If you use this parameter to specify one or more AWS services , Audit Manager ignores this input. Instead, the value for `awsServices` will show as empty.", + "markdownDescription": "The AWS services that are included in the scope of the assessment.\n\n> This API parameter is no longer supported. If you use this parameter to specify one or more AWS services , Audit Manager ignores this input. Instead, the value for `awsServices` will show as empty.", "title": "AwsServices", "type": "array" } @@ -32745,7 +32745,7 @@ "type": "string" }, "SlackChannelId": { - "markdownDescription": "The ID of the Slack channel.\n\nTo get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, `ABCBBLZZZ` .", + "markdownDescription": "The ID of the Slack channel.\n\nTo get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the character string at the end of the URL. 
For example, `ABCBBLZZZ` .", "title": "SlackChannelId", "type": "string" }, @@ -39654,7 +39654,7 @@ "items": { "type": "string" }, - "markdownDescription": "An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type.\n\n- To log data events for all objects in all S3 buckets in your AWS account , specify the prefix as `arn:aws:s3` .\n\n> This also enables logging of data event activity performed by any user or role in your AWS account , even if that activity is performed on a bucket that belongs to another AWS account .\n- To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as `arn:aws:s3:::DOC-EXAMPLE-BUCKET1/` . The trail logs data events for all objects in this S3 bucket.\n- To log data events for specific objects, specify the S3 bucket and object prefix such as `arn:aws:s3:::DOC-EXAMPLE-BUCKET1/example-images` . The trail logs data events for objects in this S3 bucket that match the prefix.\n- To log data events for all Lambda functions in your AWS account , specify the prefix as `arn:aws:lambda` .\n\n> This also enables logging of `Invoke` activity performed by any user or role in your AWS account , even if that activity is performed on a function that belongs to another AWS account .\n- To log data events for a specific Lambda function, specify the function ARN.\n\n> Lambda function ARNs are exact. For example, if you specify a function ARN *arn:aws:lambda:us-west-2:111111111111:function:helloworld* , data events will only be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld* . They will not be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld2* .\n- To log data events for all DynamoDB tables in your AWS account , specify the prefix as `arn:aws:dynamodb` .", + "markdownDescription": "An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type.\n\n- To log data events for all objects in all S3 buckets in your AWS account , specify the prefix as `arn:aws:s3` .\n\n> This also enables logging of data event activity performed by any user or role in your AWS account , even if that activity is performed on a bucket that belongs to another AWS account .\n- To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as `arn:aws:s3:::amzn-s3-demo-bucket1/` . The trail logs data events for all objects in this S3 bucket.\n- To log data events for specific objects, specify the S3 bucket and object prefix such as `arn:aws:s3:::amzn-s3-demo-bucket1/example-images` . The trail logs data events for objects in this S3 bucket that match the prefix.\n- To log data events for all Lambda functions in your AWS account , specify the prefix as `arn:aws:lambda` .\n\n> This also enables logging of `Invoke` activity performed by any user or role in your AWS account , even if that activity is performed on a function that belongs to another AWS account .\n- To log data events for a specific Lambda function, specify the function ARN.\n\n> Lambda function ARNs are exact. For example, if you specify a function ARN *arn:aws:lambda:us-west-2:111111111111:function:helloworld* , data events will only be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld* . 
They will not be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld2* .\n- To log data events for all DynamoDB tables in your AWS account , specify the prefix as `arn:aws:dynamodb` .", "title": "Values", "type": "array" } @@ -44503,7 +44503,7 @@ "additionalProperties": false, "properties": { "Authentication": { - "markdownDescription": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be masked in the response. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", + "markdownDescription": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be redacted in the response. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhook trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", "title": "Authentication", "type": "string" }, @@ -44586,7 +44586,7 @@ "type": "string" }, "SecretToken": { - "markdownDescription": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. 
> If a secret token was provided, it will be masked in the response.", + "markdownDescription": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be redacted in the response.", "title": "SecretToken", "type": "string" } @@ -45666,12 +45666,12 @@ "items": { "$ref": "#/definitions/AWS::Cognito::LogDeliveryConfiguration.LogConfiguration" }, - "markdownDescription": "The detailed activity logging destination of a user pool.", + "markdownDescription": "A logging destination of a user pool. User pools can have multiple logging destinations for message-delivery and user-activity logs.", "title": "LogConfigurations", "type": "array" }, "UserPoolId": { - "markdownDescription": "The ID of the user pool where you configured detailed activity logging.", + "markdownDescription": "The ID of the user pool where you configured logging.", "title": "UserPoolId", "type": "string" } @@ -45718,16 +45718,16 @@ "properties": { "CloudWatchLogsConfiguration": { "$ref": "#/definitions/AWS::Cognito::LogDeliveryConfiguration.CloudWatchLogsConfiguration", - "markdownDescription": "The CloudWatch logging destination of a user pool detailed activity logging configuration.", + "markdownDescription": "Configuration for the CloudWatch log group destination of user pool detailed activity logging, or of user activity log export with advanced security features.", "title": "CloudWatchLogsConfiguration" }, "EventSource": { - "markdownDescription": "The source of events that your user pool sends for detailed activity logging.", + "markdownDescription": "The source of events that your user pool sends for logging. To send error-level logs about user notification activity, set to `userNotification` . To send info-level logs about advanced security features user activity, set to `userAuthEvents` .", "title": "EventSource", "type": "string" }, "LogLevel": { - "markdownDescription": "The `errorlevel` selection of logs that a user pool sends for detailed activity logging.", + "markdownDescription": "The `errorlevel` selection of logs that a user pool sends for detailed activity logging. To send `userNotification` activity with [information about message delivery](https://docs.aws.amazon.com/cognito/latest/developerguide/tracking-quotas-and-usage-in-cloud-watch-logs.html) , choose `ERROR` with `CloudWatchLogsConfiguration` . 
To send `userAuthEvents` activity with user logs from advanced security features, choose `INFO` with one of `CloudWatchLogsConfiguration` , `FirehoseConfiguration` , or `S3Configuration` .", "title": "LogLevel", "type": "string" } @@ -46338,7 +46338,7 @@ "additionalProperties": false, "properties": { "AdvancedSecurityMode": { - "markdownDescription": "The operating mode of advanced security features in your user pool.", + "markdownDescription": "The operating mode of advanced security features for standard authentication types in your user pool, including username-password and secure remote password (SRP) authentication.", "title": "AdvancedSecurityMode", "type": "string" } @@ -62326,7 +62326,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster.", + "markdownDescription": "The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your HDFS cluster.", "title": "AgentArns", "type": "array" }, @@ -62503,7 +62503,7 @@ }, "OnPremConfig": { "$ref": "#/definitions/AWS::DataSync::LocationNFS.OnPremConfig", - "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that want to connect to your NFS file server.\n\nYou can specify more than one agent. For more information, see [Using multiple agents for transfers](https://docs.aws.amazon.com/datasync/latest/userguide/multiple-agents.html) .", + "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect to your NFS file server.\n\nYou can specify more than one agent. For more information, see [Using multiple DataSync agents](https://docs.aws.amazon.com/datasync/latest/userguide/do-i-need-datasync-agent.html#multiple-agents) .", "title": "OnPremConfig" }, "ServerHostname": { @@ -62569,7 +62569,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Amazon Resource Names (ARNs) of the agents connecting to a transfer location.", + "markdownDescription": "The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your NFS file server.\n\nYou can specify more than one agent. For more information, see [Using multiple DataSync agents](https://docs.aws.amazon.com/datasync/latest/userguide/do-i-need-datasync-agent.html#multiple-agents) .", "title": "AgentArns", "type": "array" } @@ -62623,7 +62623,7 @@ "items": { "type": "string" }, - "markdownDescription": "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can securely connect with your location.", + "markdownDescription": "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can connect with your object storage system.", "title": "AgentArns", "type": "array" }, @@ -73182,7 +73182,7 @@ "title": "IamInstanceProfile" }, "ImageId": { - "markdownDescription": "The ID of the AMI. Alternatively, you can specify a Systems Manager parameter, which will resolve to an AMI ID on launch.\n\nValid formats:\n\n- `ami-17characters00000`\n- `resolve:ssm:parameter-name`\n- `resolve:ssm:parameter-name:version-number`\n- `resolve:ssm:parameter-name:label`\n\nFor more information, see [Use a Systems Manager parameter to find an AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html#using-systems-manager-parameter-to-find-AMI) in the *Amazon Elastic Compute Cloud User Guide* .", + "markdownDescription": "The ID of the AMI. 
Alternatively, you can specify a Systems Manager parameter, which will resolve to an AMI ID on launch.\n\nValid formats:\n\n- `ami-0ac394d6a3example`\n- `resolve:ssm:parameter-name`\n- `resolve:ssm:parameter-name:version-number`\n- `resolve:ssm:parameter-name:label`\n\nFor more information, see [Use a Systems Manager parameter to find an AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html#using-systems-manager-parameter-to-find-AMI) in the *Amazon Elastic Compute Cloud User Guide* .", "title": "ImageId", "type": "string" }, @@ -75020,7 +75020,7 @@ "type": "string" }, "destinationPrefixListId": { - "markdownDescription": "The prefix of the AWS service .", + "markdownDescription": "The prefix of the AWS service .", "title": "destinationPrefixListId", "type": "string" }, @@ -79196,8 +79196,6 @@ "type": "string" }, "SecurityGroupReferencingSupport": { - "markdownDescription": "Enables you to reference a security group across VPCs attached to a transit gateway (TGW). Use this option to simplify security group management and control of instance-to-instance traffic across VPCs that are connected by transit gateway. You can also use this option to migrate from VPC peering (which was the only option that supported security group referencing) to transit gateways (which now also support security group referencing). This option is disabled by default and there are no additional costs to use this feature.\n\nFor important information about this feature, see [Create a transit gateway](https://docs.aws.amazon.com/vpc/latest/tgw/tgw-transit-gateways.html#create-tgw) in the *AWS Transit Gateway Guide* .", - "title": "SecurityGroupReferencingSupport", "type": "string" } }, @@ -143582,7 +143580,7 @@ "type": "string" }, "Principal": { - "markdownDescription": "The AWS service or AWS account that invokes the function. If you specify a service, use `SourceArn` or `SourceAccount` to limit who can invoke the function through that service.", + "markdownDescription": "The AWS service or AWS account that invokes the function. If you specify a service, use `SourceArn` or `SourceAccount` to limit who can invoke the function through that service.", "title": "Principal", "type": "string" }, @@ -143592,12 +143590,12 @@ "type": "string" }, "SourceAccount": { - "markdownDescription": "For AWS service , the ID of the AWS account that owns the resource. Use this together with `SourceArn` to ensure that the specified account owns the resource. It is possible for an Amazon S3 bucket to be deleted by its owner and recreated by another account.", + "markdownDescription": "For AWS service , the ID of the AWS account that owns the resource. Use this together with `SourceArn` to ensure that the specified account owns the resource. It is possible for an Amazon S3 bucket to be deleted by its owner and recreated by another account.", "title": "SourceAccount", "type": "string" }, "SourceArn": { - "markdownDescription": "For AWS services , the ARN of the AWS resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic.\n\nNote that Lambda configures the comparison using the `StringLike` operator.", + "markdownDescription": "For AWS services , the ARN of the AWS resource that invokes the function. 
For example, an Amazon S3 bucket or Amazon SNS topic.\n\nNote that Lambda configures the comparison using the `StringLike` operator.", "title": "SourceArn", "type": "string" } @@ -161720,8 -161718,6 @@ "additionalProperties": false, "properties": { "ChannelId": { - "markdownDescription": "The unique ID of the channel.", - "title": "ChannelId", "type": "string" }, "MultiplexId": { @@ -167343,7 +167339,7 @@ "type": "string" }, "LogType": { - "markdownDescription": "The type of log to send. Alert logs report traffic that matches a stateful rule with an action setting that sends an alert log message. Flow logs are standard network traffic flow logs.", + "markdownDescription": "The type of log to record. You can record the following types of logs from your Network Firewall stateful engine.\n\n- `ALERT` - Logs for traffic that matches your stateful rules and that have an action that sends an alert. A stateful rule sends alerts for the rule actions DROP, ALERT, and REJECT. For more information, see the `StatefulRule` property.\n- `FLOW` - Standard network traffic flow logs. The stateful rules engine records flow logs for all network traffic that it receives. Each flow log record captures the network flow for a specific standard stateless rule group.\n- `TLS` - Logs for events that are related to TLS inspection. For more information, see [Inspecting SSL/TLS traffic with TLS inspection configurations](https://docs.aws.amazon.com/network-firewall/latest/developerguide/tls-inspection-configurations.html) in the *Network Firewall Developer Guide* .", "title": "LogType", "type": "string" } @@ -182693,7 +182689,7 @@ "type": "string" }, "OutputFormat": { - "markdownDescription": "The format EventBridge uses for the log records.\n\n- `json` : JSON\n- `plain` : Plain text\n- `w3c` : [W3C extended logging file format](https://docs.aws.amazon.com/https://www.w3.org/TR/WD-logfile)", + "markdownDescription": "The format EventBridge uses for the log records.\n\nEventBridge currently only supports `json` formatting.", "title": "OutputFormat", "type": "string" }, @@ -225421,7 +225417,7 @@ "type": "string" }, "RestoreTime": { - "markdownDescription": "The date and time to restore from.\n\nConstraints:\n\n- Must be a time in Universal Coordinated Time (UTC) format.\n- Must be before the latest restorable time for the DB instance.\n- Can't be specified if the `UseLatestRestorableTime` parameter is enabled.\n\nExample: `2009-09-07T23:45:00Z`", + "markdownDescription": "The date and time to restore from. This parameter applies to point-in-time recovery. For more information, see [Restoring a DB instance to a specified time](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIT.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Must be a time in Universal Coordinated Time (UTC) format.\n- Must be before the latest restorable time for the DB instance.\n- Can't be specified if the `UseLatestRestorableTime` parameter is enabled.\n\nExample: `2009-09-07T23:45:00Z`", "title": "RestoreTime", "type": "string" }, @@ -225484,7 +225480,7 @@ "type": "boolean" }, "UseLatestRestorableTime": { - "markdownDescription": "Specifies whether the DB instance is restored from the latest backup time. 
By default, the DB instance isn't restored from the latest backup time. This parameter applies to point-in-time recovery. For more information, see [Restoring a DB instance to a specified time](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIT.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Can't be specified if the `RestoreTime` parameter is provided.", "title": "UseLatestRestorableTime", "type": "boolean" }, @@ -235450,7 +235446,7 @@ "type": "boolean" }, "RestrictPublicBuckets": { - "markdownDescription": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to `TRUE` restricts access to this bucket to only AWS service principals and authorized users within this account if the bucket has a public policy.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.", + "markdownDescription": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to `TRUE` restricts access to this bucket to only AWS service principals and authorized users within this account if the bucket has a public policy.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.", "title": "RestrictPublicBuckets", "type": "boolean" } @@ -236317,7 +236313,7 @@ "type": "boolean" }, "RestrictPublicBuckets": { - "markdownDescription": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to `TRUE` restricts access to this bucket to only AWS service principals and authorized users within this account if the bucket has a public policy.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.", + "markdownDescription": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to `TRUE` restricts access to this bucket to only AWS service principals and authorized users within this account if the bucket has a public policy.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.", "title": "RestrictPublicBuckets", "type": "boolean" } @@ -237150,7 +237146,7 @@ "type": "boolean" }, "RestrictPublicBuckets": { - "markdownDescription": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. 
Setting this element to `TRUE` restricts access to this bucket to only AWS-service principals and authorized users within this account if the bucket has a public policy.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.", "title": "RestrictPublicBuckets", "type": "boolean" } @@ -238298,7 +238294,7 @@ "type": "boolean" }, "RestrictPublicBuckets": { - "markdownDescription": "Specifies whether Amazon S3 should restrict public bucket policies for buckets in this account. Setting this element to `TRUE` restricts access to buckets with public policies to only AWS service principals and authorized users within this account.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.\n\nThis property is not supported for Amazon S3 on Outposts.", + "markdownDescription": "Specifies whether Amazon S3 should restrict public bucket policies for buckets in this account. Setting this element to `TRUE` restricts access to buckets with public policies to only AWS-service principals and authorized users within this account.\n\nEnabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.\n\nThis property is not supported for Amazon S3 on Outposts.", "title": "RestrictPublicBuckets", "type": "boolean" } @@ -242419,12 +242415,12 @@ "additionalProperties": false, "properties": { "ApproveAfterDays": { - "markdownDescription": "The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of `7` means that patches are approved seven days after they are released.\n\nYou must specify a value for `ApproveAfterDays` .\n\nException: Not supported on Debian Server or Ubuntu Server.", + "markdownDescription": "The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of `7` means that patches are approved seven days after they are released.\n\n> This parameter is marked as not required, but your request must include a value for either `ApproveAfterDays` or `ApproveUntilDate` . \n\nNot supported for Debian Server or Ubuntu Server.", "title": "ApproveAfterDays", "type": "number" }, "ApproveUntilDate": { - "markdownDescription": "The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically. Not supported on Debian Server or Ubuntu Server.\n\nEnter dates in the format `YYYY-MM-DD` . For example, `2021-12-31` .", + "markdownDescription": "The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically.\n\nEnter dates in the format `YYYY-MM-DD` . For example, `2021-12-31` .\n\n> This parameter is marked as not required, but your request must include a value for either `ApproveUntilDate` or `ApproveAfterDays` . 
\n\nNot supported for Debian Server or Ubuntu Server.", "title": "ApproveUntilDate", "type": "string" }, @@ -255297,7 +255293,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.StringFilter" }, - "markdownDescription": "The identifier for the given resource type. For AWS resources that are identified by Amazon Resource Names (ARNs), this is the ARN. For AWS resources that lack ARNs, this is the identifier as defined by the AWS service that created the resource. For non- AWS resources, this is a unique identifier that is associated with the resource.\n\nArray Members: Minimum number of 1 item. Maximum number of 100 items.", + "markdownDescription": "The identifier for the given resource type. For AWS resources that are identified by Amazon Resource Names (ARNs), this is the ARN. For AWS resources that lack ARNs, this is the identifier as defined by the AWS-service that created the resource. For non-AWS resources, this is a unique identifier that is associated with the resource.\n\nArray Members: Minimum number of 1 item. Maximum number of 100 items.", "title": "ResourceId", "type": "array" }, @@ -255852,7 +255848,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" }, - "markdownDescription": "The unique identifier of a control across standards. Values for this field typically consist of an AWS service and a number, such as APIGateway.5.", + "markdownDescription": "The unique identifier of a control across standards. Values for this field typically consist of an AWS-service and a number, such as APIGateway.5.", "title": "ComplianceSecurityControlId", "type": "array" }, @@ -257217,7 +257213,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityLake::Subscriber.Source" }, - "markdownDescription": "Amazon Security Lake supports log and event collection for natively supported AWS services . For more information, see the [Amazon Security Lake User Guide](https://docs.aws.amazon.com//security-lake/latest/userguide/source-management.html) .", + "markdownDescription": "Amazon Security Lake supports log and event collection for natively supported AWS-services . 
For more information, see the [Amazon Security Lake User Guide](https://docs.aws.amazon.com//security-lake/latest/userguide/source-management.html) .", "title": "Sources", "type": "array" }, From 463d5eb05825626620e3843ba70f4d8afdb7d6dc Mon Sep 17 00:00:00 2001 From: Jacob Fuss <32497805+jfuss@users.noreply.github.com> Date: Wed, 14 Aug 2024 12:46:43 -0500 Subject: [PATCH 4/7] chore: Update Runtime to Python3.11 (#3634) Co-authored-by: Jacob Fuss --- .../templates/combination/all_policy_templates.yaml | 6 +++--- ...api_with_authorizers_invokefunction_set_none.yaml | 2 +- .../connector_function_to_location_place_index.yaml | 2 +- .../resources/templates/combination/depends_on.yaml | 2 +- .../function_with_custom_code_deploy.yaml | 2 +- .../combination/function_with_deployment_basic.yaml | 2 +- ..._with_deployment_default_role_managed_policy.yaml | 2 +- .../function_with_deployment_globals.yaml | 2 +- .../combination/function_with_http_api.yaml | 2 +- .../function_with_http_api_default_path.yaml | 2 +- .../combination/function_with_implicit_http_api.yaml | 2 +- .../combination/function_with_policy_templates.yaml | 2 +- .../combination/function_with_resource_refs.yaml | 4 ++-- .../templates/combination/http_api_with_auth.yaml | 2 +- .../combination/http_api_with_auth_updated.yaml | 2 +- ...with_fail_on_warnings_and_default_stage_name.yaml | 2 +- .../state_machine_with_policy_templates.yaml | 2 +- .../templates/single/basic_api_with_mode.yaml | 2 +- .../templates/single/basic_api_with_mode_update.yaml | 2 +- ...th_deployment_preference_alarms_intrinsic_if.yaml | 2 +- .../templates/single/function_with_role_path.yaml | 2 +- .../templates/single/state_machine_with_api.yaml | 2 +- .../input/api_http_with_default_iam_authorizer.yaml | 2 +- .../input/api_merge_definitions_with_any_method.yaml | 2 +- .../input/api_with_merge_definitions_null_paths.yaml | 2 +- ...ser_pool_with_new_property_and_cognito_event.yaml | 2 +- .../error_state_machine_with_api_intrinsics.yaml | 2 +- tests/translator/input/function_with_cw_event.yaml | 2 +- .../input/function_with_ignore_globals.yaml | 2 +- .../translator/input/function_with_null_events.yaml | 2 +- .../input/function_with_runtime_config.yaml | 12 ++++++------ tests/translator/input/inline_precedence.yaml | 2 +- .../input/managed_policies_everything.yaml | 2 +- tests/translator/input/managed_policies_minimal.yaml | 2 +- tests/translator/input/schema_validation_4.yaml | 2 +- tests/translator/input/state_machine_with_api.yaml | 2 +- .../input/state_machine_with_api_single.yaml | 2 +- .../state_machine_with_sam_policy_templates.yaml | 2 +- .../output/api_http_with_default_iam_authorizer.json | 2 +- .../api_merge_definitions_with_any_method.json | 2 +- .../api_with_merge_definitions_null_paths.json | 2 +- .../aws-cn/api_http_with_default_iam_authorizer.json | 2 +- .../api_merge_definitions_with_any_method.json | 2 +- .../api_with_merge_definitions_null_paths.json | 2 +- ...ser_pool_with_new_property_and_cognito_event.json | 2 +- .../output/aws-cn/function_with_cw_event.json | 2 +- .../output/aws-cn/function_with_ignore_globals.json | 2 +- .../output/aws-cn/function_with_null_events.json | 2 +- .../output/aws-cn/function_with_runtime_config.json | 12 ++++++------ .../translator/output/aws-cn/inline_precedence.json | 2 +- .../output/aws-cn/managed_policies_everything.json | 2 +- .../output/aws-cn/managed_policies_minimal.json | 2 +- .../output/aws-cn/schema_validation_4.json | 2 +- .../output/aws-cn/state_machine_with_api.json | 2 +- 
.../output/aws-cn/state_machine_with_api_single.json | 2 +- .../state_machine_with_sam_policy_templates.json | 2 +- .../api_http_with_default_iam_authorizer.json | 2 +- .../api_merge_definitions_with_any_method.json | 2 +- .../api_with_merge_definitions_null_paths.json | 2 +- ...ser_pool_with_new_property_and_cognito_event.json | 2 +- .../output/aws-us-gov/function_with_cw_event.json | 2 +- .../aws-us-gov/function_with_ignore_globals.json | 2 +- .../output/aws-us-gov/function_with_null_events.json | 2 +- .../aws-us-gov/function_with_runtime_config.json | 12 ++++++------ .../output/aws-us-gov/inline_precedence.json | 2 +- .../aws-us-gov/managed_policies_everything.json | 2 +- .../output/aws-us-gov/managed_policies_minimal.json | 2 +- .../output/aws-us-gov/schema_validation_4.json | 2 +- .../output/aws-us-gov/state_machine_with_api.json | 2 +- .../aws-us-gov/state_machine_with_api_single.json | 2 +- .../state_machine_with_sam_policy_templates.json | 2 +- ...ser_pool_with_new_property_and_cognito_event.json | 2 +- tests/translator/output/function_with_cw_event.json | 2 +- .../output/function_with_ignore_globals.json | 2 +- .../translator/output/function_with_null_events.json | 2 +- .../output/function_with_runtime_config.json | 12 ++++++------ tests/translator/output/inline_precedence.json | 2 +- .../output/managed_policies_everything.json | 2 +- .../translator/output/managed_policies_minimal.json | 2 +- tests/translator/output/schema_validation_4.json | 2 +- tests/translator/output/state_machine_with_api.json | 2 +- .../output/state_machine_with_api_single.json | 2 +- .../state_machine_with_sam_policy_templates.json | 2 +- tests/translator/test_translator.py | 2 +- 84 files changed, 107 insertions(+), 107 deletions(-) diff --git a/integration/resources/templates/combination/all_policy_templates.yaml b/integration/resources/templates/combination/all_policy_templates.yaml index e571be561..cf8ffc7d7 100644 --- a/integration/resources/templates/combination/all_policy_templates.yaml +++ b/integration/resources/templates/combination/all_policy_templates.yaml @@ -8,7 +8,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 Policies: - SQSPollerPolicy: @@ -123,7 +123,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 Policies: - SESEmailTemplateCrudPolicy: {} @@ -187,7 +187,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 Policies: - ElasticMapReduceModifyInstanceFleetPolicy: ClusterId: name diff --git a/integration/resources/templates/combination/api_with_authorizers_invokefunction_set_none.yaml b/integration/resources/templates/combination/api_with_authorizers_invokefunction_set_none.yaml index fe2fd256f..f3c42ae14 100644 --- a/integration/resources/templates/combination/api_with_authorizers_invokefunction_set_none.yaml +++ b/integration/resources/templates/combination/api_with_authorizers_invokefunction_set_none.yaml @@ -12,7 +12,7 @@ Resources: InlineCode: | print("hello") Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 Events: API3: Type: Api diff --git a/integration/resources/templates/combination/connector_function_to_location_place_index.yaml b/integration/resources/templates/combination/connector_function_to_location_place_index.yaml index 52c21c0a6..ba7b579e8 100644 --- a/integration/resources/templates/combination/connector_function_to_location_place_index.yaml +++ 
b/integration/resources/templates/combination/connector_function_to_location_place_index.yaml @@ -9,7 +9,7 @@ Resources: TriggerFunction: Type: AWS::Serverless::Function Properties: - Runtime: python3.8 + Runtime: python3.11 Handler: index.handler InlineCode: | import boto3 diff --git a/integration/resources/templates/combination/depends_on.yaml b/integration/resources/templates/combination/depends_on.yaml index 036b9b939..d1949b3b2 100644 --- a/integration/resources/templates/combination/depends_on.yaml +++ b/integration/resources/templates/combination/depends_on.yaml @@ -12,7 +12,7 @@ Resources: Role: Fn::GetAtt: LambdaRole.Arn Handler: lambda_function.lambda_handler - Runtime: python3.8 + Runtime: python3.11 Timeout: 15 CodeUri: ${codeuri} diff --git a/integration/resources/templates/combination/function_with_custom_code_deploy.yaml b/integration/resources/templates/combination/function_with_custom_code_deploy.yaml index b05ad644b..d85650312 100644 --- a/integration/resources/templates/combination/function_with_custom_code_deploy.yaml +++ b/integration/resources/templates/combination/function_with_custom_code_deploy.yaml @@ -10,7 +10,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 AutoPublishAlias: Live diff --git a/integration/resources/templates/combination/function_with_deployment_basic.yaml b/integration/resources/templates/combination/function_with_deployment_basic.yaml index 4fc5f175b..f02fe7bad 100644 --- a/integration/resources/templates/combination/function_with_deployment_basic.yaml +++ b/integration/resources/templates/combination/function_with_deployment_basic.yaml @@ -5,7 +5,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 AutoPublishAlias: Live diff --git a/integration/resources/templates/combination/function_with_deployment_default_role_managed_policy.yaml b/integration/resources/templates/combination/function_with_deployment_default_role_managed_policy.yaml index 7f4bb115f..3540d5d9f 100644 --- a/integration/resources/templates/combination/function_with_deployment_default_role_managed_policy.yaml +++ b/integration/resources/templates/combination/function_with_deployment_default_role_managed_policy.yaml @@ -4,7 +4,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 AutoPublishAlias: Live DeploymentPreference: Type: Canary10Percent5Minutes diff --git a/integration/resources/templates/combination/function_with_deployment_globals.yaml b/integration/resources/templates/combination/function_with_deployment_globals.yaml index 8b8c88e8d..eb0708dd3 100644 --- a/integration/resources/templates/combination/function_with_deployment_globals.yaml +++ b/integration/resources/templates/combination/function_with_deployment_globals.yaml @@ -16,7 +16,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 AutoPublishAlias: Live DeploymentRole: diff --git a/integration/resources/templates/combination/function_with_http_api.yaml b/integration/resources/templates/combination/function_with_http_api.yaml index ae453c88c..0911ddcc9 100644 --- a/integration/resources/templates/combination/function_with_http_api.yaml +++ b/integration/resources/templates/combination/function_with_http_api.yaml @@ -3,7 +3,7 @@ Resources: Type: AWS::Serverless::Function Properties: Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 InlineCode: | def 
handler(event, context): return {'body': 'Hello World!', 'statusCode': 200} diff --git a/integration/resources/templates/combination/function_with_http_api_default_path.yaml b/integration/resources/templates/combination/function_with_http_api_default_path.yaml index 469e50a9e..bb3234377 100644 --- a/integration/resources/templates/combination/function_with_http_api_default_path.yaml +++ b/integration/resources/templates/combination/function_with_http_api_default_path.yaml @@ -3,7 +3,7 @@ Resources: Type: AWS::Serverless::Function Properties: Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 InlineCode: | def handler(event, context): return {'body': 'Hello World!', 'statusCode': 200} diff --git a/integration/resources/templates/combination/function_with_implicit_http_api.yaml b/integration/resources/templates/combination/function_with_implicit_http_api.yaml index 3da86931b..441217886 100644 --- a/integration/resources/templates/combination/function_with_implicit_http_api.yaml +++ b/integration/resources/templates/combination/function_with_implicit_http_api.yaml @@ -4,7 +4,7 @@ Resources: Type: AWS::Serverless::Function Properties: Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 InlineCode: | def handler(event, context): return {'body': 'Hello World!', 'statusCode': 200} diff --git a/integration/resources/templates/combination/function_with_policy_templates.yaml b/integration/resources/templates/combination/function_with_policy_templates.yaml index bc7b99e09..08d73a8f1 100644 --- a/integration/resources/templates/combination/function_with_policy_templates.yaml +++ b/integration/resources/templates/combination/function_with_policy_templates.yaml @@ -14,7 +14,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 Policies: - SQSPollerPolicy: QueueName: diff --git a/integration/resources/templates/combination/function_with_resource_refs.yaml b/integration/resources/templates/combination/function_with_resource_refs.yaml index 4ce85a020..e4c1b9aae 100644 --- a/integration/resources/templates/combination/function_with_resource_refs.yaml +++ b/integration/resources/templates/combination/function_with_resource_refs.yaml @@ -10,14 +10,14 @@ Resources: Properties: CodeUri: ${codeuri} Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 AutoPublishAlias: Live MyOtherFunction: Type: AWS::Serverless::Function Properties: CodeUri: ${codeuri} - Runtime: python3.8 + Runtime: python3.11 Handler: hello.handler Environment: Variables: diff --git a/integration/resources/templates/combination/http_api_with_auth.yaml b/integration/resources/templates/combination/http_api_with_auth.yaml index 9aa90a092..3eb0eedb7 100644 --- a/integration/resources/templates/combination/http_api_with_auth.yaml +++ b/integration/resources/templates/combination/http_api_with_auth.yaml @@ -3,7 +3,7 @@ Resources: Type: AWS::Serverless::Function Properties: Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 InlineCode: | def handler(event, context): return {'body': 'Hello World!', 'statusCode': 200} diff --git a/integration/resources/templates/combination/http_api_with_auth_updated.yaml b/integration/resources/templates/combination/http_api_with_auth_updated.yaml index a1b40ab53..bfc35137e 100644 --- a/integration/resources/templates/combination/http_api_with_auth_updated.yaml +++ b/integration/resources/templates/combination/http_api_with_auth_updated.yaml @@ -3,7 +3,7 @@ Resources: Type: AWS::Serverless::Function 
Properties: Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 InlineCode: | def handler(event, context): return {'body': 'Hello World!', 'statusCode': 200} diff --git a/integration/resources/templates/combination/http_api_with_fail_on_warnings_and_default_stage_name.yaml b/integration/resources/templates/combination/http_api_with_fail_on_warnings_and_default_stage_name.yaml index a9b1800a0..d54723d42 100644 --- a/integration/resources/templates/combination/http_api_with_fail_on_warnings_and_default_stage_name.yaml +++ b/integration/resources/templates/combination/http_api_with_fail_on_warnings_and_default_stage_name.yaml @@ -16,7 +16,7 @@ Resources: InlineCode: | def handler(event, context): print("Hello, world!") - Runtime: python3.8 + Runtime: python3.11 Architectures: - x86_64 Events: diff --git a/integration/resources/templates/combination/state_machine_with_policy_templates.yaml b/integration/resources/templates/combination/state_machine_with_policy_templates.yaml index 967c8a477..2cf8fe0f0 100644 --- a/integration/resources/templates/combination/state_machine_with_policy_templates.yaml +++ b/integration/resources/templates/combination/state_machine_with_policy_templates.yaml @@ -24,7 +24,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 MyQueue: Type: AWS::SQS::Queue diff --git a/integration/resources/templates/single/basic_api_with_mode.yaml b/integration/resources/templates/single/basic_api_with_mode.yaml index 2fd8ee154..cbab574a9 100644 --- a/integration/resources/templates/single/basic_api_with_mode.yaml +++ b/integration/resources/templates/single/basic_api_with_mode.yaml @@ -9,7 +9,7 @@ Resources: Type: AWS::Serverless::Function Properties: Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 AutoPublishAlias: live InlineCode: | import json diff --git a/integration/resources/templates/single/basic_api_with_mode_update.yaml b/integration/resources/templates/single/basic_api_with_mode_update.yaml index 5b6376cbf..4810f2526 100644 --- a/integration/resources/templates/single/basic_api_with_mode_update.yaml +++ b/integration/resources/templates/single/basic_api_with_mode_update.yaml @@ -9,7 +9,7 @@ Resources: Type: AWS::Serverless::Function Properties: Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 AutoPublishAlias: live InlineCode: | def handler(event, context): diff --git a/integration/resources/templates/single/function_with_deployment_preference_alarms_intrinsic_if.yaml b/integration/resources/templates/single/function_with_deployment_preference_alarms_intrinsic_if.yaml index a736b3e1d..efd2f8d75 100644 --- a/integration/resources/templates/single/function_with_deployment_preference_alarms_intrinsic_if.yaml +++ b/integration/resources/templates/single/function_with_deployment_preference_alarms_intrinsic_if.yaml @@ -9,7 +9,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 AutoPublishAlias: live DeploymentPreference: Type: Linear10PercentEvery3Minutes diff --git a/integration/resources/templates/single/function_with_role_path.yaml b/integration/resources/templates/single/function_with_role_path.yaml index fb55ea958..e6e9dca2f 100644 --- a/integration/resources/templates/single/function_with_role_path.yaml +++ b/integration/resources/templates/single/function_with_role_path.yaml @@ -4,7 +4,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 
RolePath: /foo/bar/ Metadata: SamTransformTest: true diff --git a/integration/resources/templates/single/state_machine_with_api.yaml b/integration/resources/templates/single/state_machine_with_api.yaml index 8b3b5d58c..6b9b4e3d5 100644 --- a/integration/resources/templates/single/state_machine_with_api.yaml +++ b/integration/resources/templates/single/state_machine_with_api.yaml @@ -14,7 +14,7 @@ Resources: print(event) return "do nothing" Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 Post: Type: AWS::Serverless::StateMachine Properties: diff --git a/tests/translator/input/api_http_with_default_iam_authorizer.yaml b/tests/translator/input/api_http_with_default_iam_authorizer.yaml index a2bfabbb2..4d7d96107 100644 --- a/tests/translator/input/api_http_with_default_iam_authorizer.yaml +++ b/tests/translator/input/api_http_with_default_iam_authorizer.yaml @@ -4,7 +4,7 @@ Resources: Properties: CodeUri: s3://bucket/key Handler: app.lambda_handler - Runtime: python3.8 + Runtime: python3.11 Role: Fn::GetAtt: - HelloWorldFunctionRole diff --git a/tests/translator/input/api_merge_definitions_with_any_method.yaml b/tests/translator/input/api_merge_definitions_with_any_method.yaml index 0b50b1aed..3e689f453 100644 --- a/tests/translator/input/api_merge_definitions_with_any_method.yaml +++ b/tests/translator/input/api_merge_definitions_with_any_method.yaml @@ -15,7 +15,7 @@ Resources: Properties: CodeUri: s3://bucket/key Handler: code/handler - Runtime: python3.8 + Runtime: python3.11 Events: AllEvent: Type: Api diff --git a/tests/translator/input/api_with_merge_definitions_null_paths.yaml b/tests/translator/input/api_with_merge_definitions_null_paths.yaml index 797eac94c..4c3b19f65 100644 --- a/tests/translator/input/api_with_merge_definitions_null_paths.yaml +++ b/tests/translator/input/api_with_merge_definitions_null_paths.yaml @@ -16,7 +16,7 @@ Resources: Properties: CodeUri: s3://bucket/key Handler: code/handler - Runtime: python3.8 + Runtime: python3.11 Events: AllEvent: Type: Api diff --git a/tests/translator/input/cognito_user_pool_with_new_property_and_cognito_event.yaml b/tests/translator/input/cognito_user_pool_with_new_property_and_cognito_event.yaml index 60056fcbc..9ddac00bb 100644 --- a/tests/translator/input/cognito_user_pool_with_new_property_and_cognito_event.yaml +++ b/tests/translator/input/cognito_user_pool_with_new_property_and_cognito_event.yaml @@ -8,7 +8,7 @@ Resources: MyFunction: Type: AWS::Serverless::Function Properties: - Runtime: python3.8 + Runtime: python3.11 InlineCode: foo Handler: bar Events: diff --git a/tests/translator/input/error_state_machine_with_api_intrinsics.yaml b/tests/translator/input/error_state_machine_with_api_intrinsics.yaml index 5e45422de..e6739d6a9 100644 --- a/tests/translator/input/error_state_machine_with_api_intrinsics.yaml +++ b/tests/translator/input/error_state_machine_with_api_intrinsics.yaml @@ -12,7 +12,7 @@ Resources: print(event) return "do nothing" Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 Post: Type: AWS::Serverless::StateMachine Properties: diff --git a/tests/translator/input/function_with_cw_event.yaml b/tests/translator/input/function_with_cw_event.yaml index 4e550988b..5f62cefc1 100644 --- a/tests/translator/input/function_with_cw_event.yaml +++ b/tests/translator/input/function_with_cw_event.yaml @@ -3,7 +3,7 @@ Resources: MyFunction: Type: AWS::Serverless::Function Properties: - Runtime: python3.8 + Runtime: python3.11 Handler: foo InlineCode: bar Events: diff --git 
a/tests/translator/input/function_with_ignore_globals.yaml b/tests/translator/input/function_with_ignore_globals.yaml index e16fcb91b..a36c02014 100644 --- a/tests/translator/input/function_with_ignore_globals.yaml +++ b/tests/translator/input/function_with_ignore_globals.yaml @@ -1,6 +1,6 @@ Globals: Function: - Runtime: python3.8 + Runtime: python3.11 Handler: index.lambda_handler MemorySize: 128 diff --git a/tests/translator/input/function_with_null_events.yaml b/tests/translator/input/function_with_null_events.yaml index 5dc591caa..fd9ac2447 100644 --- a/tests/translator/input/function_with_null_events.yaml +++ b/tests/translator/input/function_with_null_events.yaml @@ -4,5 +4,5 @@ Resources: Properties: CodeUri: s3://sam-demo-bucket/queues.zip Handler: handlers.handler - Runtime: python3.8 + Runtime: python3.11 Events: diff --git a/tests/translator/input/function_with_runtime_config.yaml b/tests/translator/input/function_with_runtime_config.yaml index a9302aa10..1c5d6e565 100644 --- a/tests/translator/input/function_with_runtime_config.yaml +++ b/tests/translator/input/function_with_runtime_config.yaml @@ -12,7 +12,7 @@ Resources: Properties: CodeUri: s3://sam-demo-bucket/hello.zip Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 RuntimeManagementConfig: UpdateRuntimeOn: Auto MinimalFunctionWithManualRuntimeManagementConfig: @@ -20,16 +20,16 @@ Resources: Properties: CodeUri: s3://sam-demo-bucket/hello.zip Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 RuntimeManagementConfig: UpdateRuntimeOn: Manual - RuntimeVersionArn: !Sub arn:aws:lambda:${AWS::Region}::runtime:python3.8::0af1966588ced06e3143ae720245c9b7aeaae213c6921c12c742a166679cc505 + RuntimeVersionArn: !Sub arn:aws:lambda:${AWS::Region}::runtime:python3.11::0af1966588ced06e3143ae720245c9b7aeaae213c6921c12c742a166679cc505 FunctionWithRuntimeManagementConfigAndAlias: Type: AWS::Serverless::Function Properties: CodeUri: s3://sam-demo-bucket/hello.zip Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 AutoPublishAlias: live RuntimeManagementConfig: UpdateRuntimeOn: Auto @@ -38,7 +38,7 @@ Resources: Properties: CodeUri: s3://sam-demo-bucket/hello.zip Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 RuntimeManagementConfig: UpdateRuntimeOn: !Ref RuntimeUpdateParam FunctionWithIntrinsicRuntimeVersion: @@ -46,7 +46,7 @@ Resources: Properties: CodeUri: s3://sam-demo-bucket/hello.zip Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 RuntimeManagementConfig: UpdateRuntimeOn: !Ref RuntimeUpdateParam RuntimeVersionArn: !Ref RuntimeVersionParam diff --git a/tests/translator/input/inline_precedence.yaml b/tests/translator/input/inline_precedence.yaml index 6b96bab62..51bef8b22 100644 --- a/tests/translator/input/inline_precedence.yaml +++ b/tests/translator/input/inline_precedence.yaml @@ -7,4 +7,4 @@ Resources: pass" CodeUri: . 
Handler: index.lambda_handler - Runtime: python3.8 + Runtime: python3.11 diff --git a/tests/translator/input/managed_policies_everything.yaml b/tests/translator/input/managed_policies_everything.yaml index 80b724ff1..0a5592414 100644 --- a/tests/translator/input/managed_policies_everything.yaml +++ b/tests/translator/input/managed_policies_everything.yaml @@ -13,7 +13,7 @@ Resources: MyFunction: Type: AWS::Serverless::Function Properties: - Runtime: python3.8 + Runtime: python3.11 Handler: foo InlineCode: bar Policies: diff --git a/tests/translator/input/managed_policies_minimal.yaml b/tests/translator/input/managed_policies_minimal.yaml index 8f82c6cc2..5e2d40bca 100644 --- a/tests/translator/input/managed_policies_minimal.yaml +++ b/tests/translator/input/managed_policies_minimal.yaml @@ -2,7 +2,7 @@ Resources: MyFunction: Type: AWS::Serverless::Function Properties: - Runtime: python3.8 + Runtime: python3.11 Handler: foo InlineCode: bar Policies: diff --git a/tests/translator/input/schema_validation_4.yaml b/tests/translator/input/schema_validation_4.yaml index d790743dc..41070daeb 100644 --- a/tests/translator/input/schema_validation_4.yaml +++ b/tests/translator/input/schema_validation_4.yaml @@ -59,7 +59,7 @@ Resources: OtherFunction: Type: AWS::Serverless::Function Properties: - Runtime: python3.8 + Runtime: python3.11 Handler: foo InlineCode: bar Environment: diff --git a/tests/translator/input/state_machine_with_api.yaml b/tests/translator/input/state_machine_with_api.yaml index 086995574..df4001847 100644 --- a/tests/translator/input/state_machine_with_api.yaml +++ b/tests/translator/input/state_machine_with_api.yaml @@ -12,7 +12,7 @@ Resources: print(event) return "do nothing" Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 Post: Type: AWS::Serverless::StateMachine Properties: diff --git a/tests/translator/input/state_machine_with_api_single.yaml b/tests/translator/input/state_machine_with_api_single.yaml index 0fe28583e..3720506d8 100644 --- a/tests/translator/input/state_machine_with_api_single.yaml +++ b/tests/translator/input/state_machine_with_api_single.yaml @@ -12,7 +12,7 @@ Resources: print(event) return "do nothing" Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 Post: Type: AWS::Serverless::StateMachine Properties: diff --git a/tests/translator/input/state_machine_with_sam_policy_templates.yaml b/tests/translator/input/state_machine_with_sam_policy_templates.yaml index 388ddea30..9671ace84 100644 --- a/tests/translator/input/state_machine_with_sam_policy_templates.yaml +++ b/tests/translator/input/state_machine_with_sam_policy_templates.yaml @@ -14,7 +14,7 @@ Resources: Properties: CodeUri: s3://sam-demo-bucket/resolver.zip Handler: resolver.handler - Runtime: python3.8 + Runtime: python3.11 NestedWorkflow: Type: AWS::Serverless::StateMachine diff --git a/tests/translator/output/api_http_with_default_iam_authorizer.json b/tests/translator/output/api_http_with_default_iam_authorizer.json index 6d778afbf..bc27df20b 100644 --- a/tests/translator/output/api_http_with_default_iam_authorizer.json +++ b/tests/translator/output/api_http_with_default_iam_authorizer.json @@ -16,7 +16,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/api_merge_definitions_with_any_method.json b/tests/translator/output/api_merge_definitions_with_any_method.json index 6d8a0ea0e..c6b65e2bb 100644 --- a/tests/translator/output/api_merge_definitions_with_any_method.json +++ 
b/tests/translator/output/api_merge_definitions_with_any_method.json @@ -46,7 +46,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/api_with_merge_definitions_null_paths.json b/tests/translator/output/api_with_merge_definitions_null_paths.json index 3cddd4eef..91b39e6e8 100644 --- a/tests/translator/output/api_with_merge_definitions_null_paths.json +++ b/tests/translator/output/api_with_merge_definitions_null_paths.json @@ -47,7 +47,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/api_http_with_default_iam_authorizer.json b/tests/translator/output/aws-cn/api_http_with_default_iam_authorizer.json index 6d778afbf..bc27df20b 100644 --- a/tests/translator/output/aws-cn/api_http_with_default_iam_authorizer.json +++ b/tests/translator/output/aws-cn/api_http_with_default_iam_authorizer.json @@ -16,7 +16,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/api_merge_definitions_with_any_method.json b/tests/translator/output/aws-cn/api_merge_definitions_with_any_method.json index de73f4dc3..c31894edc 100644 --- a/tests/translator/output/aws-cn/api_merge_definitions_with_any_method.json +++ b/tests/translator/output/aws-cn/api_merge_definitions_with_any_method.json @@ -54,7 +54,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/api_with_merge_definitions_null_paths.json b/tests/translator/output/aws-cn/api_with_merge_definitions_null_paths.json index 0a754aa07..1720309cf 100644 --- a/tests/translator/output/aws-cn/api_with_merge_definitions_null_paths.json +++ b/tests/translator/output/aws-cn/api_with_merge_definitions_null_paths.json @@ -55,7 +55,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/cognito_user_pool_with_new_property_and_cognito_event.json b/tests/translator/output/aws-cn/cognito_user_pool_with_new_property_and_cognito_event.json index 94cc8b3f4..fadd1cc52 100644 --- a/tests/translator/output/aws-cn/cognito_user_pool_with_new_property_and_cognito_event.json +++ b/tests/translator/output/aws-cn/cognito_user_pool_with_new_property_and_cognito_event.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/function_with_cw_event.json b/tests/translator/output/aws-cn/function_with_cw_event.json index 4677081d1..fe9f87db8 100644 --- a/tests/translator/output/aws-cn/function_with_cw_event.json +++ b/tests/translator/output/aws-cn/function_with_cw_event.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/function_with_ignore_globals.json b/tests/translator/output/aws-cn/function_with_ignore_globals.json index 6b6e7ecea..df3f1b073 100644 --- a/tests/translator/output/aws-cn/function_with_ignore_globals.json +++ b/tests/translator/output/aws-cn/function_with_ignore_globals.json @@ -125,7 +125,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/function_with_null_events.json 
b/tests/translator/output/aws-cn/function_with_null_events.json index 5c3342984..28bce77ae 100644 --- a/tests/translator/output/aws-cn/function_with_null_events.json +++ b/tests/translator/output/aws-cn/function_with_null_events.json @@ -13,7 +13,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/function_with_runtime_config.json b/tests/translator/output/aws-cn/function_with_runtime_config.json index 3c580a92c..46b43274c 100644 --- a/tests/translator/output/aws-cn/function_with_runtime_config.json +++ b/tests/translator/output/aws-cn/function_with_runtime_config.json @@ -21,7 +21,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "RuntimeVersionArn": { "Ref": "RuntimeVersionParam" @@ -82,7 +82,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "UpdateRuntimeOn": { "Ref": "RuntimeUpdateParam" @@ -140,7 +140,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "UpdateRuntimeOn": "Auto" }, @@ -166,7 +166,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "UpdateRuntimeOn": "Auto" }, @@ -276,10 +276,10 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "RuntimeVersionArn": { - "Fn::Sub": "arn:aws:lambda:${AWS::Region}::runtime:python3.8::0af1966588ced06e3143ae720245c9b7aeaae213c6921c12c742a166679cc505" + "Fn::Sub": "arn:aws:lambda:${AWS::Region}::runtime:python3.11::0af1966588ced06e3143ae720245c9b7aeaae213c6921c12c742a166679cc505" }, "UpdateRuntimeOn": "Manual" }, diff --git a/tests/translator/output/aws-cn/inline_precedence.json b/tests/translator/output/aws-cn/inline_precedence.json index 51cf5d4a6..e6bdd079b 100644 --- a/tests/translator/output/aws-cn/inline_precedence.json +++ b/tests/translator/output/aws-cn/inline_precedence.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/managed_policies_everything.json b/tests/translator/output/aws-cn/managed_policies_everything.json index 1f5a57467..6340b370e 100644 --- a/tests/translator/output/aws-cn/managed_policies_everything.json +++ b/tests/translator/output/aws-cn/managed_policies_everything.json @@ -27,7 +27,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/managed_policies_minimal.json b/tests/translator/output/aws-cn/managed_policies_minimal.json index 79c1a514b..cdaa3d491 100644 --- a/tests/translator/output/aws-cn/managed_policies_minimal.json +++ b/tests/translator/output/aws-cn/managed_policies_minimal.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/schema_validation_4.json b/tests/translator/output/aws-cn/schema_validation_4.json index 27c10b5a6..7bf45a916 100644 --- a/tests/translator/output/aws-cn/schema_validation_4.json +++ b/tests/translator/output/aws-cn/schema_validation_4.json @@ -119,7 +119,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/state_machine_with_api.json b/tests/translator/output/aws-cn/state_machine_with_api.json index 0e355ebbe..d10441cb9 100644 
--- a/tests/translator/output/aws-cn/state_machine_with_api.json +++ b/tests/translator/output/aws-cn/state_machine_with_api.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/state_machine_with_api_single.json b/tests/translator/output/aws-cn/state_machine_with_api_single.json index 706c4eca5..d18c4618a 100644 --- a/tests/translator/output/aws-cn/state_machine_with_api_single.json +++ b/tests/translator/output/aws-cn/state_machine_with_api_single.json @@ -22,7 +22,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/state_machine_with_sam_policy_templates.json b/tests/translator/output/aws-cn/state_machine_with_sam_policy_templates.json index afe1e2689..f759b27eb 100644 --- a/tests/translator/output/aws-cn/state_machine_with_sam_policy_templates.json +++ b/tests/translator/output/aws-cn/state_machine_with_sam_policy_templates.json @@ -394,7 +394,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/api_http_with_default_iam_authorizer.json b/tests/translator/output/aws-us-gov/api_http_with_default_iam_authorizer.json index 6d778afbf..bc27df20b 100644 --- a/tests/translator/output/aws-us-gov/api_http_with_default_iam_authorizer.json +++ b/tests/translator/output/aws-us-gov/api_http_with_default_iam_authorizer.json @@ -16,7 +16,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/api_merge_definitions_with_any_method.json b/tests/translator/output/aws-us-gov/api_merge_definitions_with_any_method.json index 8ec856af8..1ee01d89e 100644 --- a/tests/translator/output/aws-us-gov/api_merge_definitions_with_any_method.json +++ b/tests/translator/output/aws-us-gov/api_merge_definitions_with_any_method.json @@ -54,7 +54,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/api_with_merge_definitions_null_paths.json b/tests/translator/output/aws-us-gov/api_with_merge_definitions_null_paths.json index 9da42fe51..1a4a14964 100644 --- a/tests/translator/output/aws-us-gov/api_with_merge_definitions_null_paths.json +++ b/tests/translator/output/aws-us-gov/api_with_merge_definitions_null_paths.json @@ -55,7 +55,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/cognito_user_pool_with_new_property_and_cognito_event.json b/tests/translator/output/aws-us-gov/cognito_user_pool_with_new_property_and_cognito_event.json index 2d688bb9b..3958494af 100644 --- a/tests/translator/output/aws-us-gov/cognito_user_pool_with_new_property_and_cognito_event.json +++ b/tests/translator/output/aws-us-gov/cognito_user_pool_with_new_property_and_cognito_event.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/function_with_cw_event.json b/tests/translator/output/aws-us-gov/function_with_cw_event.json index 0cccfd6fa..d82391fad 100644 --- a/tests/translator/output/aws-us-gov/function_with_cw_event.json +++ b/tests/translator/output/aws-us-gov/function_with_cw_event.json @@ -12,7 +12,7 @@ 
"Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/function_with_ignore_globals.json b/tests/translator/output/aws-us-gov/function_with_ignore_globals.json index ad0011e53..1468e72af 100644 --- a/tests/translator/output/aws-us-gov/function_with_ignore_globals.json +++ b/tests/translator/output/aws-us-gov/function_with_ignore_globals.json @@ -125,7 +125,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/function_with_null_events.json b/tests/translator/output/aws-us-gov/function_with_null_events.json index 18620761f..5f0572afc 100644 --- a/tests/translator/output/aws-us-gov/function_with_null_events.json +++ b/tests/translator/output/aws-us-gov/function_with_null_events.json @@ -13,7 +13,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/function_with_runtime_config.json b/tests/translator/output/aws-us-gov/function_with_runtime_config.json index bd585a2af..91bd9cb7f 100644 --- a/tests/translator/output/aws-us-gov/function_with_runtime_config.json +++ b/tests/translator/output/aws-us-gov/function_with_runtime_config.json @@ -21,7 +21,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "RuntimeVersionArn": { "Ref": "RuntimeVersionParam" @@ -82,7 +82,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "UpdateRuntimeOn": { "Ref": "RuntimeUpdateParam" @@ -140,7 +140,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "UpdateRuntimeOn": "Auto" }, @@ -166,7 +166,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "UpdateRuntimeOn": "Auto" }, @@ -276,10 +276,10 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "RuntimeVersionArn": { - "Fn::Sub": "arn:aws:lambda:${AWS::Region}::runtime:python3.8::0af1966588ced06e3143ae720245c9b7aeaae213c6921c12c742a166679cc505" + "Fn::Sub": "arn:aws:lambda:${AWS::Region}::runtime:python3.11::0af1966588ced06e3143ae720245c9b7aeaae213c6921c12c742a166679cc505" }, "UpdateRuntimeOn": "Manual" }, diff --git a/tests/translator/output/aws-us-gov/inline_precedence.json b/tests/translator/output/aws-us-gov/inline_precedence.json index 8e94957b4..233946f39 100644 --- a/tests/translator/output/aws-us-gov/inline_precedence.json +++ b/tests/translator/output/aws-us-gov/inline_precedence.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/managed_policies_everything.json b/tests/translator/output/aws-us-gov/managed_policies_everything.json index 8d726b36e..457de474e 100644 --- a/tests/translator/output/aws-us-gov/managed_policies_everything.json +++ b/tests/translator/output/aws-us-gov/managed_policies_everything.json @@ -27,7 +27,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/managed_policies_minimal.json b/tests/translator/output/aws-us-gov/managed_policies_minimal.json index 2086a5129..4d6712762 100644 --- a/tests/translator/output/aws-us-gov/managed_policies_minimal.json +++ 
b/tests/translator/output/aws-us-gov/managed_policies_minimal.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/schema_validation_4.json b/tests/translator/output/aws-us-gov/schema_validation_4.json index 23a41fffc..96c4ab21a 100644 --- a/tests/translator/output/aws-us-gov/schema_validation_4.json +++ b/tests/translator/output/aws-us-gov/schema_validation_4.json @@ -119,7 +119,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/state_machine_with_api.json b/tests/translator/output/aws-us-gov/state_machine_with_api.json index 1293ceeed..206df14e6 100644 --- a/tests/translator/output/aws-us-gov/state_machine_with_api.json +++ b/tests/translator/output/aws-us-gov/state_machine_with_api.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/state_machine_with_api_single.json b/tests/translator/output/aws-us-gov/state_machine_with_api_single.json index 5b0d5b3d9..2648c24ff 100644 --- a/tests/translator/output/aws-us-gov/state_machine_with_api_single.json +++ b/tests/translator/output/aws-us-gov/state_machine_with_api_single.json @@ -22,7 +22,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/state_machine_with_sam_policy_templates.json b/tests/translator/output/aws-us-gov/state_machine_with_sam_policy_templates.json index e3e9297b1..c63baf7c2 100644 --- a/tests/translator/output/aws-us-gov/state_machine_with_sam_policy_templates.json +++ b/tests/translator/output/aws-us-gov/state_machine_with_sam_policy_templates.json @@ -394,7 +394,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/cognito_user_pool_with_new_property_and_cognito_event.json b/tests/translator/output/cognito_user_pool_with_new_property_and_cognito_event.json index 7636c5ffe..e3a0f8511 100644 --- a/tests/translator/output/cognito_user_pool_with_new_property_and_cognito_event.json +++ b/tests/translator/output/cognito_user_pool_with_new_property_and_cognito_event.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/function_with_cw_event.json b/tests/translator/output/function_with_cw_event.json index 745a57e6b..cd94b0d9d 100644 --- a/tests/translator/output/function_with_cw_event.json +++ b/tests/translator/output/function_with_cw_event.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/function_with_ignore_globals.json b/tests/translator/output/function_with_ignore_globals.json index 8c3add664..edb7d7cd2 100644 --- a/tests/translator/output/function_with_ignore_globals.json +++ b/tests/translator/output/function_with_ignore_globals.json @@ -125,7 +125,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/function_with_null_events.json b/tests/translator/output/function_with_null_events.json index 7cc118ad2..b1bbcdad5 100644 --- a/tests/translator/output/function_with_null_events.json 
+++ b/tests/translator/output/function_with_null_events.json @@ -13,7 +13,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/function_with_runtime_config.json b/tests/translator/output/function_with_runtime_config.json index 1a523f7fb..60f79deb0 100644 --- a/tests/translator/output/function_with_runtime_config.json +++ b/tests/translator/output/function_with_runtime_config.json @@ -21,7 +21,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "RuntimeVersionArn": { "Ref": "RuntimeVersionParam" @@ -82,7 +82,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "UpdateRuntimeOn": { "Ref": "RuntimeUpdateParam" @@ -140,7 +140,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "UpdateRuntimeOn": "Auto" }, @@ -166,7 +166,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "UpdateRuntimeOn": "Auto" }, @@ -276,10 +276,10 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "RuntimeVersionArn": { - "Fn::Sub": "arn:aws:lambda:${AWS::Region}::runtime:python3.8::0af1966588ced06e3143ae720245c9b7aeaae213c6921c12c742a166679cc505" + "Fn::Sub": "arn:aws:lambda:${AWS::Region}::runtime:python3.11::0af1966588ced06e3143ae720245c9b7aeaae213c6921c12c742a166679cc505" }, "UpdateRuntimeOn": "Manual" }, diff --git a/tests/translator/output/inline_precedence.json b/tests/translator/output/inline_precedence.json index 423d7d291..7fd836c8f 100644 --- a/tests/translator/output/inline_precedence.json +++ b/tests/translator/output/inline_precedence.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/managed_policies_everything.json b/tests/translator/output/managed_policies_everything.json index a5d744384..7eba902ca 100644 --- a/tests/translator/output/managed_policies_everything.json +++ b/tests/translator/output/managed_policies_everything.json @@ -27,7 +27,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/managed_policies_minimal.json b/tests/translator/output/managed_policies_minimal.json index 87a39ca3c..8c99ddad2 100644 --- a/tests/translator/output/managed_policies_minimal.json +++ b/tests/translator/output/managed_policies_minimal.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/schema_validation_4.json b/tests/translator/output/schema_validation_4.json index 546e207e3..3c061e2d3 100644 --- a/tests/translator/output/schema_validation_4.json +++ b/tests/translator/output/schema_validation_4.json @@ -119,7 +119,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/state_machine_with_api.json b/tests/translator/output/state_machine_with_api.json index fd0c27c20..578b67e59 100644 --- a/tests/translator/output/state_machine_with_api.json +++ b/tests/translator/output/state_machine_with_api.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/state_machine_with_api_single.json 
b/tests/translator/output/state_machine_with_api_single.json index aca9c79ba..1f5ef12c7 100644 --- a/tests/translator/output/state_machine_with_api_single.json +++ b/tests/translator/output/state_machine_with_api_single.json @@ -22,7 +22,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/state_machine_with_sam_policy_templates.json b/tests/translator/output/state_machine_with_sam_policy_templates.json index 848257ccd..7108504af 100644 --- a/tests/translator/output/state_machine_with_sam_policy_templates.json +++ b/tests/translator/output/state_machine_with_sam_policy_templates.json @@ -394,7 +394,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/test_translator.py b/tests/translator/test_translator.py index 6090b1093..802782666 100644 --- a/tests/translator/test_translator.py +++ b/tests/translator/test_translator.py @@ -781,7 +781,7 @@ class TestTemplateValidation(TestCase): "MyFunction": { "Type": "AWS::Serverless::Function", "Properties": { - "Runtime": "python3.8", + "Runtime": "python3.11", "Handler": "foo", "InlineCode": "bar", "Policies": [ From 806998f9438c8c4528e2c8b582a9c7f5658e8521 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 20 Aug 2024 13:39:07 -0700 Subject: [PATCH 5/7] chore(schema): update (#3642) Co-authored-by: github-actions --- samtranslator/schema/schema.json | 126 +++++----- schema_source/cloudformation-docs.json | 284 +++++++++++++++++------ schema_source/cloudformation.schema.json | 126 +++++----- 3 files changed, 341 insertions(+), 195 deletions(-) diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index 38220d293..011e6f63e 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -8149,9 +8149,13 @@ "additionalProperties": false, "properties": { "Destination": { + "markdownDescription": "Specifies the location of the response to modify, and how to modify it. To learn more, see [Transforming API requests and responses](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) .", + "title": "Destination", "type": "string" }, "Source": { + "markdownDescription": "Specifies the data to update the parameter with. To learn more, see [Transforming API requests and responses](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) .", + "title": "Source", "type": "string" } }, @@ -41920,7 +41924,7 @@ "properties": { "Auth": { "$ref": "#/definitions/AWS::CodeBuild::Project.SourceAuth", - "markdownDescription": "Information about the authorization settings for AWS CodeBuild to access the source code to be built.\n\nThis information is for the AWS CodeBuild console's use only. 
Your code should not get or set `Auth` directly.", + "markdownDescription": "Information about the authorization settings for AWS CodeBuild to access the source code to be built.", "title": "Auth" }, "BuildSpec": { @@ -41978,12 +41982,12 @@ "additionalProperties": false, "properties": { "Resource": { - "markdownDescription": "The resource value that applies to the specified authorization type.\n\n> This data type is used by the AWS CodeBuild console only.", + "markdownDescription": "The resource value that applies to the specified authorization type.", "title": "Resource", "type": "string" }, "Type": { - "markdownDescription": "The authorization type to use. The only valid value is `OAUTH` , which represents the OAuth authorization type.\n\n> This data type is used by the AWS CodeBuild console only.", + "markdownDescription": "The authorization type to use. Valid options are OAUTH, CODECONNECTIONS, or SECRETS_MANAGER.", "title": "Type", "type": "string" } @@ -42230,7 +42234,7 @@ "additionalProperties": false, "properties": { "AuthType": { - "markdownDescription": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, or CODECONNECTIONS.", + "markdownDescription": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, CODECONNECTIONS, or SECRETS_MANAGER.", "title": "AuthType", "type": "string" }, @@ -42240,7 +42244,7 @@ "type": "string" }, "Token": { - "markdownDescription": "For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is either the access token or the app password. For the `authType` CODECONNECTIONS, this is the `connectionArn` .", + "markdownDescription": "For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is either the access token or the app password. For the `authType` CODECONNECTIONS, this is the `connectionArn` . For the `authType` SECRETS_MANAGER, this is the `secretArn` .", "title": "Token", "type": "string" }, @@ -72628,7 +72632,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::LaunchTemplate.LaunchTemplateTagSpecification" }, - "markdownDescription": "The tags to apply to the launch template on creation. To tag the launch template, the resource type must be `launch-template` .\n\nTo specify the tags for the resources that are created when an instance is launched, you must use [TagSpecifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html#cfn-ec2-launchtemplate-tagspecifications) .", + "markdownDescription": "The tags to apply to the launch template on creation. To tag the launch template, the resource type must be `launch-template` .\n\nTo specify the tags for the resources that are created when an instance is launched, you must use [TagSpecifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-launchtemplate-launchtemplatedata.html#cfn-ec2-launchtemplate-launchtemplatedata-tagspecifications) .", "title": "TagSpecifications", "type": "array" }, @@ -84034,7 +84038,7 @@ }, "LogConfiguration": { "$ref": "#/definitions/AWS::ECS::Service.LogConfiguration", - "markdownDescription": "The log configuration for the container. 
This parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [`docker run`](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/run/) .\n\nBy default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition. For more information about the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\nUnderstand the following when specifying a log configuration for your containers.\n\n- Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n- This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.\n- For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .\n- For tasks that are on AWS Fargate , because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.", + "markdownDescription": "The log configuration for the container. This parameter maps to `LogConfig` in the docker container create command and the `--log-driver` option to docker run.\n\nBy default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition.\n\nUnderstand the following when specifying a log configuration for your containers.\n\n- Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon.
Additional log drivers may be available in future releases of the Amazon ECS container agent.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n- This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.\n- For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .\n- For tasks that are on AWS Fargate , because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.", "title": "LogConfiguration" }, "Namespace": { @@ -84328,7 +84332,7 @@ "type": "array" }, "IpcMode": { - "markdownDescription": "The IPC resource namespace to use for the containers in the task. The valid values are `host` , `task` , or `none` . If `host` is specified, then all containers within the tasks that specified the `host` IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If `task` is specified, all containers within the specified task share the same IPC resources. If `none` is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see [IPC settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#ipc-settings---ipc) in the *Docker run reference* .\n\nIf the `host` IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. For more information, see [Docker security](https://docs.aws.amazon.com/https://docs.docker.com/engine/security/security/) .\n\nIf you are setting namespaced kernel parameters using `systemControls` for the containers in the task, the following will apply to your IPC resource namespace. For more information, see [System Controls](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n- For tasks that use the `host` IPC mode, IPC namespace related `systemControls` are not supported.\n- For tasks that use the `task` IPC mode, IPC namespace related `systemControls` will apply to all containers within a task.\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", + "markdownDescription": "The IPC resource namespace to use for the containers in the task. The valid values are `host` , `task` , or `none` . If `host` is specified, then all containers within the tasks that specified the `host` IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. 
If `task` is specified, all containers within the specified task share the same IPC resources. If `none` is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance.\n\nIf the `host` IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace exposure.\n\nIf you are setting namespaced kernel parameters using `systemControls` for the containers in the task, the following will apply to your IPC resource namespace. For more information, see [System Controls](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n- For tasks that use the `host` IPC mode, IPC namespace related `systemControls` are not supported.\n- For tasks that use the `task` IPC mode, IPC namespace related `systemControls` will apply to all containers within a task.\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", "title": "IpcMode", "type": "string" }, @@ -84338,12 +84342,12 @@ "type": "string" }, "NetworkMode": { - "markdownDescription": "The Docker networking mode to use for the containers in the task. The valid values are `none` , `bridge` , `awsvpc` , and `host` . If no network mode is specified, the default is `bridge` .\n\nFor Amazon ECS tasks on Fargate, the `awsvpc` network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, `` or `awsvpc` can be used. If the network mode is set to `none` , you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The `host` and `awsvpc` network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the `bridge` mode.\n\nWith the `host` and `awsvpc` network modes, exposed container ports are mapped directly to the corresponding host port (for the `host` network mode) or the attached elastic network interface port (for the `awsvpc` network mode), so you cannot take advantage of dynamic host port mappings.\n\n> When using the `host` network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. \n\nIf the network mode is `awsvpc` , the task is allocated an elastic network interface, and you must specify a `NetworkConfiguration` value when you create a service or run a task with the task definition. For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIf the network mode is `host` , you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.\n\nFor more information, see [Network settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#network-settings) in the *Docker run reference* .", + "markdownDescription": "The Docker networking mode to use for the containers in the task. The valid values are `none` , `bridge` , `awsvpc` , and `host` .
If no network mode is specified, the default is `bridge` .\n\nFor Amazon ECS tasks on Fargate, the `awsvpc` network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, `` or `awsvpc` can be used. If the network mode is set to `none` , you cannot specify port mappings in your container definitions, and the task's containers do not have external connectivity. The `host` and `awsvpc` network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the `bridge` mode.\n\nWith the `host` and `awsvpc` network modes, exposed container ports are mapped directly to the corresponding host port (for the `host` network mode) or the attached elastic network interface port (for the `awsvpc` network mode), so you cannot take advantage of dynamic host port mappings.\n\n> When using the `host` network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. \n\nIf the network mode is `awsvpc` , the task is allocated an elastic network interface, and you must specify a [NetworkConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_NetworkConfiguration.html) value when you create a service or run a task with the task definition. For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIf the network mode is `host` , you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.", "title": "NetworkMode", "type": "string" }, "PidMode": { - "markdownDescription": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . On Fargate for Linux containers, the only valid value is `task` . For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container. For more information, see [PID settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#pid-settings---pid) in the *Docker run reference* .\n\nIf the `host` PID mode is used, there's a heightened risk of undesired process namespace exposure. For more information, see [Docker security](https://docs.aws.amazon.com/https://docs.docker.com/engine/security/security/) .\n\n> This parameter is not supported for Windows containers. > This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). This isn't supported for Windows containers on Fargate.", + "markdownDescription": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . On Fargate for Linux containers, the only valid value is `task` .
For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container.\n\nIf the `host` PID mode is used, there's a heightened risk of undesired process namespace exposure.\n\n> This parameter is not supported for Windows containers. > This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). This isn't supported for Windows containers on Fargate.", "title": "PidMode", "type": "string" }, @@ -84382,7 +84386,7 @@ "type": "array" }, "TaskRoleArn": { - "markdownDescription": "The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management role that grants containers in the task permission to call AWS APIs on your behalf. For informationabout the required IAM roles for Amazon ECS, see [IAM roles for Amazon ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/security-ecs-iam-role-overview.html) in the *Amazon Elastic Container Service Developer Guide* .", + "markdownDescription": "The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see [Amazon ECS Task Role](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIAM roles for tasks on Windows require that the `-EnableTaskIAMRole` option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code to use the feature. For more information, see [Windows IAM roles for tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows_task_IAM_roles.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n> String validation is done on the ECS side. If an invalid string value is given for `TaskRoleArn` , it may cause the CloudFormation job to hang.", "title": "TaskRoleArn", "type": "string" }, @@ -84440,12 +84444,12 @@ "items": { "type": "string" }, - "markdownDescription": "The command that's passed to the container. This parameter maps to `Cmd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `COMMAND` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For more information, see [https://docs.docker.com/engine/reference/builder/#cmd](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) . If there are multiple arguments, each argument is a separated string in the array.", + "markdownDescription": "The command that's passed to the container. This parameter maps to `Cmd` in the docker container create command and the `COMMAND` parameter to docker run.
If there are multiple arguments, each argument is a separate string in the array.", "title": "Command", "type": "array" }, "Cpu": { - "markdownDescription": "The number of `cpu` units reserved for the container. This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nThis field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level `cpu` value.\n\n> You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024. \n\nLinux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n\nOn Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see [CPU share constraint](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#cpu-share-constraint) in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n\n- *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n- *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n- *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as `0` , which Windows interprets as 1% of one CPU.", + "markdownDescription": "The number of `cpu` units reserved for the container.
This parameter maps to `CpuShares` in the docker container create command and the `--cpu-shares` option to docker run.\n\nThis field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level `cpu` value.\n\n> You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024. \n\nLinux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n\nOn Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n\n- *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n- *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n- *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as `0` , which Windows interprets as 1% of one CPU.", "title": "Cpu", "type": "number" }, @@ -84466,7 +84470,7 @@ "type": "array" }, "DisableNetworking": { - "markdownDescription": "When this parameter is true, networking is off within the container. This parameter maps to `NetworkDisabled` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) .\n\n> This parameter is not supported for Windows containers.", + "markdownDescription": "When this parameter is true, networking is off within the container.
This parameter maps to `NetworkDisabled` in the docker container create command.\n\n> This parameter is not supported for Windows containers.", "title": "DisableNetworking", "type": "boolean" }, @@ -84474,7 +84478,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of DNS search domains that are presented to the container. This parameter maps to `DnsSearch` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--dns-search` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", + "markdownDescription": "A list of DNS search domains that are presented to the container. This parameter maps to `DnsSearch` in the docker container create command and the `--dns-search` option to docker run.\n\n> This parameter is not supported for Windows containers.", "title": "DnsSearchDomains", "type": "array" }, @@ -84482,13 +84486,13 @@ "items": { "type": "string" }, - "markdownDescription": "A list of DNS servers that are presented to the container. This parameter maps to `Dns` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--dns` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", + "markdownDescription": "A list of DNS servers that are presented to the container. This parameter maps to `Dns` in the docker container create command and the `--dns` option to docker run.\n\n> This parameter is not supported for Windows containers.", "title": "DnsServers", "type": "array" }, "DockerLabels": { "additionalProperties": true, - "markdownDescription": "A key/value map of labels to add to the container. This parameter maps to `Labels` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--label` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "markdownDescription": "A key/value map of labels to add to the container. This parameter maps to `Labels` in the docker container create command and the `--label` option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.
To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -84501,7 +84505,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of strings to provide custom configuration for multiple security systems. For more information about valid values, see [Docker Run Security Configuration](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . This field isn't valid for containers in tasks using the Fargate launch type.\n\nFor Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.\n\nFor any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThis parameter maps to `SecurityOpt` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--security-opt` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> The Amazon ECS container agent running on a container instance must register with the `ECS_SELINUX_CAPABLE=true` or `ECS_APPARMOR_CAPABLE=true` environment variables before containers placed on that instance can use these security options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* . \n\nFor more information about valid values, see [Docker Run Security Configuration](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nValid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"", + "markdownDescription": "A list of strings to provide custom configuration for multiple security systems. This field isn't valid for containers in tasks using the Fargate launch type.\n\nFor Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.\n\nFor any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. 
For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThis parameter maps to `SecurityOpt` in the docker container create command and the `--security-opt` option to docker run.\n\n> The Amazon ECS container agent running on a container instance must register with the `ECS_SELINUX_CAPABLE=true` or `ECS_APPARMOR_CAPABLE=true` environment variables before containers placed on that instance can use these security options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* . \n\nValid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"", "title": "DockerSecurityOptions", "type": "array" }, @@ -84509,7 +84513,7 @@ "items": { "type": "string" }, - "markdownDescription": "> Early versions of the Amazon ECS container agent don't properly handle `entryPoint` parameters. If you have problems using `entryPoint` , update your container agent or enter your commands and arguments as `command` array items instead. \n\nThe entry point that's passed to the container. This parameter maps to `Entrypoint` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--entrypoint` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For more information, see [https://docs.docker.com/engine/reference/builder/#entrypoint](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#entrypoint) .", + "markdownDescription": "> Early versions of the Amazon ECS container agent don't properly handle `entryPoint` parameters. If you have problems using `entryPoint` , update your container agent or enter your commands and arguments as `command` array items instead. \n\nThe entry point that's passed to the container. This parameter maps to `Entrypoint` in the docker container create command and the `--entrypoint` option to docker run.", "title": "EntryPoint", "type": "array" }, @@ -84517,7 +84521,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.KeyValuePair" }, - "markdownDescription": "The environment variables to pass to a container. This parameter maps to `Env` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--env` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.", + "markdownDescription": "The environment variables to pass to a container.
This parameter maps to `Env` in the docker container create command and the `--env` option to docker run.\n\n> We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.", "title": "Environment", "type": "array" }, @@ -84525,7 +84529,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.EnvironmentFile" }, - "markdownDescription": "A list of files containing the environment variables to pass to a container. This parameter maps to the `--env-file` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nYou can specify up to ten environment files. The file must have a `.env` file extension. Each line in an environment file contains an environment variable in `VARIABLE=VALUE` format. Lines beginning with `#` are treated as comments and are ignored. For more information about the environment variable file syntax, see [Declare default environment variables in file](https://docs.aws.amazon.com/https://docs.docker.com/compose/env-file/) .\n\nIf there are environment variables specified using the `environment` parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see [Specifying Environment Variables](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html) in the *Amazon Elastic Container Service Developer Guide* .", + "markdownDescription": "A list of files containing the environment variables to pass to a container. This parameter maps to the `--env-file` option to docker run.\n\nYou can specify up to ten environment files. The file must have a `.env` file extension. Each line in an environment file contains an environment variable in `VARIABLE=VALUE` format. Lines beginning with `#` are treated as comments and are ignored.\n\nIf there are environment variables specified using the `environment` parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see [Specifying Environment Variables](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html) in the *Amazon Elastic Container Service Developer Guide* .", "title": "EnvironmentFiles", "type": "array" }, @@ -84538,7 +84542,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.HostEntry" }, - "markdownDescription": "A list of hostnames and IP address mappings to append to the `/etc/hosts` file on the container. This parameter maps to `ExtraHosts` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--add-host` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter isn't supported for Windows containers or tasks that use the `awsvpc` network mode.", + "markdownDescription": "A list of hostnames and IP address mappings to append to the `/etc/hosts` file on the container.
This parameter maps to `ExtraHosts` in the docker container create command and the `--add-host` option to docker run.\n\n> This parameter isn't supported for Windows containers or tasks that use the `awsvpc` network mode.", "title": "ExtraHosts", "type": "array" }, @@ -84549,21 +84553,21 @@ }, "HealthCheck": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.HealthCheck", - "markdownDescription": "The container health check command and associated configuration parameters for the container. This parameter maps to `HealthCheck` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `HEALTHCHECK` parameter of [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "The container health check command and associated configuration parameters for the container. This parameter maps to `HealthCheck` in the docker container create command and the `HEALTHCHECK` parameter of docker run.", "title": "HealthCheck" }, "Hostname": { - "markdownDescription": "The hostname to use for your container. This parameter maps to `Hostname` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--hostname` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> The `hostname` parameter is not supported if you're using the `awsvpc` network mode.", + "markdownDescription": "The hostname to use for your container. This parameter maps to `Hostname` in the docker container create command and the `--hostname` option to docker run.\n\n> The `hostname` parameter is not supported if you're using the `awsvpc` network mode.", "title": "Hostname", "type": "string" }, "Image": { - "markdownDescription": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either `*repository-url* / *image* : *tag*` or `*repository-url* / *image* @ *digest*` . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to `Image` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `IMAGE` parameter of [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n- When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.\n- Images in Amazon ECR repositories can be specified by either using the full `registry/repository:tag` or `registry/repository@digest` .
For example, `012345678910.dkr.ecr..amazonaws.com/:latest` or `012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE` .\n- Images in official repositories on Docker Hub use a single name (for example, `ubuntu` or `mongo` ).\n- Images in other repositories on Docker Hub are qualified with an organization name (for example, `amazon/amazon-ecs-agent` ).\n- Images in other online repositories are qualified further by a domain name (for example, `quay.io/assemblyline/ubuntu` ).", + "markdownDescription": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either `*repository-url* / *image* : *tag*` or `*repository-url* / *image* @ *digest*` . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to `Image` in the docker container create command and the `IMAGE` parameter of docker run.\n\n- When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.\n- Images in Amazon ECR repositories can be specified by either using the full `registry/repository:tag` or `registry/repository@digest` . For example, `012345678910.dkr.ecr..amazonaws.com/:latest` or `012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE` .\n- Images in official repositories on Docker Hub use a single name (for example, `ubuntu` or `mongo` ).\n- Images in other repositories on Docker Hub are qualified with an organization name (for example, `amazon/amazon-ecs-agent` ).\n- Images in other online repositories are qualified further by a domain name (for example, `quay.io/assemblyline/ubuntu` ).", "title": "Image", "type": "string" }, "Interactive": { - "markdownDescription": "When this parameter is `true` , you can deploy containerized applications that require `stdin` or a `tty` to be allocated. This parameter maps to `OpenStdin` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--interactive` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "When this parameter is `true` , you can deploy containerized applications that require `stdin` or a `tty` to be allocated. This parameter maps to `OpenStdin` in the docker container create command and the `--interactive` option to docker run.", "title": "Interactive", "type": "boolean" }, @@ -84571,7 +84575,7 @@ "items": { "type": "string" }, - "markdownDescription": "The `links` parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is `bridge` . The `name:internalName` construct is analogous to `name:alias` in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. For more information about linking Docker containers, go to [Legacy container links](https://docs.aws.amazon.com/https://docs.docker.com/network/links/) in the Docker documentation.
This parameter maps to `Links` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--link` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers. > Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.", + "markdownDescription": "The `links` parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is `bridge` . The `name:internalName` construct is analogous to `name:alias` in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to `Links` in the docker container create command and the `--link` option to docker run.\n\n> This parameter is not supported for Windows containers. > Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.", "title": "Links", "type": "array" }, @@ -84591,7 +84595,7 @@ "type": "number" }, "MemoryReservation": { - "markdownDescription": "The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the `memory` parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to `MemoryReservation` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--memory-reservation` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nIf a task-level memory value is not specified, you must specify a non-zero integer for one or both of `memory` or `memoryReservation` in a container definition. If you specify both, `memory` must be greater than `memoryReservation` . If you specify `memoryReservation` , then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of `memory` is used.\n\nFor example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a `memoryReservation` of 128 MiB, and a `memory` hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.\n\nThe Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container.
So, don't specify less than 6 MiB of memory for your containers.\n\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.", + "markdownDescription": "The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the `memory` parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to `MemoryReservation` in the docker container create command and the `--memory-reservation` option to docker run.\n\nIf a task-level memory value is not specified, you must specify a non-zero integer for one or both of `memory` or `memoryReservation` in a container definition. If you specify both, `memory` must be greater than `memoryReservation` . If you specify `memoryReservation` , then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of `memory` is used.\n\nFor example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a `memoryReservation` of 128 MiB, and a `memory` hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.\n\nThe Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.\n\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.", "title": "MemoryReservation", "type": "number" }, @@ -84599,12 +84603,12 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.MountPoint" }, - "markdownDescription": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--volume` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount point can't be across drives.", + "markdownDescription": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the docker container create command and the `--volume` option to docker run.\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount point can't be across drives.", "title": "MountPoints", "type": "array" }, "Name": { - "markdownDescription": "The name of a container. If you're linking multiple containers together in a task definition, the `name` of one container can be entered in the `links` of another container to connect the containers.
Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to `name` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--name` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "The name of a container. If you're linking multiple containers together in a task definition, the `name` of one container can be entered in the `links` of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to `name` in the docker container create command and the `--name` option to docker run.", "title": "Name", "type": "string" }, @@ -84617,17 +84621,17 @@ "type": "array" }, "Privileged": { - "markdownDescription": "When this parameter is true, the container is given elevated privileges on the host container instance (similar to the `root` user). This parameter maps to `Privileged` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--privileged` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", + "markdownDescription": "When this parameter is true, the container is given elevated privileges on the host container instance (similar to the `root` user). This parameter maps to `Privileged` in the docker container create command and the `--privileged` option to docker run.\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", "title": "Privileged", "type": "boolean" }, "PseudoTerminal": { - "markdownDescription": "When this parameter is `true` , a TTY is allocated. This parameter maps to `Tty` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--tty` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "When this parameter is `true` , a TTY is allocated. This parameter maps to `Tty` in the docker container create command and the `--tty` option to docker run.", "title": "PseudoTerminal", "type": "boolean" }, "ReadonlyRootFilesystem": { - "markdownDescription": "When this parameter is true, the container is given read-only access to its root file system.
This parameter maps to `ReadonlyRootfs` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--read-only` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", + "markdownDescription": "When this parameter is true, the container is given read-only access to its root file system. This parameter maps to `ReadonlyRootfs` in the docker container create command and the `--read-only` option to docker run.\n\n> This parameter is not supported for Windows containers.", "title": "ReadonlyRootFilesystem", "type": "boolean" }, @@ -84666,7 +84670,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.SystemControl" }, - "markdownDescription": "A list of namespaced kernel parameters to set in the container. This parameter maps to `Sysctls` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--sysctl` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For example, you can configure `net.ipv4.tcp_keepalive_time` setting to maintain longer lived connections.", + "markdownDescription": "A list of namespaced kernel parameters to set in the container. This parameter maps to `Sysctls` in the docker container create command and the `--sysctl` option to docker run. For example, you can configure `net.ipv4.tcp_keepalive_time` setting to maintain longer lived connections.", "title": "SystemControls", "type": "array" }, @@ -84679,7 +84683,7 @@ "type": "array" }, "User": { - "markdownDescription": "The user to use inside the container. This parameter maps to `User` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--user` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> When running tasks using the `host` network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gid`\n- `uid:group`\n\n> This parameter is not supported for Windows containers.", + "markdownDescription": "The user to use inside the container. This parameter maps to `User` in the docker container create command and the `--user` option to docker run.\n\n> When running tasks using the `host` network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats.
If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gid`\n- `uid:group`\n\n> This parameter is not supported for Windows containers.",
 "title": "User",
 "type": "string"
 },
@@ -84687,12 +84691,12 @@
 "items": {
 "$ref": "#/definitions/AWS::ECS::TaskDefinition.VolumeFrom"
 },
- "markdownDescription": "Data volumes to mount from another container. This parameter maps to `VolumesFrom` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--volumes-from` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .",
+ "markdownDescription": "Data volumes to mount from another container. This parameter maps to `VolumesFrom` in the docker container create command and the `--volumes-from` option to docker run.",
 "title": "VolumesFrom",
 "type": "array"
 },
 "WorkingDirectory": {
- "markdownDescription": "The working directory to run commands inside the container in. This parameter maps to `WorkingDir` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--workdir` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .",
+ "markdownDescription": "The working directory to run commands inside the container in. This parameter maps to `WorkingDir` in the docker container create command and the `--workdir` option to docker run.",
 "title": "WorkingDirectory",
 "type": "string"
 }
@@ -84752,13 +84756,13 @@
 "type": "boolean"
 },
 "Driver": {
- "markdownDescription": "The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use `docker plugin ls` to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. For more information, see [Docker plugin discovery](https://docs.aws.amazon.com/https://docs.docker.com/engine/extend/plugin_api/#plugin-discovery) . This parameter maps to `Driver` in the [Create a volume](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/VolumeCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `xxdriver` option to [docker volume create](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/volume_create/) .",
+ "markdownDescription": "The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use `docker plugin ls` to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. 
This parameter maps to `Driver` in the docker volume create command and the `--driver` option to docker volume create.",
 "title": "Driver",
 "type": "string"
 },
 "DriverOpts": {
 "additionalProperties": true,
- "markdownDescription": "A map of Docker driver-specific options passed through. This parameter maps to `DriverOpts` in the [Create a volume](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/VolumeCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `xxopt` option to [docker volume create](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/volume_create/) .",
+ "markdownDescription": "A map of Docker driver-specific options passed through. This parameter maps to `DriverOpts` in the docker volume create command and the `--opt` option to docker volume create.",
 "patternProperties": {
 "^[a-zA-Z0-9]+$": {
 "type": "string"
 }
@@ -84769,7 +84773,7 @@
 },
 "Labels": {
 "additionalProperties": true,
- "markdownDescription": "Custom metadata to add to your Docker volume. This parameter maps to `Labels` in the [Create a volume](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/VolumeCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `xxlabel` option to [docker volume create](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/volume_create/) .",
+ "markdownDescription": "Custom metadata to add to your Docker volume. This parameter maps to `Labels` in the docker volume create command and the `--label` option to docker volume create.",
 "patternProperties": {
 "^[a-zA-Z0-9]+$": {
 "type": "string"
 }
@@ -84851,12 +84855,12 @@
 "additionalProperties": false,
 "properties": {
 "CredentialsParameter": {
- "markdownDescription": "",
+ "markdownDescription": "The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of an AWS Secrets Manager secret or SSM Parameter Store parameter. The ARN refers to the stored credentials.",
 "title": "CredentialsParameter",
 "type": "string"
 },
 "Domain": {
- "markdownDescription": "",
+ "markdownDescription": "A fully qualified domain name hosted by an [AWS Directory Service](https://docs.aws.amazon.com/directoryservice/latest/admin-guide/directory_microsoft_ad.html) Managed Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2.",
 "title": "Domain",
 "type": "string"
 }
@@ -84921,7 +84925,7 @@
 "items": {
 "type": "string"
 },
- "markdownDescription": "A string array representing the command that the container runs to determine if it is healthy. The string array must start with `CMD` to run the command arguments directly, or `CMD-SHELL` to run the command with the container's default shell.\n\nWhen you use the AWS Management Console JSON panel, the AWS Command Line Interface , or the APIs, enclose the list of commands in double quotes and brackets.\n\n`[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`\n\nYou don't include the double quotes and brackets when you use the AWS Management Console.\n\n`CMD-SHELL, curl -f http://localhost/ || exit 1`\n\nAn exit code of 0 indicates success, and non-zero exit code indicates failure. 
For more information, see `HealthCheck` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) .",
+ "markdownDescription": "A string array representing the command that the container runs to determine if it is healthy. The string array must start with `CMD` to run the command arguments directly, or `CMD-SHELL` to run the command with the container's default shell.\n\nWhen you use the AWS Management Console JSON panel, the AWS Command Line Interface , or the APIs, enclose the list of commands in double quotes and brackets.\n\n`[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`\n\nYou don't include the double quotes and brackets when you use the AWS Management Console.\n\n`CMD-SHELL, curl -f http://localhost/ || exit 1`\n\nAn exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see `HealthCheck` in the docker container create command.",
 "title": "Command",
 "type": "array"
 },
@@ -84998,7 +85002,7 @@
 "items": {
 "type": "string"
 },
- "markdownDescription": "The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to `CapAdd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--cap-add` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> Tasks launched on AWS Fargate only support adding the `SYS_PTRACE` kernel capability. \n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`",
+ "markdownDescription": "The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to `CapAdd` in the docker container create command and the `--cap-add` option to docker run.\n\n> Tasks launched on AWS Fargate only support adding the `SYS_PTRACE` kernel capability. 
\n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`",
 "title": "Add",
 "type": "array"
 },
@@ -85006,7 +85010,7 @@
 "items": {
 "type": "string"
 },
- "markdownDescription": "The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to `CapDrop` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--cap-drop` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`",
+ "markdownDescription": "The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to `CapDrop` in the docker container create command and the `--cap-drop` option to docker run.\n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`",
 "title": "Drop",
 "type": "array"
 }
@@ -85041,27 +85045,27 @@
 "items": {
 "$ref": "#/definitions/AWS::ECS::TaskDefinition.Device"
 },
- "markdownDescription": "Any host devices to expose to the container. 
This parameter maps to `Devices` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--device` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you're using tasks that use the Fargate launch type, the `devices` parameter isn't supported.",
+ "markdownDescription": "Any host devices to expose to the container. This parameter maps to `Devices` in the docker container create command and the `--device` option to docker run.\n\n> If you're using tasks that use the Fargate launch type, the `devices` parameter isn't supported.",
 "title": "Devices",
 "type": "array"
 },
 "InitProcessEnabled": {
- "markdownDescription": "Run an `init` process inside the container that forwards signals and reaps processes. This parameter maps to the `--init` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`",
+ "markdownDescription": "Run an `init` process inside the container that forwards signals and reaps processes. This parameter maps to the `--init` option to docker run. This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`",
 "title": "InitProcessEnabled",
 "type": "boolean"
 },
 "MaxSwap": {
- "markdownDescription": "The total amount of swap memory (in MiB) a container can use. This parameter will be translated to the `--memory-swap` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) where the value would be the sum of the container memory plus the `maxSwap` value.\n\nIf a `maxSwap` value of `0` is specified, the container will not use swap. Accepted values are `0` or any positive integer. If the `maxSwap` parameter is omitted, the container will use the swap configuration for the container instance it is running on. A `maxSwap` value must be set for the `swappiness` parameter to be used.\n\n> If you're using tasks that use the Fargate launch type, the `maxSwap` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.",
+ "markdownDescription": "The total amount of swap memory (in MiB) a container can use. This parameter will be translated to the `--memory-swap` option to docker run where the value would be the sum of the container memory plus the `maxSwap` value.\n\nIf a `maxSwap` value of `0` is specified, the container will not use swap. Accepted values are `0` or any positive integer. If the `maxSwap` parameter is omitted, the container will use the swap configuration for the container instance it is running on. 
A `maxSwap` value must be set for the `swappiness` parameter to be used.\n\n> If you're using tasks that use the Fargate launch type, the `maxSwap` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023, the `swappiness` parameter isn't supported.",
 "title": "MaxSwap",
 "type": "number"
 },
 "SharedMemorySize": {
- "markdownDescription": "The value for the size (in MiB) of the `/dev/shm` volume. This parameter maps to the `--shm-size` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you are using tasks that use the Fargate launch type, the `sharedMemorySize` parameter is not supported.",
+ "markdownDescription": "The value for the size (in MiB) of the `/dev/shm` volume. This parameter maps to the `--shm-size` option to docker run.\n\n> If you are using tasks that use the Fargate launch type, the `sharedMemorySize` parameter is not supported.",
 "title": "SharedMemorySize",
 "type": "number"
 },
 "Swappiness": {
- "markdownDescription": "This allows you to tune a container's memory swappiness behavior. A `swappiness` value of `0` will cause swapping to not happen unless absolutely necessary. A `swappiness` value of `100` will cause pages to be swapped very aggressively. Accepted values are whole numbers between `0` and `100` . If the `swappiness` parameter is not specified, a default value of `60` is used. If a value is not specified for `maxSwap` then this parameter is ignored. This parameter maps to the `--memory-swappiness` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you're using tasks that use the Fargate launch type, the `swappiness` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.",
+ "markdownDescription": "This allows you to tune a container's memory swappiness behavior. A `swappiness` value of `0` will cause swapping to not happen unless absolutely necessary. A `swappiness` value of `100` will cause pages to be swapped very aggressively. Accepted values are whole numbers between `0` and `100` . If the `swappiness` parameter is not specified, a default value of `60` is used. If a value is not specified for `maxSwap` , then this parameter is ignored. This parameter maps to the `--memory-swappiness` option to docker run.\n\n> If you're using tasks that use the Fargate launch type, the `swappiness` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023, the `swappiness` parameter isn't supported.",
 "title": "Swappiness",
 "type": "number"
 },
@@ -85069,7 +85073,7 @@
 "items": {
 "$ref": "#/definitions/AWS::ECS::TaskDefinition.Tmpfs"
 },
- "markdownDescription": "The container path, mount options, and size (in MiB) of the tmpfs mount. 
This parameter maps to the `--tmpfs` option to docker run.\n\n> If you're using tasks that use the Fargate launch type, the `tmpfs` parameter isn't supported.", "title": "Tmpfs", "type": "array" } @@ -103009,12 +103013,12 @@ "type": "array" }, "TotalCpuLimit": { - "markdownDescription": "The amount of CPU units on a fleet instance to allocate for the container group. All containers in the group share these resources. This property is an integer value in CPU units (1 vCPU is equal to 1024 CPU units).\n\nYou can set additional limits for each `ContainerDefinition` in the group. If individual containers have limits, this value must be equal to or greater than the sum of all container-specific CPU limits in the group.\n\nFor more details on memory allocation, see the [Container fleet design guide](https://docs.aws.amazon.com/gamelift/latest/developerguide/containers-design-fleet) .", + "markdownDescription": "The amount of CPU units on a fleet instance to allocate for the container group. All containers in the group share these resources. This property is an integer value in CPU units (1 vCPU is equal to 1024 CPU units).\n\nYou can set additional limits for each `ContainerDefinition` in the group. If individual containers have limits, this value must be equal to or greater than the sum of all container-specific CPU limits in the group.", "title": "TotalCpuLimit", "type": "number" }, "TotalMemoryLimit": { - "markdownDescription": "The amount of memory (in MiB) on a fleet instance to allocate for the container group. All containers in the group share these resources.\n\nYou can set additional limits for each `ContainerDefinition` in the group. If individual containers have limits, this value must meet the following requirements:\n\n- Equal to or greater than the sum of all container-specific soft memory limits in the group.\n- Equal to or greater than any container-specific hard limits in the group.\n\nFor more details on memory allocation, see the [Container fleet design guide](https://docs.aws.amazon.com/gamelift/latest/developerguide/containers-design-fleet) .", + "markdownDescription": "The amount of memory (in MiB) on a fleet instance to allocate for the container group. All containers in the group share these resources.\n\nYou can set additional limits for each `ContainerDefinition` in the group. If individual containers have limits, this value must meet the following requirements:\n\n- Equal to or greater than the sum of all container-specific soft memory limits in the group.\n- Equal to or greater than any container-specific hard limits in the group.", "title": "TotalMemoryLimit", "type": "number" } @@ -103313,7 +103317,7 @@ "title": "AnywhereConfiguration" }, "ApplyCapacity": { - "markdownDescription": "Current resource capacity settings for managed EC2 fleets and container fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", + "markdownDescription": "Current resource capacity settings for managed EC2 fleets. 
For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", "title": "ApplyCapacity", "type": "string" }, @@ -103334,7 +103338,7 @@ }, "ContainerGroupsConfiguration": { "$ref": "#/definitions/AWS::GameLift::Fleet.ContainerGroupsConfiguration", - "markdownDescription": "*This data type is used with the Amazon GameLift containers feature, which is currently in public preview.*\n\nConfiguration details for a set of container groups, for use when creating a fleet with compute type `CONTAINER` .\n\n*Used with:* `CreateFleet`", + "markdownDescription": "*This data type is currently not available. It is under improvement as we respond to customer feedback from the Containers public preview.*\n\nConfiguration details for a set of container groups, for use when creating a fleet with compute type `CONTAINER` .\n\n*Used with:* `CreateFleet`", "title": "ContainerGroupsConfiguration" }, "Description": { @@ -103366,12 +103370,12 @@ "type": "string" }, "InstanceRoleARN": { - "markdownDescription": "A unique identifier for an IAM role with access permissions to other AWS services. Any application that runs on an instance in the fleet--including install scripts, server processes, and other processes--can use these permissions to interact with AWS resources that you own or have access to. For more information about using the role with your game server builds, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is \"EC2\" or \"Container\".", + "markdownDescription": "A unique identifier for an IAM role with access permissions to other AWS services. Any application that runs on an instance in the fleet--including install scripts, server processes, and other processes--can use these permissions to interact with AWS resources that you own or have access to. For more information about using the role with your game server builds, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is \"EC2\".", "title": "InstanceRoleARN", "type": "string" }, "InstanceRoleCredentialsProvider": { - "markdownDescription": "Indicates that fleet instances maintain a shared credentials file for the IAM role defined in `InstanceRoleArn` . Shared credentials allow applications that are deployed with the game server executable to communicate with other AWS resources. This property is used only when the game server is integrated with the server SDK version 5.x. For more information about using shared credentials, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is \"EC2\" or \"Container\".", + "markdownDescription": "Indicates that fleet instances maintain a shared credentials file for the IAM role defined in `InstanceRoleArn` . 
Shared credentials allow applications that are deployed with the game server executable to communicate with other AWS resources. This property is used only when the game server is integrated with the server SDK version 5.x. For more information about using shared credentials, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is \"EC2\".", "title": "InstanceRoleCredentialsProvider", "type": "string" }, @@ -103631,7 +103635,7 @@ }, "LocationCapacity": { "$ref": "#/definitions/AWS::GameLift::Fleet.LocationCapacity", - "markdownDescription": "Current resource capacity settings for managed EC2 fleets and container fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", + "markdownDescription": "Current resource capacity settings for managed EC2 fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", "title": "LocationCapacity" } }, @@ -103665,7 +103669,7 @@ "type": "number" }, "MaxConcurrentGameSessionActivations": { - "markdownDescription": "The number of game sessions in status `ACTIVATING` to allow on an instance or container. This setting limits the instance resources that can be used for new game activations at any one time.", + "markdownDescription": "The number of game sessions in status `ACTIVATING` to allow on an instance. This setting limits the instance resources that can be used for new game activations at any one time.", "title": "MaxConcurrentGameSessionActivations", "type": "number" }, @@ -103754,7 +103758,7 @@ "additionalProperties": false, "properties": { "ConcurrentExecutions": { - "markdownDescription": "The number of server processes using this configuration that run concurrently on each instance or container..", + "markdownDescription": "The number of server processes using this configuration that run concurrently on each instance.", "title": "ConcurrentExecutions", "type": "number" }, @@ -105394,7 +105398,7 @@ "type": "object" }, "ConnectionType": { - "markdownDescription": "The type of the connection. Currently, these types are supported:\n\n- `JDBC` - Designates a connection to a database through Java Database Connectivity (JDBC).\n\n`JDBC` Connections use the following ConnectionParameters.\n\n- Required: All of ( `HOST` , `PORT` , `JDBC_ENGINE` ) or `JDBC_CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- Optional: `JDBC_ENFORCE_SSL` , `CUSTOM_JDBC_CERT` , `CUSTOM_JDBC_CERT_STRING` , `SKIP_CUSTOM_JDBC_CERT_VALIDATION` . 
These parameters are used to configure SSL with JDBC.\n- `KAFKA` - Designates a connection to an Apache Kafka streaming platform.\n\n`KAFKA` Connections use the following ConnectionParameters.\n\n- Required: `KAFKA_BOOTSTRAP_SERVERS` .\n- Optional: `KAFKA_SSL_ENABLED` , `KAFKA_CUSTOM_CERT` , `KAFKA_SKIP_CUSTOM_CERT_VALIDATION` . These parameters are used to configure SSL with `KAFKA` .\n- Optional: `KAFKA_CLIENT_KEYSTORE` , `KAFKA_CLIENT_KEYSTORE_PASSWORD` , `KAFKA_CLIENT_KEY_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD` . These parameters are used to configure TLS client configuration with SSL in `KAFKA` .\n- Optional: `KAFKA_SASL_MECHANISM` . Can be specified as `SCRAM-SHA-512` , `GSSAPI` , or `AWS_MSK_IAM` .\n- Optional: `KAFKA_SASL_SCRAM_USERNAME` , `KAFKA_SASL_SCRAM_PASSWORD` , `ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with `KAFKA` .\n- Optional: `KAFKA_SASL_GSSAPI_KEYTAB` , `KAFKA_SASL_GSSAPI_KRB5_CONF` , `KAFKA_SASL_GSSAPI_SERVICE` , `KAFKA_SASL_GSSAPI_PRINCIPAL` . These parameters are used to configure SASL/GSSAPI authentication with `KAFKA` .\n- `MONGODB` - Designates a connection to a MongoDB document database.\n\n`MONGODB` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `SALESFORCE` - Designates a connection to Salesforce using OAuth authencation.\n\n- Requires the `AuthenticationConfiguration` member to be configured.\n- `NETWORK` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).\n\n`NETWORK` Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.\n- `MARKETPLACE` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue .\n\n`MARKETPLACE` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTOR_TYPE` , `CONNECTOR_URL` , `CONNECTOR_CLASS_NAME` , `CONNECTION_URL` .\n- Required for `JDBC` `CONNECTOR_TYPE` connections: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `CUSTOM` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue .\n\n`SFTP` is not supported.\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult [AWS Glue connection properties](https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html) .\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult [Using connectors and connections](https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html) .", + "markdownDescription": "The type of the connection. Currently, these types are supported:\n\n- `JDBC` - Designates a connection to a database through Java Database Connectivity (JDBC).\n\n`JDBC` Connections use the following ConnectionParameters.\n\n- Required: All of ( `HOST` , `PORT` , `JDBC_ENGINE` ) or `JDBC_CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- Optional: `JDBC_ENFORCE_SSL` , `CUSTOM_JDBC_CERT` , `CUSTOM_JDBC_CERT_STRING` , `SKIP_CUSTOM_JDBC_CERT_VALIDATION` . 
These parameters are used to configure SSL with JDBC.\n- `KAFKA` - Designates a connection to an Apache Kafka streaming platform.\n\n`KAFKA` Connections use the following ConnectionParameters.\n\n- Required: `KAFKA_BOOTSTRAP_SERVERS` .\n- Optional: `KAFKA_SSL_ENABLED` , `KAFKA_CUSTOM_CERT` , `KAFKA_SKIP_CUSTOM_CERT_VALIDATION` . These parameters are used to configure SSL with `KAFKA` .\n- Optional: `KAFKA_CLIENT_KEYSTORE` , `KAFKA_CLIENT_KEYSTORE_PASSWORD` , `KAFKA_CLIENT_KEY_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD` . These parameters are used to configure TLS client configuration with SSL in `KAFKA` .\n- Optional: `KAFKA_SASL_MECHANISM` . Can be specified as `SCRAM-SHA-512` , `GSSAPI` , or `AWS_MSK_IAM` .\n- Optional: `KAFKA_SASL_SCRAM_USERNAME` , `KAFKA_SASL_SCRAM_PASSWORD` , `ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with `KAFKA` .\n- Optional: `KAFKA_SASL_GSSAPI_KEYTAB` , `KAFKA_SASL_GSSAPI_KRB5_CONF` , `KAFKA_SASL_GSSAPI_SERVICE` , `KAFKA_SASL_GSSAPI_PRINCIPAL` . These parameters are used to configure SASL/GSSAPI authentication with `KAFKA` .\n- `MONGODB` - Designates a connection to a MongoDB document database.\n\n`MONGODB` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `SALESFORCE` - Designates a connection to Salesforce using OAuth authentication.\n\n- Requires the `AuthenticationConfiguration` member to be configured.\n- `VIEW_VALIDATION_REDSHIFT` - Designates a connection used for view validation by Amazon Redshift.\n- `VIEW_VALIDATION_ATHENA` - Designates a connection used for view validation by Amazon Athena.\n- `NETWORK` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).\n\n`NETWORK` Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.\n- `MARKETPLACE` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue .\n\n`MARKETPLACE` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTOR_TYPE` , `CONNECTOR_URL` , `CONNECTOR_CLASS_NAME` , `CONNECTION_URL` .\n- Required for `JDBC` `CONNECTOR_TYPE` connections: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `CUSTOM` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue .\n\n`SFTP` is not supported.\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult [AWS Glue connection properties](https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html) .\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult [Using connectors and connections](https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html) .",
 "title": "ConnectionType",
 "type": "string"
 },
diff --git a/schema_source/cloudformation-docs.json b/schema_source/cloudformation-docs.json
index 821ec121e..fa7088785 100644
--- a/schema_source/cloudformation-docs.json
+++ b/schema_source/cloudformation-docs.json
@@ -1194,6 +1194,10 @@
 "TimeoutInMillis": "Custom timeout between 50 and 29,000 milliseconds for WebSocket APIs and between 50 and 30,000 milliseconds for HTTP APIs. 
The default timeout is 29 seconds for WebSocket APIs and 30 seconds for HTTP APIs.",
 "TlsConfig": "The TLS configuration for a private integration. If you specify a TLS configuration, private integration traffic uses the HTTPS protocol. Supported only for HTTP APIs."
 },
+ "AWS::ApiGatewayV2::Integration ResponseParameter": {
+ "Destination": "Specifies the location of the response to modify, and how to modify it. To learn more, see [Transforming API requests and responses](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) .",
+ "Source": "Specifies the data to update the parameter with. To learn more, see [Transforming API requests and responses](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) ."
+ },
 "AWS::ApiGatewayV2::Integration TlsConfig": {
 "ServerNameToVerify": "If you specify a server name, API Gateway uses it to verify the hostname on the integration's certificate. The server name is also included in the TLS handshake to support Server Name Indication (SNI) or virtual hosting."
 },
@@ -4880,28 +4884,153 @@
 "ServerSideEncryptionConfiguration": "Contains details about the configuration of the server-side encryption.",
 "VectorIngestionConfiguration": "Contains details about how to ingest the documents in the data source."
 },
+ "AWS::Bedrock::DataSource BedrockFoundationModelConfiguration": {
+ "ModelArn": "The model's ARN.",
+ "ParsingPrompt": "Instructions for interpreting the contents of a document."
+ },
 "AWS::Bedrock::DataSource ChunkingConfiguration": {
 "ChunkingStrategy": "Knowledge base can split your source data into chunks. A *chunk* refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried. You have the following options for chunking your data. If you opt for `NONE` , then you may want to pre-process your files by splitting them up such that each file corresponds to a chunk.\n\n- `FIXED_SIZE` \u2013 Amazon Bedrock splits your source data into chunks of the approximate size that you set in the `fixedSizeChunkingConfiguration` .\n- `HIERARCHICAL` \u2013 Split documents into layers of chunks where the first layer contains large chunks, and the second layer contains smaller chunks derived from the first layer.\n- `SEMANTIC` \u2013 Split documents into chunks based on groups of similar content derived with natural language processing.\n- `NONE` \u2013 Amazon Bedrock treats each file as one chunk. If you choose this option, you may want to pre-process your documents by splitting them into separate files.",
- "FixedSizeChunkingConfiguration": "Configurations for when you choose fixed-size chunking. If you set the `chunkingStrategy` as `NONE` , exclude this field."
+ "FixedSizeChunkingConfiguration": "Configurations for when you choose fixed-size chunking. If you set the `chunkingStrategy` as `NONE` , exclude this field.",
+ "HierarchicalChunkingConfiguration": "Settings for hierarchical document chunking for a data source. Hierarchical chunking splits documents into layers of chunks where the first layer contains large chunks, and the second layer contains smaller chunks derived from the first layer.",
+ "SemanticChunkingConfiguration": "Settings for semantic document chunking for a data source. Semantic chunking splits a document into smaller documents based on groups of similar content derived from the text with natural language processing." 
+ }, + "AWS::Bedrock::DataSource ConfluenceCrawlerConfiguration": { + "FilterConfiguration": "The configuration of filtering the Confluence content. For example, configuring regular expression patterns to include or exclude certain content." + }, + "AWS::Bedrock::DataSource ConfluenceDataSourceConfiguration": { + "CrawlerConfiguration": "The configuration of the Confluence content. For example, configuring specific types of Confluence content.", + "SourceConfiguration": "The endpoint information to connect to your Confluence data source." + }, + "AWS::Bedrock::DataSource ConfluenceSourceConfiguration": { + "AuthType": "The supported authentication type to authenticate and connect to your Confluence instance.", + "CredentialsSecretArn": "The Amazon Resource Name of an AWS Secrets Manager secret that stores your authentication credentials for your Confluence instance URL. For more information on the key-value pairs that must be included in your secret, depending on your authentication type, see [Confluence connection configuration](https://docs.aws.amazon.com/bedrock/latest/userguide/confluence-data-source-connector.html#configuration-confluence-connector) .", + "HostType": "The supported host type, whether online/cloud or server/on-premises.", + "HostUrl": "The Confluence host URL or instance URL." + }, + "AWS::Bedrock::DataSource CrawlFilterConfiguration": { + "PatternObjectFilter": "The configuration of filtering certain objects or content types of the data source.", + "Type": "The type of filtering that you want to apply to certain objects or content of the data source. For example, the `PATTERN` type is regular expression patterns you can apply to filter your content." + }, + "AWS::Bedrock::DataSource CustomTransformationConfiguration": { + "IntermediateStorage": "An S3 bucket path for input and output objects.", + "Transformations": "A Lambda function that processes documents." }, "AWS::Bedrock::DataSource DataSourceConfiguration": { + "ConfluenceConfiguration": "The configuration information to connect to Confluence as your data source.\n\n> Confluence data source connector is in preview release and is subject to change.", "S3Configuration": "The configuration information to connect to Amazon S3 as your data source.", - "Type": "The type of data source." + "SalesforceConfiguration": "The configuration information to connect to Salesforce as your data source.\n\n> Salesforce data source connector is in preview release and is subject to change.", + "SharePointConfiguration": "The configuration information to connect to SharePoint as your data source.\n\n> SharePoint data source connector is in preview release and is subject to change.", + "Type": "The type of data source.", + "WebConfiguration": "The configuration of web URLs to crawl for your data source. You should be authorized to crawl the URLs.\n\n> Crawling web URLs as your data source is in preview release and is subject to change." }, "AWS::Bedrock::DataSource FixedSizeChunkingConfiguration": { "MaxTokens": "The maximum number of tokens to include in a chunk.", "OverlapPercentage": "The percentage of overlap between adjacent chunks of a data source." }, + "AWS::Bedrock::DataSource HierarchicalChunkingConfiguration": { + "LevelConfigurations": "Token settings for each layer.", + "OverlapTokens": "The number of tokens to repeat across chunks in the same layer." + }, + "AWS::Bedrock::DataSource HierarchicalChunkingLevelConfiguration": { + "MaxTokens": "The maximum number of tokens that a chunk can contain in this layer." 
+ }, + "AWS::Bedrock::DataSource IntermediateStorage": { + "S3Location": "An S3 bucket path." + }, + "AWS::Bedrock::DataSource ParsingConfiguration": { + "BedrockFoundationModelConfiguration": "Settings for a foundation model used to parse documents for a data source.", + "ParsingStrategy": "The parsing strategy for the data source." + }, + "AWS::Bedrock::DataSource ParsingPrompt": { + "ParsingPromptText": "Instructions for interpreting the contents of a document." + }, + "AWS::Bedrock::DataSource PatternObjectFilter": { + "ExclusionFilters": "A list of one or more exclusion regular expression patterns to exclude certain object types that adhere to the pattern. If you specify an inclusion and exclusion filter/pattern and both match a document, the exclusion filter takes precedence and the document isn\u2019t crawled.", + "InclusionFilters": "A list of one or more inclusion regular expression patterns to include certain object types that adhere to the pattern. If you specify an inclusion and exclusion filter/pattern and both match a document, the exclusion filter takes precedence and the document isn\u2019t crawled.", + "ObjectType": "The supported object type or content type of the data source." + }, + "AWS::Bedrock::DataSource PatternObjectFilterConfiguration": { + "Filters": "The configuration of specific filters applied to your data source content. You can filter out or include certain content." + }, "AWS::Bedrock::DataSource S3DataSourceConfiguration": { "BucketArn": "The Amazon Resource Name (ARN) of the S3 bucket that contains your data.", "BucketOwnerAccountId": "The account ID for the owner of the S3 bucket.", "InclusionPrefixes": "A list of S3 prefixes to include certain files or content. For more information, see [Organizing objects using prefixes](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html) ." }, + "AWS::Bedrock::DataSource S3Location": { + "URI": "The location's URI. For example, `s3://my-bucket/chunk-processor/` ." + }, + "AWS::Bedrock::DataSource SalesforceCrawlerConfiguration": { + "FilterConfiguration": "The configuration of filtering the Salesforce content. For example, configuring regular expression patterns to include or exclude certain content." + }, + "AWS::Bedrock::DataSource SalesforceDataSourceConfiguration": { + "CrawlerConfiguration": "The configuration of the Salesforce content. For example, configuring specific types of Salesforce content.", + "SourceConfiguration": "The endpoint information to connect to your Salesforce data source." + }, + "AWS::Bedrock::DataSource SalesforceSourceConfiguration": { + "AuthType": "The supported authentication type to authenticate and connect to your Salesforce instance.", + "CredentialsSecretArn": "The Amazon Resource Name of an AWS Secrets Manager secret that stores your authentication credentials for your Salesforce instance URL. For more information on the key-value pairs that must be included in your secret, depending on your authentication type, see [Salesforce connection configuration](https://docs.aws.amazon.com/bedrock/latest/userguide/salesforce-data-source-connector.html#configuration-salesforce-connector) .", + "HostUrl": "The Salesforce host URL or instance URL." + }, + "AWS::Bedrock::DataSource SeedUrl": { + "Url": "A seed or starting point URL." 
+ }, + "AWS::Bedrock::DataSource SemanticChunkingConfiguration": { + "BreakpointPercentileThreshold": "The dissimilarity threshold for splitting chunks.", + "BufferSize": "The buffer size.", + "MaxTokens": "The maximum number of tokens that a chunk can contain." + }, "AWS::Bedrock::DataSource ServerSideEncryptionConfiguration": { "KmsKeyArn": "The Amazon Resource Name (ARN) of the AWS KMS key used to encrypt the resource." }, + "AWS::Bedrock::DataSource SharePointCrawlerConfiguration": { + "FilterConfiguration": "The configuration of filtering the SharePoint content. For example, configuring regular expression patterns to include or exclude certain content." + }, + "AWS::Bedrock::DataSource SharePointDataSourceConfiguration": { + "CrawlerConfiguration": "The configuration of the SharePoint content. For example, configuring specific types of SharePoint content.", + "SourceConfiguration": "The endpoint information to connect to your SharePoint data source." + }, + "AWS::Bedrock::DataSource SharePointSourceConfiguration": { + "AuthType": "The supported authentication type to authenticate and connect to your SharePoint site/sites.", + "CredentialsSecretArn": "The Amazon Resource Name of an AWS Secrets Manager secret that stores your authentication credentials for your SharePoint site/sites. For more information on the key-value pairs that must be included in your secret, depending on your authentication type, see [SharePoint connection configuration](https://docs.aws.amazon.com/bedrock/latest/userguide/sharepoint-data-source-connector.html#configuration-sharepoint-connector) .", + "Domain": "The domain of your SharePoint instance or site URL/URLs.", + "HostType": "The supported host type, whether online/cloud or server/on-premises.", + "SiteUrls": "A list of one or more SharePoint site URLs.", + "TenantId": "The identifier of your Microsoft 365 tenant." + }, + "AWS::Bedrock::DataSource Transformation": { + "StepToApply": "When the service applies the transformation.", + "TransformationFunction": "A Lambda function that processes documents." + }, + "AWS::Bedrock::DataSource TransformationFunction": { + "TransformationLambdaConfiguration": "The Lambda function." + }, + "AWS::Bedrock::DataSource TransformationLambdaConfiguration": { + "LambdaArn": "The function's ARN identifier." + }, + "AWS::Bedrock::DataSource UrlConfiguration": { + "SeedUrls": "One or more seed or starting point URLs." + }, "AWS::Bedrock::DataSource VectorIngestionConfiguration": { - "ChunkingConfiguration": "Details about how to chunk the documents in the data source. A *chunk* refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried." + "ChunkingConfiguration": "Details about how to chunk the documents in the data source. A *chunk* refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried.", + "CustomTransformationConfiguration": "A custom document transformer for parsed data source documents.", + "ParsingConfiguration": "A custom parser for data source documents." + }, + "AWS::Bedrock::DataSource WebCrawlerConfiguration": { + "CrawlerLimits": "The configuration of crawl limits for the web URLs.", + "ExclusionFilters": "A list of one or more exclusion regular expression patterns to exclude certain URLs. 
If you specify an inclusion and exclusion filter/pattern and both match a URL, the exclusion filter takes precedence and the web content of the URL isn\u2019t crawled.", + "InclusionFilters": "A list of one or more inclusion regular expression patterns to include certain URLs. If you specify an inclusion and exclusion filter/pattern and both match a URL, the exclusion filter takes precedence and the web content of the URL isn\u2019t crawled.", + "Scope": "The scope of what is crawled for your URLs.\n\nYou can choose to crawl only web pages that belong to the same host or primary domain. For example, only web pages that contain the seed URL \"https://docs.aws.amazon.com/bedrock/latest/userguide/\" and no other domains. You can choose to include sub domains in addition to the host or primary domain. For example, web pages that contain \"aws.amazon.com\" can also include sub domain \"docs.aws.amazon.com\"." + }, + "AWS::Bedrock::DataSource WebCrawlerLimits": { + "RateLimit": "The max rate at which pages are crawled, up to 300 per minute per host." + }, + "AWS::Bedrock::DataSource WebDataSourceConfiguration": { + "CrawlerConfiguration": "The Web Crawler configuration details for the web data source.", + "SourceConfiguration": "The source configuration details for the web data source." + }, + "AWS::Bedrock::DataSource WebSourceConfiguration": { + "UrlConfiguration": "The configuration of the URL/URLs." }, "AWS::Bedrock::Flow": { "CustomerEncryptionKeyArn": "The Amazon Resource Name (ARN) of the KMS key that the flow is encrypted with.", @@ -5158,8 +5287,8 @@ "FiltersConfig": "Contains the type of the content filter and how strongly it should apply to prompts and model responses." }, "AWS::Bedrock::Guardrail ContextualGroundingFilterConfig": { - "Threshold": "", - "Type": "" + "Threshold": "The threshold details for the guardrails contextual grounding filter.", + "Type": "The filter details for the guardrails contextual grounding filter." }, "AWS::Bedrock::Guardrail ContextualGroundingPolicyConfig": { "FiltersConfig": "" @@ -7128,7 +7257,7 @@ "Name": "The name of either the enterprise or organization that will send webhook events to CodeBuild , depending on if the webhook is a global or organization webhook respectively." }, "AWS::CodeBuild::Project Source": { - "Auth": "Information about the authorization settings for AWS CodeBuild to access the source code to be built.\n\nThis information is for the AWS CodeBuild console's use only. Your code should not get or set `Auth` directly.", + "Auth": "Information about the authorization settings for AWS CodeBuild to access the source code to be built.", "BuildSpec": "The build specification for the project. If this value is not provided, then the source code must contain a buildspec file named `buildspec.yml` at the root level. If this value is provided, it can be either a single string containing the entire build specification, or the path to an alternate buildspec file relative to the value of the built-in environment variable `CODEBUILD_SRC_DIR` . The alternate buildspec file can have a name other than `buildspec.yml` , for example `myspec.yml` or `build_spec_qa.yml` or similar. For more information, see the [Build Spec Reference](https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec-ref-example) in the *AWS CodeBuild User Guide* .", "BuildStatusConfig": "Contains information that defines how the build project reports the build status to the source provider. 
This option is only used when the source provider is `GITHUB` , `GITHUB_ENTERPRISE` , or `BITBUCKET` .", "GitCloneDepth": "The depth of history to download. Minimum value is 0. If this value is 0, greater than 25, or not provided, then the full history is downloaded with each build project. If your source type is Amazon S3, this value is not supported.", @@ -7140,8 +7269,8 @@ "Type": "The type of repository that contains the source code to be built. Valid values include:\n\n- `BITBUCKET` : The source code is in a Bitbucket repository.\n- `CODECOMMIT` : The source code is in an CodeCommit repository.\n- `CODEPIPELINE` : The source code settings are specified in the source action of a pipeline in CodePipeline.\n- `GITHUB` : The source code is in a GitHub repository.\n- `GITHUB_ENTERPRISE` : The source code is in a GitHub Enterprise Server repository.\n- `GITLAB` : The source code is in a GitLab repository.\n- `GITLAB_SELF_MANAGED` : The source code is in a self-managed GitLab repository.\n- `NO_SOURCE` : The project does not have input source code.\n- `S3` : The source code is in an Amazon S3 bucket." }, "AWS::CodeBuild::Project SourceAuth": { - "Resource": "The resource value that applies to the specified authorization type.\n\n> This data type is used by the AWS CodeBuild console only.", - "Type": "The authorization type to use. The only valid value is `OAUTH` , which represents the OAuth authorization type.\n\n> This data type is used by the AWS CodeBuild console only." + "Resource": "The resource value that applies to the specified authorization type.", + "Type": "The authorization type to use. Valid options are OAUTH, CODECONNECTIONS, or SECRETS_MANAGER." }, "AWS::CodeBuild::Project Tag": { "Key": "The tag's key.", @@ -7181,9 +7310,9 @@ "Value": "The tag's value." }, "AWS::CodeBuild::SourceCredential": { - "AuthType": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, or CODECONNECTIONS.", + "AuthType": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, CODECONNECTIONS, or SECRETS_MANAGER.", "ServerType": "The type of source provider. The valid options are GITHUB, GITHUB_ENTERPRISE, GITLAB, GITLAB_SELF_MANAGED, or BITBUCKET.", - "Token": "For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is either the access token or the app password. For the `authType` CODECONNECTIONS, this is the `connectionArn` .", + "Token": "For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is either the access token or the app password. For the `authType` CODECONNECTIONS, this is the `connectionArn` . For the `authType` SECRETS_MANAGER, this is the `secretArn` .", "Username": "The Bitbucket username when the `authType` is BASIC_AUTH. This parameter is not valid for other types of source providers or connections." }, "AWS::CodeCommit::Repository": { @@ -7684,6 +7813,7 @@ "CognitoStreams": "Configuration options for configuring Amazon Cognito streams.", "DeveloperProviderName": "The \"domain\" Amazon Cognito uses when referencing your users. This name acts as a placeholder that allows your backend and the Amazon Cognito service to communicate about the developer provider. 
For the `DeveloperProviderName` , you can use letters and periods (.), underscores (_), and dashes (-).\n\n*Minimum length* : 1\n\n*Maximum length* : 100", "IdentityPoolName": "The name of your Amazon Cognito identity pool.\n\n*Minimum length* : 1\n\n*Maximum length* : 128\n\n*Pattern* : `[\\w\\s+=,.@-]+`", + "IdentityPoolTags": "Tags to assign to the identity pool. A tag is a label that you can apply to identity pools to categorize and manage them in different ways, such as by purpose, owner, environment, or other criteria.", "OpenIdConnectProviderARNs": "The Amazon Resource Names (ARNs) of the OpenID connect providers.", "PushSync": "The configuration options to be applied to the identity pool.", "SamlProviderARNs": "The Amazon Resource Names (ARNs) of the Security Assertion Markup Language (SAML) providers.", @@ -7703,6 +7833,10 @@ "ApplicationArns": "The ARNs of the Amazon SNS platform applications that could be used by clients.", "RoleArn": "An IAM role configured to allow Amazon Cognito to call Amazon SNS on behalf of the developer." }, + "AWS::Cognito::IdentityPool Tag": { + "Key": "", + "Value": "" + }, "AWS::Cognito::IdentityPoolPrincipalTag": { "IdentityPoolId": "The identity pool that you want to associate with this principal tag map.", "IdentityProviderName": "The identity pool identity provider (IdP) that you want to associate with this principal tag map.", @@ -11416,8 +11550,8 @@ "Tenancy": "Indicates the tenancy of the Capacity Reservation. A Capacity Reservation can have one of the following tenancy settings:\n\n- `default` - The Capacity Reservation is created on hardware that is shared with other AWS accounts .\n- `dedicated` - The Capacity Reservation is created on single-tenant hardware that is dedicated to a single AWS account ." }, "AWS::EC2::CapacityReservation Tag": { - "Key": "The key of the tag.\n\nConstraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with `aws:` .", - "Value": "The value of the tag.\n\nConstraints: Tag values are case-sensitive and accept a maximum of 256 Unicode characters." + "Key": "The tag key.", + "Value": "The tag value." }, "AWS::EC2::CapacityReservation TagSpecification": { "ResourceType": "The type of resource to tag. Specify `capacity-reservation` .", @@ -11769,6 +11903,7 @@ }, "AWS::EC2::IPAM": { "Description": "The description for the IPAM.", + "EnablePrivateGua": "Enable this option to use your own GUA ranges as private IPv6 addresses. This option is disabled by default.", "OperatingRegions": "The operating Regions for an IPAM. Operating Regions are AWS Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only discovers and monitors resources in the AWS Regions you select as operating Regions.\n\nFor more information about operating Regions, see [Create an IPAM](https://docs.aws.amazon.com//vpc/latest/ipam/create-ipam.html) in the *Amazon VPC IPAM User Guide* .", "Tags": "The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key `Owner` and the value `TeamA` , specify `tag:Owner` for the filter name and `TeamA` for the filter value.", "Tier": "IPAM is offered in a Free Tier and an Advanced Tier. For more information about the features available in each tier and the costs associated with the tiers, see the [VPC IPAM product pricing page](https://docs.aws.amazon.com//vpc/pricing/) ." 
@@ -12016,7 +12151,7 @@ "AWS::EC2::LaunchTemplate": { "LaunchTemplateData": "The information for the launch template.", "LaunchTemplateName": "A name for the launch template.", - "TagSpecifications": "The tags to apply to the launch template on creation. To tag the launch template, the resource type must be `launch-template` .\n\nTo specify the tags for the resources that are created when an instance is launched, you must use [TagSpecifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html#cfn-ec2-launchtemplate-tagspecifications) .", + "TagSpecifications": "The tags to apply to the launch template on creation. To tag the launch template, the resource type must be `launch-template` .\n\nTo specify the tags for the resources that are created when an instance is launched, you must use [TagSpecifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-launchtemplate-launchtemplatedata.html#cfn-ec2-launchtemplate-launchtemplatedata-tagspecifications) .", "VersionDescription": "A description for the first version of the launch template." }, "AWS::EC2::LaunchTemplate AcceleratorCount": { @@ -13015,8 +13150,8 @@ "TrafficMirrorFilterId": "The ID of the filter that this rule is associated with." }, "AWS::EC2::TrafficMirrorFilterRule Tag": { - "Key": "The key of the tag.\n\nConstraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with `aws:` .", - "Value": "The value of the tag.\n\nConstraints: Tag values are case-sensitive and accept a maximum of 256 Unicode characters." + "Key": "The tag key.", + "Value": "The tag value." }, "AWS::EC2::TrafficMirrorFilterRule TrafficMirrorPortRange": { "FromPort": "The start of the Traffic Mirror port range. This applies to the TCP and UDP protocols.", @@ -13251,7 +13386,7 @@ }, "AWS::EC2::VPNConnection": { "CustomerGatewayId": "The ID of the customer gateway at your end of the VPN connection.", - "EnableAcceleration": "", + "EnableAcceleration": "Indicate whether to enable acceleration for the VPN connection.\n\nDefault: `false`", "StaticRoutesOnly": "Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.\n\nIf you are creating a VPN connection for a device that does not support Border Gateway Protocol (BGP), you must specify `true` .", "Tags": "Any tags assigned to the VPN connection.", "TransitGatewayId": "The ID of the transit gateway associated with the VPN connection.\n\nYou must specify either `TransitGatewayId` or `VpnGatewayId` , but not both.", @@ -13690,7 +13825,7 @@ }, "AWS::ECS::Service ServiceConnectConfiguration": { "Enabled": "Specifies whether to use Service Connect with this service.", - "LogConfiguration": "The log configuration for the container. This parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [`docker run`](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/run/) .\n\nBy default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition. 
For more information about the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\nUnderstand the following when specifying a log configuration for your containers.\n\n- Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n- This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.\n- For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .\n- For tasks that are on AWS Fargate , because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.",
+    "LogConfiguration": "The log configuration for the container. This parameter maps to `LogConfig` in the docker container create command and the `--log-driver` option to docker run.\n\nBy default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition.\n\nUnderstand the following when specifying a log configuration for your containers.\n\n- Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n- This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.\n- For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .\n- For tasks that are on AWS Fargate , because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. 
For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.",
     "Namespace": "The namespace name or full Amazon Resource Name (ARN) of the AWS Cloud Map namespace for use with Service Connect. The namespace must be in the same AWS Region as the Amazon ECS service and cluster. The type of namespace doesn't affect Service Connect. For more information about AWS Cloud Map , see [Working with Services](https://docs.aws.amazon.com/cloud-map/latest/dg/working-with-services.html) in the *AWS Cloud Map Developer Guide* .",
     "Services": "The list of Service Connect service objects. These are names and aliases (also known as endpoints) that are used by other Amazon ECS services to connect to this service.\n\nThis field is not required for a \"client\" Amazon ECS service that's a member of a namespace only to connect to other services within the namespace. An example of this would be a frontend application that accepts incoming requests from either a load balancer that's attached to the service or by other means.\n\nAn object selects a port from the task definition, assigns a name for the AWS Cloud Map service, and a list of aliases (endpoints) and ports for client applications to refer to this service."
   },
@@ -13747,16 +13882,16 @@
     "ExecutionRoleArn": "The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make AWS API calls on your behalf. For information about the required IAM roles for Amazon ECS, see [IAM roles for Amazon ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/security-ecs-iam-role-overview.html) in the *Amazon Elastic Container Service Developer Guide* .",
     "Family": "The name of a family that this task definition is registered to. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.\n\nA family groups multiple versions of a task definition. Amazon ECS gives the first task definition that you registered to a family a revision number of 1. Amazon ECS gives sequential revision numbers to each task definition that you add.\n\n> To use revision numbers when you update a task definition, specify this property. If you don't specify a value, AWS CloudFormation generates a new task definition each time that you update it.",
     "InferenceAccelerators": "The Elastic Inference accelerators to use for the containers in the task.",
-    "IpcMode": "The IPC resource namespace to use for the containers in the task. The valid values are `host` , `task` , or `none` . If `host` is specified, then all containers within the tasks that specified the `host` IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If `task` is specified, all containers within the specified task share the same IPC resources. If `none` is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see [IPC settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#ipc-settings---ipc) in the *Docker run reference* .\n\nIf the `host` IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. 
For more information, see [Docker security](https://docs.aws.amazon.com/https://docs.docker.com/engine/security/security/) .\n\nIf you are setting namespaced kernel parameters using `systemControls` for the containers in the task, the following will apply to your IPC resource namespace. For more information, see [System Controls](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n- For tasks that use the `host` IPC mode, IPC namespace related `systemControls` are not supported.\n- For tasks that use the `task` IPC mode, IPC namespace related `systemControls` will apply to all containers within a task.\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .",
+    "IpcMode": "The IPC resource namespace to use for the containers in the task. The valid values are `host` , `task` , or `none` . If `host` is specified, then all containers within the tasks that specified the `host` IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If `task` is specified, all containers within the specified task share the same IPC resources. If `none` is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance.\n\nIf the `host` IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace exposure.\n\nIf you are setting namespaced kernel parameters using `systemControls` for the containers in the task, the following will apply to your IPC resource namespace. For more information, see [System Controls](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n- For tasks that use the `host` IPC mode, IPC namespace related `systemControls` are not supported.\n- For tasks that use the `task` IPC mode, IPC namespace related `systemControls` will apply to all containers within a task.\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .",
     "Memory": "The amount (in MiB) of memory used by the task.\n\nIf your tasks run on Amazon EC2 instances, you must specify either a task-level memory value or a container-level memory value. This field is optional and any value can be used. If a task-level memory value is specified, the container-level memory value is optional. For more information regarding container-level memory and memory reservation, see [ContainerDefinition](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html) .\n\nIf your tasks run on AWS Fargate , this field is required. You must use one of the following values. 
The value you choose determines your range of valid values for the `cpu` parameter.\n\n- 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available `cpu` values: 256 (.25 vCPU)\n- 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available `cpu` values: 512 (.5 vCPU)\n- 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available `cpu` values: 1024 (1 vCPU)\n- Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available `cpu` values: 2048 (2 vCPU)\n- Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available `cpu` values: 4096 (4 vCPU)\n- Between 16 GB and 60 GB in 4 GB increments - Available `cpu` values: 8192 (8 vCPU)\n\nThis option requires Linux platform `1.4.0` or later.\n- Between 32GB and 120 GB in 8 GB increments - Available `cpu` values: 16384 (16 vCPU)\n\nThis option requires Linux platform `1.4.0` or later.", - "NetworkMode": "The Docker networking mode to use for the containers in the task. The valid values are `none` , `bridge` , `awsvpc` , and `host` . If no network mode is specified, the default is `bridge` .\n\nFor Amazon ECS tasks on Fargate, the `awsvpc` network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, `` or `awsvpc` can be used. If the network mode is set to `none` , you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The `host` and `awsvpc` network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the `bridge` mode.\n\nWith the `host` and `awsvpc` network modes, exposed container ports are mapped directly to the corresponding host port (for the `host` network mode) or the attached elastic network interface port (for the `awsvpc` network mode), so you cannot take advantage of dynamic host port mappings.\n\n> When using the `host` network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. \n\nIf the network mode is `awsvpc` , the task is allocated an elastic network interface, and you must specify a `NetworkConfiguration` value when you create a service or run a task with the task definition. For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIf the network mode is `host` , you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.\n\nFor more information, see [Network settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#network-settings) in the *Docker run reference* .", - "PidMode": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . On Fargate for Linux containers, the only valid value is `task` . 
For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container. For more information, see [PID settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#pid-settings---pid) in the *Docker run reference* .\n\nIf the `host` PID mode is used, there's a heightened risk of undesired process namespace exposure. For more information, see [Docker security](https://docs.aws.amazon.com/https://docs.docker.com/engine/security/security/) .\n\n> This parameter is not supported for Windows containers. > This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). This isn't supported for Windows containers on Fargate.",
+    "NetworkMode": "The Docker networking mode to use for the containers in the task. The valid values are `none` , `bridge` , `awsvpc` , and `host` . If no network mode is specified, the default is `bridge` .\n\nFor Amazon ECS tasks on Fargate, the `awsvpc` network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, `` or `awsvpc` can be used. If the network mode is set to `none` , you cannot specify port mappings in your container definitions, and the task's containers do not have external connectivity. The `host` and `awsvpc` network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the `bridge` mode.\n\nWith the `host` and `awsvpc` network modes, exposed container ports are mapped directly to the corresponding host port (for the `host` network mode) or the attached elastic network interface port (for the `awsvpc` network mode), so you cannot take advantage of dynamic host port mappings.\n\n> When using the `host` network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. \n\nIf the network mode is `awsvpc` , the task is allocated an elastic network interface, and you must specify a [NetworkConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_NetworkConfiguration.html) value when you create a service or run a task with the task definition. For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIf the network mode is `host` , you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.",
+    "PidMode": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . On Fargate for Linux containers, the only valid value is `task` . 
For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container.\n\nIf the `host` PID mode is used, there's a heightened risk of undesired process namespace exposure.\n\n> This parameter is not supported for Windows containers. > This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). This isn't supported for Windows containers on Fargate.", "PlacementConstraints": "An array of placement constraint objects to use for tasks.\n\n> This parameter isn't supported for tasks run on AWS Fargate .", "ProxyConfiguration": "The configuration details for the App Mesh proxy.\n\nYour Amazon ECS container instances require at least version 1.26.0 of the container agent and at least version 1.26.0-1 of the `ecs-init` package to use a proxy configuration. If your container instances are launched from the Amazon ECS optimized AMI version `20190301` or later, they contain the required versions of the container agent and `ecs-init` . For more information, see [Amazon ECS-optimized Linux AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) in the *Amazon Elastic Container Service Developer Guide* .", "RequiresCompatibilities": "The task launch types the task definition was validated against. The valid values are `EC2` , `FARGATE` , and `EXTERNAL` . For more information, see [Amazon ECS launch types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) in the *Amazon Elastic Container Service Developer Guide* .", "RuntimePlatform": "The operating system that your tasks definitions run on. A platform family is specified only for tasks using the Fargate launch type.", "Tags": "The metadata that you apply to the task definition to help you categorize and organize them. Each tag consists of a key and an optional value. You define both of them.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8\n- Maximum value length - 256 Unicode characters in UTF-8\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case-sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", - "TaskRoleArn": "The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management role that grants containers in the task permission to call AWS APIs on your behalf. 
For informationabout the required IAM roles for Amazon ECS, see [IAM roles for Amazon ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/security-ecs-iam-role-overview.html) in the *Amazon Elastic Container Service Developer Guide* .",
+    "TaskRoleArn": "The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see [Amazon ECS Task Role](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIAM roles for tasks on Windows require that the `-EnableTaskIAMRole` option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code to use the feature. For more information, see [Windows IAM roles for tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows_task_IAM_roles.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n> String validation is done on the ECS side. If an invalid string value is given for `TaskRoleArn` , it may cause the CloudFormation job to hang.",
     "Volumes": "The list of data volume definitions for the task. For more information, see [Using data volumes in tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_data_volumes.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n> The `host` and `sourcePath` parameters aren't supported for tasks run on AWS Fargate ."
   },
   "AWS::ECS::TaskDefinition AuthorizationConfig": {
@@ -13764,46 +13899,46 @@
     "IAM": "Determines whether to use the Amazon ECS task role defined in a task definition when mounting the Amazon EFS file system. If it is turned on, transit encryption must be turned on in the `EFSVolumeConfiguration` . If this parameter is omitted, the default value of `DISABLED` is used. For more information, see [Using Amazon EFS access points](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/efs-volumes.html#efs-volume-accesspoints) in the *Amazon Elastic Container Service Developer Guide* ."
   },
   "AWS::ECS::TaskDefinition ContainerDefinition": {
-    "Command": "The command that's passed to the container. This parameter maps to `Cmd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `COMMAND` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For more information, see [https://docs.docker.com/engine/reference/builder/#cmd](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) . If there are multiple arguments, each argument is a separated string in the array.",
-    "Cpu": "The number of `cpu` units reserved for the container. 
This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nThis field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level `cpu` value.\n\n> You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024. \n\nLinux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n\nOn Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see [CPU share constraint](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#cpu-share-constraint) in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n\n- *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n- *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n- *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as `0` , which Windows interprets as 1% of one CPU.",
+    "Command": "The command that's passed to the container. This parameter maps to `Cmd` in the docker container create command and the `COMMAND` parameter to docker run. If there are multiple arguments, each argument is a separated string in the array.",
+    "Cpu": "The number of `cpu` units reserved for the container. 
This parameter maps to `CpuShares` in the docker container create command and the `--cpu-shares` option to docker run.\n\nThis field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level `cpu` value.\n\n> You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024. \n\nLinux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n\nOn Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n\n- *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n- *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n- *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as `0` , which Windows interprets as 1% of one CPU.",
     "CredentialSpecs": "A list of ARNs in SSM or Amazon S3 to a credential spec ( `CredSpec` ) file that configures the container for Active Directory authentication. We recommend that you use this parameter instead of the `dockerSecurityOptions` . The maximum number of ARNs is 1.\n\nThere are two formats for each ARN.\n\n- **credentialspecdomainless:MyARN** - You use `credentialspecdomainless:MyARN` to provide a `CredSpec` with an additional section for a secret in AWS Secrets Manager . 
You provide the login credentials to the domain in the secret.\n\nEach task that runs on any container instance can join different domains.\n\nYou can use this format without joining the container instance to a domain.\n- **credentialspec:MyARN** - You use `credentialspec:MyARN` to provide a `CredSpec` for a single domain.\n\nYou must join the container instance to the domain before you start any tasks that use this task definition.\n\nIn both formats, replace `MyARN` with the ARN in SSM or Amazon S3.\n\nIf you provide a `credentialspecdomainless:MyARN` , the `credspec` must provide an ARN in AWS Secrets Manager for a secret containing the username, password, and the domain to connect to. For better security, the instance isn't joined to the domain for domainless authentication. Other applications on the instance can't use the domainless credentials. You can use this parameter to run tasks on the same instance, even if the tasks need to join different domains. For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) .",
     "DependsOn": "The dependencies defined for container startup and shutdown. A container can contain multiple dependencies. When a dependency is defined for container startup, for container shutdown it is reversed.\n\nFor tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to turn on container dependencies. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see [Updating the Amazon ECS Container Agent](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) in the *Amazon Elastic Container Service Developer Guide* . If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the `ecs-init` package. If your container instances are launched from version `20190301` or later, then they contain the required versions of the container agent and `ecs-init` . For more information, see [Amazon ECS-optimized Linux AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nFor tasks using the Fargate launch type, the task or service requires the following platforms:\n\n- Linux platform version `1.3.0` or later.\n- Windows platform version `1.0.0` or later.\n\nIf the task definition is used in a blue/green deployment that uses [AWS::CodeDeploy::DeploymentGroup BlueGreenDeploymentConfiguration](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codedeploy-deploymentgroup-bluegreendeploymentconfiguration.html) , the `dependsOn` parameter is not supported. For more information see [Issue #680](https://docs.aws.amazon.com/https://github.com/aws-cloudformation/cloudformation-coverage-roadmap/issues/680) on the GitHub website.",
-    "DisableNetworking": "When this parameter is true, networking is off within the container. 
This parameter maps to `NetworkDisabled` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) .\n\n> This parameter is not supported for Windows containers.", - "DnsSearchDomains": "A list of DNS search domains that are presented to the container. This parameter maps to `DnsSearch` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--dns-search` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", - "DnsServers": "A list of DNS servers that are presented to the container. This parameter maps to `Dns` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--dns` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", - "DockerLabels": "A key/value map of labels to add to the container. This parameter maps to `Labels` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--label` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", - "DockerSecurityOptions": "A list of strings to provide custom configuration for multiple security systems. For more information about valid values, see [Docker Run Security Configuration](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . This field isn't valid for containers in tasks using the Fargate launch type.\n\nFor Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.\n\nFor any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. 
For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThis parameter maps to `SecurityOpt` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--security-opt` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> The Amazon ECS container agent running on a container instance must register with the `ECS_SELINUX_CAPABLE=true` or `ECS_APPARMOR_CAPABLE=true` environment variables before containers placed on that instance can use these security options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* . \n\nFor more information about valid values, see [Docker Run Security Configuration](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nValid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"", - "EntryPoint": "> Early versions of the Amazon ECS container agent don't properly handle `entryPoint` parameters. If you have problems using `entryPoint` , update your container agent or enter your commands and arguments as `command` array items instead. \n\nThe entry point that's passed to the container. This parameter maps to `Entrypoint` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--entrypoint` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For more information, see [https://docs.docker.com/engine/reference/builder/#entrypoint](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#entrypoint) .", - "Environment": "The environment variables to pass to a container. This parameter maps to `Env` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--env` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.", - "EnvironmentFiles": "A list of files containing the environment variables to pass to a container. This parameter maps to the `--env-file` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nYou can specify up to ten environment files. The file must have a `.env` file extension. Each line in an environment file contains an environment variable in `VARIABLE=VALUE` format. Lines beginning with `#` are treated as comments and are ignored. 
For more information about the environment variable file syntax, see [Declare default environment variables in file](https://docs.aws.amazon.com/https://docs.docker.com/compose/env-file/) .\n\nIf there are environment variables specified using the `environment` parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see [Specifying Environment Variables](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html) in the *Amazon Elastic Container Service Developer Guide* .",
+    "DisableNetworking": "When this parameter is true, networking is off within the container. This parameter maps to `NetworkDisabled` in the docker container create command.\n\n> This parameter is not supported for Windows containers.",
+    "DnsSearchDomains": "A list of DNS search domains that are presented to the container. This parameter maps to `DnsSearch` in the docker container create command and the `--dns-search` option to docker run.\n\n> This parameter is not supported for Windows containers.",
+    "DnsServers": "A list of DNS servers that are presented to the container. This parameter maps to `Dns` in the docker container create command and the `--dns` option to docker run.\n\n> This parameter is not supported for Windows containers.",
+    "DockerLabels": "A key/value map of labels to add to the container. This parameter maps to `Labels` in the docker container create command and the `--label` option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`",
+    "DockerSecurityOptions": "A list of strings to provide custom configuration for multiple security systems. This field isn't valid for containers in tasks using the Fargate launch type.\n\nFor Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.\n\nFor any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThis parameter maps to `SecurityOpt` in the docker container create command and the `--security-opt` option to docker run.\n\n> The Amazon ECS container agent running on a container instance must register with the `ECS_SELINUX_CAPABLE=true` or `ECS_APPARMOR_CAPABLE=true` environment variables before containers placed on that instance can use these security options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* . 
\n\nValid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"",
+    "EntryPoint": "> Early versions of the Amazon ECS container agent don't properly handle `entryPoint` parameters. If you have problems using `entryPoint` , update your container agent or enter your commands and arguments as `command` array items instead. \n\nThe entry point that's passed to the container. This parameter maps to `Entrypoint` in the docker container create command and the `--entrypoint` option to docker run.",
+    "Environment": "The environment variables to pass to a container. This parameter maps to `Env` in the docker container create command and the `--env` option to docker run.\n\n> We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.",
+    "EnvironmentFiles": "A list of files containing the environment variables to pass to a container. This parameter maps to the `--env-file` option to docker run.\n\nYou can specify up to ten environment files. The file must have a `.env` file extension. Each line in an environment file contains an environment variable in `VARIABLE=VALUE` format. Lines beginning with `#` are treated as comments and are ignored.\n\nIf there are environment variables specified using the `environment` parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see [Specifying Environment Variables](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html) in the *Amazon Elastic Container Service Developer Guide* .",
     "Essential": "If the `essential` parameter of a container is marked as `true` , and that container fails or stops for any reason, all other containers that are part of the task are stopped. If the `essential` parameter of a container is marked as `false` , its failure doesn't affect the rest of the containers in a task. If this parameter is omitted, a container is assumed to be essential.\n\nAll tasks must have at least one essential container. If you have an application that's composed of multiple containers, group containers that are used for a common purpose into components, and separate the different components into multiple task definitions. For more information, see [Application Architecture](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/application_architecture.html) in the *Amazon Elastic Container Service Developer Guide* .",
-    "ExtraHosts": "A list of hostnames and IP address mappings to append to the `/etc/hosts` file on the container. This parameter maps to `ExtraHosts` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--add-host` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter isn't supported for Windows containers or tasks that use the `awsvpc` network mode.",
+    "ExtraHosts": "A list of hostnames and IP address mappings to append to the `/etc/hosts` file on the container. 
This parameter maps to `ExtraHosts` in the docker container create command and the `--add-host` option to docker run.\n\n> This parameter isn't supported for Windows containers or tasks that use the `awsvpc` network mode.",
     "FirelensConfiguration": "The FireLens configuration for the container. This is used to specify and configure a log router for container logs. For more information, see [Custom Log Routing](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html) in the *Amazon Elastic Container Service Developer Guide* .",
-    "HealthCheck": "The container health check command and associated configuration parameters for the container. This parameter maps to `HealthCheck` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `HEALTHCHECK` parameter of [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .",
-    "Hostname": "The hostname to use for your container. This parameter maps to `Hostname` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--hostname` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> The `hostname` parameter is not supported if you're using the `awsvpc` network mode.",
-    "Image": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either `*repository-url* / *image* : *tag*` or `*repository-url* / *image* @ *digest*` . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to `Image` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `IMAGE` parameter of [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n- When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.\n- Images in Amazon ECR repositories can be specified by either using the full `registry/repository:tag` or `registry/repository@digest` . For example, `012345678910.dkr.ecr..amazonaws.com/:latest` or `012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE` .\n- Images in official repositories on Docker Hub use a single name (for example, `ubuntu` or `mongo` ).\n- Images in other repositories on Docker Hub are qualified with an organization name (for example, `amazon/amazon-ecs-agent` ).\n- Images in other online repositories are qualified further by a domain name (for example, `quay.io/assemblyline/ubuntu` ).",
-    "Interactive": "When this parameter is `true` , you can deploy containerized applications that require `stdin` or a `tty` to be allocated. 
This parameter maps to `OpenStdin` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--interactive` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .",
-    "Links": "The `links` parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is `bridge` . The `name:internalName` construct is analogous to `name:alias` in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. For more information about linking Docker containers, go to [Legacy container links](https://docs.aws.amazon.com/https://docs.docker.com/network/links/) in the Docker documentation. This parameter maps to `Links` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--link` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers. > Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.",
+    "HealthCheck": "The container health check command and associated configuration parameters for the container. This parameter maps to `HealthCheck` in the docker container create command and the `HEALTHCHECK` parameter of docker run.",
+    "Hostname": "The hostname to use for your container. This parameter maps to `Hostname` in the docker container create command and the `--hostname` option to docker run.\n\n> The `hostname` parameter is not supported if you're using the `awsvpc` network mode.",
+    "Image": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either `*repository-url* / *image* : *tag*` or `*repository-url* / *image* @ *digest*` . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to `Image` in the docker container create command and the `IMAGE` parameter of docker run.\n\n- When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.\n- Images in Amazon ECR repositories can be specified by either using the full `registry/repository:tag` or `registry/repository@digest` . 
For example, `012345678910.dkr.ecr..amazonaws.com/:latest` or `012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE` .\n- Images in official repositories on Docker Hub use a single name (for example, `ubuntu` or `mongo` ).\n- Images in other repositories on Docker Hub are qualified with an organization name (for example, `amazon/amazon-ecs-agent` ).\n- Images in other online repositories are qualified further by a domain name (for example, `quay.io/assemblyline/ubuntu` ).",
+    "Interactive": "When this parameter is `true` , you can deploy containerized applications that require `stdin` or a `tty` to be allocated. This parameter maps to `OpenStdin` in the docker container create command and the `--interactive` option to docker run.",
+    "Links": "The `links` parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is `bridge` . The `name:internalName` construct is analogous to `name:alias` in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to `Links` in the docker container create command and the `--link` option to docker run.\n\n> This parameter is not supported for Windows containers. > Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.",
     "LinuxParameters": "Linux-specific modifications that are applied to the container, such as Linux kernel capabilities. For more information see [KernelCapabilities](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_KernelCapabilities.html) .\n\n> This parameter is not supported for Windows containers.",
     "LogConfiguration": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . By default, containers use the same logging driver that the Docker daemon uses. However, the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the [LogConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html) data type). Additional log drivers may be available in future releases of the Amazon ECS container agent. \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. 
To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", "Memory": "The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task `memory` value, if one is specified. This parameter maps to `Memory` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--memory` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nIf using the Fargate launch type, this parameter is optional.\n\nIf using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level `memory` and `memoryReservation` value, `memory` must be greater than `memoryReservation` . If you specify `memoryReservation` , then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of `memory` is used.\n\nThe Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container, so you should not specify fewer than 6 MiB of memory for your containers.\n\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.", - "MemoryReservation": "The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the `memory` parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to `MemoryReservation` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--memory-reservation` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nIf a task-level memory value is not specified, you must specify a non-zero integer for one or both of `memory` or `memoryReservation` in a container definition. If you specify both, `memory` must be greater than `memoryReservation` . If you specify `memoryReservation` , then that value is subtracted from the available memory resources for the container instance where the container is placed. 
Otherwise, the value of `memory` is used.\n\nFor example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a `memoryReservation` of 128 MiB, and a `memory` hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.\n\nThe Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.\n\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.", - "MountPoints": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--volume` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount point can't be across drives.", - "Name": "The name of a container. If you're linking multiple containers together in a task definition, the `name` of one container can be entered in the `links` of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to `name` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--name` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "MemoryReservation": "The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the `memory` parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to `MemoryReservation` in the docker container create command and the `--memory-reservation` option to docker run.\n\nIf a task-level memory value is not specified, you must specify a non-zero integer for one or both of `memory` or `memoryReservation` in a container definition. If you specify both, `memory` must be greater than `memoryReservation` . If you specify `memoryReservation` , then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of `memory` is used.\n\nFor example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a `memoryReservation` of 128 MiB, and a `memory` hard limit of 300 MiB. 
This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.\n\nThe Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.\n\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.", + "MountPoints": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the docker container create command and the `--volume` option to docker run.\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount point can't be across drives.", + "Name": "The name of a container. If you're linking multiple containers together in a task definition, the `name` of one container can be entered in the `links` of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to `name` in the docker container create command and the `--name` option to docker run.", "PortMappings": "The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic.\n\nFor task definitions that use the `awsvpc` network mode, you should only specify the `containerPort` . The `hostPort` can be left blank or it must be the same value as the `containerPort` .\n\nPort mappings on Windows use the `NetNAT` gateway address rather than `localhost` . There is no loopback for port mappings on Windows, so you cannot access a container's mapped port from the host itself.\n\nThis parameter maps to `PortBindings` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--publish` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . If the network mode of a task definition is set to `none` , then you can't specify port mappings. If the network mode of a task definition is set to `host` , then host ports must either be undefined or they must match the container port in the port mapping.\n\n> After a task reaches the `RUNNING` status, manual and automatic host and container port assignments are visible in the *Network Bindings* section of a container description for a selected task in the Amazon ECS console. The assignments are also visible in the `networkBindings` section [DescribeTasks](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeTasks.html) responses.", - "Privileged": "When this parameter is true, the container is given elevated privileges on the host container instance (similar to the `root` user). 
This parameter maps to `Privileged` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--privileged` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", - "PseudoTerminal": "When this parameter is `true` , a TTY is allocated. This parameter maps to `Tty` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--tty` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", - "ReadonlyRootFilesystem": "When this parameter is true, the container is given read-only access to its root file system. This parameter maps to `ReadonlyRootfs` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--read-only` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", + "Privileged": "When this parameter is true, the container is given elevated privileges on the host container instance (similar to the `root` user). This parameter maps to `Privileged` in the docker container create command and the `--privileged` option to docker run.\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", + "PseudoTerminal": "When this parameter is `true` , a TTY is allocated. This parameter maps to `Tty` in the docker container create command and the `--tty` option to docker run.", + "ReadonlyRootFilesystem": "When this parameter is true, the container is given read-only access to its root file system. This parameter maps to `ReadonlyRootfs` in the docker container create command and the `--read-only` option to docker run.\n\n> This parameter is not supported for Windows containers.", "RepositoryCredentials": "The private repository authentication credentials to use.", "ResourceRequirements": "The type and amount of a resource to assign to a container. The only supported resource is a GPU.", "Secrets": "The secrets to pass to the container. For more information, see [Specifying Sensitive Data](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) in the *Amazon Elastic Container Service Developer Guide* .", "StartTimeout": "Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a `COMPLETE` , `SUCCESS` , or `HEALTHY` status. If a `startTimeout` value is specified for containerB and it doesn't reach the desired status within that time then containerA gives up and not start. This results in the task transitioning to a `STOPPED` state.\n\n> When the `ECS_CONTAINER_START_TIMEOUT` container agent configuration variable is used, it's enforced independently from this start timeout value. 
\n\nFor tasks using the Fargate launch type, the task or service requires the following platforms:\n\n- Linux platform version `1.3.0` or later.\n- Windows platform version `1.0.0` or later.\n\nFor tasks using the EC2 launch type, your container instances require at least version `1.26.0` of the container agent to use a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see [Updating the Amazon ECS Container Agent](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) in the *Amazon Elastic Container Service Developer Guide* . If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version `1.26.0-1` of the `ecs-init` package. If your container instances are launched from version `20190301` or later, then they contain the required versions of the container agent and `ecs-init` . For more information, see [Amazon ECS-optimized Linux AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThe valid values for Fargate are 2-120 seconds.", "StopTimeout": "Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.\n\nFor tasks using the Fargate launch type, the task or service requires the following platforms:\n\n- Linux platform version `1.3.0` or later.\n- Windows platform version `1.0.0` or later.\n\nThe max stop timeout value is 120 seconds and if the parameter is not specified, the default value of 30 seconds is used.\n\nFor tasks that use the EC2 launch type, if the `stopTimeout` parameter isn't specified, the value set for the Amazon ECS container agent configuration variable `ECS_CONTAINER_STOP_TIMEOUT` is used. If neither the `stopTimeout` parameter or the `ECS_CONTAINER_STOP_TIMEOUT` agent configuration variable are set, then the default values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to use a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see [Updating the Amazon ECS Container Agent](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) in the *Amazon Elastic Container Service Developer Guide* . If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the `ecs-init` package. If your container instances are launched from version `20190301` or later, then they contain the required versions of the container agent and `ecs-init` . For more information, see [Amazon ECS-optimized Linux AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThe valid values are 2-120 seconds.", - "SystemControls": "A list of namespaced kernel parameters to set in the container. 
This parameter maps to `Sysctls` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--sysctl` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For example, you can configure `net.ipv4.tcp_keepalive_time` setting to maintain longer lived connections.", + "SystemControls": "A list of namespaced kernel parameters to set in the container. This parameter maps to `Sysctls` in the docker container create command and the `--sysctl` option to docker run. For example, you can configure `net.ipv4.tcp_keepalive_time` setting to maintain longer lived connections.", "Ulimits": "A list of `ulimits` to set in the container. This parameter maps to `Ulimits` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--ulimit` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . Valid naming values are displayed in the [Ulimit](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_Ulimit.html) data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`\n\n> This parameter is not supported for Windows containers.", - "User": "The user to use inside the container. This parameter maps to `User` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--user` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> When running tasks using the `host` network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gid`\n- `uid:group`\n\n> This parameter is not supported for Windows containers.", - "VolumesFrom": "Data volumes to mount from another container. This parameter maps to `VolumesFrom` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--volumes-from` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", - "WorkingDirectory": "The working directory to run commands inside the container in. 
This parameter maps to `WorkingDir` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--workdir` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) ." + "User": "The user to use inside the container. This parameter maps to `User` in the docker container create command and the `--user` option to docker run.\n\n> When running tasks using the `host` network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gid`\n- `uid:group`\n\n> This parameter is not supported for Windows containers.", + "VolumesFrom": "Data volumes to mount from another container. This parameter maps to `VolumesFrom` in the docker container create command and the `--volumes-from` option to docker run.", + "WorkingDirectory": "The working directory to run commands inside the container in. This parameter maps to `WorkingDir` in the docker container create command and the `--workdir` option to docker run." }, "AWS::ECS::TaskDefinition ContainerDependency": { "Condition": "The dependency condition of the container. The following are the available conditions and their behavior:\n\n- `START` - This condition emulates the behavior of links and volumes today. It validates that a dependent container is started before permitting other containers to start.\n- `COMPLETE` - This condition validates that a dependent container runs to completion (exits) before permitting other containers to start. This can be useful for nonessential containers that run a script and then exit. This condition can't be set on an essential container.\n- `SUCCESS` - This condition is the same as `COMPLETE` , but it also requires that the container exits with a `zero` status. This condition can't be set on an essential container.\n- `HEALTHY` - This condition validates that the dependent container passes its Docker health check before permitting other containers to start. This requires that the dependent container has health checks configured. This condition is confirmed only at task startup.", @@ -13816,9 +13951,9 @@ }, "AWS::ECS::TaskDefinition DockerVolumeConfiguration": { "Autoprovision": "If this value is `true` , the Docker volume is created if it doesn't already exist.\n\n> This field is only used if the `scope` is `shared` .", - "Driver": "The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use `docker plugin ls` to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. For more information, see [Docker plugin discovery](https://docs.aws.amazon.com/https://docs.docker.com/engine/extend/plugin_api/#plugin-discovery) . 
This parameter maps to `Driver` in the [Create a volume](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/VolumeCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `xxdriver` option to [docker volume create](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/volume_create/) .", - "DriverOpts": "A map of Docker driver-specific options passed through. This parameter maps to `DriverOpts` in the [Create a volume](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/VolumeCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `xxopt` option to [docker volume create](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/volume_create/) .", - "Labels": "Custom metadata to add to your Docker volume. This parameter maps to `Labels` in the [Create a volume](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/VolumeCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `xxlabel` option to [docker volume create](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/volume_create/) .", + "Driver": "The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use `docker plugin ls` to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. This parameter maps to `Driver` in the docker volume create command and the `--driver` option to docker volume create.", + "DriverOpts": "A map of Docker driver-specific options passed through. This parameter maps to `DriverOpts` in the docker volume create command and the `--opt` option to docker volume create.", + "Labels": "Custom metadata to add to your Docker volume. This parameter maps to `Labels` in the docker volume create command and the `--label` option to docker volume create.", "Scope": "The scope for the Docker volume that determines its lifecycle. Docker volumes that are scoped to a `task` are automatically provisioned when the task starts and destroyed when the task stops. Docker volumes that are scoped as `shared` persist after the task stops." }, "AWS::ECS::TaskDefinition EFSVolumeConfiguration": { @@ -13836,8 +13971,8 @@ "SizeInGiB": "The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is `20` GiB and the maximum supported value is `200` GiB." }, "AWS::ECS::TaskDefinition FSxAuthorizationConfig": { - "CredentialsParameter": "", - "Domain": "" + "CredentialsParameter": "The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of an AWS Secrets Manager secret or SSM Parameter Store parameter. The ARN refers to the stored credentials.", + "Domain": "A fully qualified domain name hosted by an [AWS Directory Service](https://docs.aws.amazon.com/directoryservice/latest/admin-guide/directory_microsoft_ad.html) Managed Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2." 
}, "AWS::ECS::TaskDefinition FSxWindowsFileServerVolumeConfiguration": { "AuthorizationConfig": "The authorization configuration details for the Amazon FSx for Windows File Server file system.", @@ -13849,7 +13984,7 @@ "Type": "The log router to use. The valid values are `fluentd` or `fluentbit` ." }, "AWS::ECS::TaskDefinition HealthCheck": { - "Command": "A string array representing the command that the container runs to determine if it is healthy. The string array must start with `CMD` to run the command arguments directly, or `CMD-SHELL` to run the command with the container's default shell.\n\nWhen you use the AWS Management Console JSON panel, the AWS Command Line Interface , or the APIs, enclose the list of commands in double quotes and brackets.\n\n`[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`\n\nYou don't include the double quotes and brackets when you use the AWS Management Console.\n\n`CMD-SHELL, curl -f http://localhost/ || exit 1`\n\nAn exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see `HealthCheck` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) .", + "Command": "A string array representing the command that the container runs to determine if it is healthy. The string array must start with `CMD` to run the command arguments directly, or `CMD-SHELL` to run the command with the container's default shell.\n\nWhen you use the AWS Management Console JSON panel, the AWS Command Line Interface , or the APIs, enclose the list of commands in double quotes and brackets.\n\n`[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`\n\nYou don't include the double quotes and brackets when you use the AWS Management Console.\n\n`CMD-SHELL, curl -f http://localhost/ || exit 1`\n\nAn exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see `HealthCheck` in the docker container create command.", "Interval": "The time period in seconds between each health check execution. You may specify between 5 and 300 seconds. The default value is 30 seconds.", "Retries": "The number of times to retry a failed health check before the container is considered unhealthy. You may specify between 1 and 10 retries. The default value is 3.", "StartPeriod": "The optional grace period to provide containers time to bootstrap before failed health checks count towards the maximum number of retries. You can specify between 0 and 300 seconds. By default, the `startPeriod` is off.\n\n> If a health check succeeds within the `startPeriod` , then the container is considered healthy and any subsequent failures count toward the maximum number of retries.", @@ -13867,8 +14002,8 @@ "DeviceType": "The Elastic Inference accelerator type to use." }, "AWS::ECS::TaskDefinition KernelCapabilities": { - "Add": "The Linux capabilities for the container that have been added to the default configuration provided by Docker. 
This parameter maps to `CapAdd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--cap-add` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> Tasks launched on AWS Fargate only support adding the `SYS_PTRACE` kernel capability. \n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`", - "Drop": "The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to `CapDrop` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--cap-drop` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`" + "Add": "The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to `CapAdd` in the docker container create command and the `--cap-add` option to docker run.\n\n> Tasks launched on AWS Fargate only support adding the `SYS_PTRACE` kernel capability. \n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`", + "Drop": "The Linux capabilities for the container that have been removed from the default configuration provided by Docker. 
This parameter maps to `CapDrop` in the docker container create command and the `--cap-drop` option to docker run.\n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`" }, "AWS::ECS::TaskDefinition KeyValuePair": { "Name": "The name of the key-value pair. For environment variables, this is the name of the environment variable.", @@ -13876,12 +14011,12 @@ }, "AWS::ECS::TaskDefinition LinuxParameters": { "Capabilities": "The Linux capabilities for the container that are added to or dropped from the default configuration provided by Docker.\n\n> For tasks that use the Fargate launch type, `capabilities` is supported for all platform versions but the `add` parameter is only supported if using platform version 1.4.0 or later.", - "Devices": "Any host devices to expose to the container. This parameter maps to `Devices` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--device` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you're using tasks that use the Fargate launch type, the `devices` parameter isn't supported.", - "InitProcessEnabled": "Run an `init` process inside the container that forwards signals and reaps processes. This parameter maps to the `--init` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", - "MaxSwap": "The total amount of swap memory (in MiB) a container can use. This parameter will be translated to the `--memory-swap` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) where the value would be the sum of the container memory plus the `maxSwap` value.\n\nIf a `maxSwap` value of `0` is specified, the container will not use swap. Accepted values are `0` or any positive integer. If the `maxSwap` parameter is omitted, the container will use the swap configuration for the container instance it is running on. A `maxSwap` value must be set for the `swappiness` parameter to be used.\n\n> If you're using tasks that use the Fargate launch type, the `maxSwap` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.", - "SharedMemorySize": "The value for the size (in MiB) of the `/dev/shm` volume. 
This parameter maps to the `--shm-size` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you are using tasks that use the Fargate launch type, the `sharedMemorySize` parameter is not supported.", - "Swappiness": "This allows you to tune a container's memory swappiness behavior. A `swappiness` value of `0` will cause swapping to not happen unless absolutely necessary. A `swappiness` value of `100` will cause pages to be swapped very aggressively. Accepted values are whole numbers between `0` and `100` . If the `swappiness` parameter is not specified, a default value of `60` is used. If a value is not specified for `maxSwap` then this parameter is ignored. This parameter maps to the `--memory-swappiness` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you're using tasks that use the Fargate launch type, the `swappiness` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.", - "Tmpfs": "The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the `--tmpfs` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you're using tasks that use the Fargate launch type, the `tmpfs` parameter isn't supported." + "Devices": "Any host devices to expose to the container. This parameter maps to `Devices` in the docker container create command and the `--device` option to docker run.\n\n> If you're using tasks that use the Fargate launch type, the `devices` parameter isn't supported.", + "InitProcessEnabled": "Run an `init` process inside the container that forwards signals and reaps processes. This parameter maps to the `--init` option to docker run. This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "MaxSwap": "The total amount of swap memory (in MiB) a container can use. This parameter will be translated to the `--memory-swap` option to docker run where the value would be the sum of the container memory plus the `maxSwap` value.\n\nIf a `maxSwap` value of `0` is specified, the container will not use swap. Accepted values are `0` or any positive integer. If the `maxSwap` parameter is omitted, the container will use the swap configuration for the container instance it is running on. A `maxSwap` value must be set for the `swappiness` parameter to be used.\n\n> If you're using tasks that use the Fargate launch type, the `maxSwap` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.", + "SharedMemorySize": "The value for the size (in MiB) of the `/dev/shm` volume. This parameter maps to the `--shm-size` option to docker run.\n\n> If you are using tasks that use the Fargate launch type, the `sharedMemorySize` parameter is not supported.", + "Swappiness": "This allows you to tune a container's memory swappiness behavior. A `swappiness` value of `0` will cause swapping to not happen unless absolutely necessary. A `swappiness` value of `100` will cause pages to be swapped very aggressively. Accepted values are whole numbers between `0` and `100` . 
If the `swappiness` parameter is not specified, a default value of `60` is used. If a value is not specified for `maxSwap` then this parameter is ignored. This parameter maps to the `--memory-swappiness` option to docker run.\n\n> If you're using tasks that use the Fargate launch type, the `swappiness` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.", + "Tmpfs": "The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the `--tmpfs` option to docker run.\n\n> If you're using tasks that use the Fargate launch type, the `tmpfs` parameter isn't supported." }, "AWS::ECS::TaskDefinition LogConfiguration": { "LogDriver": "The log driver to use for the container.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n\nFor more information about using the `awslogs` log driver, see [Send Amazon ECS logs to CloudWatch](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nFor more information about using the `awsfirelens` log driver, see [Send Amazon ECS logs to an AWS service or AWS Partner](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html) .\n\n> If you have a custom driver that isn't listed, you can fork the Amazon ECS container agent project that's [available on GitHub](https://docs.aws.amazon.com/https://github.com/aws/amazon-ecs-agent) and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we don't currently provide support for running modified copies of this software.", @@ -16763,8 +16898,8 @@ "OperatingSystem": "The platform required for all containers in the container group definition.\n\n> Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the [Amazon Linux 2 FAQs](https://docs.aws.amazon.com/https://aws.amazon.com/amazon-linux-2/faqs/) . For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x., first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See [Migrate to Amazon GameLift server SDK version 5.](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-serversdk5-migration.html)", "SchedulingStrategy": "The method for deploying the container group across fleet instances. A replica container group might have multiple copies on each fleet instance. A daemon container group maintains only one copy per fleet instance.", "Tags": "", - "TotalCpuLimit": "The amount of CPU units on a fleet instance to allocate for the container group. All containers in the group share these resources. This property is an integer value in CPU units (1 vCPU is equal to 1024 CPU units).\n\nYou can set additional limits for each `ContainerDefinition` in the group. If individual containers have limits, this value must be equal to or greater than the sum of all container-specific CPU limits in the group.\n\nFor more details on memory allocation, see the [Container fleet design guide](https://docs.aws.amazon.com/gamelift/latest/developerguide/containers-design-fleet) .", - "TotalMemoryLimit": "The amount of memory (in MiB) on a fleet instance to allocate for the container group. 
All containers in the group share these resources.\n\nYou can set additional limits for each `ContainerDefinition` in the group. If individual containers have limits, this value must meet the following requirements:\n\n- Equal to or greater than the sum of all container-specific soft memory limits in the group.\n- Equal to or greater than any container-specific hard limits in the group.\n\nFor more details on memory allocation, see the [Container fleet design guide](https://docs.aws.amazon.com/gamelift/latest/developerguide/containers-design-fleet) ." + "TotalCpuLimit": "The amount of CPU units on a fleet instance to allocate for the container group. All containers in the group share these resources. This property is an integer value in CPU units (1 vCPU is equal to 1024 CPU units).\n\nYou can set additional limits for each `ContainerDefinition` in the group. If individual containers have limits, this value must be equal to or greater than the sum of all container-specific CPU limits in the group.", + "TotalMemoryLimit": "The amount of memory (in MiB) on a fleet instance to allocate for the container group. All containers in the group share these resources.\n\nYou can set additional limits for each `ContainerDefinition` in the group. If individual containers have limits, this value must meet the following requirements:\n\n- Equal to or greater than the sum of all container-specific soft memory limits in the group.\n- Equal to or greater than any container-specific hard limits in the group." }, "AWS::GameLift::ContainerGroupDefinition ContainerDefinition": { "Command": "A command that's passed to the container on startup. Each argument for the command is an additional string in the array. See the [ContainerDefinition::command](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-command) parameter in the *Amazon Elastic Container Service API reference.*", @@ -16814,18 +16949,18 @@ }, "AWS::GameLift::Fleet": { "AnywhereConfiguration": "Amazon GameLift Anywhere configuration options.", - "ApplyCapacity": "Current resource capacity settings for managed EC2 fleets and container fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", + "ApplyCapacity": "Current resource capacity settings for managed EC2 fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", "BuildId": "A unique identifier for a build to be deployed on the new fleet. If you are deploying the fleet with a custom game build, you must specify this property. The build must have been successfully uploaded to Amazon GameLift and be in a `READY` status. 
This fleet setting cannot be changed once the fleet is created.", "CertificateConfiguration": "Prompts Amazon GameLift to generate a TLS/SSL certificate for the fleet. Amazon GameLift uses the certificates to encrypt traffic between game clients and the game servers running on Amazon GameLift. By default, the `CertificateConfiguration` is `DISABLED` . You can't change this property after you create the fleet.\n\nAWS Certificate Manager (ACM) certificates expire after 13 months. Certificate expiration can cause fleets to fail, preventing players from connecting to instances in the fleet. We recommend you replace fleets before 13 months, consider using fleet aliases for a smooth transition.\n\n> ACM isn't available in all AWS regions. A fleet creation request with certificate generation enabled in an unsupported Region, fails with a 4xx error. For more information about the supported Regions, see [Supported Regions](https://docs.aws.amazon.com/acm/latest/userguide/acm-regions.html) in the *AWS Certificate Manager User Guide* .", "ComputeType": "The type of compute resource used to host your game servers.\n\n- `EC2` \u2013 The game server build is deployed to Amazon EC2 instances for cloud hosting. This is the default setting.\n- `CONTAINER` \u2013 Container images with your game server build and supporting software are deployed to Amazon EC2 instances for cloud hosting. With this compute type, you must specify the `ContainerGroupsConfiguration` parameter.\n- `ANYWHERE` \u2013 Game servers or container images with your game server and supporting software are deployed to compute resources that are provided and managed by you. With this compute type, you can also set the `AnywhereConfiguration` parameter.", - "ContainerGroupsConfiguration": "*This data type is used with the Amazon GameLift containers feature, which is currently in public preview.*\n\nConfiguration details for a set of container groups, for use when creating a fleet with compute type `CONTAINER` .\n\n*Used with:* `CreateFleet`", + "ContainerGroupsConfiguration": "*This data type is currently not available. It is under improvement as we respond to customer feedback from the Containers public preview.*\n\nConfiguration details for a set of container groups, for use when creating a fleet with compute type `CONTAINER` .\n\n*Used with:* `CreateFleet`", "Description": "A description for the fleet.", "DesiredEC2Instances": "The number of EC2 instances that you want this fleet to host. When creating a new fleet, GameLift automatically sets this value to \"1\" and initiates a single instance. Once the fleet is active, update this value to trigger GameLift to add or remove instances from the fleet.", "EC2InboundPermissions": "The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet. Set this parameter for EC2 and container fleets. You can leave this parameter empty when creating the fleet, but you must call `UpdateFleetPortSettings` to set it before players can connect to game sessions. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished. For Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges.\n\nTo manage inbound access for a container fleet, set this parameter to the same port numbers that you set for the fleet's connection port range. 
During the life of the fleet, update this parameter to control which connection ports are open to inbound traffic.", "EC2InstanceType": "The Amazon GameLift-supported Amazon EC2 instance type to use with EC2 and container fleets. Instance type determines the computing resources that will be used to host your game servers, including CPU, memory, storage, and networking capacity. See [Amazon Elastic Compute Cloud Instance Types](https://docs.aws.amazon.com/ec2/instance-types/) for detailed descriptions of Amazon EC2 instance types.", "FleetType": "Indicates whether to use On-Demand or Spot instances for this fleet. By default, this property is set to `ON_DEMAND` . Learn more about when to use [On-Demand versus Spot Instances](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-ec2-instances.html#gamelift-ec2-instances-spot) . This fleet property can't be changed after the fleet is created.", - "InstanceRoleARN": "A unique identifier for an IAM role with access permissions to other AWS services. Any application that runs on an instance in the fleet--including install scripts, server processes, and other processes--can use these permissions to interact with AWS resources that you own or have access to. For more information about using the role with your game server builds, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is \"EC2\" or \"Container\".", - "InstanceRoleCredentialsProvider": "Indicates that fleet instances maintain a shared credentials file for the IAM role defined in `InstanceRoleArn` . Shared credentials allow applications that are deployed with the game server executable to communicate with other AWS resources. This property is used only when the game server is integrated with the server SDK version 5.x. For more information about using shared credentials, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is \"EC2\" or \"Container\".", + "InstanceRoleARN": "A unique identifier for an IAM role with access permissions to other AWS services. Any application that runs on an instance in the fleet--including install scripts, server processes, and other processes--can use these permissions to interact with AWS resources that you own or have access to. For more information about using the role with your game server builds, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is \"EC2\".", + "InstanceRoleCredentialsProvider": "Indicates that fleet instances maintain a shared credentials file for the IAM role defined in `InstanceRoleArn` . Shared credentials allow applications that are deployed with the game server executable to communicate with other AWS resources. This property is used only when the game server is integrated with the server SDK version 5.x. For more information about using shared credentials, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . 
This attribute is used with fleets where `ComputeType` is \"EC2\".", "Locations": "A set of remote locations to deploy additional instances to and manage as a multi-location fleet. Use this parameter when creating a fleet in AWS Regions that support multiple locations. You can add any AWS Region or Local Zone that's supported by Amazon GameLift. Provide a list of one or more AWS Region codes, such as `us-west-2` , or Local Zone names. When using this parameter, Amazon GameLift requires you to include your home location in the request. For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "MaxSize": "The maximum number of instances that are allowed in the specified fleet location. If this parameter is not set, the default is 1.", "MetricGroups": "The name of an AWS CloudWatch metric group to add this fleet to. A metric group is used to aggregate the metrics for multiple fleets. You can specify an existing metric group name or set a new name to create a new metric group. A fleet can be included in only one metric group at a time.", @@ -16871,7 +17006,7 @@ }, "AWS::GameLift::Fleet LocationConfiguration": { "Location": "An AWS Region code, such as `us-west-2` . For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", - "LocationCapacity": "Current resource capacity settings for managed EC2 fleets and container fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)" + "LocationCapacity": "Current resource capacity settings for managed EC2 fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)" }, "AWS::GameLift::Fleet ResourceCreationLimitPolicy": { "NewGameSessionsPerCreator": "A policy that puts limits on the number of game sessions that a player can create within a specified span of time. With this policy, you can control players' ability to consume available resources.\n\nThe policy is evaluated when a player tries to create a new game session. On receiving a `CreateGameSession` request, Amazon GameLift checks that the player (identified by `CreatorId` ) has created fewer than game session limit in the specified time period.", @@ -16879,7 +17014,7 @@ }, "AWS::GameLift::Fleet RuntimeConfiguration": { "GameSessionActivationTimeoutSeconds": "The maximum amount of time (in seconds) allowed to launch a new game session and have it report ready to host players. During this time, the game session is in status `ACTIVATING` . 
If the game session does not become active before the timeout, it is ended and the game session status is changed to `TERMINATED` .", - "MaxConcurrentGameSessionActivations": "The number of game sessions in status `ACTIVATING` to allow on an instance or container. This setting limits the instance resources that can be used for new game activations at any one time.", + "MaxConcurrentGameSessionActivations": "The number of game sessions in status `ACTIVATING` to allow on an instance. This setting limits the instance resources that can be used for new game activations at any one time.", "ServerProcesses": "A collection of server process configurations that identify what server processes to run on fleet computes." }, "AWS::GameLift::Fleet ScalingPolicy": { @@ -16897,7 +17032,7 @@ "UpdateStatus": "The current status of the fleet's scaling policies in a requested fleet location. The status `PENDING_UPDATE` indicates that an update was requested for the fleet but has not yet been completed for the location." }, "AWS::GameLift::Fleet ServerProcess": { - "ConcurrentExecutions": "The number of server processes using this configuration that run concurrently on each instance or container..", + "ConcurrentExecutions": "The number of server processes using this configuration that run concurrently on each instance.", "LaunchPath": "The location of a game build executable or Realtime script. Game builds and Realtime scripts are installed on instances at the root:\n\n- Windows (custom game builds only): `C:\\game` . Example: \" `C:\\game\\MyGame\\server.exe` \"\n- Linux: `/local/game` . Examples: \" `/local/game/MyGame/server.exe` \" or \" `/local/game/MyRealtimeScript.js` \"\n\n> Amazon GameLift doesn't support the use of setup scripts that launch the game executable. For custom game builds, this parameter must indicate the executable that calls the server SDK operations `initSDK()` and `ProcessReady()` .", "Parameters": "An optional list of parameters to pass to the server executable or Realtime script on launch.\n\nLength Constraints: Minimum length of 1. Maximum length of 1024.\n\nPattern: [A-Za-z0-9_:.+\\/\\\\\\- =@{},?'\\[\\]\"]+" }, @@ -17123,7 +17258,7 @@ }, "AWS::Glue::Connection ConnectionInput": { "ConnectionProperties": "These key-value pairs define parameters for the connection.", - "ConnectionType": "The type of the connection. Currently, these types are supported:\n\n- `JDBC` - Designates a connection to a database through Java Database Connectivity (JDBC).\n\n`JDBC` Connections use the following ConnectionParameters.\n\n- Required: All of ( `HOST` , `PORT` , `JDBC_ENGINE` ) or `JDBC_CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- Optional: `JDBC_ENFORCE_SSL` , `CUSTOM_JDBC_CERT` , `CUSTOM_JDBC_CERT_STRING` , `SKIP_CUSTOM_JDBC_CERT_VALIDATION` . These parameters are used to configure SSL with JDBC.\n- `KAFKA` - Designates a connection to an Apache Kafka streaming platform.\n\n`KAFKA` Connections use the following ConnectionParameters.\n\n- Required: `KAFKA_BOOTSTRAP_SERVERS` .\n- Optional: `KAFKA_SSL_ENABLED` , `KAFKA_CUSTOM_CERT` , `KAFKA_SKIP_CUSTOM_CERT_VALIDATION` . These parameters are used to configure SSL with `KAFKA` .\n- Optional: `KAFKA_CLIENT_KEYSTORE` , `KAFKA_CLIENT_KEYSTORE_PASSWORD` , `KAFKA_CLIENT_KEY_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD` . These parameters are used to configure TLS client configuration with SSL in `KAFKA` .\n- Optional: `KAFKA_SASL_MECHANISM` . 
Can be specified as `SCRAM-SHA-512` , `GSSAPI` , or `AWS_MSK_IAM` .\n- Optional: `KAFKA_SASL_SCRAM_USERNAME` , `KAFKA_SASL_SCRAM_PASSWORD` , `ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with `KAFKA` .\n- Optional: `KAFKA_SASL_GSSAPI_KEYTAB` , `KAFKA_SASL_GSSAPI_KRB5_CONF` , `KAFKA_SASL_GSSAPI_SERVICE` , `KAFKA_SASL_GSSAPI_PRINCIPAL` . These parameters are used to configure SASL/GSSAPI authentication with `KAFKA` .\n- `MONGODB` - Designates a connection to a MongoDB document database.\n\n`MONGODB` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `SALESFORCE` - Designates a connection to Salesforce using OAuth authencation.\n\n- Requires the `AuthenticationConfiguration` member to be configured.\n- `NETWORK` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).\n\n`NETWORK` Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.\n- `MARKETPLACE` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue .\n\n`MARKETPLACE` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTOR_TYPE` , `CONNECTOR_URL` , `CONNECTOR_CLASS_NAME` , `CONNECTION_URL` .\n- Required for `JDBC` `CONNECTOR_TYPE` connections: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `CUSTOM` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue .\n\n`SFTP` is not supported.\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult [AWS Glue connection properties](https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html) .\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult [Using connectors and connections](https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html) .", + "ConnectionType": "The type of the connection. Currently, these types are supported:\n\n- `JDBC` - Designates a connection to a database through Java Database Connectivity (JDBC).\n\n`JDBC` Connections use the following ConnectionParameters.\n\n- Required: All of ( `HOST` , `PORT` , `JDBC_ENGINE` ) or `JDBC_CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- Optional: `JDBC_ENFORCE_SSL` , `CUSTOM_JDBC_CERT` , `CUSTOM_JDBC_CERT_STRING` , `SKIP_CUSTOM_JDBC_CERT_VALIDATION` . These parameters are used to configure SSL with JDBC.\n- `KAFKA` - Designates a connection to an Apache Kafka streaming platform.\n\n`KAFKA` Connections use the following ConnectionParameters.\n\n- Required: `KAFKA_BOOTSTRAP_SERVERS` .\n- Optional: `KAFKA_SSL_ENABLED` , `KAFKA_CUSTOM_CERT` , `KAFKA_SKIP_CUSTOM_CERT_VALIDATION` . These parameters are used to configure SSL with `KAFKA` .\n- Optional: `KAFKA_CLIENT_KEYSTORE` , `KAFKA_CLIENT_KEYSTORE_PASSWORD` , `KAFKA_CLIENT_KEY_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD` . These parameters are used to configure TLS client configuration with SSL in `KAFKA` .\n- Optional: `KAFKA_SASL_MECHANISM` . 
Can be specified as `SCRAM-SHA-512` , `GSSAPI` , or `AWS_MSK_IAM` .\n- Optional: `KAFKA_SASL_SCRAM_USERNAME` , `KAFKA_SASL_SCRAM_PASSWORD` , `ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with `KAFKA` .\n- Optional: `KAFKA_SASL_GSSAPI_KEYTAB` , `KAFKA_SASL_GSSAPI_KRB5_CONF` , `KAFKA_SASL_GSSAPI_SERVICE` , `KAFKA_SASL_GSSAPI_PRINCIPAL` . These parameters are used to configure SASL/GSSAPI authentication with `KAFKA` .\n- `MONGODB` - Designates a connection to a MongoDB document database.\n\n`MONGODB` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `SALESFORCE` - Designates a connection to Salesforce using OAuth authentication.\n\n- Requires the `AuthenticationConfiguration` member to be configured.\n- `VIEW_VALIDATION_REDSHIFT` - Designates a connection used for view validation by Amazon Redshift.\n- `VIEW_VALIDATION_ATHENA` - Designates a connection used for view validation by Amazon Athena.\n- `NETWORK` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).\n\n`NETWORK` Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.\n- `MARKETPLACE` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue .\n\n`MARKETPLACE` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTOR_TYPE` , `CONNECTOR_URL` , `CONNECTOR_CLASS_NAME` , `CONNECTION_URL` .\n- Required for `JDBC` `CONNECTOR_TYPE` connections: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `CUSTOM` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue .\n\n`SFTP` is not supported.\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult [AWS Glue connection properties](https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html) .\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult [Using connectors and connections](https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html) .",
    "Description": "The description of the connection.",
    "MatchCriteria": "A list of criteria that can be used in selecting this connection.",
    "Name": "The name of the connection.",
@@ -22978,6 +23113,7 @@
    "FilterCriteria": "An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see [Lambda event filtering](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html) .",
    "FunctionName": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* \u2013 `MyFunction` .\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Version or Alias ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD` .\n- *Partial ARN* \u2013 `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. 
If you specify only the function name, it's limited to 64 characters in length.", "FunctionResponseTypes": "(Streams and SQS) A list of current response type enums applied to the event source mapping.\n\nValid Values: `ReportBatchItemFailures`", + "KmsKeyArn": "", "MaximumBatchingWindowInSeconds": "The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function.\n\n*Default ( Kinesis , DynamoDB , Amazon SQS event sources)* : 0\n\n*Default ( Amazon MSK , Kafka, Amazon MQ , Amazon DocumentDB event sources)* : 500 ms\n\n*Related setting:* For Amazon SQS event sources, when you set `BatchSize` to a value greater than 10, you must set `MaximumBatchingWindowInSeconds` to at least 1.", "MaximumRecordAgeInSeconds": "(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is -1,\nwhich sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.\n\n> The minimum valid value for maximum record age is 60s. Although values less than 60 and greater than -1 fall within the parameter's absolute range, they are not allowed", "MaximumRetryAttempts": "(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is -1,\nwhich sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source.", @@ -23045,6 +23181,7 @@ "LoggingConfig": "The function's Amazon CloudWatch Logs configuration settings.", "MemorySize": "The amount of [memory available to the function](https://docs.aws.amazon.com/lambda/latest/dg/configuration-function-common.html#configuration-memory-console) at runtime. Increasing the function memory also increases its CPU allocation. The default value is 128 MB. The value can be any multiple of 1 MB. Note that new AWS accounts have reduced concurrency and memory quotas. AWS raises these quotas automatically based on your usage. You can also request a quota increase.", "PackageType": "The type of deployment package. Set to `Image` for container image and set `Zip` for .zip file archive.", + "RecursiveLoop": "", "ReservedConcurrentExecutions": "The number of simultaneous executions to reserve for the function.", "Role": "The Amazon Resource Name (ARN) of the function's execution role.", "Runtime": "The identifier of the function's [runtime](https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html) . Runtime is required if the deployment package is a .zip file archive. Specifying a runtime results in an error if you're deploying a function using a container image.\n\nThe following list includes deprecated runtimes. Lambda blocks creating new functions and updating existing functions shortly after each runtime is deprecated. For more information, see [Runtime use after deprecation](https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html#runtime-deprecation-levels) .\n\nFor a list of all currently supported runtimes, see [Supported runtimes](https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html#runtimes-supported) .", @@ -38766,8 +38903,8 @@ }, "AWS::Redshift::Cluster LoggingProperties": { "BucketName": "The name of an existing S3 bucket where the log files are to be stored.\n\nConstraints:\n\n- Must be in the same region as the cluster\n- The cluster must have read bucket and put object permissions", - "LogDestinationType": "", - "LogExports": "", + "LogDestinationType": "The log destination type. 
An enum with possible values of `s3` and `cloudwatch` .", + "LogExports": "The collection of exported log types. Possible values are `connectionlog` , `useractivitylog` , and `userlog` .", "S3KeyPrefix": "The prefix applied to the log file names.\n\nConstraints:\n\n- Cannot exceed 512 characters\n- Cannot contain spaces( ), double quotes (\"), single quotes ('), a backslash (\\), or control characters. The hexadecimal codes for invalid characters are:\n\n- x00 to x20\n- x22\n- x27\n- x5c\n- x7f or larger" }, "AWS::Redshift::Cluster Tag": { @@ -44257,6 +44394,7 @@ "ApplicationId": "The ID of the application.", "ApplicationType": "The type of the application.", "Credentials": "The credentials of the SAP application.", + "DatabaseArn": "The Amazon Resource Name (ARN) of the database.", "Instances": "The Amazon EC2 instances on which your SAP application is running.", "SapInstanceNumber": "The SAP instance number of the application.", "Sid": "The System ID of the application.", diff --git a/schema_source/cloudformation.schema.json b/schema_source/cloudformation.schema.json index a49e19255..1a7ec3a70 100644 --- a/schema_source/cloudformation.schema.json +++ b/schema_source/cloudformation.schema.json @@ -8135,9 +8135,13 @@ "additionalProperties": false, "properties": { "Destination": { + "markdownDescription": "Specifies the location of the response to modify, and how to modify it. To learn more, see [Transforming API requests and responses](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) .", + "title": "Destination", "type": "string" }, "Source": { + "markdownDescription": "Specifies the data to update the parameter with. To learn more, see [Transforming API requests and responses](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) .", + "title": "Source", "type": "string" } }, @@ -41892,7 +41896,7 @@ "properties": { "Auth": { "$ref": "#/definitions/AWS::CodeBuild::Project.SourceAuth", - "markdownDescription": "Information about the authorization settings for AWS CodeBuild to access the source code to be built.\n\nThis information is for the AWS CodeBuild console's use only. Your code should not get or set `Auth` directly.", + "markdownDescription": "Information about the authorization settings for AWS CodeBuild to access the source code to be built.", "title": "Auth" }, "BuildSpec": { @@ -41950,12 +41954,12 @@ "additionalProperties": false, "properties": { "Resource": { - "markdownDescription": "The resource value that applies to the specified authorization type.\n\n> This data type is used by the AWS CodeBuild console only.", + "markdownDescription": "The resource value that applies to the specified authorization type.", "title": "Resource", "type": "string" }, "Type": { - "markdownDescription": "The authorization type to use. The only valid value is `OAUTH` , which represents the OAuth authorization type.\n\n> This data type is used by the AWS CodeBuild console only.", + "markdownDescription": "The authorization type to use. Valid options are OAUTH, CODECONNECTIONS, or SECRETS_MANAGER.", "title": "Type", "type": "string" } @@ -42202,7 +42206,7 @@ "additionalProperties": false, "properties": { "AuthType": { - "markdownDescription": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, or CODECONNECTIONS.", + "markdownDescription": "The type of authentication used by the credentials. 
Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, CODECONNECTIONS, or SECRETS_MANAGER.", "title": "AuthType", "type": "string" }, @@ -42212,7 +42216,7 @@ "type": "string" }, "Token": { - "markdownDescription": "For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is either the access token or the app password. For the `authType` CODECONNECTIONS, this is the `connectionArn` .", + "markdownDescription": "For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is either the access token or the app password. For the `authType` CODECONNECTIONS, this is the `connectionArn` . For the `authType` SECRETS_MANAGER, this is the `secretArn` .", "title": "Token", "type": "string" }, @@ -72593,7 +72597,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::LaunchTemplate.LaunchTemplateTagSpecification" }, - "markdownDescription": "The tags to apply to the launch template on creation. To tag the launch template, the resource type must be `launch-template` .\n\nTo specify the tags for the resources that are created when an instance is launched, you must use [TagSpecifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html#cfn-ec2-launchtemplate-tagspecifications) .", + "markdownDescription": "The tags to apply to the launch template on creation. To tag the launch template, the resource type must be `launch-template` .\n\nTo specify the tags for the resources that are created when an instance is launched, you must use [TagSpecifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-launchtemplate-launchtemplatedata.html#cfn-ec2-launchtemplate-launchtemplatedata-tagspecifications) .", "title": "TagSpecifications", "type": "array" }, @@ -83999,7 +84003,7 @@ }, "LogConfiguration": { "$ref": "#/definitions/AWS::ECS::Service.LogConfiguration", - "markdownDescription": "The log configuration for the container. This parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [`docker run`](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/run/) .\n\nBy default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition. For more information about the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\nUnderstand the following when specifying a log configuration for your containers.\n\n- Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. 
Additional log drivers may be available in future releases of the Amazon ECS container agent.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n- This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.\n- For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .\n- For tasks that are on AWS Fargate , because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.",
+          "markdownDescription": "The log configuration for the container. This parameter maps to `LogConfig` in the docker container create command and the `--log-driver` option to docker run.\n\nBy default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition.\n\nUnderstand the following when specifying a log configuration for your containers.\n\n- Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n- This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.\n- For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .\n- For tasks that are on AWS Fargate , because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.",
           "title": "LogConfiguration"
         },
         "Namespace": {
@@ -84293,7 +84297,7 @@
           "type": "array"
         },
         "IpcMode": {
-          "markdownDescription": "The IPC resource namespace to use for the containers in the task. The valid values are `host` , `task` , or `none` . 
If `host` is specified, then all containers within the tasks that specified the `host` IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If `task` is specified, all containers within the specified task share the same IPC resources. If `none` is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see [IPC settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#ipc-settings---ipc) in the *Docker run reference* .\n\nIf the `host` IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. For more information, see [Docker security](https://docs.aws.amazon.com/https://docs.docker.com/engine/security/security/) .\n\nIf you are setting namespaced kernel parameters using `systemControls` for the containers in the task, the following will apply to your IPC resource namespace. For more information, see [System Controls](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n- For tasks that use the `host` IPC mode, IPC namespace related `systemControls` are not supported.\n- For tasks that use the `task` IPC mode, IPC namespace related `systemControls` will apply to all containers within a task.\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", + "markdownDescription": "The IPC resource namespace to use for the containers in the task. The valid values are `host` , `task` , or `none` . If `host` is specified, then all containers within the tasks that specified the `host` IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If `task` is specified, all containers within the specified task share the same IPC resources. If `none` is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance.\n\nIf the `host` IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose.\n\nIf you are setting namespaced kernel parameters using `systemControls` for the containers in the task, the following will apply to your IPC resource namespace. For more information, see [System Controls](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n- For tasks that use the `host` IPC mode, IPC namespace related `systemControls` are not supported.\n- For tasks that use the `task` IPC mode, IPC namespace related `systemControls` will apply to all containers within a task.\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", "title": "IpcMode", "type": "string" }, @@ -84303,12 +84307,12 @@ "type": "string" }, "NetworkMode": { - "markdownDescription": "The Docker networking mode to use for the containers in the task. The valid values are `none` , `bridge` , `awsvpc` , and `host` . 
If no network mode is specified, the default is `bridge` .\n\nFor Amazon ECS tasks on Fargate, the `awsvpc` network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, `` or `awsvpc` can be used. If the network mode is set to `none` , you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The `host` and `awsvpc` network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the `bridge` mode.\n\nWith the `host` and `awsvpc` network modes, exposed container ports are mapped directly to the corresponding host port (for the `host` network mode) or the attached elastic network interface port (for the `awsvpc` network mode), so you cannot take advantage of dynamic host port mappings.\n\n> When using the `host` network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. \n\nIf the network mode is `awsvpc` , the task is allocated an elastic network interface, and you must specify a `NetworkConfiguration` value when you create a service or run a task with the task definition. For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIf the network mode is `host` , you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.\n\nFor more information, see [Network settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#network-settings) in the *Docker run reference* .", + "markdownDescription": "The Docker networking mode to use for the containers in the task. The valid values are `none` , `bridge` , `awsvpc` , and `host` . If no network mode is specified, the default is `bridge` .\n\nFor Amazon ECS tasks on Fargate, the `awsvpc` network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, `` or `awsvpc` can be used. If the network mode is set to `none` , you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The `host` and `awsvpc` network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the `bridge` mode.\n\nWith the `host` and `awsvpc` network modes, exposed container ports are mapped directly to the corresponding host port (for the `host` network mode) or the attached elastic network interface port (for the `awsvpc` network mode), so you cannot take advantage of dynamic host port mappings.\n\n> When using the `host` network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. \n\nIf the network mode is `awsvpc` , the task is allocated an elastic network interface, and you must specify a [NetworkConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_NetworkConfiguration.html) value when you create a service or run a task with the task definition. 
For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIf the network mode is `host` , you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.", "title": "NetworkMode", "type": "string" }, "PidMode": { - "markdownDescription": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . On Fargate for Linux containers, the only valid value is `task` . For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container. For more information, see [PID settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#pid-settings---pid) in the *Docker run reference* .\n\nIf the `host` PID mode is used, there's a heightened risk of undesired process namespace exposure. For more information, see [Docker security](https://docs.aws.amazon.com/https://docs.docker.com/engine/security/security/) .\n\n> This parameter is not supported for Windows containers. > This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). This isn't supported for Windows containers on Fargate.", + "markdownDescription": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . On Fargate for Linux containers, the only valid value is `task` . For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container.\n\nIf the `host` PID mode is used, there's a heightened risk of undesired process namespace exposure.\n\n> This parameter is not supported for Windows containers. > This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). This isn't supported for Windows containers on Fargate.", "title": "PidMode", "type": "string" }, @@ -84347,7 +84351,7 @@ "type": "array" }, "TaskRoleArn": { - "markdownDescription": "The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management role that grants containers in the task permission to call AWS APIs on your behalf. 
For informationabout the required IAM roles for Amazon ECS, see [IAM roles for Amazon ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/security-ecs-iam-role-overview.html) in the *Amazon Elastic Container Service Developer Guide* .",
+          "markdownDescription": "The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see [Amazon ECS Task Role](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIAM roles for tasks on Windows require that the `-EnableTaskIAMRole` option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code to use the feature. For more information, see [Windows IAM roles for tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows_task_IAM_roles.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n> String validation is done on the ECS side. If an invalid string value is given for `TaskRoleArn` , it may cause the CloudFormation job to hang.",
           "title": "TaskRoleArn",
           "type": "string"
         },
@@ -84405,12 +84409,12 @@
           "items": {
             "type": "string"
           },
-          "markdownDescription": "The command that's passed to the container. This parameter maps to `Cmd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `COMMAND` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For more information, see [https://docs.docker.com/engine/reference/builder/#cmd](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) . If there are multiple arguments, each argument is a separated string in the array.",
+          "markdownDescription": "The command that's passed to the container. This parameter maps to `Cmd` in the docker container create command and the `COMMAND` parameter to docker run. If there are multiple arguments, each argument is a separated string in the array.",
           "title": "Command",
           "type": "array"
         },
         "Cpu": {
-          "markdownDescription": "The number of `cpu` units reserved for the container. This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nThis field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level `cpu` value.\n\n> You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024. \n\nLinux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. 
For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n\nOn Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see [CPU share constraint](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#cpu-share-constraint) in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n\n- *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n- *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n- *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as `0` , which Windows interprets as 1% of one CPU.",
+          "markdownDescription": "The number of `cpu` units reserved for the container. This parameter maps to `CpuShares` in the docker container create command and the `--cpu-shares` option to docker run.\n\nThis field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level `cpu` value.\n\n> You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024. \n\nLinux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. 
If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n\nOn Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n\n- *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n- *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n- *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as `0` , which Windows interprets as 1% of one CPU.",
           "title": "Cpu",
           "type": "number"
         },
         "DisableNetworking": {
-          "markdownDescription": "When this parameter is true, networking is off within the container. This parameter maps to `NetworkDisabled` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) .\n\n> This parameter is not supported for Windows containers.",
+          "markdownDescription": "When this parameter is true, networking is off within the container. This parameter maps to `NetworkDisabled` in the docker container create command.\n\n> This parameter is not supported for Windows containers.",
           "title": "DisableNetworking",
           "type": "boolean"
         },
         "DnsSearchDomains": {
           "items": {
             "type": "string"
           },
-          "markdownDescription": "A list of DNS search domains that are presented to the container. This parameter maps to `DnsSearch` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--dns-search` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.",
+          "markdownDescription": "A list of DNS search domains that are presented to the container. This parameter maps to `DnsSearch` in the docker container create command and the `--dns-search` option to docker run.\n\n> This parameter is not supported for Windows containers.",
           "title": "DnsSearchDomains",
           "type": "array"
         },
         "DnsServers": {
           "items": {
             "type": "string"
           },
-          "markdownDescription": "A list of DNS servers that are presented to the container. 
This parameter maps to `Dns` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--dns` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.",
+          "markdownDescription": "A list of DNS servers that are presented to the container. This parameter maps to `Dns` in the docker container create command and the `--dns` option to docker run.\n\n> This parameter is not supported for Windows containers.",
           "title": "DnsServers",
           "type": "array"
         },
         "DockerLabels": {
           "additionalProperties": true,
-          "markdownDescription": "A key/value map of labels to add to the container. This parameter maps to `Labels` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--label` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`",
+          "markdownDescription": "A key/value map of labels to add to the container. This parameter maps to `Labels` in the docker container create command and the `--label` option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`",
           "patternProperties": {
             "^[a-zA-Z0-9]+$": {
               "type": "string"
             }
           },
           "title": "DockerLabels",
           "type": "object"
         },
         "DockerSecurityOptions": {
           "items": {
             "type": "string"
           },
-          "markdownDescription": "A list of strings to provide custom configuration for multiple security systems. For more information about valid values, see [Docker Run Security Configuration](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . This field isn't valid for containers in tasks using the Fargate launch type.\n\nFor Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.\n\nFor any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. 
For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThis parameter maps to `SecurityOpt` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--security-opt` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> The Amazon ECS container agent running on a container instance must register with the `ECS_SELINUX_CAPABLE=true` or `ECS_APPARMOR_CAPABLE=true` environment variables before containers placed on that instance can use these security options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* . \n\nFor more information about valid values, see [Docker Run Security Configuration](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nValid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"",
+          "markdownDescription": "A list of strings to provide custom configuration for multiple security systems. This field isn't valid for containers in tasks using the Fargate launch type.\n\nFor Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.\n\nFor any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThis parameter maps to `SecurityOpt` in the docker container create command and the `--security-opt` option to docker run.\n\n> The Amazon ECS container agent running on a container instance must register with the `ECS_SELINUX_CAPABLE=true` or `ECS_APPARMOR_CAPABLE=true` environment variables before containers placed on that instance can use these security options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* . \n\nValid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"",
           "title": "DockerSecurityOptions",
           "type": "array"
         },
         "EntryPoint": {
           "items": {
             "type": "string"
           },
-          "markdownDescription": "> Early versions of the Amazon ECS container agent don't properly handle `entryPoint` parameters. If you have problems using `entryPoint` , update your container agent or enter your commands and arguments as `command` array items instead. \n\nThe entry point that's passed to the container. 
This parameter maps to `Entrypoint` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--entrypoint` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For more information, see [https://docs.docker.com/engine/reference/builder/#entrypoint](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#entrypoint) .",
+          "markdownDescription": "> Early versions of the Amazon ECS container agent don't properly handle `entryPoint` parameters. If you have problems using `entryPoint` , update your container agent or enter your commands and arguments as `command` array items instead. \n\nThe entry point that's passed to the container. This parameter maps to `Entrypoint` in the docker container create command and the `--entrypoint` option to docker run.",
           "title": "EntryPoint",
           "type": "array"
         },
         "Environment": {
           "items": {
             "$ref": "#/definitions/AWS::ECS::TaskDefinition.KeyValuePair"
           },
-          "markdownDescription": "The environment variables to pass to a container. This parameter maps to `Env` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--env` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.",
+          "markdownDescription": "The environment variables to pass to a container. This parameter maps to `Env` in the docker container create command and the `--env` option to docker run.\n\n> We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.",
           "title": "Environment",
           "type": "array"
         },
         "EnvironmentFiles": {
           "items": {
             "$ref": "#/definitions/AWS::ECS::TaskDefinition.EnvironmentFile"
           },
-          "markdownDescription": "A list of files containing the environment variables to pass to a container. This parameter maps to the `--env-file` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nYou can specify up to ten environment files. The file must have a `.env` file extension. Each line in an environment file contains an environment variable in `VARIABLE=VALUE` format. Lines beginning with `#` are treated as comments and are ignored. For more information about the environment variable file syntax, see [Declare default environment variables in file](https://docs.aws.amazon.com/https://docs.docker.com/compose/env-file/) .\n\nIf there are environment variables specified using the `environment` parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. 
For more information, see [Specifying Environment Variables](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html) in the *Amazon Elastic Container Service Developer Guide* .",
+          "markdownDescription": "A list of files containing the environment variables to pass to a container. This parameter maps to the `--env-file` option to docker run.\n\nYou can specify up to ten environment files. The file must have a `.env` file extension. Each line in an environment file contains an environment variable in `VARIABLE=VALUE` format. Lines beginning with `#` are treated as comments and are ignored.\n\nIf there are environment variables specified using the `environment` parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see [Specifying Environment Variables](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html) in the *Amazon Elastic Container Service Developer Guide* .",
           "title": "EnvironmentFiles",
           "type": "array"
         },
         "ExtraHosts": {
           "items": {
             "$ref": "#/definitions/AWS::ECS::TaskDefinition.HostEntry"
           },
-          "markdownDescription": "A list of hostnames and IP address mappings to append to the `/etc/hosts` file on the container. This parameter maps to `ExtraHosts` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--add-host` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter isn't supported for Windows containers or tasks that use the `awsvpc` network mode.",
+          "markdownDescription": "A list of hostnames and IP address mappings to append to the `/etc/hosts` file on the container. This parameter maps to `ExtraHosts` in the docker container create command and the `--add-host` option to docker run.\n\n> This parameter isn't supported for Windows containers or tasks that use the `awsvpc` network mode.",
           "title": "ExtraHosts",
           "type": "array"
         },
         "HealthCheck": {
           "$ref": "#/definitions/AWS::ECS::TaskDefinition.HealthCheck",
-          "markdownDescription": "The container health check command and associated configuration parameters for the container. This parameter maps to `HealthCheck` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `HEALTHCHECK` parameter of [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .",
+          "markdownDescription": "The container health check command and associated configuration parameters for the container. This parameter maps to `HealthCheck` in the docker container create command and the `HEALTHCHECK` parameter of docker run.",
           "title": "HealthCheck"
         },
         "Hostname": {
-          "markdownDescription": "The hostname to use for your container. 
This parameter maps to `Hostname` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--hostname` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> The `hostname` parameter is not supported if you're using the `awsvpc` network mode.",
+          "markdownDescription": "The hostname to use for your container. This parameter maps to `Hostname` in the docker container create command and the `--hostname` option to docker run.\n\n> The `hostname` parameter is not supported if you're using the `awsvpc` network mode.",
           "title": "Hostname",
           "type": "string"
         },
         "Image": {
-          "markdownDescription": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either `*repository-url* / *image* : *tag*` or `*repository-url* / *image* @ *digest*` . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to `Image` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `IMAGE` parameter of [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n- When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.\n- Images in Amazon ECR repositories can be specified by either using the full `registry/repository:tag` or `registry/repository@digest` . For example, `012345678910.dkr.ecr..amazonaws.com/:latest` or `012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE` .\n- Images in official repositories on Docker Hub use a single name (for example, `ubuntu` or `mongo` ).\n- Images in other repositories on Docker Hub are qualified with an organization name (for example, `amazon/amazon-ecs-agent` ).\n- Images in other online repositories are qualified further by a domain name (for example, `quay.io/assemblyline/ubuntu` ).",
+          "markdownDescription": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either `*repository-url* / *image* : *tag*` or `*repository-url* / *image* @ *digest*` . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to `Image` in the docker container create command and the `IMAGE` parameter of docker run.\n\n- When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.\n- Images in Amazon ECR repositories can be specified by either using the full `registry/repository:tag` or `registry/repository@digest` . 
For example, `012345678910.dkr.ecr..amazonaws.com/:latest` or `012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE` .\n- Images in official repositories on Docker Hub use a single name (for example, `ubuntu` or `mongo` ).\n- Images in other repositories on Docker Hub are qualified with an organization name (for example, `amazon/amazon-ecs-agent` ).\n- Images in other online repositories are qualified further by a domain name (for example, `quay.io/assemblyline/ubuntu` ).", "title": "Image", "type": "string" }, "Interactive": { - "markdownDescription": "When this parameter is `true` , you can deploy containerized applications that require `stdin` or a `tty` to be allocated. This parameter maps to `OpenStdin` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--interactive` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "When this parameter is `true` , you can deploy containerized applications that require `stdin` or a `tty` to be allocated. This parameter maps to `OpenStdin` in the docker conainer create command and the `--interactive` option to docker run.", "title": "Interactive", "type": "boolean" }, @@ -84536,7 +84540,7 @@ "items": { "type": "string" }, - "markdownDescription": "The `links` parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is `bridge` . The `name:internalName` construct is analogous to `name:alias` in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. For more information about linking Docker containers, go to [Legacy container links](https://docs.aws.amazon.com/https://docs.docker.com/network/links/) in the Docker documentation. This parameter maps to `Links` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--link` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers. > Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.", + "markdownDescription": "The `links` parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is `bridge` . The `name:internalName` construct is analogous to `name:alias` in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.. This parameter maps to `Links` in the docker conainer create command and the `--link` option to docker run.\n\n> This parameter is not supported for Windows containers. > Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. 
Network isolation is achieved on the container instance using security groups and VPC settings.", "title": "Links", "type": "array" }, @@ -84556,7 +84560,7 @@ "type": "number" }, "MemoryReservation": { - "markdownDescription": "The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the `memory` parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to `MemoryReservation` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--memory-reservation` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nIf a task-level memory value is not specified, you must specify a non-zero integer for one or both of `memory` or `memoryReservation` in a container definition. If you specify both, `memory` must be greater than `memoryReservation` . If you specify `memoryReservation` , then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of `memory` is used.\n\nFor example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a `memoryReservation` of 128 MiB, and a `memory` hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.\n\nThe Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.\n\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.", + "markdownDescription": "The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the `memory` parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to `MemoryReservation` in the the docker conainer create command and the `--memory-reservation` option to docker run.\n\nIf a task-level memory value is not specified, you must specify a non-zero integer for one or both of `memory` or `memoryReservation` in a container definition. If you specify both, `memory` must be greater than `memoryReservation` . If you specify `memoryReservation` , then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of `memory` is used.\n\nFor example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a `memoryReservation` of 128 MiB, and a `memory` hard limit of 300 MiB. 
This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.\n\nThe Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.\n\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.", "title": "MemoryReservation", "type": "number" }, @@ -84564,12 +84568,12 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.MountPoint" }, - "markdownDescription": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--volume` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount point can't be across drives.", + "markdownDescription": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the the docker conainer create command and the `--volume` option to docker run.\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount point can't be across drives.", "title": "MountPoints", "type": "array" }, "Name": { - "markdownDescription": "The name of a container. If you're linking multiple containers together in a task definition, the `name` of one container can be entered in the `links` of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to `name` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--name` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "The name of a container. If you're linking multiple containers together in a task definition, the `name` of one container can be entered in the `links` of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to `name` in tthe docker conainer create command and the `--name` option to docker run.", "title": "Name", "type": "string" }, @@ -84582,17 +84586,17 @@ "type": "array" }, "Privileged": { - "markdownDescription": "When this parameter is true, the container is given elevated privileges on the host container instance (similar to the `root` user). 
This parameter maps to `Privileged` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--privileged` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", + "markdownDescription": "When this parameter is true, the container is given elevated privileges on the host container instance (similar to the `root` user). This parameter maps to `Privileged` in the the docker conainer create command and the `--privileged` option to docker run\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", "title": "Privileged", "type": "boolean" }, "PseudoTerminal": { - "markdownDescription": "When this parameter is `true` , a TTY is allocated. This parameter maps to `Tty` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--tty` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "When this parameter is `true` , a TTY is allocated. This parameter maps to `Tty` in tthe docker conainer create command and the `--tty` option to docker run.", "title": "PseudoTerminal", "type": "boolean" }, "ReadonlyRootFilesystem": { - "markdownDescription": "When this parameter is true, the container is given read-only access to its root file system. This parameter maps to `ReadonlyRootfs` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--read-only` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", + "markdownDescription": "When this parameter is true, the container is given read-only access to its root file system. This parameter maps to `ReadonlyRootfs` in the docker conainer create command and the `--read-only` option to docker run.\n\n> This parameter is not supported for Windows containers.", "title": "ReadonlyRootFilesystem", "type": "boolean" }, @@ -84631,7 +84635,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.SystemControl" }, - "markdownDescription": "A list of namespaced kernel parameters to set in the container. This parameter maps to `Sysctls` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--sysctl` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For example, you can configure `net.ipv4.tcp_keepalive_time` setting to maintain longer lived connections.", + "markdownDescription": "A list of namespaced kernel parameters to set in the container. This parameter maps to `Sysctls` in tthe docker conainer create command and the `--sysctl` option to docker run. 
For example, you can configure `net.ipv4.tcp_keepalive_time` setting to maintain longer lived connections.", "title": "SystemControls", "type": "array" }, @@ -84644,7 +84648,7 @@ "type": "array" }, "User": { - "markdownDescription": "The user to use inside the container. This parameter maps to `User` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--user` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> When running tasks using the `host` network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gid`\n- `uid:group`\n\n> This parameter is not supported for Windows containers.", + "markdownDescription": "The user to use inside the container. This parameter maps to `User` in the docker conainer create command and the `--user` option to docker run.\n\n> When running tasks using the `host` network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gid`\n- `uid:group`\n\n> This parameter is not supported for Windows containers.", "title": "User", "type": "string" }, @@ -84652,12 +84656,12 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.VolumeFrom" }, - "markdownDescription": "Data volumes to mount from another container. This parameter maps to `VolumesFrom` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--volumes-from` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "Data volumes to mount from another container. This parameter maps to `VolumesFrom` in tthe docker conainer create command and the `--volumes-from` option to docker run.", "title": "VolumesFrom", "type": "array" }, "WorkingDirectory": { - "markdownDescription": "The working directory to run commands inside the container in. This parameter maps to `WorkingDir` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--workdir` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "The working directory to run commands inside the container in. This parameter maps to `WorkingDir` in the docker conainer create command and the `--workdir` option to docker run.", "title": "WorkingDirectory", "type": "string" } @@ -84717,13 +84721,13 @@ "type": "boolean" }, "Driver": { - "markdownDescription": "The Docker volume driver to use. 
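Most of the container-definition fields updated above are one-to-one translations of docker run flags. As a purely illustrative aid, separate from the schema change itself, a minimal `AWS::ECS::TaskDefinition` fragment in CloudFormation YAML might exercise several of them as follows; the family name, image URI, volume name, and limits are assumed placeholder values, not values taken from this schema.

```yaml
# Hypothetical sketch only: a task definition exercising several of the
# container-definition properties described above. The family name, image,
# volume name, and limits are assumed for illustration.
Resources:
  SampleTaskDefinition:
    Type: AWS::ECS::TaskDefinition
    Properties:
      Family: sample-app                 # assumed family name
      ContainerDefinitions:
        - Name: web                      # maps to --name
          Image: public.ecr.aws/docker/library/nginx:latest  # maps to IMAGE
          Essential: true
          MemoryReservation: 128         # soft limit in MiB (--memory-reservation)
          Memory: 300                    # hard limit in MiB; must exceed the soft limit
          ReadonlyRootFilesystem: true   # maps to --read-only
          HealthCheck:                   # maps to HEALTHCHECK
            Command: ["CMD-SHELL", "curl -f http://localhost/ || exit 1"]
            Interval: 30
            Timeout: 5
            Retries: 3
          MountPoints:                   # maps to --volume
            - SourceVolume: scratch      # assumed volume name
              ContainerPath: /data
      Volumes:
        - Name: scratch
          Host: {}
```

The 128 MiB soft limit and 300 MiB hard limit deliberately mirror the `memoryReservation` example given in the description above; any such split simply has to keep `Memory` greater than `MemoryReservation`.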
@@ -84717,13 +84721,13 @@
           "type": "boolean"
         },
         "Driver": {
-          "markdownDescription": "The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use `docker plugin ls` to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. For more information, see [Docker plugin discovery](https://docs.aws.amazon.com/https://docs.docker.com/engine/extend/plugin_api/#plugin-discovery) . This parameter maps to `Driver` in the [Create a volume](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/VolumeCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `xxdriver` option to [docker volume create](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/volume_create/) .",
+          "markdownDescription": "The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use `docker plugin ls` to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. This parameter maps to `Driver` in the docker volume create command and the `--driver` option to docker volume create.",
           "title": "Driver",
           "type": "string"
         },
         "DriverOpts": {
           "additionalProperties": true,
-          "markdownDescription": "A map of Docker driver-specific options passed through. This parameter maps to `DriverOpts` in the [Create a volume](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/VolumeCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `xxopt` option to [docker volume create](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/volume_create/) .",
+          "markdownDescription": "A map of Docker driver-specific options passed through. This parameter maps to `DriverOpts` in the docker volume create command and the `--opt` option to docker volume create.",
           "patternProperties": {
             "^[a-zA-Z0-9]+$": {
               "type": "string"
             }
           },
           "title": "DriverOpts",
           "type": "object"
         },
         "Labels": {
           "additionalProperties": true,
-          "markdownDescription": "Custom metadata to add to your Docker volume. This parameter maps to `Labels` in the [Create a volume](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/VolumeCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `xxlabel` option to [docker volume create](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/volume_create/) .",
+          "markdownDescription": "Custom metadata to add to your Docker volume. This parameter maps to `Labels` in the docker volume create command and the `--label` option to docker volume create.",
           "patternProperties": {
             "^[a-zA-Z0-9]+$": {
               "type": "string"
             }
           },
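The three volume parameters above all surface options of docker volume create. A short, hypothetical task-definition volume entry may make that concrete; the scope, driver name, options, and labels below are assumed values.

```yaml
# Hypothetical sketch only: a task-definition volume backed by a Docker volume.
# Scope, driver, options, and labels are assumed for illustration.
Volumes:
  - Name: scratch-volume
    DockerVolumeConfiguration:
      Scope: task          # volume lifetime tied to the task
      Driver: local        # must match the driver name Docker reports
      DriverOpts:          # driver-specific options (--opt)
        type: tmpfs
        device: tmpfs
      Labels:              # custom metadata (--label)
        purpose: scratch
```

`Autoprovision` is omitted here because it applies only to `shared`-scope volumes.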
@@ -84816,12 +84820,12 @@
       "additionalProperties": false,
       "properties": {
         "CredentialsParameter": {
-          "markdownDescription": "",
+          "markdownDescription": "The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of an AWS Secrets Manager secret or SSM Parameter Store parameter. The ARN refers to the stored credentials.",
           "title": "CredentialsParameter",
           "type": "string"
         },
         "Domain": {
-          "markdownDescription": "",
+          "markdownDescription": "A fully qualified domain name hosted by an [AWS Directory Service](https://docs.aws.amazon.com/directoryservice/latest/admin-guide/directory_microsoft_ad.html) Managed Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2.",
           "title": "Domain",
           "type": "string"
         }
@@ -84886,7 +84890,7 @@
       "items": {
         "type": "string"
       },
-      "markdownDescription": "A string array representing the command that the container runs to determine if it is healthy. The string array must start with `CMD` to run the command arguments directly, or `CMD-SHELL` to run the command with the container's default shell.\n\nWhen you use the AWS Management Console JSON panel, the AWS Command Line Interface , or the APIs, enclose the list of commands in double quotes and brackets.\n\n`[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`\n\nYou don't include the double quotes and brackets when you use the AWS Management Console.\n\n`CMD-SHELL, curl -f http://localhost/ || exit 1`\n\nAn exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see `HealthCheck` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) .",
+      "markdownDescription": "A string array representing the command that the container runs to determine if it is healthy. The string array must start with `CMD` to run the command arguments directly, or `CMD-SHELL` to run the command with the container's default shell.\n\nWhen you use the AWS Management Console JSON panel, the AWS Command Line Interface , or the APIs, enclose the list of commands in double quotes and brackets.\n\n`[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`\n\nYou don't include the double quotes and brackets when you use the AWS Management Console.\n\n`CMD-SHELL, curl -f http://localhost/ || exit 1`\n\nAn exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see `HealthCheck` in the docker container create command.",
      "title": "Command",
      "type": "array"
    },
@@ -84963,7 +84967,7 @@
      "items": {
        "type": "string"
      },
-      "markdownDescription": "The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to `CapAdd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--cap-add` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> Tasks launched on AWS Fargate only support adding the `SYS_PTRACE` kernel capability. 
\n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`",
+      "markdownDescription": "The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to `CapAdd` in the docker container create command and the `--cap-add` option to docker run.\n\n> Tasks launched on AWS Fargate only support adding the `SYS_PTRACE` kernel capability. \n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`",
      "title": "Add",
      "type": "array"
    },
@@ -84971,7 +84975,7 @@
      "items": {
        "type": "string"
      },
-      "markdownDescription": "The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to `CapDrop` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--cap-drop` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`",
+      "markdownDescription": "The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to `CapDrop` in the docker container create command and the `--cap-drop` option to docker run.\n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`",
      "title": "Drop",
      "type": "array"
    }
@@ -85006,27 +85010,27 @@
      "items": {
        "$ref": "#/definitions/AWS::ECS::TaskDefinition.Device"
      },
-      "markdownDescription": "Any host devices to expose to the container. This parameter maps to `Devices` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--device` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you're using tasks that use the Fargate launch type, the `devices` parameter isn't supported.",
+      "markdownDescription": "Any host devices to expose to the container. This parameter maps to `Devices` in the docker container create command and the `--device` option to docker run.\n\n> If you're using tasks that use the Fargate launch type, the `devices` parameter isn't supported.",
      "title": "Devices",
      "type": "array"
    },
    "InitProcessEnabled": {
-      "markdownDescription": "Run an `init` process inside the container that forwards signals and reaps processes. This parameter maps to the `--init` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`",
+      "markdownDescription": "Run an `init` process inside the container that forwards signals and reaps processes. This parameter maps to the `--init` option to docker run. This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`",
      "title": "InitProcessEnabled",
      "type": "boolean"
    },
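Because the capability, device, and init-process settings documented above all live under a single `linuxParameters` object in a container definition, a small sketch with assumed values may help show how they combine; the capability choices and device path are illustrative, not prescribed by the schema.

```yaml
# Hypothetical sketch only: LinuxParameters for a container definition,
# combining the settings described above. Capability names and the device
# path are assumed for illustration.
LinuxParameters:
  Capabilities:
    Add:
      - SYS_PTRACE         # the only capability Fargate tasks may add
    Drop:
      - NET_RAW            # maps to --cap-drop
  InitProcessEnabled: true # maps to --init
  Devices:                 # not supported on Fargate
    - HostPath: /dev/fuse  # assumed host device
      ContainerPath: /dev/fuse
      Permissions:
        - read
        - write
```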
    "MaxSwap": {
-      "markdownDescription": "The total amount of swap memory (in MiB) a container can use. This parameter will be translated to the `--memory-swap` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) where the value would be the sum of the container memory plus the `maxSwap` value.\n\nIf a `maxSwap` value of `0` is specified, the container will not use swap. Accepted values are `0` or any positive integer. If the `maxSwap` parameter is omitted, the container will use the swap configuration for the container instance it is running on. A `maxSwap` value must be set for the `swappiness` parameter to be used.\n\n> If you're using tasks that use the Fargate launch type, the `maxSwap` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.",
+      "markdownDescription": "The total amount of swap memory (in MiB) a container can use. This parameter will be translated to the `--memory-swap` option to docker run where the value would be the sum of the container memory plus the `maxSwap` value.\n\nIf a `maxSwap` value of `0` is specified, the container will not use swap. Accepted values are `0` or any positive integer. If the `maxSwap` parameter is omitted, the container will use the swap configuration for the container instance it is running on. A `maxSwap` value must be set for the `swappiness` parameter to be used.\n\n> If you're using tasks that use the Fargate launch type, the `maxSwap` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.",
      "title": "MaxSwap",
      "type": "number"
    },
    "SharedMemorySize": {
-      "markdownDescription": "The value for the size (in MiB) of the `/dev/shm` volume. This parameter maps to the `--shm-size` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you are using tasks that use the Fargate launch type, the `sharedMemorySize` parameter is not supported.",
+      "markdownDescription": "The value for the size (in MiB) of the `/dev/shm` volume. This parameter maps to the `--shm-size` option to docker run.\n\n> If you are using tasks that use the Fargate launch type, the `sharedMemorySize` parameter is not supported.",
      "title": "SharedMemorySize",
      "type": "number"
    },
    "Swappiness": {
-      "markdownDescription": "This allows you to tune a container's memory swappiness behavior. A `swappiness` value of `0` will cause swapping to not happen unless absolutely necessary. A `swappiness` value of `100` will cause pages to be swapped very aggressively. Accepted values are whole numbers between `0` and `100` . If the `swappiness` parameter is not specified, a default value of `60` is used. If a value is not specified for `maxSwap` then this parameter is ignored. This parameter maps to the `--memory-swappiness` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you're using tasks that use the Fargate launch type, the `swappiness` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.",
+      "markdownDescription": "This allows you to tune a container's memory swappiness behavior. A `swappiness` value of `0` will cause swapping to not happen unless absolutely necessary. A `swappiness` value of `100` will cause pages to be swapped very aggressively. Accepted values are whole numbers between `0` and `100` . If the `swappiness` parameter is not specified, a default value of `60` is used. If a value is not specified for `maxSwap` then this parameter is ignored. This parameter maps to the `--memory-swappiness` option to docker run.\n\n> If you're using tasks that use the Fargate launch type, the `swappiness` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.",
      "title": "Swappiness",
      "type": "number"
    },
@@ -85034,7 +85038,7 @@
      "items": {
        "$ref": "#/definitions/AWS::ECS::TaskDefinition.Tmpfs"
      },
-      "markdownDescription": "The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the `--tmpfs` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you're using tasks that use the Fargate launch type, the `tmpfs` parameter isn't supported.",
+      "markdownDescription": "The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the `--tmpfs` option to docker run.\n\n> If you're using tasks that use the Fargate launch type, the `tmpfs` parameter isn't supported.",
      "title": "Tmpfs",
      "type": "array"
    }
@@ -102967,12 +102971,12 @@
      "type": "array"
    },
    "TotalCpuLimit": {
-      "markdownDescription": "The amount of CPU units on a fleet instance to allocate for the container group. All containers in the group share these resources. This property is an integer value in CPU units (1 vCPU is equal to 1024 CPU units).\n\nYou can set additional limits for each `ContainerDefinition` in the group. If individual containers have limits, this value must be equal to or greater than the sum of all container-specific CPU limits in the group.\n\nFor more details on memory allocation, see the [Container fleet design guide](https://docs.aws.amazon.com/gamelift/latest/developerguide/containers-design-fleet) .",
+      "markdownDescription": "The amount of CPU units on a fleet instance to allocate for the container group. All containers in the group share these resources. This property is an integer value in CPU units (1 vCPU is equal to 1024 CPU units).\n\nYou can set additional limits for each `ContainerDefinition` in the group. If individual containers have limits, this value must be equal to or greater than the sum of all container-specific CPU limits in the group.",
      "title": "TotalCpuLimit",
      "type": "number"
    },
    "TotalMemoryLimit": {
-      "markdownDescription": "The amount of memory (in MiB) on a fleet instance to allocate for the container group. All containers in the group share these resources.\n\nYou can set additional limits for each `ContainerDefinition` in the group. If individual containers have limits, this value must meet the following requirements:\n\n- Equal to or greater than the sum of all container-specific soft memory limits in the group.\n- Equal to or greater than any container-specific hard limits in the group.\n\nFor more details on memory allocation, see the [Container fleet design guide](https://docs.aws.amazon.com/gamelift/latest/developerguide/containers-design-fleet) .",
+      "markdownDescription": "The amount of memory (in MiB) on a fleet instance to allocate for the container group. All containers in the group share these resources.\n\nYou can set additional limits for each `ContainerDefinition` in the group. 
If individual containers have limits, this value must meet the following requirements:\n\n- Equal to or greater than the sum of all container-specific soft memory limits in the group.\n- Equal to or greater than any container-specific hard limits in the group.", "title": "TotalMemoryLimit", "type": "number" } @@ -103271,7 +103275,7 @@ "title": "AnywhereConfiguration" }, "ApplyCapacity": { - "markdownDescription": "Current resource capacity settings for managed EC2 fleets and container fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", + "markdownDescription": "Current resource capacity settings for managed EC2 fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", "title": "ApplyCapacity", "type": "string" }, @@ -103292,7 +103296,7 @@ }, "ContainerGroupsConfiguration": { "$ref": "#/definitions/AWS::GameLift::Fleet.ContainerGroupsConfiguration", - "markdownDescription": "*This data type is used with the Amazon GameLift containers feature, which is currently in public preview.*\n\nConfiguration details for a set of container groups, for use when creating a fleet with compute type `CONTAINER` .\n\n*Used with:* `CreateFleet`", + "markdownDescription": "*This data type is currently not available. It is under improvement as we respond to customer feedback from the Containers public preview.*\n\nConfiguration details for a set of container groups, for use when creating a fleet with compute type `CONTAINER` .\n\n*Used with:* `CreateFleet`", "title": "ContainerGroupsConfiguration" }, "Description": { @@ -103324,12 +103328,12 @@ "type": "string" }, "InstanceRoleARN": { - "markdownDescription": "A unique identifier for an IAM role with access permissions to other AWS services. Any application that runs on an instance in the fleet--including install scripts, server processes, and other processes--can use these permissions to interact with AWS resources that you own or have access to. For more information about using the role with your game server builds, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is \"EC2\" or \"Container\".", + "markdownDescription": "A unique identifier for an IAM role with access permissions to other AWS services. Any application that runs on an instance in the fleet--including install scripts, server processes, and other processes--can use these permissions to interact with AWS resources that you own or have access to. 
For more information about using the role with your game server builds, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is \"EC2\".", "title": "InstanceRoleARN", "type": "string" }, "InstanceRoleCredentialsProvider": { - "markdownDescription": "Indicates that fleet instances maintain a shared credentials file for the IAM role defined in `InstanceRoleArn` . Shared credentials allow applications that are deployed with the game server executable to communicate with other AWS resources. This property is used only when the game server is integrated with the server SDK version 5.x. For more information about using shared credentials, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is \"EC2\" or \"Container\".", + "markdownDescription": "Indicates that fleet instances maintain a shared credentials file for the IAM role defined in `InstanceRoleArn` . Shared credentials allow applications that are deployed with the game server executable to communicate with other AWS resources. This property is used only when the game server is integrated with the server SDK version 5.x. For more information about using shared credentials, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is \"EC2\".", "title": "InstanceRoleCredentialsProvider", "type": "string" }, @@ -103589,7 +103593,7 @@ }, "LocationCapacity": { "$ref": "#/definitions/AWS::GameLift::Fleet.LocationCapacity", - "markdownDescription": "Current resource capacity settings for managed EC2 fleets and container fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", + "markdownDescription": "Current resource capacity settings for managed EC2 fleets. For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)", "title": "LocationCapacity" } }, @@ -103623,7 +103627,7 @@ "type": "number" }, "MaxConcurrentGameSessionActivations": { - "markdownDescription": "The number of game sessions in status `ACTIVATING` to allow on an instance or container. This setting limits the instance resources that can be used for new game activations at any one time.", + "markdownDescription": "The number of game sessions in status `ACTIVATING` to allow on an instance. 
This setting limits the instance resources that can be used for new game activations at any one time.", "title": "MaxConcurrentGameSessionActivations", "type": "number" }, @@ -103712,7 +103716,7 @@ "additionalProperties": false, "properties": { "ConcurrentExecutions": { - "markdownDescription": "The number of server processes using this configuration that run concurrently on each instance or container..", + "markdownDescription": "The number of server processes using this configuration that run concurrently on each instance.", "title": "ConcurrentExecutions", "type": "number" }, @@ -105352,7 +105356,7 @@ "type": "object" }, "ConnectionType": { - "markdownDescription": "The type of the connection. Currently, these types are supported:\n\n- `JDBC` - Designates a connection to a database through Java Database Connectivity (JDBC).\n\n`JDBC` Connections use the following ConnectionParameters.\n\n- Required: All of ( `HOST` , `PORT` , `JDBC_ENGINE` ) or `JDBC_CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- Optional: `JDBC_ENFORCE_SSL` , `CUSTOM_JDBC_CERT` , `CUSTOM_JDBC_CERT_STRING` , `SKIP_CUSTOM_JDBC_CERT_VALIDATION` . These parameters are used to configure SSL with JDBC.\n- `KAFKA` - Designates a connection to an Apache Kafka streaming platform.\n\n`KAFKA` Connections use the following ConnectionParameters.\n\n- Required: `KAFKA_BOOTSTRAP_SERVERS` .\n- Optional: `KAFKA_SSL_ENABLED` , `KAFKA_CUSTOM_CERT` , `KAFKA_SKIP_CUSTOM_CERT_VALIDATION` . These parameters are used to configure SSL with `KAFKA` .\n- Optional: `KAFKA_CLIENT_KEYSTORE` , `KAFKA_CLIENT_KEYSTORE_PASSWORD` , `KAFKA_CLIENT_KEY_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD` . These parameters are used to configure TLS client configuration with SSL in `KAFKA` .\n- Optional: `KAFKA_SASL_MECHANISM` . Can be specified as `SCRAM-SHA-512` , `GSSAPI` , or `AWS_MSK_IAM` .\n- Optional: `KAFKA_SASL_SCRAM_USERNAME` , `KAFKA_SASL_SCRAM_PASSWORD` , `ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with `KAFKA` .\n- Optional: `KAFKA_SASL_GSSAPI_KEYTAB` , `KAFKA_SASL_GSSAPI_KRB5_CONF` , `KAFKA_SASL_GSSAPI_SERVICE` , `KAFKA_SASL_GSSAPI_PRINCIPAL` . These parameters are used to configure SASL/GSSAPI authentication with `KAFKA` .\n- `MONGODB` - Designates a connection to a MongoDB document database.\n\n`MONGODB` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `SALESFORCE` - Designates a connection to Salesforce using OAuth authencation.\n\n- Requires the `AuthenticationConfiguration` member to be configured.\n- `NETWORK` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).\n\n`NETWORK` Connections do not require ConnectionParameters. 
Instead, provide a PhysicalConnectionRequirements.\n- `MARKETPLACE` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue .\n\n`MARKETPLACE` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTOR_TYPE` , `CONNECTOR_URL` , `CONNECTOR_CLASS_NAME` , `CONNECTION_URL` .\n- Required for `JDBC` `CONNECTOR_TYPE` connections: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `CUSTOM` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue .\n\n`SFTP` is not supported.\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult [AWS Glue connection properties](https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html) .\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult [Using connectors and connections](https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html) .", + "markdownDescription": "The type of the connection. Currently, these types are supported:\n\n- `JDBC` - Designates a connection to a database through Java Database Connectivity (JDBC).\n\n`JDBC` Connections use the following ConnectionParameters.\n\n- Required: All of ( `HOST` , `PORT` , `JDBC_ENGINE` ) or `JDBC_CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- Optional: `JDBC_ENFORCE_SSL` , `CUSTOM_JDBC_CERT` , `CUSTOM_JDBC_CERT_STRING` , `SKIP_CUSTOM_JDBC_CERT_VALIDATION` . These parameters are used to configure SSL with JDBC.\n- `KAFKA` - Designates a connection to an Apache Kafka streaming platform.\n\n`KAFKA` Connections use the following ConnectionParameters.\n\n- Required: `KAFKA_BOOTSTRAP_SERVERS` .\n- Optional: `KAFKA_SSL_ENABLED` , `KAFKA_CUSTOM_CERT` , `KAFKA_SKIP_CUSTOM_CERT_VALIDATION` . These parameters are used to configure SSL with `KAFKA` .\n- Optional: `KAFKA_CLIENT_KEYSTORE` , `KAFKA_CLIENT_KEYSTORE_PASSWORD` , `KAFKA_CLIENT_KEY_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD` . These parameters are used to configure TLS client configuration with SSL in `KAFKA` .\n- Optional: `KAFKA_SASL_MECHANISM` . Can be specified as `SCRAM-SHA-512` , `GSSAPI` , or `AWS_MSK_IAM` .\n- Optional: `KAFKA_SASL_SCRAM_USERNAME` , `KAFKA_SASL_SCRAM_PASSWORD` , `ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with `KAFKA` .\n- Optional: `KAFKA_SASL_GSSAPI_KEYTAB` , `KAFKA_SASL_GSSAPI_KRB5_CONF` , `KAFKA_SASL_GSSAPI_SERVICE` , `KAFKA_SASL_GSSAPI_PRINCIPAL` . 
These parameters are used to configure SASL/GSSAPI authentication with `KAFKA` .\n- `MONGODB` - Designates a connection to a MongoDB document database.\n\n`MONGODB` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `SALESFORCE` - Designates a connection to Salesforce using OAuth authentication.\n\n- Requires the `AuthenticationConfiguration` member to be configured.\n- `VIEW_VALIDATION_REDSHIFT` - Designates a connection used for view validation by Amazon Redshift.\n- `VIEW_VALIDATION_ATHENA` - Designates a connection used for view validation by Amazon Athena.\n- `NETWORK` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).\n\n`NETWORK` Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.\n- `MARKETPLACE` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue .\n\n`MARKETPLACE` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTOR_TYPE` , `CONNECTOR_URL` , `CONNECTOR_CLASS_NAME` , `CONNECTION_URL` .\n- Required for `JDBC` `CONNECTOR_TYPE` connections: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `CUSTOM` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue .\n\n`SFTP` is not supported.\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult [AWS Glue connection properties](https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html) .\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult [Using connectors and connections](https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html) .",
        "title": "ConnectionType",
        "type": "string"
      },
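As a concrete, entirely hypothetical reading of the `JDBC` requirements listed above, a minimal `AWS::Glue::Connection` could satisfy them as follows; the endpoint, secret name, subnet, and security group IDs are placeholders, not values from this schema.

```yaml
# Hypothetical sketch only: a JDBC connection using the required
# ConnectionProperties named above. All identifiers are placeholders.
Resources:
  SampleJdbcConnection:
    Type: AWS::Glue::Connection
    Properties:
      CatalogId: !Ref AWS::AccountId
      ConnectionInput:
        Name: sample-jdbc-connection
        ConnectionType: JDBC
        ConnectionProperties:
          JDBC_CONNECTION_URL: jdbc:postgresql://db.example.com:5432/appdb
          SECRET_ID: sample/db/credentials   # stands in for USERNAME/PASSWORD
          JDBC_ENFORCE_SSL: "true"
        PhysicalConnectionRequirements:
          SubnetId: subnet-0123456789abcdef0
          SecurityGroupIdList:
            - sg-0123456789abcdef0
```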
From 15dc964a9e47e084b809985bbe898f7441e8a67b Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Tue, 27 Aug 2024 10:18:39 -0700
Subject: [PATCH 6/7] chore(schema): update (#3643)

Co-authored-by: github-actions
---
 samtranslator/schema/schema.json         |  68 +++++++--------
 schema_source/cloudformation-docs.json   | 104 +++++++++++++----------
 schema_source/cloudformation.schema.json |  68 +++++++--------
 3 files changed, 124 insertions(+), 116 deletions(-)

diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json
index 011e6f63e..88d28b5fc 100644
--- a/samtranslator/schema/schema.json
+++ b/samtranslator/schema/schema.json
@@ -571,7 +571,7 @@
      },
      "RevocationConfiguration": {
        "$ref": "#/definitions/AWS::ACMPCA::CertificateAuthority.RevocationConfiguration",
-        "markdownDescription": "Certificate revocation information used by the [CreateCertificateAuthority](https://docs.aws.amazon.com/privateca/latest/APIReference/API_CreateCertificateAuthority.html) and [UpdateCertificateAuthority](https://docs.aws.amazon.com/privateca/latest/APIReference/API_UpdateCertificateAuthority.html) actions. Your private certificate authority (CA) can configure Online Certificate Status Protocol (OCSP) support and/or maintain a certificate revocation list (CRL). OCSP returns validation information about certificates as requested by clients, and a CRL contains an updated list of certificates revoked by your CA. For more information, see [RevokeCertificate](https://docs.aws.amazon.com/privateca/latest/APIReference/API_RevokeCertificate.html) in the *AWS Private CA API Reference* and [Setting up a certificate revocation method](https://docs.aws.amazon.com/privateca/latest/userguide/revocation-setup.html) in the *AWS Private CA User Guide* .\n\n> The following requirements apply to revocation configurations.\n> \n> - A configuration disabling CRLs or OCSP must contain only the `Enabled=False` parameter, and will fail if other parameters such as `CustomCname` or `ExpirationInDays` are included.\n> - In a CRL configuration, the `S3BucketName` parameter must conform to the [Amazon S3 bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) .\n> - A configuration containing a custom Canonical Name (CNAME) parameter for CRLs or OCSP must conform to [RFC2396](https://docs.aws.amazon.com/https://www.ietf.org/rfc/rfc2396.txt) restrictions on the use of special characters in a CNAME.\n> - In a CRL or OCSP configuration, the value of a CNAME parameter must not include a protocol prefix such as \"http://\" or \"https://\".",
+        "markdownDescription": "Certificate revocation information used by the [CreateCertificateAuthority](https://docs.aws.amazon.com/privateca/latest/APIReference/API_CreateCertificateAuthority.html) and [UpdateCertificateAuthority](https://docs.aws.amazon.com/privateca/latest/APIReference/API_UpdateCertificateAuthority.html) actions. Your private certificate authority (CA) can configure Online Certificate Status Protocol (OCSP) support and/or maintain a certificate revocation list (CRL). OCSP returns validation information about certificates as requested by clients, and a CRL contains an updated list of certificates revoked by your CA. For more information, see [RevokeCertificate](https://docs.aws.amazon.com/privateca/latest/APIReference/API_RevokeCertificate.html) in the *AWS Private CA API Reference* and [Setting up a certificate revocation method](https://docs.aws.amazon.com/privateca/latest/userguide/revocation-setup.html) in the *AWS Private CA User Guide* .\n\nThe following requirements and constraints apply to revocation configurations.\n\n- A configuration disabling CRLs or OCSP must contain only the `Enabled=False` parameter, and will fail if other parameters such as `CustomCname` or `ExpirationInDays` are included.\n- In a CRL configuration, the `S3BucketName` parameter must conform to the [Amazon S3 bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) .\n- A configuration containing a custom Canonical Name (CNAME) parameter for CRLs or OCSP must conform to [RFC2396](https://docs.aws.amazon.com/https://www.ietf.org/rfc/rfc2396.txt) restrictions on the use of special characters in a CNAME.\n- In a CRL or OCSP configuration, the value of a CNAME parameter must not include a protocol prefix such as \"http://\" or \"https://\".\n- To revoke a certificate, delete the resource from your template, and call the AWS Private CA [RevokeCertificate](https://docs.aws.amazon.com/privateca/latest/APIReference/API_RevokeCertificate.html) API and specify the resource's certificate authority ARN.",
        "title": "RevocationConfiguration"
      },
      "SigningAlgorithm": {
@@ -8149,13 +8149,9 @@
      "additionalProperties": false,
      "properties": {
        "Destination": {
-          "markdownDescription": "Specifies the location of the response to modify, and how to modify it. 
To learn more, see [Transforming API requests and responses](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) .", - "title": "Destination", "type": "string" }, "Source": { - "markdownDescription": "Specifies the data to update the parameter with. To learn more, see [Transforming API requests and responses](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) .", - "title": "Source", "type": "string" } }, @@ -22755,7 +22751,7 @@ "type": "number" }, "HealthCheckType": { - "markdownDescription": "A comma-separated value string of one or more health check types.\n\nThe valid values are `EC2` , `ELB` , and `VPC_LATTICE` . `EC2` is the default health check and cannot be disabled. For more information, see [Health checks for instances in an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nOnly specify `EC2` if you must clear a value that was previously set.", + "markdownDescription": "A comma-separated value string of one or more health check types.\n\nThe valid values are `EC2` , `EBS` , `ELB` , and `VPC_LATTICE` . `EC2` is the default health check and cannot be disabled. For more information, see [Health checks for instances in an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nOnly specify `EC2` if you must clear a value that was previously set.", "title": "HealthCheckType", "type": "string" }, @@ -61733,32 +61729,32 @@ "additionalProperties": false, "properties": { "AccessPointArn": { - "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to access the Amazon EFS file system.", + "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to mount your Amazon EFS file system.\n\nFor more information, see [Accessing restricted file systems](https://docs.aws.amazon.com/datasync/latest/userguide/create-efs-location.html#create-efs-location-iam) .", "title": "AccessPointArn", "type": "string" }, "Ec2Config": { "$ref": "#/definitions/AWS::DataSync::LocationEFS.Ec2Config", - "markdownDescription": "Specifies the subnet and security groups DataSync uses to access your Amazon EFS file system.", + "markdownDescription": "Specifies the subnet and security groups DataSync uses to connect to one of your Amazon EFS file system's [mount targets](https://docs.aws.amazon.com/efs/latest/ug/accessing-fs.html) .", "title": "Ec2Config" }, "EfsFilesystemArn": { - "markdownDescription": "Specifies the ARN for the Amazon EFS file system.", + "markdownDescription": "Specifies the ARN for your Amazon EFS file system.", "title": "EfsFilesystemArn", "type": "string" }, "FileSystemAccessRoleArn": { - "markdownDescription": "Specifies an AWS Identity and Access Management (IAM) role that DataSync assumes when mounting the Amazon EFS file system.", + "markdownDescription": "Specifies an AWS Identity and Access Management (IAM) role that allows DataSync to access your Amazon EFS file system.\n\nFor information on creating this role, see [Creating a DataSync IAM role for file system access](https://docs.aws.amazon.com/datasync/latest/userguide/create-efs-location.html#create-efs-location-iam-role) .", "title": "FileSystemAccessRoleArn", "type": "string" }, "InTransitEncryption": { - "markdownDescription": "Specifies whether you 
want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it copies data to or from the Amazon EFS file system.\n\nIf you specify an access point using `AccessPointArn` or an IAM role using `FileSystemAccessRoleArn` , you must set this parameter to `TLS1_2` .", + "markdownDescription": "Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it transfers data to or from your Amazon EFS file system.\n\nIf you specify an access point using `AccessPointArn` or an IAM role using `FileSystemAccessRoleArn` , you must set this parameter to `TLS1_2` .", "title": "InTransitEncryption", "type": "string" }, "Subdirectory": { - "markdownDescription": "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location). By default, DataSync uses the root directory, but you can also include subdirectories.\n\n> You must specify a value with forward slashes (for example, `/path/to/folder` ).", + "markdownDescription": "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location) on your file system.\n\nBy default, DataSync uses the root directory (or [access point](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) if you provide one by using `AccessPointArn` ). You can also include subdirectories using forward slashes (for example, `/path/to/folder` ).", "title": "Subdirectory", "type": "string" }, @@ -63501,7 +63497,7 @@ "additionalProperties": false, "properties": { "ScheduleExpression": { - "markdownDescription": "Specifies your task schedule by using a cron expression in UTC time. For information about cron expression syntax, see the [*Amazon EventBridge User Guide*](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-cron-expressions.html) .", + "markdownDescription": "Specifies your task schedule by using a cron or rate expression.\n\nUse cron expressions for task schedules that run on a specific time and day. For example, the following cron expression creates a task schedule that runs at 8 AM on the first Wednesday of every month:\n\n`cron(0 8 * * 3#1)`\n\nUse rate expressions for task schedules that run on a regular interval. For example, the following rate expression creates a task schedule that runs every 12 hours:\n\n`rate(12 hours)`\n\nFor information about cron and rate expression syntax, see the [*Amazon EventBridge User Guide*](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-scheduled-rule-pattern.html) .", "title": "ScheduleExpression", "type": "string" }, @@ -76354,12 +76350,12 @@ "items": { "$ref": "#/definitions/AWS::EC2::PrefixList.Entry" }, - "markdownDescription": "One or more entries for the prefix list.", + "markdownDescription": "The entries for the prefix list.", "title": "Entries", "type": "array" }, "MaxEntries": { - "markdownDescription": "The maximum number of entries for the prefix list.", + "markdownDescription": "The maximum number of entries for the prefix list. This property is required when you create a prefix list.", "title": "MaxEntries", "type": "number" }, @@ -83103,7 +83099,7 @@ "type": "number" }, "MaximumScalingStepSize": { - "markdownDescription": "The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. 
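The revised `AWS::DataSync::LocationEFS` descriptions above cover mounting through an EFS access point with an IAM role. A minimal template sketch under those descriptions — all ARNs and the role name are placeholders, and `InTransitEncryption` must be `TLS1_2` whenever `AccessPointArn` or `FileSystemAccessRoleArn` is set:

```yaml
Resources:
  RestrictedEfsLocation:
    Type: AWS::DataSync::LocationEFS
    Properties:
      EfsFilesystemArn: arn:aws:elasticfilesystem:us-east-1:111122223333:file-system/fs-0123456789abcdef0
      Ec2Config:
        SubnetArn: arn:aws:ec2:us-east-1:111122223333:subnet/subnet-0123456789abcdef0
        SecurityGroupArns:
          - arn:aws:ec2:us-east-1:111122223333:security-group/sg-0123456789abcdef0
      # Mount through an access point, using the IAM role below for file system access
      AccessPointArn: arn:aws:elasticfilesystem:us-east-1:111122223333:access-point/fsap-0123456789abcdef0
      FileSystemAccessRoleArn: arn:aws:iam::111122223333:role/DataSyncEfsAccess
      InTransitEncryption: TLS1_2  # required once AccessPointArn or FileSystemAccessRoleArn is set
      Subdirectory: /path/to/folder
```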
If this parameter is omitted, the default value of `10000` is used.", + "markdownDescription": "The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. If this parameter is omitted, the default value of `10000` is used.", "title": "MaximumScalingStepSize", "type": "number" }, @@ -83735,7 +83731,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `AwsVpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", + "markdownDescription": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `awsvpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", "title": "SecurityGroups", "type": "array" }, @@ -83743,7 +83739,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `AwsVpcConfiguration` .\n\n> All specified subnets must be from the same VPC.", + "markdownDescription": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `awsvpcConfiguration` .\n\n> All specified subnets must be from the same VPC.", "title": "Subnets", "type": "array" } @@ -83834,12 +83830,12 @@ "title": "DeploymentCircuitBreaker" }, "MaximumPercent": { - "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and tasks that use the EC2 launch type, the *maximum percent* value is set to the default value and is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state. If the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.", + "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. 
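The `awsvpcConfiguration` limits described above (at most 5 security groups and 16 subnets, all from the same VPC) sit on a service's network configuration. A sketch with placeholder cluster, task definition, and network IDs:

```yaml
Resources:
  AppService:
    Type: AWS::ECS::Service
    Properties:
      Cluster: app-cluster          # placeholder cluster name
      TaskDefinition: app-task      # placeholder task definition family
      DesiredCount: 2
      LaunchType: FARGATE
      NetworkConfiguration:
        AwsvpcConfiguration:
          AssignPublicIp: DISABLED  # the default
          SecurityGroups:           # up to 5, all from one VPC
            - sg-0123456789abcdef0
          Subnets:                  # up to 16, all from one VPC
            - subnet-0123456789abcdef0
            - subnet-0123456789abcdef1
```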
For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types, and tasks in the service use the EC2 launch type, the *maximum percent* value is set to the default value. The *maximum percent* value is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `maximumPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. \n\nIf the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.", "title": "MaximumPercent", "type": "number" }, "MinimumHealthyPercent": { - "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nThe default value for a replica service for `minimumHealthyPercent` is 100%. 
The default `minimumHealthyPercent` value for a service using the `DAEMON` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n\nThe minimum number of healthy tasks during a deployment is the `desiredCount` multiplied by the `minimumHealthyPercent` /100, rounded up to the nearest integer value.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value and is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state. If a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.", + "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nThe default value for a replica service for `minimumHealthyPercent` is 100%. 
The default `minimumHealthyPercent` value for a service using the `DAEMON` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n\nThe minimum number of healthy tasks during a deployment is the `desiredCount` multiplied by the `minimumHealthyPercent` /100, rounded up to the nearest integer value.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value. The *minimum healthy percent* value is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `minimumHealthyPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. \n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.", "title": "MinimumHealthyPercent", "type": "number" } @@ -84586,7 +84582,7 @@ }, "LogConfiguration": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.LogConfiguration", - "markdownDescription": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . By default, containers use the same logging driver that the Docker daemon uses. However, the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the [LogConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html) data type). Additional log drivers may be available in future releases of the Amazon ECS container agent. \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. 
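The `maximumPercent` and `minimumHealthyPercent` semantics above apply to the rolling update ( `ECS` ) deployment type. A properties fragment for an `AWS::ECS::Service`, shown with the `REPLICA` scheduler defaults (200% and 100%):

```yaml
# Properties fragment for an AWS::ECS::Service using the rolling update controller
DeploymentController:
  Type: ECS
DeploymentConfiguration:
  MaximumPercent: 200          # up to 2x desiredCount may be RUNNING/PENDING mid-deployment
  MinimumHealthyPercent: 100   # never drop below desiredCount
  DeploymentCircuitBreaker:
    Enable: true
    Rollback: true             # roll back to the last successful deployment on failure
```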
For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", + "markdownDescription": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the docker Create a container command and the `--log-driver` option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However, the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the [LogConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html) data type). Additional log drivers may be available in future releases of the Amazon ECS container agent. \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", "title": "LogConfiguration" }, "Memory": { @@ -84855,12 +84851,12 @@ "additionalProperties": false, "properties": { "CredentialsParameter": { - "markdownDescription": "The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of an AWS Secrets Manager secret or SSM Parameter Store parameter. The ARN refers to the stored credentials.", + "markdownDescription": "", "title": "CredentialsParameter", "type": "string" }, "Domain": { - "markdownDescription": "A fully qualified domain name hosted by an [AWS Directory Service](https://docs.aws.amazon.com/directoryservice/latest/admin-guide/directory_microsoft_ad.html) Managed Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2.", + "markdownDescription": "", "title": "Domain", "type": "string" } @@ -85544,7 +85540,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `AwsVpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", + "markdownDescription": "The IDs of the security groups associated with the task or service. 
If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `awsvpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", "title": "SecurityGroups", "type": "array" }, @@ -85552,7 +85548,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `AwsVpcConfiguration` .\n\n> All specified subnets must be from the same VPC.", + "markdownDescription": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `awsvpcConfiguration` .\n\n> All specified subnets must be from the same VPC.", "title": "Subnets", "type": "array" } @@ -117723,7 +117719,7 @@ "items": { "type": "string" }, - "markdownDescription": "Tags for Image Builder to apply to the output container image that &INS; scans. Tags can help you identify and manage your scanned images.", + "markdownDescription": "Tags for Image Builder to apply to the output container image that Amazon Inspector scans. Tags can help you identify and manage your scanned images.", "title": "ContainerTags", "type": "array" }, @@ -117964,7 +117960,7 @@ "items": { "type": "string" }, - "markdownDescription": "Tags for Image Builder to apply to the output container image that &INS; scans. Tags can help you identify and manage your scanned images.", + "markdownDescription": "Tags for Image Builder to apply to the output container image that Amazon Inspector scans. Tags can help you identify and manage your scanned images.", "title": "ContainerTags", "type": "array" }, @@ -142683,7 +142679,7 @@ "items": { "type": "string" }, - "markdownDescription": "(Streams and SQS) A list of current response type enums applied to the event source mapping.\n\nValid Values: `ReportBatchItemFailures`", + "markdownDescription": "(Kinesis, DynamoDB Streams, and SQS) A list of current response type enums applied to the event source mapping.\n\nValid Values: `ReportBatchItemFailures`", "title": "FunctionResponseTypes", "type": "array" }, @@ -142982,7 +142978,7 @@ }, "Code": { "$ref": "#/definitions/AWS::Lambda::Function.Code", - "markdownDescription": "The code for the function.", + "markdownDescription": "The code for the function. You can define your function code in multiple ways:\n\n- For .zip deployment packages, you can specify the Amazon S3 location of the .zip file in the `S3Bucket` , `S3Key` , and `S3ObjectVersion` properties.\n- For .zip deployment packages, you can alternatively define the function code inline in the `ZipFile` property. This method works only for Node.js and Python functions.\n- For container images, specify the URI of your container image in the Amazon ECR registry in the `ImageUri` property.", "title": "Code" }, "CodeSigningConfigArn": { @@ -240121,7 +240117,7 @@ "type": "string" }, "KmsKeyArn": { - "markdownDescription": "The customer master key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket. You can use the default master key or a custom master key that you created in AWS KMS as follows:\n\n- To use the default master key, provide an ARN in the form of `arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses` . 
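The expanded `Code` description above lists the three packaging options for a Lambda function. A sketch of the S3 .zip variant, with placeholder bucket, key, and execution role:

```yaml
Resources:
  Handler:
    Type: AWS::Lambda::Function
    Properties:
      Runtime: python3.12
      Handler: index.handler
      Role: arn:aws:iam::111122223333:role/lambda-exec   # placeholder execution role
      Code:
        S3Bucket: amzn-s3-demo-bucket
        S3Key: handler-v1.zip
        # Alternatives: ZipFile (inline, Node.js and Python only) or ImageUri (container image)
```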
For example, if your AWS account ID is 123456789012 and you want to use the default master key in the US West (Oregon) Region, the ARN of the default master key would be `arn:aws:kms:us-west-2:123456789012:alias/aws/ses` . If you use the default master key, you don't need to perform any extra steps to give Amazon SES permission to use the key.\n- To use a custom master key that you created in AWS KMS, provide the ARN of the master key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. For more information about giving permissions, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/dg/receiving-email-permissions.html) .\n\nFor more information about key policies, see the [AWS KMS Developer Guide](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html) . If you do not specify a master key, Amazon SES does not encrypt your emails.\n\n> Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your AWS KMS keys for decryption. This encryption client is currently available with the [AWS SDK for Java](https://docs.aws.amazon.com/sdk-for-java/) and [AWS SDK for Ruby](https://docs.aws.amazon.com/sdk-for-ruby/) only. For more information about client-side encryption using AWS KMS master keys, see the [Amazon S3 Developer Guide](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) .", + "markdownDescription": "The customer managed key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket. You can use the AWS managed key or a customer managed key that you created in AWS KMS as follows:\n\n- To use the AWS managed key, provide an ARN in the form of `arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses` . For example, if your AWS account ID is 123456789012 and you want to use the AWS managed key in the US West (Oregon) Region, the ARN of the AWS managed key would be `arn:aws:kms:us-west-2:123456789012:alias/aws/ses` . If you use the AWS managed key, you don't need to perform any extra steps to give Amazon SES permission to use the key.\n- To use a customer managed key that you created in AWS KMS, provide the ARN of the customer managed key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. For more information about giving permissions, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/dg/receiving-email-permissions.html) .\n\nFor more information about key policies, see the [AWS KMS Developer Guide](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html) . If you do not specify an AWS KMS key, Amazon SES does not encrypt your emails.\n\n> Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your AWS KMS keys for decryption. 
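The `KmsKeyArn` behavior described above is configured on an `S3Action` of a receipt rule. A sketch using the AWS managed key ARN form from the description; the rule set name, bucket, and account ID are placeholders:

```yaml
Resources:
  StoreRule:
    Type: AWS::SES::ReceiptRule
    Properties:
      RuleSetName: inbound-rules      # placeholder; an existing receipt rule set
      Rule:
        Name: store-to-s3
        Enabled: true
        Actions:
          - S3Action:
              BucketName: amzn-s3-demo-bucket
              ObjectKeyPrefix: inbox/
              # AWS managed key; retrieved mail still needs client-side decryption
              KmsKeyArn: arn:aws:kms:us-west-2:123456789012:alias/aws/ses
```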
This encryption client is currently available with the [AWS SDK for Java](https://docs.aws.amazon.com/sdk-for-java/) and [AWS SDK for Ruby](https://docs.aws.amazon.com/sdk-for-ruby/) only. For more information about client-side encryption using AWS KMS managed keys, see the [Amazon S3 Developer Guide](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) .", "title": "KmsKeyArn", "type": "string" }, @@ -240601,12 +240597,12 @@ "additionalProperties": false, "properties": { "ArchivePolicy": { - "markdownDescription": "The archive policy determines the number of days Amazon SNS retains messages. You can set a retention period from 1 to 365 days.", + "markdownDescription": "The `ArchivePolicy` determines the number of days Amazon SNS retains messages in FIFO topics. You can set a retention period ranging from 1 to 365 days. This property is only applicable to FIFO topics; attempting to use it with standard topics will result in a creation failure.", "title": "ArchivePolicy", "type": "object" }, "ContentBasedDeduplication": { - "markdownDescription": "Enables content-based deduplication for FIFO topics.\n\n- By default, `ContentBasedDeduplication` is set to `false` . If you create a FIFO topic and this attribute is `false` , you must specify a value for the `MessageDeduplicationId` parameter for the [Publish](https://docs.aws.amazon.com/sns/latest/api/API_Publish.html) action.\n- When you set `ContentBasedDeduplication` to `true` , Amazon SNS uses a SHA-256 hash to generate the `MessageDeduplicationId` using the body of the message (but not the attributes of the message).\n\n(Optional) To override the generated value, you can specify a value for the the `MessageDeduplicationId` parameter for the `Publish` action.", + "markdownDescription": "`ContentBasedDeduplication` enables deduplication of messages based on their content for FIFO topics. By default, this property is set to false. If you create a FIFO topic with `ContentBasedDeduplication` set to false, you must provide a `MessageDeduplicationId` for each `Publish` action. When set to true, Amazon SNS automatically generates a `MessageDeduplicationId` using a SHA-256 hash of the message body (excluding message attributes). You can optionally override this generated value by specifying a `MessageDeduplicationId` in the `Publish` action. Note that this property only applies to FIFO topics; using it with standard topics will cause the creation to fail.", "title": "ContentBasedDeduplication", "type": "boolean" }, @@ -241533,7 +241529,7 @@ "items": { "type": "string" }, - "markdownDescription": "The value of a key-value pair that identifies the location of an attachment to a document. The format for *Value* depends on the type of key you specify.\n\n- For the key *SourceUrl* , the value is an S3 bucket location. For example:\n\n`\"Values\": [ \"s3://doc-example-bucket/my-folder\" ]`\n- For the key *S3FileUrl* , the value is a file in an S3 bucket. For example:\n\n`\"Values\": [ \"s3://doc-example-bucket/my-folder/my-file.py\" ]`\n- For the key *AttachmentReference* , the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse. For example:\n\n`\"Values\": [ \"MyOtherDocument/3/my-other-file.py\" ]`\n\nHowever, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. 
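The `ArchivePolicy` and `ContentBasedDeduplication` notes above apply only to FIFO topics; on a standard topic either property fails the creation. A sketch, assuming a 30-day retention period:

```yaml
Resources:
  OrdersTopic:
    Type: AWS::SNS::Topic
    Properties:
      TopicName: orders.fifo          # FIFO topic names must end in .fifo
      FifoTopic: true
      ContentBasedDeduplication: true # otherwise every Publish needs a MessageDeduplicationId
      ArchivePolicy:
        MessageRetentionPeriod: "30"  # days, 1-365
```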
For example:\n\n`\"Values\": [ \"arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py\" ]`", + "markdownDescription": "The value of a key-value pair that identifies the location of an attachment to a document. The format for *Value* depends on the type of key you specify.\n\n- For the key *SourceUrl* , the value is an S3 bucket location. For example:\n\n`\"Values\": [ \"s3://amzn-s3-demo-bucket/my-prefix\" ]`\n- For the key *S3FileUrl* , the value is a file in an S3 bucket. For example:\n\n`\"Values\": [ \"s3://amzn-s3-demo-bucket/my-prefix/my-file.py\" ]`\n- For the key *AttachmentReference* , the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse. For example:\n\n`\"Values\": [ \"MyOtherDocument/3/my-other-file.py\" ]`\n\nHowever, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. For example:\n\n`\"Values\": [ \"arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py\" ]`", "title": "Values", "type": "array" } @@ -242494,7 +242490,7 @@ "type": "number" }, "ApproveUntilDate": { - "markdownDescription": "The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically.\n\nEnter dates in the format `YYYY-MM-DD` . For example, `2021-12-31` .\n\n> This parameter is marked as not required, but your request must include a value for either `ApproveUntilDate` or `ApproveAfterDays` . \n\nNot supported for Debian Server or Ubuntu Server.", + "markdownDescription": "The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically.\n\nEnter dates in the format `YYYY-MM-DD` . For example, `2024-12-31` .\n\n> This parameter is marked as not required, but your request must include a value for either `ApproveUntilDate` or `ApproveAfterDays` . \n\nNot supported for Debian Server or Ubuntu Server.", "title": "ApproveUntilDate", "type": "string" }, @@ -248158,7 +248154,7 @@ "type": "string" }, "Environment": { - "markdownDescription": "The environment variables to set in the Docker container.\n\nThe maximum length of each key and value in the `Environment` map is 1024 bytes. The maximum length of all keys and values in the map, combined, is 32 KB. If you pass multiple containers to a `CreateModel` request, then the maximum length of all of their maps, combined, is also 32 KB.", + "markdownDescription": "The environment variables to set in the Docker container. Don't include any sensitive data in your environment variables.\n\nThe maximum length of each key and value in the `Environment` map is 1024 bytes. The maximum length of all keys and values in the map, combined, is 32 KB. If you pass multiple containers to a `CreateModel` request, then the maximum length of all of their maps, combined, is also 32 KB.", "title": "Environment", "type": "object" }, @@ -254961,7 +254957,7 @@ "type": "string" }, "TargetType": { - "markdownDescription": "A string that defines the type of service or database associated with the secret. This value instructs Secrets Manager how to update the secret with the details of the service or database. 
This value must be one of the following:\n\n- AWS::RDS::DBInstance\n- AWS::RDS::DBCluster\n- AWS::Redshift::Cluster\n- AWS::DocDB::DBInstance\n- AWS::DocDB::DBCluster", + "markdownDescription": "A string that defines the type of service or database associated with the secret. This value instructs Secrets Manager how to update the secret with the details of the service or database. This value must be one of the following:\n\n- AWS::RDS::DBInstance\n- AWS::RDS::DBCluster\n- AWS::Redshift::Cluster\n- AWS::DocDB::DBInstance\n- AWS::DocDB::DBCluster\n- AWS::DocDBElastic::Cluster", "title": "TargetType", "type": "string" } diff --git a/schema_source/cloudformation-docs.json b/schema_source/cloudformation-docs.json index fa7088785..a07c5cca2 100644 --- a/schema_source/cloudformation-docs.json +++ b/schema_source/cloudformation-docs.json @@ -98,7 +98,7 @@ "CsrExtensions": "Specifies information to be added to the extension section of the certificate signing request (CSR).", "KeyAlgorithm": "Type of the public key algorithm and size, in bits, of the key pair that your CA creates when it issues a certificate. When you create a subordinate CA, you must use a key algorithm supported by the parent CA.", "KeyStorageSecurityStandard": "Specifies a cryptographic key management compliance standard used for handling CA keys.\n\nDefault: FIPS_140_2_LEVEL_3_OR_HIGHER\n\n> Some AWS Regions do not support the default. When creating a CA in these Regions, you must provide `FIPS_140_2_LEVEL_2_OR_HIGHER` as the argument for `KeyStorageSecurityStandard` . Failure to do this results in an `InvalidArgsException` with the message, \"A certificate authority cannot be created in this region with the specified security standard.\"\n> \n> For information about security standard support in various Regions, see [Storage and security compliance of AWS Private CA private keys](https://docs.aws.amazon.com/privateca/latest/userguide/data-protection.html#private-keys) .", - "RevocationConfiguration": "Certificate revocation information used by the [CreateCertificateAuthority](https://docs.aws.amazon.com/privateca/latest/APIReference/API_CreateCertificateAuthority.html) and [UpdateCertificateAuthority](https://docs.aws.amazon.com/privateca/latest/APIReference/API_UpdateCertificateAuthority.html) actions. Your private certificate authority (CA) can configure Online Certificate Status Protocol (OCSP) support and/or maintain a certificate revocation list (CRL). OCSP returns validation information about certificates as requested by clients, and a CRL contains an updated list of certificates revoked by your CA. 
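The `TargetType` list above (now including `AWS::DocDBElastic::Cluster`) is used when attaching a secret to a database resource so Secrets Manager can fill in connection details. A sketch assuming a `DbSecret` and a `Database` defined elsewhere in the same template:

```yaml
Resources:
  SecretAttachment:
    Type: AWS::SecretsManager::SecretTargetAttachment
    Properties:
      SecretId: !Ref DbSecret        # assumed AWS::SecretsManager::Secret
      TargetId: !Ref Database        # assumed AWS::RDS::DBInstance
      TargetType: AWS::RDS::DBInstance
```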
For more information, see [RevokeCertificate](https://docs.aws.amazon.com/privateca/latest/APIReference/API_RevokeCertificate.html) in the *AWS Private CA API Reference* and [Setting up a certificate revocation method](https://docs.aws.amazon.com/privateca/latest/userguide/revocation-setup.html) in the *AWS Private CA User Guide* .\n\n> The following requirements apply to revocation configurations.\n> \n> - A configuration disabling CRLs or OCSP must contain only the `Enabled=False` parameter, and will fail if other parameters such as `CustomCname` or `ExpirationInDays` are included.\n> - In a CRL configuration, the `S3BucketName` parameter must conform to the [Amazon S3 bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) .\n> - A configuration containing a custom Canonical Name (CNAME) parameter for CRLs or OCSP must conform to [RFC2396](https://docs.aws.amazon.com/https://www.ietf.org/rfc/rfc2396.txt) restrictions on the use of special characters in a CNAME.\n> - In a CRL or OCSP configuration, the value of a CNAME parameter must not include a protocol prefix such as \"http://\" or \"https://\".", + "RevocationConfiguration": "Certificate revocation information used by the [CreateCertificateAuthority](https://docs.aws.amazon.com/privateca/latest/APIReference/API_CreateCertificateAuthority.html) and [UpdateCertificateAuthority](https://docs.aws.amazon.com/privateca/latest/APIReference/API_UpdateCertificateAuthority.html) actions. Your private certificate authority (CA) can configure Online Certificate Status Protocol (OCSP) support and/or maintain a certificate revocation list (CRL). OCSP returns validation information about certificates as requested by clients, and a CRL contains an updated list of certificates revoked by your CA. 
For more information, see [RevokeCertificate](https://docs.aws.amazon.com/privateca/latest/APIReference/API_RevokeCertificate.html) in the *AWS Private CA API Reference* and [Setting up a certificate revocation method](https://docs.aws.amazon.com/privateca/latest/userguide/revocation-setup.html) in the *AWS Private CA User Guide* .\n\nThe following requirements and constraints apply to revocation configurations.\n\n- A configuration disabling CRLs or OCSP must contain only the `Enabled=False` parameter, and will fail if other parameters such as `CustomCname` or `ExpirationInDays` are included.\n- In a CRL configuration, the `S3BucketName` parameter must conform to the [Amazon S3 bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) .\n- A configuration containing a custom Canonical Name (CNAME) parameter for CRLs or OCSP must conform to [RFC2396](https://docs.aws.amazon.com/https://www.ietf.org/rfc/rfc2396.txt) restrictions on the use of special characters in a CNAME.\n- In a CRL or OCSP configuration, the value of a CNAME parameter must not include a protocol prefix such as \"http://\" or \"https://\".\n- To revoke a certificate, delete the resource from your template, and call the AWS Private CA [RevokeCertificate](https://docs.aws.amazon.com/privateca/latest/APIReference/API_RevokeCertificate.html) API and specify the resource's certificate authority ARN.", "SigningAlgorithm": "Name of the algorithm your private CA uses to sign certificate requests.\n\nThis parameter should not be confused with the `SigningAlgorithm` parameter used to sign certificates when they are issued.", "Subject": "Structure that contains X.500 distinguished name information for your private CA.", "Tags": "Key-value pairs that will be attached to the new private CA. You can associate up to 50 tags with a private CA. For information using tags with IAM to manage permissions, see [Controlling Access Using IAM Tags](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html) .", @@ -1194,10 +1194,6 @@ "TimeoutInMillis": "Custom timeout between 50 and 29,000 milliseconds for WebSocket APIs and between 50 and 30,000 milliseconds for HTTP APIs. The default timeout is 29 seconds for WebSocket APIs and 30 seconds for HTTP APIs.", "TlsConfig": "The TLS configuration for a private integration. If you specify a TLS configuration, private integration traffic uses the HTTPS protocol. Supported only for HTTP APIs." }, - "AWS::ApiGatewayV2::Integration ResponseParameter": { - "Destination": "Specifies the location of the response to modify, and how to modify it. To learn more, see [Transforming API requests and responses](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) .", - "Source": "Specifies the data to update the parameter with. To learn more, see [Transforming API requests and responses](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) ." - }, "AWS::ApiGatewayV2::Integration TlsConfig": { "ServerNameToVerify": "If you specify a server name, API Gateway uses it to verify the hostname on the integration's certificate. The server name is also included in the TLS handshake to support Server Name Indication (SNI) or virtual hosting." }, @@ -3782,7 +3778,7 @@ "DesiredCapacity": "The desired capacity is the initial capacity of the Auto Scaling group at the time of its creation and the capacity it attempts to maintain. 
It can scale beyond this capacity if you configure automatic scaling.\n\nThe number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group. If you do not specify a desired capacity when creating the stack, the default is the minimum size of the group.\n\nCloudFormation marks the Auto Scaling group as successful (by setting its status to CREATE_COMPLETE) when the desired capacity is reached. However, if a maximum Spot price is set in the launch template or launch configuration that you specified, then desired capacity is not used as a criteria for success. Whether your request is fulfilled depends on Spot Instance capacity and your maximum price.", "DesiredCapacityType": "The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling supports `DesiredCapacityType` for attribute-based instance type selection only. For more information, see [Create a mixed instances group using attribute-based instance type selection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-mixed-instances-group-attribute-based-instance-type-selection.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nBy default, Amazon EC2 Auto Scaling specifies `units` , which translates into number of instances.\n\nValid values: `units` | `vcpu` | `memory-mib`", "HealthCheckGracePeriod": "The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service and marking it unhealthy due to a failed health check. This is useful if your instances do not immediately pass their health checks after they enter the `InService` state. For more information, see [Set the health check grace period for an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/health-check-grace-period.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nDefault: `0` seconds", - "HealthCheckType": "A comma-separated value string of one or more health check types.\n\nThe valid values are `EC2` , `ELB` , and `VPC_LATTICE` . `EC2` is the default health check and cannot be disabled. For more information, see [Health checks for instances in an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nOnly specify `EC2` if you must clear a value that was previously set.", + "HealthCheckType": "A comma-separated value string of one or more health check types.\n\nThe valid values are `EC2` , `EBS` , `ELB` , and `VPC_LATTICE` . `EC2` is the default health check and cannot be disabled. For more information, see [Health checks for instances in an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nOnly specify `EC2` if you must clear a value that was previously set.", "InstanceId": "The ID of the instance used to base the launch configuration on. For more information, see [Create an Auto Scaling group using an EC2 instance](https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-from-instance.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nIf you specify `LaunchTemplate` , `MixedInstancesPolicy` , or `LaunchConfigurationName` , don't specify `InstanceId` .", "InstanceMaintenancePolicy": "An instance maintenance policy. 
For more information, see [Set instance maintenance policy](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-maintenance-policy.html) in the *Amazon EC2 Auto Scaling User Guide* .", "LaunchConfigurationName": "The name of the launch configuration to use to launch instances.\n\nRequired only if you don't specify `LaunchTemplate` , `MixedInstancesPolicy` , or `InstanceId` .", @@ -7126,6 +7122,7 @@ "EnvironmentType": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", "FleetServiceRole": "The service role associated with the compute fleet. For more information, see [Allow a user to add a permission policy for a fleet service role](https://docs.aws.amazon.com/codebuild/latest/userguide/auth-and-access-control-iam-identity-based-access-control.html#customer-managed-policies-example-permission-policy-fleet-service-role.html) in the *AWS CodeBuild User Guide* .", "FleetVpcConfig": "Information about the VPC configuration that AWS CodeBuild accesses.", + "ImageId": "The Amazon Machine Image (AMI) of the compute fleet.", "Name": "The name of the compute fleet.", "OverflowBehavior": "The compute fleet overflow behavior.\n\n- For overflow behavior `QUEUE` , your overflow builds need to wait on the existing fleet instance to become available.\n- For overflow behavior `ON_DEMAND` , your overflow builds run on CodeBuild on-demand.\n\n> If you choose to set your overflow behavior to on-demand while creating a VPC-connected fleet, make sure that you add the required VPC permissions to your project service role. For more information, see [Example policy statement to allow CodeBuild access to AWS services required to create a VPC network interface](https://docs.aws.amazon.com/codebuild/latest/userguide/auth-and-access-control-iam-identity-based-access-control.html#customer-managed-policies-example-create-vpc-network-interface) .", "Tags": "A list of tag key and value pairs associated with this compute fleet.\n\nThese tags are available for use by AWS services that support AWS CodeBuild compute fleet tags." 
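The `HealthCheckType` change above adds `EBS` to the comma-separated values alongside `EC2`, `ELB`, and `VPC_LATTICE`. A group sketch assuming a launch template resource named `AppLaunchTemplate` elsewhere in the template and a placeholder subnet:

```yaml
Resources:
  AppGroup:
    Type: AWS::AutoScaling::AutoScalingGroup
    Properties:
      MinSize: "1"
      MaxSize: "4"
      DesiredCapacity: "2"
      HealthCheckType: ELB,EBS       # comma-separated; EC2 checks always run and cannot be disabled
      HealthCheckGracePeriod: 300    # seconds before a new instance is health-checked
      VPCZoneIdentifier:
        - subnet-0123456789abcdef0   # placeholder
      LaunchTemplate:
        LaunchTemplateId: !Ref AppLaunchTemplate
        Version: !GetAtt AppLaunchTemplate.LatestVersionNumber
```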
@@ -10422,12 +10419,12 @@ "Value": "The value for an AWS resource tag." }, "AWS::DataSync::LocationEFS": { - "AccessPointArn": "Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to access the Amazon EFS file system.", - "Ec2Config": "Specifies the subnet and security groups DataSync uses to access your Amazon EFS file system.", - "EfsFilesystemArn": "Specifies the ARN for the Amazon EFS file system.", - "FileSystemAccessRoleArn": "Specifies an AWS Identity and Access Management (IAM) role that DataSync assumes when mounting the Amazon EFS file system.", - "InTransitEncryption": "Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it copies data to or from the Amazon EFS file system.\n\nIf you specify an access point using `AccessPointArn` or an IAM role using `FileSystemAccessRoleArn` , you must set this parameter to `TLS1_2` .", - "Subdirectory": "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location). By default, DataSync uses the root directory, but you can also include subdirectories.\n\n> You must specify a value with forward slashes (for example, `/path/to/folder` ).", + "AccessPointArn": "Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to mount your Amazon EFS file system.\n\nFor more information, see [Accessing restricted file systems](https://docs.aws.amazon.com/datasync/latest/userguide/create-efs-location.html#create-efs-location-iam) .", + "Ec2Config": "Specifies the subnet and security groups DataSync uses to connect to one of your Amazon EFS file system's [mount targets](https://docs.aws.amazon.com/efs/latest/ug/accessing-fs.html) .", + "EfsFilesystemArn": "Specifies the ARN for your Amazon EFS file system.", + "FileSystemAccessRoleArn": "Specifies an AWS Identity and Access Management (IAM) role that allows DataSync to access your Amazon EFS file system.\n\nFor information on creating this role, see [Creating a DataSync IAM role for file system access](https://docs.aws.amazon.com/datasync/latest/userguide/create-efs-location.html#create-efs-location-iam-role) .", + "InTransitEncryption": "Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it transfers data to or from your Amazon EFS file system.\n\nIf you specify an access point using `AccessPointArn` or an IAM role using `FileSystemAccessRoleArn` , you must set this parameter to `TLS1_2` .", + "Subdirectory": "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location) on your file system.\n\nBy default, DataSync uses the root directory (or [access point](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) if you provide one by using `AccessPointArn` ). You can also include subdirectories using forward slashes (for example, `/path/to/folder` ).", "Tags": "Specifies the key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location." }, "AWS::DataSync::LocationEFS Ec2Config": { @@ -10703,7 +10700,7 @@ "Subdirectory": "Specifies a bucket prefix for your report." }, "AWS::DataSync::Task TaskSchedule": { - "ScheduleExpression": "Specifies your task schedule by using a cron expression in UTC time. 
For information about cron expression syntax, see the [*Amazon EventBridge User Guide*](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-cron-expressions.html) .", + "ScheduleExpression": "Specifies your task schedule by using a cron or rate expression.\n\nUse cron expressions for task schedules that run on a specific time and day. For example, the following cron expression creates a task schedule that runs at 8 AM on the first Wednesday of every month:\n\n`cron(0 8 * * 3#1)`\n\nUse rate expressions for task schedules that run on a regular interval. For example, the following rate expression creates a task schedule that runs every 12 hours:\n\n`rate(12 hours)`\n\nFor information about cron and rate expression syntax, see the [*Amazon EventBridge User Guide*](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-scheduled-rule-pattern.html) .", "Status": "Specifies whether to enable or disable your task schedule. Your schedule is enabled by default, but there can be situations where you need to disable it. For example, you might need to perform maintenance on a storage system before you can begin a recurring DataSync transfer.\n\nDataSync might disable your schedule automatically if your task fails repeatedly with the same error. For more information, see the [*DataSync User Guide*](https://docs.aws.amazon.com/datasync/latest/userguide/task-scheduling.html#pause-task-schedule) ." }, "AWS::DataSync::Task Transferred": { @@ -12108,8 +12105,8 @@ "DocumentName": "The name of an SSM document to associate with the instance." }, "AWS::EC2::Instance State": { - "Code": "", - "Name": "" + "Code": "The state of the instance as a 16-bit unsigned integer.\n\nThe high byte is all of the bits between 2^8 and (2^16)-1, which equals decimal values between 256 and 65,535. These numerical values are used for internal purposes and should be ignored.\n\nThe low byte is all of the bits between 2^0 and (2^8)-1, which equals decimal values between 0 and 255.\n\nThe valid values for instance-state-code will all be in the range of the low byte and they are:\n\n- `0` : `pending`\n- `16` : `running`\n- `32` : `shutting-down`\n- `48` : `terminated`\n- `64` : `stopping`\n- `80` : `stopped`\n\nYou can ignore the high byte value by zeroing out all of the bits above 2^8 or 256 in decimal.", + "Name": "The current state of the instance." }, "AWS::EC2::Instance Tag": { "Key": "The tag key.", @@ -12777,8 +12774,8 @@ }, "AWS::EC2::PrefixList": { "AddressFamily": "The IP address type.\n\nValid Values: `IPv4` | `IPv6`", - "Entries": "One or more entries for the prefix list.", - "MaxEntries": "The maximum number of entries for the prefix list.", + "Entries": "The entries for the prefix list.", + "MaxEntries": "The maximum number of entries for the prefix list. This property is required when you create a prefix list.", "PrefixListName": "A name for the prefix list.\n\nConstraints: Up to 255 characters in length. The name cannot start with `com.amazonaws` .", "Tags": "The tags for the prefix list." }, @@ -13666,7 +13663,7 @@ }, "AWS::ECS::CapacityProvider ManagedScaling": { "InstanceWarmupPeriod": "The period of time, in seconds, after a newly launched Amazon EC2 instance can contribute to CloudWatch metrics for Auto Scaling group. If this parameter is omitted, the default value of `300` seconds is used.", - "MaximumScalingStepSize": "The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. 
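The cron and rate examples in the revised `ScheduleExpression` text map onto a task's `Schedule` property. A sketch with placeholder location ARNs:

```yaml
Resources:
  MonthlyTransfer:
    Type: AWS::DataSync::Task
    Properties:
      SourceLocationArn: arn:aws:datasync:us-east-1:111122223333:location/loc-0123456789abcdef0
      DestinationLocationArn: arn:aws:datasync:us-east-1:111122223333:location/loc-abcdef01234567890
      Schedule:
        ScheduleExpression: cron(0 8 * * 3#1)   # 8 AM, first Wednesday of every month
        Status: ENABLED                         # the default; DISABLED pauses the schedule
```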
If this parameter is omitted, the default value of `10000` is used.", + "MaximumScalingStepSize": "The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. If this parameter is omitted, the default value of `10000` is used.", "MinimumScalingStepSize": "The minimum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter If this parameter is omitted, the default value of `1` is used.\n\nWhen additional capacity is required, Amazon ECS will scale up the minimum scaling step size even if the actual demand is less than the minimum scaling step size.\n\nIf you use a capacity provider with an Auto Scaling group configured with more than one Amazon EC2 instance type or Availability Zone, Amazon ECS will scale up by the exact minimum scaling step size value and will ignore both the maximum scaling step size as well as the capacity demand.", "Status": "Determines whether to use managed scaling for the capacity provider.", "TargetCapacity": "The target capacity utilization as a percentage for the capacity provider. The specified value must be greater than `0` and less than or equal to `100` . For example, if you want the capacity provider to maintain 10% spare capacity, then that means the utilization is 90%, so use a `targetCapacity` of `90` . The default value of `100` percent results in the Amazon EC2 instances in your Auto Scaling group being completely used." @@ -13762,8 +13759,8 @@ }, "AWS::ECS::Service AwsVpcConfiguration": { "AssignPublicIp": "Whether the task's elastic network interface receives a public IP address. The default value is `DISABLED` .", - "SecurityGroups": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `AwsVpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", - "Subnets": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `AwsVpcConfiguration` .\n\n> All specified subnets must be from the same VPC." + "SecurityGroups": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `awsvpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", + "Subnets": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `awsvpcConfiguration` .\n\n> All specified subnets must be from the same VPC." }, "AWS::ECS::Service CapacityProviderStrategyItem": { "Base": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.", @@ -13782,8 +13779,8 @@ "AWS::ECS::Service DeploymentConfiguration": { "Alarms": "Information about the CloudWatch alarms.", "DeploymentCircuitBreaker": "> The deployment circuit breaker can only be used for services using the rolling update ( `ECS` ) deployment type. \n\nThe *deployment circuit breaker* determines whether a service deployment will fail if the service can't reach a steady state. 
If you use the deployment circuit breaker, a service deployment will transition to a failed state and stop launching new tasks. If you use the rollback option, when a service deployment fails, the service is rolled back to the last deployment that completed successfully. For more information, see [Rolling update](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html) in the *Amazon Elastic Container Service Developer Guide*", - "MaximumPercent": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and tasks that use the EC2 launch type, the *maximum percent* value is set to the default value and is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state. If the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.", - "MinimumHealthyPercent": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. 
The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nThe default value for a replica service for `minimumHealthyPercent` is 100%. The default `minimumHealthyPercent` value for a service using the `DAEMON` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n\nThe minimum number of healthy tasks during a deployment is the `desiredCount` multiplied by the `minimumHealthyPercent` /100, rounded up to the nearest integer value.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value and is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state. If a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service." + "MaximumPercent": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types, and tasks in the service use the EC2 launch type, the *maximum percent* value is set to the default value. The *maximum percent* value is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `maximumPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. 
\n\nIf the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.", + "MinimumHealthyPercent": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nThe default value for a replica service for `minimumHealthyPercent` is 100%. The default `minimumHealthyPercent` value for a service using the `DAEMON` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n\nThe minimum number of healthy tasks during a deployment is the `desiredCount` multiplied by the `minimumHealthyPercent` /100, rounded up to the nearest integer value.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value. The *minimum healthy percent* value is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `minimumHealthyPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. \n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service." 
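To make the rollout math above concrete: with a `desiredCount` of 4, a `minimumHealthyPercent` of 50% keeps at least ceil(4 * 50/100) = 2 tasks `RUNNING` , while a `maximumPercent` of 200% allows at most floor(4 * 200/100) = 8 tasks during the deployment. A minimal sketch (the cluster and task definition references are hypothetical):

```yaml
Service:
  Type: AWS::ECS::Service
  Properties:
    Cluster: !Ref Cluster            # hypothetical cluster
    TaskDefinition: !Ref TaskDefinition
    DesiredCount: 4
    DeploymentConfiguration:
      MinimumHealthyPercent: 50      # ceil(4 * 50/100) = 2 tasks must stay RUNNING
      MaximumPercent: 200            # floor(4 * 200/100) = at most 8 tasks mid-rollout
      DeploymentCircuitBreaker:      # rolling update (ECS) deployment type only
        Enable: true
        Rollback: true               # roll back to the last successful deployment on failure
```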
},
 "AWS::ECS::Service DeploymentController": {
 "Type": "The deployment controller type to use. There are three deployment controller types available:\n\n- **ECS** - The rolling update ( `ECS` ) deployment type involves replacing the current running version of the container with the latest version. The number of containers Amazon ECS adds or removes from the service during a rolling update is controlled by adjusting the minimum and maximum number of healthy tasks allowed during a service deployment, as specified in the [DeploymentConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeploymentConfiguration.html) .\n- **CODE_DEPLOY** - The blue/green ( `CODE_DEPLOY` ) deployment type uses the blue/green deployment model powered by AWS CodeDeploy , which allows you to verify a new deployment of a service before sending production traffic to it.\n- **EXTERNAL** - The external ( `EXTERNAL` ) deployment type enables you to use any third-party deployment controller for full control over the deployment process for an Amazon ECS service."
@@ -13920,7 +13917,7 @@
 "Interactive": "When this parameter is `true` , you can deploy containerized applications that require `stdin` or a `tty` to be allocated. This parameter maps to `OpenStdin` in the docker container create command and the `--interactive` option to docker run.",
 "Links": "The `links` parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is `bridge` . The `name:internalName` construct is analogous to `name:alias` in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to `Links` in the docker container create command and the `--link` option to docker run.\n\n> This parameter is not supported for Windows containers. > Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.",
 "LinuxParameters": "Linux-specific modifications that are applied to the container, such as Linux kernel capabilities. For more information, see [KernelCapabilities](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_KernelCapabilities.html) .\n\n> This parameter is not supported for Windows containers.",
- "LogConfiguration": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . By default, containers use the same logging driver that the Docker daemon uses. However, the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options).
For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the [LogConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html) data type). Additional log drivers may be available in future releases of the Amazon ECS container agent. \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", + "LogConfiguration": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the docker Create a container command and the `--log-driver` option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However, the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the [LogConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html) data type). Additional log drivers may be available in future releases of the Amazon ECS container agent. \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", "Memory": "The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task `memory` value, if one is specified. 
This parameter maps to `Memory` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--memory` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nIf using the Fargate launch type, this parameter is optional.\n\nIf using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level `memory` and `memoryReservation` value, `memory` must be greater than `memoryReservation` . If you specify `memoryReservation` , then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of `memory` is used.\n\nThe Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container, so you should not specify fewer than 6 MiB of memory for your containers.\n\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.",
 "MemoryReservation": "The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the `memory` parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to `MemoryReservation` in the docker container create command and the `--memory-reservation` option to docker run.\n\nIf a task-level memory value is not specified, you must specify a non-zero integer for one or both of `memory` or `memoryReservation` in a container definition. If you specify both, `memory` must be greater than `memoryReservation` . If you specify `memoryReservation` , then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of `memory` is used.\n\nFor example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a `memoryReservation` of 128 MiB, and a `memory` hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.\n\nThe Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.\n\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.",
 "MountPoints": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the docker container create command and the `--volume` option to docker run.\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount points can't be across drives.",
@@ -13971,8 +13968,8 @@
 "SizeInGiB": "The total amount, in GiB, of ephemeral storage to set for the task.
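The 128 MiB soft limit / 300 MiB hard limit example above maps onto a container definition like this minimal sketch (image name, log group, and Region are hypothetical; the log group is assumed to exist):

```yaml
TaskDefinition:
  Type: AWS::ECS::TaskDefinition
  Properties:
    Family: sample-app               # hypothetical family name
    ContainerDefinitions:
      - Name: web
        Image: public.ecr.aws/docker/library/nginx:latest  # hypothetical image
        Essential: true
        MemoryReservation: 128       # soft limit (MiB) the container normally needs
        Memory: 300                  # hard limit (MiB); the container is killed above this
        LogConfiguration:
          LogDriver: awslogs
          Options:
            awslogs-group: /ecs/sample-app
            awslogs-region: us-east-1
            awslogs-stream-prefix: web
```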
The minimum supported value is `20` GiB and the maximum supported value is `200` GiB." }, "AWS::ECS::TaskDefinition FSxAuthorizationConfig": { - "CredentialsParameter": "The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of an AWS Secrets Manager secret or SSM Parameter Store parameter. The ARN refers to the stored credentials.", - "Domain": "A fully qualified domain name hosted by an [AWS Directory Service](https://docs.aws.amazon.com/directoryservice/latest/admin-guide/directory_microsoft_ad.html) Managed Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2." + "CredentialsParameter": "", + "Domain": "" }, "AWS::ECS::TaskDefinition FSxWindowsFileServerVolumeConfiguration": { "AuthorizationConfig": "The authorization configuration details for the Amazon FSx for Windows File Server file system.", @@ -14105,8 +14102,8 @@ }, "AWS::ECS::TaskSet AwsVpcConfiguration": { "AssignPublicIp": "Whether the task's elastic network interface receives a public IP address. The default value is `DISABLED` .", - "SecurityGroups": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `AwsVpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", - "Subnets": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `AwsVpcConfiguration` .\n\n> All specified subnets must be from the same VPC." + "SecurityGroups": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `awsvpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", + "Subnets": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `awsvpcConfiguration` .\n\n> All specified subnets must be from the same VPC." }, "AWS::ECS::TaskSet LoadBalancer": { "ContainerName": "The name of the container (as it appears in a container definition) to associate with the load balancer.\n\nYou need to specify the container name when configuring the target group for an Amazon ECS load balancer.", @@ -17385,7 +17382,8 @@ }, "AWS::Glue::Database": { "CatalogId": "The AWS account ID for the account in which to create the catalog object.\n\n> To specify the account ID, you can use the `Ref` intrinsic function with the `AWS::AccountId` pseudo parameter. For example: `!Ref AWS::AccountId`", - "DatabaseInput": "The metadata for the database." + "DatabaseInput": "The metadata for the database.", + "DatabaseName": "The name of the catalog database." }, "AWS::Glue::Database DataLakePrincipal": { "DataLakePrincipalIdentifier": "An identifier for the AWS Lake Formation principal." @@ -18778,6 +18776,15 @@ "Key": "One part of a key-value pair that makes up a tag. A `key` is a general label that acts like a category for more specific tag values.", "Value": "The optional part of a key-value pair that makes up a tag. A `value` acts as a descriptor within a tag category (key)." }, + "AWS::IVS::PublicKey": { + "Name": "Public key name. 
The value does not need to be unique.", + "PublicKeyMaterial": "The public portion of a customer-generated key pair.", + "Tags": "An array of key-value pairs to apply to this resource." + }, + "AWS::IVS::PublicKey Tag": { + "Key": "One part of a key-value pair that makes up a tag. A `key` is a general label that acts like a category for more specific tag values.", + "Value": "The optional part of a key-value pair that makes up a tag. A `value` acts as a descriptor within a tag category (key)." + }, "AWS::IVS::RecordingConfiguration": { "DestinationConfiguration": "A destination configuration describes an S3 bucket where recorded video will be stored. See the DestinationConfiguration property type for more information.", "Name": "Recording-configuration name. The value does not need to be unique.", @@ -18807,9 +18814,14 @@ "TargetIntervalSeconds": "The targeted thumbnail-generation interval in seconds. This is configurable (and required) only if [RecordingMode](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-recordingconfiguration-thumbnailconfiguration.html#cfn-ivs-recordingconfiguration-thumbnailconfiguration-recordingmode) is `INTERVAL` .\n\n> Setting a value for `TargetIntervalSeconds` does not guarantee that thumbnails are generated at the specified interval. For thumbnails to be generated at the `TargetIntervalSeconds` interval, the `IDR/Keyframe` value for the input video must be less than the `TargetIntervalSeconds` value. See [Amazon IVS Streaming Configuration](https://docs.aws.amazon.com/ivs/latest/LowLatencyUserGuide/streaming-config.html) for information on setting `IDR/Keyframe` to the recommended value in video-encoder settings. \n\n*Default* : 60" }, "AWS::IVS::Stage": { + "AutoParticipantRecordingConfiguration": "", "Name": "Stage name.", "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-stage-tag.html) ." }, + "AWS::IVS::Stage AutoParticipantRecordingConfiguration": { + "MediaTypes": "Types of media to be recorded. Default: `AUDIO_VIDEO` .", + "StorageConfigurationArn": "ARN of the StorageConfiguration resource to use for individual participant recording. Default: \"\" (empty string, no storage configuration is specified). Individual participant recording cannot be started unless a storage configuration is specified, when a Stage is created or updated." + }, "AWS::IVS::Stage Tag": { "Key": "One part of a key-value pair that makes up a tag. A `key` is a general label that acts like a category for more specific tag values.", "Value": "The optional part of a key-value pair that makes up a tag. A `value` acts as a descriptor within a tag category (key)." @@ -19017,7 +19029,7 @@ "Workflows": "Contains an array of workflow configuration objects." }, "AWS::ImageBuilder::Image EcrConfiguration": { - "ContainerTags": "Tags for Image Builder to apply to the output container image that &INS; scans. Tags can help you identify and manage your scanned images.", + "ContainerTags": "Tags for Image Builder to apply to the output container image that Amazon Inspector scans. Tags can help you identify and manage your scanned images.", "RepositoryName": "The name of the container repository that Amazon Inspector scans to identify findings for your container images. The name includes the path for the repository location. 
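For the `AutoParticipantRecordingConfiguration` addition on the IVS Stage above, a minimal sketch; recording cannot start unless a storage configuration ARN is attached (the stage name and the `StorageConfiguration` resource are hypothetical):

```yaml
Stage:
  Type: AWS::IVS::Stage
  Properties:
    Name: my-stage                   # hypothetical stage name
    AutoParticipantRecordingConfiguration:
      MediaTypes:
        - AUDIO_VIDEO                # the default media types
      # Individual participant recording cannot start without a storage configuration.
      StorageConfigurationArn: !GetAtt StorageConfiguration.Arn
```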
If you don\u2019t provide this information, Image Builder creates a repository in your account named `image-builder-image-scanning-repository` for vulnerability scans of your output container images." }, "AWS::ImageBuilder::Image ImageScanningConfiguration": { @@ -19055,7 +19067,7 @@ "Workflows": "Contains the workflows that run for the image pipeline." }, "AWS::ImageBuilder::ImagePipeline EcrConfiguration": { - "ContainerTags": "Tags for Image Builder to apply to the output container image that &INS; scans. Tags can help you identify and manage your scanned images.", + "ContainerTags": "Tags for Image Builder to apply to the output container image that Amazon Inspector scans. Tags can help you identify and manage your scanned images.", "RepositoryName": "The name of the container repository that Amazon Inspector scans to identify findings for your container images. The name includes the path for the repository location. If you don\u2019t provide this information, Image Builder creates a repository in your account named `image-builder-image-scanning-repository` for vulnerability scans of your output container images." }, "AWS::ImageBuilder::ImagePipeline ImageScanningConfiguration": { @@ -23112,8 +23124,8 @@ "EventSourceArn": "The Amazon Resource Name (ARN) of the event source.\n\n- *Amazon Kinesis* \u2013 The ARN of the data stream or a stream consumer.\n- *Amazon DynamoDB Streams* \u2013 The ARN of the stream.\n- *Amazon Simple Queue Service* \u2013 The ARN of the queue.\n- *Amazon Managed Streaming for Apache Kafka* \u2013 The ARN of the cluster or the ARN of the VPC connection (for [cross-account event source mappings](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#msk-multi-vpc) ).\n- *Amazon MQ* \u2013 The ARN of the broker.\n- *Amazon DocumentDB* \u2013 The ARN of the DocumentDB change stream.", "FilterCriteria": "An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see [Lambda event filtering](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html) .", "FunctionName": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* \u2013 `MyFunction` .\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Version or Alias ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD` .\n- *Partial ARN* \u2013 `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. 
If you specify only the function name, it's limited to 64 characters in length.", - "FunctionResponseTypes": "(Streams and SQS) A list of current response type enums applied to the event source mapping.\n\nValid Values: `ReportBatchItemFailures`", - "KmsKeyArn": "", + "FunctionResponseTypes": "(Kinesis, DynamoDB Streams, and SQS) A list of current response type enums applied to the event source mapping.\n\nValid Values: `ReportBatchItemFailures`", + "KmsKeyArn": "The ARN of the AWS Key Management Service ( AWS KMS ) customer managed key that Lambda uses to encrypt your function's [filter criteria](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html#filtering-basics) .", "MaximumBatchingWindowInSeconds": "The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function.\n\n*Default ( Kinesis , DynamoDB , Amazon SQS event sources)* : 0\n\n*Default ( Amazon MSK , Kafka, Amazon MQ , Amazon DocumentDB event sources)* : 500 ms\n\n*Related setting:* For Amazon SQS event sources, when you set `BatchSize` to a value greater than 10, you must set `MaximumBatchingWindowInSeconds` to at least 1.", "MaximumRecordAgeInSeconds": "(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is -1,\nwhich sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.\n\n> The minimum valid value for maximum record age is 60s. Although values less than 60 and greater than -1 fall within the parameter's absolute range, they are not allowed", "MaximumRetryAttempts": "(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is -1,\nwhich sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source.", @@ -23166,7 +23178,7 @@ }, "AWS::Lambda::Function": { "Architectures": "The instruction set architecture that the function supports. Enter a string array with one of the valid values (arm64 or x86_64). The default value is `x86_64` .", - "Code": "The code for the function.", + "Code": "The code for the function. You can define your function code in multiple ways:\n\n- For .zip deployment packages, you can specify the Amazon S3 location of the .zip file in the `S3Bucket` , `S3Key` , and `S3ObjectVersion` properties.\n- For .zip deployment packages, you can alternatively define the function code inline in the `ZipFile` property. This method works only for Node.js and Python functions.\n- For container images, specify the URI of your container image in the Amazon ECR registry in the `ImageUri` property.", "CodeSigningConfigArn": "To enable code signing for this function, specify the ARN of a code-signing configuration. A code-signing configuration\nincludes a set of signing profiles, which define the trusted publishers for this function.", "DeadLetterConfig": "A dead-letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. 
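A minimal sketch tying together the event source mapping properties described above, assuming a hypothetical SQS queue and KMS key defined elsewhere in the template; note that an SQS `BatchSize` above 10 requires `MaximumBatchingWindowInSeconds` of at least 1:

```yaml
EventSourceMapping:
  Type: AWS::Lambda::EventSourceMapping
  Properties:
    FunctionName: MyFunction                 # hypothetical function name
    EventSourceArn: !GetAtt Queue.Arn        # hypothetical AWS::SQS::Queue
    BatchSize: 20
    MaximumBatchingWindowInSeconds: 1        # must be >= 1 when BatchSize > 10 for SQS
    FunctionResponseTypes:
      - ReportBatchItemFailures              # report partial batch failures
    FilterCriteria:
      Filters:
        - Pattern: '{"body": {"type": ["order"]}}'
    KmsKeyArn: !GetAtt FilterKey.Arn         # hypothetical CMK encrypting the filter criteria
```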
For more information, see [Dead-letter queues](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-dlq) .", "Description": "A description of the function.", @@ -23181,7 +23193,7 @@ "LoggingConfig": "The function's Amazon CloudWatch Logs configuration settings.", "MemorySize": "The amount of [memory available to the function](https://docs.aws.amazon.com/lambda/latest/dg/configuration-function-common.html#configuration-memory-console) at runtime. Increasing the function memory also increases its CPU allocation. The default value is 128 MB. The value can be any multiple of 1 MB. Note that new AWS accounts have reduced concurrency and memory quotas. AWS raises these quotas automatically based on your usage. You can also request a quota increase.", "PackageType": "The type of deployment package. Set to `Image` for container image and set `Zip` for .zip file archive.", - "RecursiveLoop": "", + "RecursiveLoop": "The status of your function's recursive loop detection configuration.\n\nWhen this value is set to `Allow` and Lambda detects your function being invoked as part of a recursive loop, it doesn't take any action.\n\nWhen this value is set to `Terminate` and Lambda detects your function being invoked as part of a recursive loop, it stops your function being invoked and notifies you.", "ReservedConcurrentExecutions": "The number of simultaneous executions to reserve for the function.", "Role": "The Amazon Resource Name (ARN) of the function's execution role.", "Runtime": "The identifier of the function's [runtime](https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html) . Runtime is required if the deployment package is a .zip file archive. Specifying a runtime results in an error if you're deploying a function using a container image.\n\nThe following list includes deprecated runtimes. Lambda blocks creating new functions and updating existing functions shortly after each runtime is deprecated. For more information, see [Runtime use after deprecation](https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html#runtime-deprecation-levels) .\n\nFor a list of all currently supported runtimes, see [Supported runtimes](https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html#runtimes-supported) .", @@ -23236,8 +23248,8 @@ "OptimizationStatus": "When you provide a [qualified Amazon Resource Name (ARN)](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html#versioning-versions-using) , this response element indicates whether SnapStart is activated for the specified function version." }, "AWS::Lambda::Function Tag": { - "Key": "", - "Value": "" + "Key": "The key for this tag.", + "Value": "The value for this tag." }, "AWS::Lambda::Function TracingConfig": { "Mode": "The tracing mode." @@ -24730,6 +24742,9 @@ "AWS::MSK::Replicator ReplicationStartingPosition": { "Type": "" }, + "AWS::MSK::Replicator ReplicationTopicNameConfiguration": { + "Type": "" + }, "AWS::MSK::Replicator Tag": { "Key": "", "Value": "" @@ -24739,6 +24754,7 @@ "CopyTopicConfigurations": "", "DetectAndCopyNewTopics": "", "StartingPosition": "", + "TopicNameConfiguration": "", "TopicsToExclude": "", "TopicsToReplicate": "" }, @@ -29667,7 +29683,7 @@ "DisplayName": "The name of the Amazon Q Business application.", "EncryptionConfiguration": "Provides the identifier of the AWS KMS key used to encrypt data indexed by Amazon Q Business. 
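A minimal function sketch combining the inline `ZipFile` code option (Node.js and Python only) with the `RecursiveLoop` setting documented above (the execution role is hypothetical):

```yaml
InlineFunction:
  Type: AWS::Lambda::Function
  Properties:
    Runtime: python3.12
    Handler: index.handler
    Role: !GetAtt FunctionRole.Arn   # hypothetical execution role
    RecursiveLoop: Terminate         # stop invocations when a recursive loop is detected
    Code:
      ZipFile: |                     # inline code; deployed as index.py for Python
        def handler(event, context):
            return {"ok": True}
```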
Amazon Q Business doesn't support asymmetric keys.", "IdentityCenterInstanceArn": "The Amazon Resource Name (ARN) of the IAM Identity Center instance you are either creating for\u2014or connecting to\u2014your Amazon Q Business application.\n\n*Required* : `Yes`", - "QAppsConfiguration": "Configuration information about Amazon Q Apps. (preview feature)", + "QAppsConfiguration": "Configuration information about Amazon Q Apps.", "RoleArn": "The Amazon Resource Name (ARN) of an IAM role with permissions to access your Amazon CloudWatch logs and metrics.", "Tags": "A list of key-value pairs that identify or categorize your Amazon Q Business application. You can also use tags to help control access to the application. Tag keys and values can consist of Unicode letters, digits, white space, and any of the following symbols: _ . : / = + - @." }, @@ -40903,7 +40919,7 @@ }, "AWS::SES::ReceiptRule S3Action": { "BucketName": "The name of the Amazon S3 bucket for incoming email.", - "KmsKeyArn": "The customer master key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket. You can use the default master key or a custom master key that you created in AWS KMS as follows:\n\n- To use the default master key, provide an ARN in the form of `arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses` . For example, if your AWS account ID is 123456789012 and you want to use the default master key in the US West (Oregon) Region, the ARN of the default master key would be `arn:aws:kms:us-west-2:123456789012:alias/aws/ses` . If you use the default master key, you don't need to perform any extra steps to give Amazon SES permission to use the key.\n- To use a custom master key that you created in AWS KMS, provide the ARN of the master key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. For more information about giving permissions, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/dg/receiving-email-permissions.html) .\n\nFor more information about key policies, see the [AWS KMS Developer Guide](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html) . If you do not specify a master key, Amazon SES does not encrypt your emails.\n\n> Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your AWS KMS keys for decryption. This encryption client is currently available with the [AWS SDK for Java](https://docs.aws.amazon.com/sdk-for-java/) and [AWS SDK for Ruby](https://docs.aws.amazon.com/sdk-for-ruby/) only. For more information about client-side encryption using AWS KMS master keys, see the [Amazon S3 Developer Guide](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) .", + "KmsKeyArn": "The customer managed key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket. You can use the AWS managed key or a customer managed key that you created in AWS KMS as follows:\n\n- To use the AWS managed key, provide an ARN in the form of `arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses` . 
For example, if your AWS account ID is 123456789012 and you want to use the AWS managed key in the US West (Oregon) Region, the ARN of the AWS managed key would be `arn:aws:kms:us-west-2:123456789012:alias/aws/ses` . If you use the AWS managed key, you don't need to perform any extra steps to give Amazon SES permission to use the key.\n- To use a customer managed key that you created in AWS KMS, provide the ARN of the customer managed key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. For more information about giving permissions, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/dg/receiving-email-permissions.html) .\n\nFor more information about key policies, see the [AWS KMS Developer Guide](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html) . If you do not specify an AWS KMS key, Amazon SES does not encrypt your emails.\n\n> Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your AWS KMS keys for decryption. This encryption client is currently available with the [AWS SDK for Java](https://docs.aws.amazon.com/sdk-for-java/) and [AWS SDK for Ruby](https://docs.aws.amazon.com/sdk-for-ruby/) only. For more information about client-side encryption using AWS KMS managed keys, see the [Amazon S3 Developer Guide](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) .", "ObjectKeyPrefix": "The key prefix of the Amazon S3 bucket. The key prefix is similar to a directory name that enables you to store similar data under the same directory in a bucket.", "TopicArn": "The ARN of the Amazon SNS topic to notify when the message is saved to the Amazon S3 bucket. You can find the ARN of a topic by using the [ListTopics](https://docs.aws.amazon.com/sns/latest/api/API_ListTopics.html) operation in Amazon SNS.\n\nFor more information about Amazon SNS topics, see the [Amazon SNS Developer Guide](https://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html) ." }, @@ -40955,8 +40971,8 @@ "TopicArn": "The ARN of the topic to subscribe to." }, "AWS::SNS::Topic": { - "ArchivePolicy": "The archive policy determines the number of days Amazon SNS retains messages. You can set a retention period from 1 to 365 days.", - "ContentBasedDeduplication": "Enables content-based deduplication for FIFO topics.\n\n- By default, `ContentBasedDeduplication` is set to `false` . If you create a FIFO topic and this attribute is `false` , you must specify a value for the `MessageDeduplicationId` parameter for the [Publish](https://docs.aws.amazon.com/sns/latest/api/API_Publish.html) action.\n- When you set `ContentBasedDeduplication` to `true` , Amazon SNS uses a SHA-256 hash to generate the `MessageDeduplicationId` using the body of the message (but not the attributes of the message).\n\n(Optional) To override the generated value, you can specify a value for the the `MessageDeduplicationId` parameter for the `Publish` action.", + "ArchivePolicy": "The `ArchivePolicy` determines the number of days Amazon SNS retains messages in FIFO topics. You can set a retention period ranging from 1 to 365 days. 
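A minimal receipt rule sketch using the AWS managed key ARN format described above (rule set name, bucket, account ID, and Region are hypothetical):

```yaml
ReceiptRule:
  Type: AWS::SES::ReceiptRule
  Properties:
    RuleSetName: my-rule-set                 # hypothetical receipt rule set
    Rule:
      Name: save-to-s3
      Enabled: true
      Actions:
        - S3Action:
            BucketName: amzn-s3-demo-bucket
            ObjectKeyPrefix: inbound/
            # AWS managed key for SES in account 123456789012, us-west-2:
            KmsKeyArn: arn:aws:kms:us-west-2:123456789012:alias/aws/ses
```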
This property is only applicable to FIFO topics; attempting to use it with standard topics will result in a creation failure.", + "ContentBasedDeduplication": "`ContentBasedDeduplication` enables deduplication of messages based on their content for FIFO topics. By default, this property is set to false. If you create a FIFO topic with `ContentBasedDeduplication` set to false, you must provide a `MessageDeduplicationId` for each `Publish` action. When set to true, Amazon SNS automatically generates a `MessageDeduplicationId` using a SHA-256 hash of the message body (excluding message attributes). You can optionally override this generated value by specifying a `MessageDeduplicationId` in the `Publish` action. Note that this property only applies to FIFO topics; using it with standard topics will cause the creation to fail.", "DataProtectionPolicy": "The body of the policy document you want to use for this topic.\n\nYou can only add one policy per topic.\n\nThe policy must be in JSON string format.\n\nLength Constraints: Maximum length of 30,720.", "DeliveryStatusLogging": "The `DeliveryStatusLogging` configuration enables you to log the delivery status of messages sent from your Amazon SNS topic to subscribed endpoints with the following supported delivery protocols:\n\n- HTTP\n- Amazon Kinesis Data Firehose\n- AWS Lambda\n- Platform application endpoint\n- Amazon Simple Queue Service\n\nOnce configured, log entries are sent to Amazon CloudWatch Logs.", "DisplayName": "The display name to use for an Amazon SNS topic with SMS subscriptions. The display name must be maximum 100 characters long, including hyphens (-), underscores (_), spaces, and tabs.", @@ -41066,7 +41082,7 @@ "AWS::SSM::Document AttachmentsSource": { "Key": "The key of a key-value pair that identifies the location of an attachment to a document.", "Name": "The name of the document attachment file.", - "Values": "The value of a key-value pair that identifies the location of an attachment to a document. The format for *Value* depends on the type of key you specify.\n\n- For the key *SourceUrl* , the value is an S3 bucket location. For example:\n\n`\"Values\": [ \"s3://doc-example-bucket/my-folder\" ]`\n- For the key *S3FileUrl* , the value is a file in an S3 bucket. For example:\n\n`\"Values\": [ \"s3://doc-example-bucket/my-folder/my-file.py\" ]`\n- For the key *AttachmentReference* , the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse. For example:\n\n`\"Values\": [ \"MyOtherDocument/3/my-other-file.py\" ]`\n\nHowever, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. For example:\n\n`\"Values\": [ \"arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py\" ]`" + "Values": "The value of a key-value pair that identifies the location of an attachment to a document. The format for *Value* depends on the type of key you specify.\n\n- For the key *SourceUrl* , the value is an S3 bucket location. For example:\n\n`\"Values\": [ \"s3://amzn-s3-demo-bucket/my-prefix\" ]`\n- For the key *S3FileUrl* , the value is a file in an S3 bucket. 
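A minimal FIFO topic sketch combining the two properties above; `ArchivePolicy` is a JSON document whose `MessageRetentionPeriod` is expressed in days (topic name and retention value are hypothetical):

```yaml
FifoTopic:
  Type: AWS::SNS::Topic
  Properties:
    TopicName: orders.fifo                   # FIFO topic names must end in .fifo
    FifoTopic: true
    ContentBasedDeduplication: true          # dedupe on a SHA-256 hash of the message body
    ArchivePolicy:
      MessageRetentionPeriod: "30"           # days; valid range 1-365, FIFO topics only
```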
For example:\n\n`\"Values\": [ \"s3://amzn-s3-demo-bucket/my-prefix/my-file.py\" ]`\n- For the key *AttachmentReference* , the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse. For example:\n\n`\"Values\": [ \"MyOtherDocument/3/my-other-file.py\" ]`\n\nHowever, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. For example:\n\n`\"Values\": [ \"arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py\" ]`" }, "AWS::SSM::Document DocumentRequires": { "Name": "The name of the required SSM document. The name can be an Amazon Resource Name (ARN).", @@ -41211,7 +41227,7 @@ }, "AWS::SSM::PatchBaseline Rule": { "ApproveAfterDays": "The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of `7` means that patches are approved seven days after they are released.\n\n> This parameter is marked as not required, but your request must include a value for either `ApproveAfterDays` or `ApproveUntilDate` . \n\nNot supported for Debian Server or Ubuntu Server.", - "ApproveUntilDate": "The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically.\n\nEnter dates in the format `YYYY-MM-DD` . For example, `2021-12-31` .\n\n> This parameter is marked as not required, but your request must include a value for either `ApproveUntilDate` or `ApproveAfterDays` . \n\nNot supported for Debian Server or Ubuntu Server.", + "ApproveUntilDate": "The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically.\n\nEnter dates in the format `YYYY-MM-DD` . For example, `2024-12-31` .\n\n> This parameter is marked as not required, but your request must include a value for either `ApproveUntilDate` or `ApproveAfterDays` . \n\nNot supported for Debian Server or Ubuntu Server.", "ComplianceLevel": "A compliance severity level for all approved patches in a patch baseline. Valid compliance severity levels include the following: `UNSPECIFIED` , `CRITICAL` , `HIGH` , `MEDIUM` , `LOW` , and `INFORMATIONAL` .", "EnableNonSecurity": "For managed nodes identified by the approval rule filters, enables a patch baseline to apply non-security updates available in the specified repository. The default value is `false` . Applies to Linux managed nodes only.", "PatchFilterGroup": "The patch filter group that defines the criteria for the rule." @@ -42179,7 +42195,7 @@ }, "AWS::SageMaker::Model ContainerDefinition": { "ContainerHostname": "This parameter is ignored for models that contain only a `PrimaryContainer` .\n\nWhen a `ContainerDefinition` is part of an inference pipeline, the value of the parameter uniquely identifies the container for the purposes of logging and metrics. For information, see [Use Logs and Metrics to Monitor an Inference Pipeline](https://docs.aws.amazon.com/sagemaker/latest/dg/inference-pipeline-logs-metrics.html) . If you don't specify a value for this parameter for a `ContainerDefinition` that is part of an inference pipeline, a unique name is automatically assigned based on the position of the `ContainerDefinition` in the pipeline. 
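A minimal patch approval rule sketch; `ApproveAfterDays` and `ApproveUntilDate` are alternatives within a rule, so only one is set here (baseline name, operating system, and filter values are hypothetical):

```yaml
PatchBaseline:
  Type: AWS::SSM::PatchBaseline
  Properties:
    Name: amazon-linux-security              # hypothetical baseline name
    OperatingSystem: AMAZON_LINUX_2023
    ApprovalRules:
      PatchRules:
        - ApproveAfterDays: 7                # or ApproveUntilDate: "2024-12-31", not both
          ComplianceLevel: CRITICAL
          PatchFilterGroup:
            PatchFilters:
              - Key: CLASSIFICATION
                Values:
                  - Security
```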
If you specify a value for the `ContainerHostName` for any `ContainerDefinition` that is part of an inference pipeline, you must specify a value for the `ContainerHostName` parameter of every `ContainerDefinition` in that pipeline.", - "Environment": "The environment variables to set in the Docker container.\n\nThe maximum length of each key and value in the `Environment` map is 1024 bytes. The maximum length of all keys and values in the map, combined, is 32 KB. If you pass multiple containers to a `CreateModel` request, then the maximum length of all of their maps, combined, is also 32 KB.", + "Environment": "The environment variables to set in the Docker container. Don't include any sensitive data in your environment variables.\n\nThe maximum length of each key and value in the `Environment` map is 1024 bytes. The maximum length of all keys and values in the map, combined, is 32 KB. If you pass multiple containers to a `CreateModel` request, then the maximum length of all of their maps, combined, is also 32 KB.", "Image": "The path where inference code is stored. This can be either in Amazon EC2 Container Registry or in a Docker registry that is accessible from the same VPC that you configure for your endpoint. If you are using your own custom algorithm instead of an algorithm provided by SageMaker, the inference code must meet SageMaker requirements. SageMaker supports both `registry/repository[:tag]` and `registry/repository[@digest]` image path formats. For more information, see [Using Your Own Algorithms with Amazon SageMaker](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html) .\n\n> The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 Container Registry must be in the same region as the model or endpoint you are creating.", "ImageConfig": "Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For information about storing containers in a private Docker registry, see [Use a Private Docker Registry for Real-Time Inference Containers](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-containers-inference-private.html) .\n\n> The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 Container Registry must be in the same region as the model or endpoint you are creating.", "InferenceSpecificationName": "The inference specification name in the model package version.", @@ -43465,7 +43481,7 @@ "AWS::SecretsManager::SecretTargetAttachment": { "SecretId": "The ARN or name of the secret. To reference a secret also created in this template, use the see [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function with the secret's logical ID.", "TargetId": "The ID of the database or cluster.", - "TargetType": "A string that defines the type of service or database associated with the secret. This value instructs Secrets Manager how to update the secret with the details of the service or database. This value must be one of the following:\n\n- AWS::RDS::DBInstance\n- AWS::RDS::DBCluster\n- AWS::Redshift::Cluster\n- AWS::DocDB::DBInstance\n- AWS::DocDB::DBCluster" + "TargetType": "A string that defines the type of service or database associated with the secret. This value instructs Secrets Manager how to update the secret with the details of the service or database. 
This value must be one of the following:\n\n- AWS::RDS::DBInstance\n- AWS::RDS::DBCluster\n- AWS::Redshift::Cluster\n- AWS::DocDB::DBInstance\n- AWS::DocDB::DBCluster\n- AWS::DocDBElastic::Cluster" }, "AWS::SecurityHub::AutomationRule": { "Actions": "One or more actions to update finding fields if a finding matches the conditions specified in `Criteria` .", diff --git a/schema_source/cloudformation.schema.json b/schema_source/cloudformation.schema.json index 1a7ec3a70..b69e4a866 100644 --- a/schema_source/cloudformation.schema.json +++ b/schema_source/cloudformation.schema.json @@ -571,7 +571,7 @@ }, "RevocationConfiguration": { "$ref": "#/definitions/AWS::ACMPCA::CertificateAuthority.RevocationConfiguration", - "markdownDescription": "Certificate revocation information used by the [CreateCertificateAuthority](https://docs.aws.amazon.com/privateca/latest/APIReference/API_CreateCertificateAuthority.html) and [UpdateCertificateAuthority](https://docs.aws.amazon.com/privateca/latest/APIReference/API_UpdateCertificateAuthority.html) actions. Your private certificate authority (CA) can configure Online Certificate Status Protocol (OCSP) support and/or maintain a certificate revocation list (CRL). OCSP returns validation information about certificates as requested by clients, and a CRL contains an updated list of certificates revoked by your CA. For more information, see [RevokeCertificate](https://docs.aws.amazon.com/privateca/latest/APIReference/API_RevokeCertificate.html) in the *AWS Private CA API Reference* and [Setting up a certificate revocation method](https://docs.aws.amazon.com/privateca/latest/userguide/revocation-setup.html) in the *AWS Private CA User Guide* .\n\n> The following requirements apply to revocation configurations.\n> \n> - A configuration disabling CRLs or OCSP must contain only the `Enabled=False` parameter, and will fail if other parameters such as `CustomCname` or `ExpirationInDays` are included.\n> - In a CRL configuration, the `S3BucketName` parameter must conform to the [Amazon S3 bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) .\n> - A configuration containing a custom Canonical Name (CNAME) parameter for CRLs or OCSP must conform to [RFC2396](https://docs.aws.amazon.com/https://www.ietf.org/rfc/rfc2396.txt) restrictions on the use of special characters in a CNAME.\n> - In a CRL or OCSP configuration, the value of a CNAME parameter must not include a protocol prefix such as \"http://\" or \"https://\".", + "markdownDescription": "Certificate revocation information used by the [CreateCertificateAuthority](https://docs.aws.amazon.com/privateca/latest/APIReference/API_CreateCertificateAuthority.html) and [UpdateCertificateAuthority](https://docs.aws.amazon.com/privateca/latest/APIReference/API_UpdateCertificateAuthority.html) actions. Your private certificate authority (CA) can configure Online Certificate Status Protocol (OCSP) support and/or maintain a certificate revocation list (CRL). OCSP returns validation information about certificates as requested by clients, and a CRL contains an updated list of certificates revoked by your CA. 
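A minimal attachment sketch for an RDS instance target (the secret and database resources are assumed to be defined elsewhere in the same template):

```yaml
SecretAttachment:
  Type: AWS::SecretsManager::SecretTargetAttachment
  Properties:
    SecretId: !Ref DatabaseSecret            # hypothetical secret in this template
    TargetId: !Ref Database                  # hypothetical AWS::RDS::DBInstance
    TargetType: AWS::RDS::DBInstance
```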
For more information, see [RevokeCertificate](https://docs.aws.amazon.com/privateca/latest/APIReference/API_RevokeCertificate.html) in the *AWS Private CA API Reference* and [Setting up a certificate revocation method](https://docs.aws.amazon.com/privateca/latest/userguide/revocation-setup.html) in the *AWS Private CA User Guide* .\n\nThe following requirements and constraints apply to revocation configurations.\n\n- A configuration disabling CRLs or OCSP must contain only the `Enabled=False` parameter, and will fail if other parameters such as `CustomCname` or `ExpirationInDays` are included.\n- In a CRL configuration, the `S3BucketName` parameter must conform to the [Amazon S3 bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) .\n- A configuration containing a custom Canonical Name (CNAME) parameter for CRLs or OCSP must conform to [RFC2396](https://docs.aws.amazon.com/https://www.ietf.org/rfc/rfc2396.txt) restrictions on the use of special characters in a CNAME.\n- In a CRL or OCSP configuration, the value of a CNAME parameter must not include a protocol prefix such as \"http://\" or \"https://\".\n- To revoke a certificate, delete the resource from your template, and call the AWS Private CA [RevokeCertificate](https://docs.aws.amazon.com/privateca/latest/APIReference/API_RevokeCertificate.html) API and specify the resource's certificate authority ARN.", "title": "RevocationConfiguration" }, "SigningAlgorithm": { @@ -8135,13 +8135,9 @@ "additionalProperties": false, "properties": { "Destination": { - "markdownDescription": "Specifies the location of the response to modify, and how to modify it. To learn more, see [Transforming API requests and responses](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) .", - "title": "Destination", "type": "string" }, "Source": { - "markdownDescription": "Specifies the data to update the parameter with. To learn more, see [Transforming API requests and responses](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) .", - "title": "Source", "type": "string" } }, @@ -22727,7 +22723,7 @@ "type": "number" }, "HealthCheckType": { - "markdownDescription": "A comma-separated value string of one or more health check types.\n\nThe valid values are `EC2` , `ELB` , and `VPC_LATTICE` . `EC2` is the default health check and cannot be disabled. For more information, see [Health checks for instances in an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nOnly specify `EC2` if you must clear a value that was previously set.", + "markdownDescription": "A comma-separated value string of one or more health check types.\n\nThe valid values are `EC2` , `EBS` , `ELB` , and `VPC_LATTICE` . `EC2` is the default health check and cannot be disabled. 
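A minimal private CA sketch with a CRL-based revocation configuration that satisfies the constraints above (subject and bucket name are hypothetical; the bucket policy granting AWS Private CA write access is not shown):

```yaml
CertificateAuthority:
  Type: AWS::ACMPCA::CertificateAuthority
  Properties:
    Type: ROOT
    KeyAlgorithm: RSA_2048
    SigningAlgorithm: SHA256WITHRSA
    Subject:
      CommonName: example.com                # hypothetical subject
    RevocationConfiguration:
      CrlConfiguration:
        Enabled: true
        ExpirationInDays: 7
        S3BucketName: amzn-s3-demo-bucket    # must follow the S3 bucket naming rules
```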
For more information, see [Health checks for instances in an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nOnly specify `EC2` if you must clear a value that was previously set.", "title": "HealthCheckType", "type": "string" }, @@ -61705,32 +61701,32 @@ "additionalProperties": false, "properties": { "AccessPointArn": { - "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to access the Amazon EFS file system.", + "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to mount your Amazon EFS file system.\n\nFor more information, see [Accessing restricted file systems](https://docs.aws.amazon.com/datasync/latest/userguide/create-efs-location.html#create-efs-location-iam) .", "title": "AccessPointArn", "type": "string" }, "Ec2Config": { "$ref": "#/definitions/AWS::DataSync::LocationEFS.Ec2Config", - "markdownDescription": "Specifies the subnet and security groups DataSync uses to access your Amazon EFS file system.", + "markdownDescription": "Specifies the subnet and security groups DataSync uses to connect to one of your Amazon EFS file system's [mount targets](https://docs.aws.amazon.com/efs/latest/ug/accessing-fs.html) .", "title": "Ec2Config" }, "EfsFilesystemArn": { - "markdownDescription": "Specifies the ARN for the Amazon EFS file system.", + "markdownDescription": "Specifies the ARN for your Amazon EFS file system.", "title": "EfsFilesystemArn", "type": "string" }, "FileSystemAccessRoleArn": { - "markdownDescription": "Specifies an AWS Identity and Access Management (IAM) role that DataSync assumes when mounting the Amazon EFS file system.", + "markdownDescription": "Specifies an AWS Identity and Access Management (IAM) role that allows DataSync to access your Amazon EFS file system.\n\nFor information on creating this role, see [Creating a DataSync IAM role for file system access](https://docs.aws.amazon.com/datasync/latest/userguide/create-efs-location.html#create-efs-location-iam-role) .", "title": "FileSystemAccessRoleArn", "type": "string" }, "InTransitEncryption": { - "markdownDescription": "Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it copies data to or from the Amazon EFS file system.\n\nIf you specify an access point using `AccessPointArn` or an IAM role using `FileSystemAccessRoleArn` , you must set this parameter to `TLS1_2` .", + "markdownDescription": "Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it transfers data to or from your Amazon EFS file system.\n\nIf you specify an access point using `AccessPointArn` or an IAM role using `FileSystemAccessRoleArn` , you must set this parameter to `TLS1_2` .", "title": "InTransitEncryption", "type": "string" }, "Subdirectory": { - "markdownDescription": "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location). By default, DataSync uses the root directory, but you can also include subdirectories.\n\n> You must specify a value with forward slashes (for example, `/path/to/folder` ).", + "markdownDescription": "Specifies a mount path for your Amazon EFS file system.
This is where DataSync reads or writes data (depending on whether this is a source or destination location) on your file system.\n\nBy default, DataSync uses the root directory (or [access point](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) if you provide one by using `AccessPointArn` ). You can also include subdirectories using forward slashes (for example, `/path/to/folder` ).", "title": "Subdirectory", "type": "string" }, @@ -63473,7 +63469,7 @@ "additionalProperties": false, "properties": { "ScheduleExpression": { - "markdownDescription": "Specifies your task schedule by using a cron expression in UTC time. For information about cron expression syntax, see the [*Amazon EventBridge User Guide*](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-cron-expressions.html) .", + "markdownDescription": "Specifies your task schedule by using a cron or rate expression.\n\nUse cron expressions for task schedules that run on a specific time and day. For example, the following cron expression creates a task schedule that runs at 8 AM on the first Wednesday of every month:\n\n`cron(0 8 * * 3#1)`\n\nUse rate expressions for task schedules that run on a regular interval. For example, the following rate expression creates a task schedule that runs every 12 hours:\n\n`rate(12 hours)`\n\nFor information about cron and rate expression syntax, see the [*Amazon EventBridge User Guide*](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-scheduled-rule-pattern.html) .", "title": "ScheduleExpression", "type": "string" }, @@ -76319,12 +76315,12 @@ "items": { "$ref": "#/definitions/AWS::EC2::PrefixList.Entry" }, - "markdownDescription": "One or more entries for the prefix list.", + "markdownDescription": "The entries for the prefix list.", "title": "Entries", "type": "array" }, "MaxEntries": { - "markdownDescription": "The maximum number of entries for the prefix list.", + "markdownDescription": "The maximum number of entries for the prefix list. This property is required when you create a prefix list.", "title": "MaxEntries", "type": "number" }, @@ -83068,7 +83064,7 @@ "type": "number" }, "MaximumScalingStepSize": { - "markdownDescription": "The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of `10000` is used.", + "markdownDescription": "The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. If this parameter is omitted, the default value of `10000` is used.", "title": "MaximumScalingStepSize", "type": "number" }, @@ -83700,7 +83696,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `AwsVpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", + "markdownDescription": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `awsvpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", "title": "SecurityGroups", "type": "array" }, @@ -83708,7 +83704,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the subnets associated with the task or service.
There's a limit of 16 subnets that can be specified per `AwsVpcConfiguration` .\n\n> All specified subnets must be from the same VPC.", + "markdownDescription": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `awsvpcConfiguration` .\n\n> All specified subnets must be from the same VPC.", "title": "Subnets", "type": "array" } @@ -83799,12 +83795,12 @@ "title": "DeploymentCircuitBreaker" }, "MaximumPercent": { - "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and tasks that use the EC2 launch type, the *maximum percent* value is set to the default value and is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state. If the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.", + "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types, and tasks in the service use the EC2 launch type, the *maximum percent* value is set to the default value. The *maximum percent* value is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `maximumPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type.
\n\nIf the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.", "title": "MaximumPercent", "type": "number" }, "MinimumHealthyPercent": { - "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nThe default value for a replica service for `minimumHealthyPercent` is 100%. The default `minimumHealthyPercent` value for a service using the `DAEMON` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n\nThe minimum number of healthy tasks during a deployment is the `desiredCount` multiplied by the `minimumHealthyPercent` /100, rounded up to the nearest integer value.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value and is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.
If a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.", + "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nThe default value for a replica service for `minimumHealthyPercent` is 100%. The default `minimumHealthyPercent` value for a service using the `DAEMON` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n\nThe minimum number of healthy tasks during a deployment is the `desiredCount` multiplied by the `minimumHealthyPercent` /100, rounded up to the nearest integer value.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value. The *minimum healthy percent* value is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `minimumHealthyPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type.
\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.", "title": "MinimumHealthyPercent", "type": "number" } @@ -84551,7 +84547,7 @@ }, "LogConfiguration": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.LogConfiguration", - "markdownDescription": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . By default, containers use the same logging driver that the Docker daemon uses. However, the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the [LogConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html) data type). Additional log drivers may be available in future releases of the Amazon ECS container agent. \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", + "markdownDescription": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the docker Create a container command and the `--log-driver` option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However, the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options).
For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the [LogConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html) data type). Additional log drivers may be available in future releases of the Amazon ECS container agent. \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", "title": "LogConfiguration" }, "Memory": { @@ -84820,12 +84816,12 @@ "additionalProperties": false, "properties": { "CredentialsParameter": { - "markdownDescription": "The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of an AWS Secrets Manager secret or SSM Parameter Store parameter. The ARN refers to the stored credentials.", + "markdownDescription": "", "title": "CredentialsParameter", "type": "string" }, "Domain": { - "markdownDescription": "A fully qualified domain name hosted by an [AWS Directory Service](https://docs.aws.amazon.com/directoryservice/latest/admin-guide/directory_microsoft_ad.html) Managed Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2.", + "markdownDescription": "", "title": "Domain", "type": "string" } }, @@ -85509,7 +85505,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `AwsVpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", + "markdownDescription": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `awsvpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", "title": "SecurityGroups", "type": "array" }, @@ -85517,7 +85513,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the subnets associated with the task or service.
There's a limit of 16 subnets that can be specified per `AwsVpcConfiguration` .\n\n> All specified subnets must be from the same VPC.", + "markdownDescription": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `awsvpcConfiguration` .\n\n> All specified subnets must be from the same VPC.", "title": "Subnets", "type": "array" } @@ -117681,7 +117677,7 @@ "items": { "type": "string" }, - "markdownDescription": "Tags for Image Builder to apply to the output container image that &INS; scans. Tags can help you identify and manage your scanned images.", + "markdownDescription": "Tags for Image Builder to apply to the output container image that Amazon Inspector scans. Tags can help you identify and manage your scanned images.", "title": "ContainerTags", "type": "array" }, @@ -117922,7 +117918,7 @@ "items": { "type": "string" }, - "markdownDescription": "Tags for Image Builder to apply to the output container image that &INS; scans. Tags can help you identify and manage your scanned images.", + "markdownDescription": "Tags for Image Builder to apply to the output container image that Amazon Inspector scans. Tags can help you identify and manage your scanned images.", "title": "ContainerTags", "type": "array" }, @@ -142641,7 +142637,7 @@ "items": { "type": "string" }, - "markdownDescription": "(Streams and SQS) A list of current response type enums applied to the event source mapping.\n\nValid Values: `ReportBatchItemFailures`", + "markdownDescription": "(Kinesis, DynamoDB Streams, and SQS) A list of current response type enums applied to the event source mapping.\n\nValid Values: `ReportBatchItemFailures`", "title": "FunctionResponseTypes", "type": "array" }, @@ -142933,7 +142929,7 @@ }, "Code": { "$ref": "#/definitions/AWS::Lambda::Function.Code", - "markdownDescription": "The code for the function.", + "markdownDescription": "The code for the function. You can define your function code in multiple ways:\n\n- For .zip deployment packages, you can specify the Amazon S3 location of the .zip file in the `S3Bucket` , `S3Key` , and `S3ObjectVersion` properties.\n- For .zip deployment packages, you can alternatively define the function code inline in the `ZipFile` property. This method works only for Node.js and Python functions.\n- For container images, specify the URI of your container image in the Amazon ECR registry in the `ImageUri` property.", "title": "Code" }, "CodeSigningConfigArn": { @@ -240065,7 +240061,7 @@ "type": "string" }, "KmsKeyArn": { - "markdownDescription": "The customer master key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket. You can use the default master key or a custom master key that you created in AWS KMS as follows:\n\n- To use the default master key, provide an ARN in the form of `arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses` . For example, if your AWS account ID is 123456789012 and you want to use the default master key in the US West (Oregon) Region, the ARN of the default master key would be `arn:aws:kms:us-west-2:123456789012:alias/aws/ses` . If you use the default master key, you don't need to perform any extra steps to give Amazon SES permission to use the key.\n- To use a custom master key that you created in AWS KMS, provide the ARN of the master key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. For more information about giving permissions, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/dg/receiving-email-permissions.html) .\n\nFor more information about key policies, see the [AWS KMS Developer Guide](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html) .
If you do not specify a master key, Amazon SES does not encrypt your emails.\n\n> Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your AWS KMS keys for decryption. This encryption client is currently available with the [AWS SDK for Java](https://docs.aws.amazon.com/sdk-for-java/) and [AWS SDK for Ruby](https://docs.aws.amazon.com/sdk-for-ruby/) only. For more information about client-side encryption using AWS KMS master keys, see the [Amazon S3 Developer Guide](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) .", + "markdownDescription": "The customer managed key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket. You can use the AWS managed key or a customer managed key that you created in AWS KMS as follows:\n\n- To use the AWS managed key, provide an ARN in the form of `arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses` . For example, if your AWS account ID is 123456789012 and you want to use the AWS managed key in the US West (Oregon) Region, the ARN of the AWS managed key would be `arn:aws:kms:us-west-2:123456789012:alias/aws/ses` . If you use the AWS managed key, you don't need to perform any extra steps to give Amazon SES permission to use the key.\n- To use a customer managed key that you created in AWS KMS, provide the ARN of the customer managed key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. For more information about giving permissions, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/dg/receiving-email-permissions.html) .\n\nFor more information about key policies, see the [AWS KMS Developer Guide](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html) . If you do not specify an AWS KMS key, Amazon SES does not encrypt your emails.\n\n> Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your AWS KMS keys for decryption. This encryption client is currently available with the [AWS SDK for Java](https://docs.aws.amazon.com/sdk-for-java/) and [AWS SDK for Ruby](https://docs.aws.amazon.com/sdk-for-ruby/) only. For more information about client-side encryption using AWS KMS managed keys, see the [Amazon S3 Developer Guide](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) .", "title": "KmsKeyArn", "type": "string" }, @@ -240538,12 +240534,12 @@ "additionalProperties": false, "properties": { "ArchivePolicy": { - "markdownDescription": "The archive policy determines the number of days Amazon SNS retains messages. You can set a retention period from 1 to 365 days.", + "markdownDescription": "The `ArchivePolicy` determines the number of days Amazon SNS retains messages in FIFO topics. You can set a retention period ranging from 1 to 365 days.
This property is only applicable to FIFO topics; attempting to use it with standard topics will result in a creation failure.", "title": "ArchivePolicy", "type": "object" }, "ContentBasedDeduplication": { - "markdownDescription": "Enables content-based deduplication for FIFO topics.\n\n- By default, `ContentBasedDeduplication` is set to `false` . If you create a FIFO topic and this attribute is `false` , you must specify a value for the `MessageDeduplicationId` parameter for the [Publish](https://docs.aws.amazon.com/sns/latest/api/API_Publish.html) action.\n- When you set `ContentBasedDeduplication` to `true` , Amazon SNS uses a SHA-256 hash to generate the `MessageDeduplicationId` using the body of the message (but not the attributes of the message).\n\n(Optional) To override the generated value, you can specify a value for the the `MessageDeduplicationId` parameter for the `Publish` action.", + "markdownDescription": "`ContentBasedDeduplication` enables deduplication of messages based on their content for FIFO topics. By default, this property is set to false. If you create a FIFO topic with `ContentBasedDeduplication` set to false, you must provide a `MessageDeduplicationId` for each `Publish` action. When set to true, Amazon SNS automatically generates a `MessageDeduplicationId` using a SHA-256 hash of the message body (excluding message attributes). You can optionally override this generated value by specifying a `MessageDeduplicationId` in the `Publish` action. Note that this property only applies to FIFO topics; using it with standard topics will cause the creation to fail.", "title": "ContentBasedDeduplication", "type": "boolean" }, @@ -241463,7 +241459,7 @@ "items": { "type": "string" }, - "markdownDescription": "The value of a key-value pair that identifies the location of an attachment to a document. The format for *Value* depends on the type of key you specify.\n\n- For the key *SourceUrl* , the value is an S3 bucket location. For example:\n\n`\"Values\": [ \"s3://doc-example-bucket/my-folder\" ]`\n- For the key *S3FileUrl* , the value is a file in an S3 bucket. For example:\n\n`\"Values\": [ \"s3://doc-example-bucket/my-folder/my-file.py\" ]`\n- For the key *AttachmentReference* , the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse. For example:\n\n`\"Values\": [ \"MyOtherDocument/3/my-other-file.py\" ]`\n\nHowever, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. For example:\n\n`\"Values\": [ \"arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py\" ]`", + "markdownDescription": "The value of a key-value pair that identifies the location of an attachment to a document. The format for *Value* depends on the type of key you specify.\n\n- For the key *SourceUrl* , the value is an S3 bucket location. For example:\n\n`\"Values\": [ \"s3://amzn-s3-demo-bucket/my-prefix\" ]`\n- For the key *S3FileUrl* , the value is a file in an S3 bucket. For example:\n\n`\"Values\": [ \"s3://amzn-s3-demo-bucket/my-prefix/my-file.py\" ]`\n- For the key *AttachmentReference* , the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse.
For example:\n\n`\"Values\": [ \"MyOtherDocument/3/my-other-file.py\" ]`\n\nHowever, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. For example:\n\n`\"Values\": [ \"arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py\" ]`", "title": "Values", "type": "array" } @@ -242424,7 +242420,7 @@ "type": "number" }, "ApproveUntilDate": { - "markdownDescription": "The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically.\n\nEnter dates in the format `YYYY-MM-DD` . For example, `2021-12-31` .\n\n> This parameter is marked as not required, but your request must include a value for either `ApproveUntilDate` or `ApproveAfterDays` . \n\nNot supported for Debian Server or Ubuntu Server.", + "markdownDescription": "The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically.\n\nEnter dates in the format `YYYY-MM-DD` . For example, `2024-12-31` .\n\n> This parameter is marked as not required, but your request must include a value for either `ApproveUntilDate` or `ApproveAfterDays` . \n\nNot supported for Debian Server or Ubuntu Server.", "title": "ApproveUntilDate", "type": "string" }, @@ -248088,7 +248084,7 @@ "type": "string" }, "Environment": { - "markdownDescription": "The environment variables to set in the Docker container.\n\nThe maximum length of each key and value in the `Environment` map is 1024 bytes. The maximum length of all keys and values in the map, combined, is 32 KB. If you pass multiple containers to a `CreateModel` request, then the maximum length of all of their maps, combined, is also 32 KB.", + "markdownDescription": "The environment variables to set in the Docker container. Don't include any sensitive data in your environment variables.\n\nThe maximum length of each key and value in the `Environment` map is 1024 bytes. The maximum length of all keys and values in the map, combined, is 32 KB. If you pass multiple containers to a `CreateModel` request, then the maximum length of all of their maps, combined, is also 32 KB.", "title": "Environment", "type": "object" }, @@ -254891,7 +254887,7 @@ "type": "string" }, "TargetType": { - "markdownDescription": "A string that defines the type of service or database associated with the secret. This value instructs Secrets Manager how to update the secret with the details of the service or database.
This value must be one of the following:\n\n- AWS::RDS::DBInstance\n- AWS::RDS::DBCluster\n- AWS::Redshift::Cluster\n- AWS::DocDB::DBInstance\n- AWS::DocDB::DBCluster\n- AWS::DocDBElastic::Cluster", "title": "TargetType", "type": "string" } From 26cdbeddee9218a093e78cea89b23318e108b14b Mon Sep 17 00:00:00 2001 From: aws-sam-cli-bot <46753707+aws-sam-cli-bot@users.noreply.github.com> Date: Thu, 12 Sep 2024 21:25:18 +0000 Subject: [PATCH 7/7] chore: bump version to 1.92.0 --- samtranslator/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samtranslator/__init__.py b/samtranslator/__init__.py index eb97acb8c..e70b96d07 100644 --- a/samtranslator/__init__.py +++ b/samtranslator/__init__.py @@ -1 +1 @@ -__version__ = "1.91.0" +__version__ = "1.92.0"
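The updated `HealthCheckType` description above documents a comma-separated string of health check types ( `EC2` , `EBS` , `ELB` , `VPC_LATTICE` ). A minimal sketch of an Auto Scaling group that combines the always-on `EC2` check with `ELB` checks; the subnet and launch template IDs are placeholders:

```yaml
Resources:
  WebFleet:
    Type: AWS::AutoScaling::AutoScalingGroup
    Properties:
      MinSize: "1"
      MaxSize: "4"
      VPCZoneIdentifier:
        - subnet-0abc1234def567890               # placeholder subnet ID
      LaunchTemplate:
        LaunchTemplateId: lt-0abc1234def567890   # placeholder launch template
        Version: "1"
      # Comma-separated string; EC2 is the default check and cannot be disabled.
      HealthCheckType: EC2,ELB
      HealthCheckGracePeriod: 300
```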
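The new `ScheduleExpression` text accepts both cron and rate expressions. A sketch of a DataSync task that runs every 12 hours, assuming two existing locations (the ARNs are placeholders):

```yaml
Resources:
  RecurringTransfer:
    Type: AWS::DataSync::Task
    Properties:
      SourceLocationArn: arn:aws:datasync:us-east-1:111122223333:location/loc-source000000000      # placeholder
      DestinationLocationArn: arn:aws:datasync:us-east-1:111122223333:location/loc-dest1111111111  # placeholder
      Schedule:
        # rate() runs on a fixed interval; cron(0 8 * * 3#1) would instead
        # run at 8 AM on the first Wednesday of every month.
        ScheduleExpression: rate(12 hours)
```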
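The reworked `maximumPercent` and `minimumHealthyPercent` descriptions are easiest to read against a concrete service. A sketch with a `desiredCount` of four; the cluster and task definition names are placeholders:

```yaml
Resources:
  WebService:
    Type: AWS::ECS::Service
    Properties:
      Cluster: my-cluster                  # placeholder cluster name
      TaskDefinition: my-task-definition   # placeholder task definition family
      DesiredCount: 4
      DeploymentConfiguration:
        # During a rolling (ECS) deployment: at most 8 tasks run (200% of 4),
        # and at least 2 must stay RUNNING (50% of 4, rounded up).
        MaximumPercent: 200
        MinimumHealthyPercent: 50
```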
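The expanded `AWS::Lambda::Function` `Code` description lists three packaging options. A sketch of the inline `ZipFile` variant, which the description limits to Node.js and Python; the role ARN is a placeholder:

```yaml
Resources:
  HelloFunction:
    Type: AWS::Lambda::Function
    Properties:
      Runtime: python3.12
      Handler: index.handler    # inline code is exposed to the runtime as index.py
      Role: arn:aws:iam::111122223333:role/lambda-exec-role   # placeholder execution role
      Code:
        # Alternatives: S3Bucket/S3Key/S3ObjectVersion for .zip packages,
        # or ImageUri for a container image in Amazon ECR.
        ZipFile: |
          def handler(event, context):
              return {"statusCode": 200}
```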
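The rewritten `ArchivePolicy` and `ContentBasedDeduplication` descriptions both stress that these properties are FIFO-only. A sketch of a FIFO topic using both; the `MessageRetentionPeriod` key inside the policy object is an assumption about the archive policy's JSON shape:

```yaml
Resources:
  OrdersTopic:
    Type: AWS::SNS::Topic
    Properties:
      TopicName: orders.fifo    # FIFO topic names must end in .fifo
      FifoTopic: true
      # Without this, every Publish call must supply a MessageDeduplicationId.
      ContentBasedDeduplication: true
      # Retention in days, within the documented 1-365 range; fails on standard topics.
      ArchivePolicy:
        MessageRetentionPeriod: "30"
```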
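The `TargetType` list gains `AWS::DocDBElastic::Cluster` in this patch. A sketch of the attachment resource using the new value; `DatabaseSecret` and `ElasticCluster` are assumed to be defined elsewhere in the same template:

```yaml
Resources:
  DocDbSecretAttachment:
    Type: AWS::SecretsManager::SecretTargetAttachment
    Properties:
      SecretId: !Ref DatabaseSecret    # assumed AWS::SecretsManager::Secret
      TargetId: !Ref ElasticCluster    # assumed AWS::DocDBElastic::Cluster
      TargetType: AWS::DocDBElastic::Cluster
```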