diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index c96762e32..648fb27b8 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -26184,7 +26184,7 @@ "type": "object" }, "BackupVaultName": { - "markdownDescription": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.", + "markdownDescription": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created.", "title": "BackupVaultName", "type": "string" }, @@ -33254,7 +33254,7 @@ "items": { "$ref": "#/definitions/AWS::CleanRooms::ConfiguredTable.AnalysisRule" }, - "markdownDescription": "The entire created analysis rule.", + "markdownDescription": "The analysis rule that was created for the configured table.", "title": "AnalysisRules", "type": "array" }, @@ -33818,7 +33818,7 @@ "properties": { "S3": { "$ref": "#/definitions/AWS::CleanRooms::Membership.ProtectedQueryS3OutputConfiguration", - "markdownDescription": "Required configuration for a protected query with an `S3` output type.", + "markdownDescription": "Required configuration for a protected query with an `s3` output type.", "title": "S3" } }, @@ -33932,7 +33932,7 @@ }, "Parameters": { "$ref": "#/definitions/AWS::CleanRooms::PrivacyBudgetTemplate.Parameters", - "markdownDescription": "Specifies the epislon and noise parameters for the privacy budget template.", + "markdownDescription": "Specifies the epsilon and noise parameters for the privacy budget template.", "title": "Parameters" }, "PrivacyBudgetType": { @@ -35488,7 +35488,7 @@ "additionalProperties": false, "properties": { "AccountFilterType": { - "markdownDescription": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSets deploys to the accounts specified in `Accounts` parameter.\n- `DIFFERENCE` : StackSets excludes the accounts specified in `Accounts` parameter. This enables user to avoid certain accounts within an OU such as suspended accounts.\n- `UNION` : StackSets includes additional accounts deployment targets.\n\nThis is the default value if `AccountFilterType` is not provided. This enables user to update an entire OU and individual accounts from a different OU in one request, which used to be two separate requests.\n- `NONE` : Deploys to all the accounts in specified organizational units (OU).", + "markdownDescription": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSet deploys to the accounts specified in the `Accounts` parameter.\n- `DIFFERENCE` : StackSet deploys to the OU, excluding the accounts specified in the `Accounts` parameter.\n- `UNION` : StackSet deploys to the OU, and the accounts specified in the `Accounts` parameter.
`UNION` is not supported for create operations when using StackSet as a resource.", "title": "AccountFilterType", "type": "string" }, @@ -36322,12 +36322,12 @@ "additionalProperties": false, "properties": { "Header": { - "markdownDescription": "", + "markdownDescription": "The name of the HTTP header to configure for the single header policy.", "title": "Header", "type": "string" }, "Value": { - "markdownDescription": "", + "markdownDescription": "Specifies the value to assign to the header for a single header policy.", "title": "Value", "type": "string" } }, @@ -36362,11 +36362,11 @@ "properties": { "SessionStickinessConfig": { "$ref": "#/definitions/AWS::CloudFront::ContinuousDeploymentPolicy.SessionStickinessConfig", - "markdownDescription": "", + "markdownDescription": "Enables session stickiness for the associated origin or cache settings.", "title": "SessionStickinessConfig" }, "Weight": { - "markdownDescription": "", + "markdownDescription": "The percentage of requests that CloudFront sends to an associated origin or cache settings.", "title": "Weight", "type": "number" } }, @@ -36835,7 +36835,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "An alias for the CloudFront distribution's domain name.\n\n> This property is legacy. We recommend that you use [Aliases](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-distributionconfig.html#cfn-cloudfront-distribution-distributionconfig-aliases) instead.", "title": "CNAMEs", "type": "array" }, @@ -36867,7 +36867,7 @@ }, "CustomOrigin": { "$ref": "#/definitions/AWS::CloudFront::Distribution.LegacyCustomOrigin", - "markdownDescription": "", + "markdownDescription": "The user-defined HTTP server that serves as the origin for content that CloudFront distributes.\n\n> This property is legacy. We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", "title": "CustomOrigin" }, "DefaultCacheBehavior": { @@ -36925,7 +36925,7 @@ }, "S3Origin": { "$ref": "#/definitions/AWS::CloudFront::Distribution.LegacyS3Origin", - "markdownDescription": "", + "markdownDescription": "The origin as an Amazon S3 bucket.\n\n> This property is legacy. We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", "title": "S3Origin" }, "Staging": { @@ -37048,22 +37048,22 @@ "additionalProperties": false, "properties": { "DNSName": { - "markdownDescription": "", + "markdownDescription": "The domain name assigned to your CloudFront distribution.", "title": "DNSName", "type": "string" }, "HTTPPort": { - "markdownDescription": "", + "markdownDescription": "The HTTP port that CloudFront uses to connect to the origin. Specify the HTTP port that the origin listens on.", "title": "HTTPPort", "type": "number" }, "HTTPSPort": { - "markdownDescription": "", + "markdownDescription": "The HTTPS port that CloudFront uses to connect to the origin.
Specify the HTTPS port that the origin listens on.", "title": "HTTPSPort", "type": "number" }, "OriginProtocolPolicy": { - "markdownDescription": "", + "markdownDescription": "Specifies the protocol (HTTP or HTTPS) that CloudFront uses to connect to the origin.", "title": "OriginProtocolPolicy", "type": "string" }, @@ -37071,7 +37071,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "The minimum SSL/TLS protocol version that CloudFront uses when communicating with your origin server over HTTPS.\n\nFor more information, see [Minimum Origin SSL Protocol](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginSSLProtocols) in the *Amazon CloudFront Developer Guide* .", "title": "OriginSSLProtocols", "type": "array" } @@ -37087,12 +37087,12 @@ "additionalProperties": false, "properties": { "DNSName": { - "markdownDescription": "", + "markdownDescription": "The domain name assigned to your CloudFront distribution.", "title": "DNSName", "type": "string" }, "OriginAccessIdentity": { - "markdownDescription": "", + "markdownDescription": "The CloudFront origin access identity to associate with the distribution. Use an origin access identity to configure the distribution so that end users can only access objects in an Amazon S3 bucket through CloudFront.\n\n> This property is legacy. We recommend that you use [OriginAccessControl](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudfront-originaccesscontrol.html) instead.", "title": "OriginAccessIdentity", "type": "string" } }, @@ -39305,7 +39305,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` .
You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. 
For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::One::UKey`\n- `AWS::One::User`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::RUM::AppMonitor`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", "title": "Field", "type": "string" }, @@ -39628,7 +39628,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::One::UKey`\n- `AWS::One::User`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::RUM::AppMonitor`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. 
For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", "title": "Field", "type": "string" }, @@ -44531,7 +44531,7 @@ "additionalProperties": false, "properties": { "Authentication": { - "markdownDescription": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", + "markdownDescription": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be masked in the response. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", "title": "Authentication", "type": "string" }, @@ -44614,7 +44614,7 @@ "type": "string" }, "SecretToken": { - "markdownDescription": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. 
Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities.", + "markdownDescription": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be masked in the response.", "title": "SecretToken", "type": "string" } @@ -64650,7 +64650,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name of the farm.", + "markdownDescription": "The display name of the farm.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -64732,7 +64732,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name of the fleet summary to update.", + "markdownDescription": "The display name of the fleet summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -65379,7 +65379,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name of the queue summary to update.", + "markdownDescription": "The display name of the queue summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -65728,7 +65728,7 @@ "additionalProperties": false, "properties": { "DisplayName": { - "markdownDescription": "The display name of the storage profile summary to update.", + "markdownDescription": "The display name of the storage profile summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -69829,7 +69829,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", + "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. 
If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -71198,7 +71198,7 @@ "type": "string" }, "Locale": { - "markdownDescription": "The locale of the IPAM pool. In IPAM, the locale is the AWS Region where you want to make an IPAM pool available for allocations. Only resources in the same Region as the locale of the pool can get IP address allocations from the pool. You can only allocate a CIDR for a VPC, for example, from an IPAM pool that shares a locale with the VPC\u2019s Region. Note that once you choose a Locale for a pool, you cannot modify it. If you choose an AWS Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.", + "markdownDescription": "The locale of the IPAM pool.\n\nThe locale for the pool should be one of the following:\n\n- An AWS Region where you want this IPAM pool to be available for allocations.\n- The network border group for an AWS Local Zone where you want this IPAM pool to be available for allocations ( [supported Local Zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#byoip-zone-avail) ). This option is only available for IPAM IPv4 pools in the public scope.\n\nIf you choose an AWS Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.", "title": "Locale", "type": "string" }, @@ -77684,7 +77684,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", + "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowestPrice` and `priceCapacityOptimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -77907,7 +77907,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type. 
These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.", + "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowestPrice` and `priceCapacityOptimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -78232,7 +78232,7 @@ "type": "string" }, "EnableDns64": { - "markdownDescription": "Indicates whether DNS queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses for IPv4-only destinations. For more information, see [DNS64 and NAT64](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-nat64-dns64) in the *Amazon Virtual Private Cloud User Guide* .", + "markdownDescription": "Indicates whether DNS queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses for IPv4-only destinations.\n\n> You must first configure a NAT gateway in a public subnet (separate from the subnet containing the IPv6-only workloads). For example, the subnet containing the NAT gateway should have a `0.0.0.0/0` route pointing to the internet gateway. For more information, see [Configure DNS64 and NAT64](https://docs.aws.amazon.com/vpc/latest/userguide/nat-gateway-nat64-dns64.html#nat-gateway-nat64-dns64-walkthrough) in the *Amazon Virtual Private Cloud User Guide* .", "title": "EnableDns64", "type": "boolean" }, @@ -82813,7 +82813,7 @@ "additionalProperties": false, "properties": { "EncryptionType": { - "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. 
For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", "title": "EncryptionType", "type": "string" }, @@ -82894,37 +82894,37 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "A list of enumerable Strings representing the repository creation scenarios that this template applies to. The two supported scenarios are PULL_THROUGH_CACHE and REPLICATION.", "title": "AppliedFor", "type": "array" }, "Description": { - "markdownDescription": "", + "markdownDescription": "The description associated with the repository creation template.", "title": "Description", "type": "string" }, "EncryptionConfiguration": { "$ref": "#/definitions/AWS::ECR::RepositoryCreationTemplate.EncryptionConfiguration", - "markdownDescription": "The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest.\n\nBy default, when no encryption configuration is set or the `AES256` encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts your data at rest using an AES-256 encryption algorithm. This does not require any action on your part.\n\nFor more control over the encryption of the contents of your repository, you can use server-side encryption with AWS Key Management Service key stored in AWS Key Management Service ( AWS KMS ) to encrypt your images. For more information, see [Amazon ECR encryption at rest](https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) in the *Amazon Elastic Container Registry User Guide* .", + "markdownDescription": "The encryption configuration associated with the repository creation template.", "title": "EncryptionConfiguration" }, "ImageTagMutability": { - "markdownDescription": "", + "markdownDescription": "The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used, which will allow image tags to be overwritten.
If IMMUTABLE is specified, all image tags within the repository will be immutable, which will prevent them from being overwritten.", "title": "ImageTagMutability", "type": "string" }, "LifecyclePolicy": { - "markdownDescription": "", + "markdownDescription": "The lifecycle policy to use for repositories created using the template.", "title": "LifecyclePolicy", "type": "string" }, "Prefix": { - "markdownDescription": "", + "markdownDescription": "The repository namespace prefix associated with the repository creation template.", "title": "Prefix", "type": "string" }, "RepositoryPolicy": { - "markdownDescription": "", + "markdownDescription": "The repository policy to apply to repositories created using the template. A repository policy is a permissions policy associated with a repository to control access permissions.", "title": "RepositoryPolicy", "type": "string" }, @@ -82932,7 +82932,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The tags attached to the resource.", + "markdownDescription": "The metadata to apply to the repository to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.", "title": "ResourceTags", "type": "array" } }, @@ -82968,7 +82968,7 @@ "additionalProperties": false, "properties": { "EncryptionType": { - "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm.
For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", "title": "EncryptionType", "type": "string" }, @@ -90375,12 +90375,12 @@ "type": "string" }, "AutoMinorVersionUpgrade": { - "markdownDescription": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "markdownDescription": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", "title": "AutoMinorVersionUpgrade", "type": "boolean" }, "CacheNodeType": { - "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . 
\n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis configuration variables `appendonly` and `appendfsync` are not supported on Redis version 2.8.22 and later.", + "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . 
\n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis OSS configuration variables `appendonly` and `appendfsync` are not supported on Redis OSS version 2.8.22 and later.", "title": "CacheNodeType", "type": "string" }, @@ -90418,7 +90418,7 @@ "type": "string" }, "IpDiscovery": { - "markdownDescription": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "IpDiscovery", "type": "string" }, @@ -90431,7 +90431,7 @@ "type": "array" }, "NetworkType": { - "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "NetworkType", "type": "string" }, @@ -90472,12 +90472,12 @@ "items": { "type": "string" }, - "markdownDescription": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "markdownDescription": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). 
The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "title": "SnapshotArns", "type": "array" }, "SnapshotName": { - "markdownDescription": "The name of a Redis snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", + "markdownDescription": "The name of a Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", "title": "SnapshotName", "type": "string" }, @@ -90653,7 +90653,7 @@ "additionalProperties": false, "properties": { "AutomaticFailoverEnabled": { - "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis (cluster mode enabled) replication groups.", + "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.", "title": "AutomaticFailoverEnabled", "type": "boolean" }, @@ -90668,7 +90668,7 @@ "type": "string" }, "EngineVersion": { - "markdownDescription": "The Elasticache Redis engine version.", + "markdownDescription": "The ElastiCache Redis OSS engine version.", "title": "EngineVersion", "type": "string" }, @@ -90779,7 +90779,7 @@ "additionalProperties": false, "properties": { "NodeGroupId": { - "markdownDescription": "Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "markdownDescription": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "title": "NodeGroupId", "type": "string" }, @@ -90922,22 +90922,22 @@ "additionalProperties": false, "properties": { "AtRestEncryptionEnabled": { - "markdownDescription": "A flag that enables encryption at rest when set to `true` .\n\nYou cannot modify the value of `AtRestEncryptionEnabled` after the replication group is created. 
To enable encryption at rest on a replication group you must set `AtRestEncryptionEnabled` to `true` when you create the replication group.\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`", "title": "AtRestEncryptionEnabled", "type": "boolean" }, "AuthToken": { - "markdownDescription": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "markdownDescription": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis OSS AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "title": "AuthToken", "type": "string" }, "AutoMinorVersionUpgrade": { - "markdownDescription": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "markdownDescription": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt in to the next minor version upgrade campaign. 
This parameter is disabled for previous versions.", "title": "AutoMinorVersionUpgrade", "type": "boolean" }, "AutomaticFailoverEnabled": { - "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis (cluster mode enabled) replication groups.\n\nDefault: false", + "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.\n\nDefault: false", "title": "AutomaticFailoverEnabled", "type": "boolean" }, @@ -90947,7 +90947,7 @@ "type": "string" }, "CacheParameterGroupName": { - "markdownDescription": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", + "markdownDescription": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis OSS version 3.2.4 or later, have only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis OSS (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis OSS (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", "title": "CacheParameterGroupName", "type": "string" }, @@ -90965,7 +90965,7 @@ "type": "string" }, "ClusterMode": { - "markdownDescription": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", + "markdownDescription": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", "title": "ClusterMode", "type": "string" }, @@ -90990,7 +90990,7 @@ "type": "string" }, "IpDiscovery": { - "markdownDescription": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . 
IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "IpDiscovery", "type": "string" }, @@ -91013,7 +91013,7 @@ "type": "boolean" }, "NetworkType": { - "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "NetworkType", "type": "string" }, @@ -91021,7 +91021,7 @@ "items": { "$ref": "#/definitions/AWS::ElastiCache::ReplicationGroup.NodeGroupConfiguration" }, - "markdownDescription": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "markdownDescription": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis OSS cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "title": "NodeGroupConfiguration", "type": "array" }, @@ -91036,7 +91036,7 @@ "type": "number" }, "NumNodeGroups": { - "markdownDescription": "An optional parameter that specifies the number of node groups (shards) for this Redis (cluster mode enabled) replication group. For Redis (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. 
When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", + "markdownDescription": "An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", "title": "NumNodeGroups", "type": "number" }, @@ -91090,7 +91090,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "markdownDescription": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "title": "SnapshotArns", "type": "array" }, @@ -91110,7 +91110,7 @@ "type": "string" }, "SnapshottingClusterId": { - "markdownDescription": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups.", + "markdownDescription": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups.", "title": "SnapshottingClusterId", "type": "string" }, @@ -91123,12 +91123,12 @@ "type": "array" }, "TransitEncryptionEnabled": { - "markdownDescription": "A flag that enables in-transit encryption when set to `true` .\n\nYou cannot modify the value of `TransitEncryptionEnabled` after the cluster is created. 
To enable in-transit encryption on a cluster you must set `TransitEncryptionEnabled` to `true` when you create a cluster.\n\nThis parameter is valid only if the `Engine` parameter is `redis` , the `EngineVersion` parameter is `3.2.6` or `4.x` onward, and the cluster is being created in an Amazon VPC.\n\nIf you enable in-transit encryption, you must also specify a value for `CacheSubnetGroup` .\n\n*Required:* Only available when creating a replication group in an Amazon VPC using redis version `3.2.6` or `4.x` onward.\n\nDefault: `false`\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` .", + "markdownDescription": "A flag that enables in-transit encryption when set to `true` .\n\nYou cannot modify the value of `TransitEncryptionEnabled` after the cluster is created. To enable in-transit encryption on a cluster you must set `TransitEncryptionEnabled` to `true` when you create a cluster.\n\nThis parameter is valid only if the `Engine` parameter is `redis` , the `EngineVersion` parameter is `3.2.6` or `4.x` onward, and the cluster is being created in an Amazon VPC.\n\nIf you enable in-transit encryption, you must also specify a value for `CacheSubnetGroup` .\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` .", "title": "TransitEncryptionEnabled", "type": "boolean" }, "TransitEncryptionMode": { - "markdownDescription": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", + "markdownDescription": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. 
Once you migrate all your Redis OSS clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", "title": "TransitEncryptionMode", "type": "string" }, @@ -91247,7 +91247,7 @@ "additionalProperties": false, "properties": { "NodeGroupId": { - "markdownDescription": "Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "markdownDescription": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "title": "NodeGroupId", "type": "string" }, @@ -91471,7 +91471,7 @@ "title": "CacheUsageLimits" }, "DailySnapshotTime": { - "markdownDescription": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis and Serverless Memcached only.", + "markdownDescription": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis OSS and Serverless Memcached only.", "title": "DailySnapshotTime", "type": "string" }, @@ -91532,7 +91532,7 @@ "type": "array" }, "SnapshotRetentionLimit": { - "markdownDescription": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis and Serverless Memcached only.", + "markdownDescription": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis OSS and Serverless Memcached only.", "title": "SnapshotRetentionLimit", "type": "number" }, @@ -91553,7 +91553,7 @@ "type": "array" }, "UserGroupId": { - "markdownDescription": "The identifier of the user group associated with the serverless cache. Available for Redis only. Default is NULL.", + "markdownDescription": "The identifier of the user group associated with the serverless cache. Available for Redis OSS only. Default is NULL.", "title": "UserGroupId", "type": "string" } @@ -95084,7 +95084,7 @@ }, "IdMappingTechniques": { "$ref": "#/definitions/AWS::EntityResolution::IdMappingWorkflow.IdMappingTechniques", - "markdownDescription": "An object which defines the `idMappingType` and the `providerProperties` .", + "markdownDescription": "An object which defines the ID mapping technique and any additional configurations.", "title": "IdMappingTechniques" }, "InputSourceConfig": { @@ -95171,7 +95171,7 @@ "additionalProperties": false, "properties": { "InputSourceARN": { - "markdownDescription": "An AWS Glue table ARN for the input source table.", + "markdownDescription": "An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.", "title": "InputSourceARN", "type": "string" }, @@ -95181,7 +95181,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve to.", + "markdownDescription": "The type of ID namespace. 
There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` which all `sourceIds` will resolve to.", "title": "Type", "type": "string" } @@ -95329,7 +95329,7 @@ "type": "array" }, "Type": { - "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve to.", + "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` which all `sourceIds` will resolve to.", "title": "Type", "type": "string" } @@ -95384,7 +95384,7 @@ "additionalProperties": false, "properties": { "InputSourceARN": { - "markdownDescription": "An AWS Glue table ARN for the input source table.", + "markdownDescription": "An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.", "title": "InputSourceARN", "type": "string" }, @@ -95703,7 +95703,7 @@ "additionalProperties": false, "properties": { "AttributeMatchingModel": { - "markdownDescription": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the AttributeMatchingModel. When choosing `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of `BusinessEmail` field of Profile B matches, the two profiles are matched on the `Email` type. When choosing `ONE_TO_ONE` ,the system can only match if the sub-types are exact matches. For example, only when the value of the `Email` field of Profile A and the value of the `Email` field of Profile B matches, the two profiles are matched on the `Email` type.", + "markdownDescription": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the `attributeMatchingModel` .\n\nIf you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of `BusinessEmail` field of Profile B match, the two profiles are matched on the `Email` attribute type.\n\nIf you choose `ONE_TO_ONE` , the system can only match attributes if the sub-types are an exact match. For example, for the `Email` attribute type, the system will only consider it a match if the value of the `Email` field of Profile A matches the value of the `Email` field of Profile B.", "title": "AttributeMatchingModel", "type": "string" }, @@ -95924,7 +95924,7 @@ "type": "string" }, "MatchKey": { - "markdownDescription": "A key that allows grouping of multiple input attributes into a unified matching group. For example, consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . By assigning a `matchKey` called `address` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group. 
If no `matchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.", "title": "MatchKey", "type": "string" }, @@ -103381,7 +103381,7 @@ "items": { "$ref": "#/definitions/AWS::GameLift::Fleet.LocationConfiguration" }, - "markdownDescription": "A set of remote locations to deploy additional instances to and manage as part of the fleet. This parameter can only be used when creating fleets in AWS Regions that support multiple locations. You can add any Amazon GameLift-supported AWS Region as a remote location, in the form of an AWS Region code, such as `us-west-2` or Local Zone code. To create a fleet with instances in the home Region only, don't set this parameter.\n\nWhen using this parameter, Amazon GameLift requires you to include your home location in the request.", + "markdownDescription": "A set of remote locations to deploy additional instances to and manage as a multi-location fleet. Use this parameter when creating a fleet in AWS Regions that support multiple locations. You can add any AWS Region or Local Zone that's supported by Amazon GameLift. Provide a list of one or more AWS Region codes, such as `us-west-2` , or Local Zone names. When using this parameter, Amazon GameLift requires you to include your home location in the request. For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "title": "Locations", "type": "array" }, @@ -103627,7 +103627,7 @@ "additionalProperties": false, "properties": { "Location": { - "markdownDescription": "An AWS Region code, such as `us-west-2` .", + "markdownDescription": "An AWS Region code, such as `us-west-2` . For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "title": "Location", "type": "string" }, @@ -104220,7 +104220,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of labels to assign to the new matchmaking configuration resource. Tags are developer-defined key-value pairs. Tagging AWS resources are useful for resource management, access management and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Rareference* .", + "markdownDescription": "A list of labels to assign to the new resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management, and cost allocation. 
For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference* .", "title": "Tags", "type": "array" } @@ -114633,7 +114633,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .", + "markdownDescription": "A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .\n\nThis property is optional. If it is not included, IAM will retrieve and use the top intermediate certificate authority (CA) thumbprint of the OpenID Connect identity provider server certificate.", "title": "ThumbprintList", "type": "array" }, @@ -129708,7 +129708,7 @@ "type": "array" }, "AssetModelName": { - "markdownDescription": "A unique, friendly name for the asset model.", + "markdownDescription": "A unique name for the asset model.", "title": "AssetModelName", "type": "string" }, @@ -130243,7 +130243,7 @@ "type": "array" }, "GatewayName": { - "markdownDescription": "A unique, friendly name for the gateway.", + "markdownDescription": "A unique name for the gateway.", "title": "GatewayName", "type": "string" }, @@ -140506,7 +140506,7 @@ }, "ProcessingConfiguration": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.ProcessingConfiguration", - "markdownDescription": "", + "markdownDescription": "Specifies configuration for Snowflake.", "title": "ProcessingConfiguration" }, "RetryOptions": { @@ -153456,7 +153456,7 @@ "type": "object" }, "AirflowVersion": { - "markdownDescription": "The version of Apache Airflow to use for the environment. If no value is specified, defaults to the latest version.\n\nIf you specify a newer version number for an existing environment, the version update requires some service interruption before taking effect.\n\n*Allowed Values* : `1.10.12` | `2.0.2` | `2.2.2` | `2.4.3` | `2.5.1` | `2.6.3` | `2.7.2` (latest)", + "markdownDescription": "The version of Apache Airflow to use for the environment. If no value is specified, defaults to the latest version.\n\nIf you specify a newer version number for an existing environment, the version update requires some service interruption before taking effect.\n\n*Allowed Values* : `1.10.12` | `2.0.2` | `2.2.2` | `2.4.3` | `2.5.1` | `2.6.3` | `2.7.2` | `2.8.1` | `2.9.2` (latest)", "title": "AirflowVersion", "type": "string" }, @@ -167377,7 +167377,7 @@ "properties": { "LogDestination": { "additionalProperties": true, - "markdownDescription": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` . The following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . 
The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", + "markdownDescription": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` .\n\nThe following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -182807,7 +182807,7 @@ "items": { "type": "string" }, - "markdownDescription": "Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.", + "markdownDescription": "Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups.", "title": "SecurityGroup", "type": "array" }, @@ -224627,7 +224627,7 @@ "type": "array" }, "BacktrackWindow": { - "markdownDescription": "The target backtrack window, in seconds. To disable backtracking, set this value to 0.\n\n> Currently, Backtrack is only supported for Aurora MySQL DB clusters. \n\nDefault: 0\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).\n\nValid for: Aurora MySQL DB clusters only", + "markdownDescription": "The target backtrack window, in seconds. To disable backtracking, set this value to `0` .\n\nValid for Cluster Type: Aurora MySQL DB clusters only\n\nDefault: `0`\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).", "title": "BacktrackWindow", "type": "number" }, @@ -224810,7 +224810,7 @@ "type": "string" }, "PubliclyAccessible": { - "markdownDescription": "Specifies whether the DB cluster is publicly accessible.\n\nWhen the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. 
That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.\n\nWhen the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nDefault: The default behavior varies depending on whether `DBSubnetGroupName` is specified.\n\nIf `DBSubnetGroupName` isn't specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the default VPC in the target Region doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.\n\nIf `DBSubnetGroupName` is specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the subnets are part of a VPC that doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.", + "markdownDescription": "Specifies whether the DB cluster is publicly accessible.\n\nWhen the DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB cluster, the endpoint resolves to the private IP address. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.\n\nWhen the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nDefault: The default behavior varies depending on whether `DBSubnetGroupName` is specified.\n\nIf `DBSubnetGroupName` isn't specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the default VPC in the target Region doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.\n\nIf `DBSubnetGroupName` is specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the subnets are part of a VPC that doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.", "title": "PubliclyAccessible", "type": "boolean" }, @@ -224868,7 +224868,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB cluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", + "markdownDescription": "Tags to assign to the DB cluster.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", "title": "Tags", "type": "array" }, @@ -224952,7 +224952,7 @@ "type": "string" }, "SecretArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the secret.", + "markdownDescription": "The Amazon Resource Name (ARN) of the secret. This parameter is a return value that you can retrieve using the `Fn::GetAtt` intrinsic function. 
For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html#aws-resource-rds-dbcluster-return-values) .", "title": "SecretArn", "type": "string" } @@ -225058,17 +225058,17 @@ "additionalProperties": false, "properties": { "DBClusterParameterGroupName": { - "markdownDescription": "The name of the DB cluster parameter group.\n\nConstraints:\n\n- Must not match the name of an existing DB cluster parameter group.\n\nIf you don't specify a value for `DBClusterParameterGroupName` property, a name is automatically created for the DB cluster parameter group.\n\n> This value is stored as a lowercase string.", + "markdownDescription": "The name of the DB cluster parameter group.\n\nConstraints:\n\n- Must not match the name of an existing DB cluster parameter group.\n\n> This value is stored as a lowercase string.", "title": "DBClusterParameterGroupName", "type": "string" }, "Description": { - "markdownDescription": "A friendly description for this DB cluster parameter group.", + "markdownDescription": "The description for the DB cluster parameter group.", "title": "Description", "type": "string" }, "Family": { - "markdownDescription": "The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a DB engine and engine version compatible with that DB cluster parameter group family.\n\n> The DB cluster parameter group family can't be changed when updating a DB cluster parameter group. \n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBClusterParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBClusterParameterGroup.html)` .", + "markdownDescription": "The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.\n\n*Aurora MySQL*\n\nExample: `aurora-mysql5.7` , `aurora-mysql8.0`\n\n*Aurora PostgreSQL*\n\nExample: `aurora-postgresql14`\n\n*RDS for MySQL*\n\nExample: `mysql8.0`\n\n*RDS for PostgreSQL*\n\nExample: `postgres13`\n\nTo list all of the available parameter group families for a DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine `\n\nFor example, to list all of the available parameter group families for the Aurora PostgreSQL DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine aurora-postgresql`\n\n> The output contains duplicates. 
\n\nThe following are the valid DB engine values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `mysql`\n- `postgres`", "title": "Family", "type": "string" }, @@ -225081,7 +225081,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB cluster parameter group.", + "markdownDescription": "Tags to assign to the DB cluster parameter group.", "title": "Tags", "type": "array" } @@ -225178,7 +225178,7 @@ "type": "string" }, "AutomaticBackupReplicationRegion": { - "markdownDescription": "", + "markdownDescription": "The AWS Region associated with the automated backup.", "title": "AutomaticBackupReplicationRegion", "type": "string" }, @@ -225223,7 +225223,7 @@ "type": "string" }, "DBClusterIdentifier": { - "markdownDescription": "The identifier of the DB cluster that the instance will belong to.", + "markdownDescription": "The identifier of the DB cluster that this DB instance will belong to.\n\nThis setting doesn't apply to RDS Custom DB instances.", "title": "DBClusterIdentifier", "type": "string" }, @@ -225261,12 +225261,12 @@ "type": "array" }, "DBSnapshotIdentifier": { - "markdownDescription": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n\nBy specifying this property, you can create a DB instance from the specified DB snapshot. If the `DBSnapshotIdentifier` property is an empty string or the `AWS::RDS::DBInstance` declaration has no `DBSnapshotIdentifier` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n\nSome DB instance properties aren't valid when you restore from a snapshot, such as the `MasterUsername` and `MasterUserPassword` properties. For information about the properties that you can specify, see the `RestoreDBInstanceFromDBSnapshot` action in the *Amazon RDS API Reference* .\n\nAfter you restore a DB instance with a `DBSnapshotIdentifier` property, you must specify the same `DBSnapshotIdentifier` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the `DBSnapshotIdentifier` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified `DBSnapshotIdentifier` property, and the original DB instance is deleted.\n\nIf you specify the `DBSnapshotIdentifier` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n\n- `CharacterSetName`\n- `DBClusterIdentifier`\n- `DBName`\n- `DeleteAutomatedBackups`\n- `EnablePerformanceInsights`\n- `KmsKeyId`\n- `MasterUsername`\n- `MasterUserPassword`\n- `PerformanceInsightsKMSKeyId`\n- `PerformanceInsightsRetentionPeriod`\n- `PromotionTier`\n- `SourceDBInstanceIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an encrypted snapshot)\n- `Timezone`\n\n*Amazon Aurora*\n\nNot applicable. 
Snapshot restore is managed by the DB cluster.", + "markdownDescription": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n\nBy specifying this property, you can create a DB instance from the specified DB snapshot. If the `DBSnapshotIdentifier` property is an empty string or the `AWS::RDS::DBInstance` declaration has no `DBSnapshotIdentifier` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n\nSome DB instance properties aren't valid when you restore from a snapshot, such as the `MasterUsername` and `MasterUserPassword` properties. For information about the properties that you can specify, see the `RestoreDBInstanceFromDBSnapshot` action in the *Amazon RDS API Reference* .\n\nAfter you restore a DB instance with a `DBSnapshotIdentifier` property, you must specify the same `DBSnapshotIdentifier` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the `DBSnapshotIdentifier` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified `DBSnapshotIdentifier` property, and the original DB instance is deleted.\n\nIf you specify the `DBSnapshotIdentifier` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n\n- `CharacterSetName`\n- `DBClusterIdentifier`\n- `DBName`\n- `DeleteAutomatedBackups`\n- `KmsKeyId`\n- `MasterUsername`\n- `MasterUserPassword`\n- `PerformanceInsightsKMSKeyId`\n- `PerformanceInsightsRetentionPeriod`\n- `PromotionTier`\n- `SourceDBInstanceIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an encrypted snapshot)\n- `Timezone`\n\n*Amazon Aurora*\n\nNot applicable. Snapshot restore is managed by the DB cluster.", "title": "DBSnapshotIdentifier", "type": "string" }, "DBSubnetGroupName": { - "markdownDescription": "A DB subnet group to associate with the DB instance. If you update this value, the new subnet group must be a subnet group in a new VPC.\n\nIf there's no DB subnet group, then the DB instance isn't a VPC DB instance.\n\nFor more information about using Amazon RDS in a VPC, see [Using Amazon RDS with Amazon Virtual Private Cloud (VPC)](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. The DB subnet group is managed by the DB cluster. If specified, the setting must match the DB cluster setting.", + "markdownDescription": "A DB subnet group to associate with the DB instance. 
If you update this value, the new subnet group must be a subnet group in a new VPC.\n\nIf there's no DB subnet group, then the DB instance isn't a VPC DB instance.\n\nFor more information about using Amazon RDS in a VPC, see [Amazon VPC and Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\nThis setting doesn't apply to Amazon Aurora DB instances. The DB subnet group is managed by the DB cluster. If specified, the setting must match the DB cluster setting.", "title": "DBSubnetGroupName", "type": "string" }, @@ -225281,7 +225281,7 @@ "type": "boolean" }, "DeletionProtection": { - "markdownDescription": "A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. For more information, see [Deleting a DB Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html) .\n\n*Amazon Aurora*\n\nNot applicable. You can enable or disable deletion protection for the DB cluster. For more information, see `CreateDBCluster` . DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.", + "markdownDescription": "Specifies whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see [Deleting a DB Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html) .\n\nThis setting doesn't apply to Amazon Aurora DB instances. You can enable or disable deletion protection for the DB cluster. For more information, see `CreateDBCluster` . DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.", "title": "DeletionProtection", "type": "boolean" }, @@ -225392,7 +225392,7 @@ "type": "number" }, "MonitoringInterval": { - "markdownDescription": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify 0. The default is 0.\n\nIf `MonitoringRoleArn` is specified, then you must set `MonitoringInterval` to a value other than 0.\n\nThis setting doesn't apply to RDS Custom.\n\nValid Values: `0, 1, 5, 10, 15, 30, 60`", + "markdownDescription": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify `0` .\n\nIf `MonitoringRoleArn` is specified, then you must set `MonitoringInterval` to a value other than `0` .\n\nThis setting doesn't apply to RDS Custom DB instances.\n\nValid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60`\n\nDefault: `0`", "title": "MonitoringInterval", "type": "number" }, @@ -225402,7 +225402,7 @@ "type": "string" }, "MultiAZ": { - "markdownDescription": "Specifies whether the database instance is a Multi-AZ DB instance deployment. You can't set the `AvailabilityZone` parameter if the `MultiAZ` parameter is set to true.\n\nFor more information, see [Multi-AZ deployments for high availability](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZ.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. 
Amazon Aurora storage is replicated across all of the Availability Zones and doesn't require the `MultiAZ` option to be set.", + "markdownDescription": "Specifies whether the DB instance is a Multi-AZ deployment. You can't set the `AvailabilityZone` parameter if the DB instance is a Multi-AZ deployment.\n\nThis setting doesn't apply to the following DB instances:\n\n- Amazon Aurora (DB instance Availability Zones (AZs) are managed by the DB cluster.)\n- RDS Custom", "title": "MultiAZ", "type": "boolean" }, @@ -225432,7 +225432,7 @@ "type": "number" }, "Port": { - "markdownDescription": "The port number on which the database accepts connections.\n\n*Amazon Aurora*\n\nNot applicable. The port number is managed by the DB cluster.\n\n*Db2*\n\nDefault value: `50000`", + "markdownDescription": "The port number on which the database accepts connections.\n\nThis setting doesn't apply to Aurora DB instances. The port number is managed by the cluster.\n\nValid Values: `1150-65535`\n\nDefault:\n\n- RDS for Db2 - `50000`\n- RDS for MariaDB - `3306`\n- RDS for Microsoft SQL Server - `1433`\n- RDS for MySQL - `3306`\n- RDS for Oracle - `1521`\n- RDS for PostgreSQL - `5432`\n\nConstraints:\n\n- For RDS for Microsoft SQL Server, the value can't be `1234` , `1434` , `3260` , `3343` , `3389` , `47001` , or `49152-49156` .", "title": "Port", "type": "string" }, @@ -225518,7 +225518,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB instance.", + "markdownDescription": "Tags to assign to the DB instance.", "title": "Tags", "type": "array" }, @@ -225634,7 +225634,7 @@ "type": "string" }, "SecretArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the secret.", + "markdownDescription": "The Amazon Resource Name (ARN) of the secret. This parameter is a return value that you can retrieve using the `Fn::GetAtt` intrinsic function. For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html#aws-resource-rds-dbinstance-return-values) .", "title": "SecretArn", "type": "string" } @@ -225703,12 +225703,12 @@ "type": "string" }, "Family": { - "markdownDescription": "The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a DB engine and engine version compatible with that DB parameter group family.\n\n> The DB parameter group family can't be changed when updating a DB parameter group. \n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBParameterGroup.html)` .", + "markdownDescription": "The DB parameter group family name. 
A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.\n\nTo list all of the available parameter group families for a DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine `\n\nFor example, to list all of the available parameter group families for the MySQL DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine mysql`\n\n> The output contains duplicates. \n\nThe following are the valid DB engine values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `db2-ae`\n- `db2-se`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", "title": "Family", "type": "string" }, "Parameters": { - "markdownDescription": "An array of parameter names and values for the parameter update. At least one parameter name and value must be supplied. Subsequent arguments are optional.\n\nRDS for Db2 requires you to bring your own Db2 license. You must enter your IBM customer ID ( `rds.ibm_customer_id` ) and site number ( `rds.ibm_site_id` ) before starting a Db2 instance.\n\nFor more information about DB parameters and DB parameter groups for Amazon RDS DB engines, see [Working with DB Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* .\n\nFor more information about DB cluster and DB instance parameters and parameter groups for Amazon Aurora DB engines, see [Working with DB Parameter Groups and DB Cluster Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", + "markdownDescription": "An array of parameter names and values for the parameter update. You must specify at least one parameter name and value.\n\nFor more information about parameter groups, see [Working with parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* , or [Working with parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", "title": "Parameters", "type": "object" }, @@ -225716,7 +225716,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB parameter group.\n\n> Currently, this is the only property that supports drift detection.", + "markdownDescription": "Tags to assign to the DB parameter group.", "title": "Tags", "type": "array" } @@ -225802,7 +225802,7 @@ "type": "boolean" }, "EngineFamily": { - "markdownDescription": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. 
For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL` . For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL` . For RDS for Microsoft SQL Server, specify `SQLSERVER` .\n\n*Valid Values* : `MYSQL` | `POSTGRESQL` | `SQLSERVER`", + "markdownDescription": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL` . For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL` . For RDS for Microsoft SQL Server, specify `SQLSERVER` .", "title": "EngineFamily", "type": "string" }, @@ -225880,7 +225880,7 @@ "additionalProperties": false, "properties": { "AuthScheme": { - "markdownDescription": "The type of authentication that the proxy uses for connections from the proxy to the underlying database.\n\nValid Values: `SECRETS`", + "markdownDescription": "The type of authentication that the proxy uses for connections from the proxy to the underlying database.", "title": "AuthScheme", "type": "string" }, @@ -225895,7 +225895,7 @@ "type": "string" }, "IAMAuth": { - "markdownDescription": "Whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. The `ENABLED` value is valid only for proxies with RDS for Microsoft SQL Server.\n\nValid Values: `ENABLED | DISABLED | REQUIRED`", + "markdownDescription": "A value that indicates whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. The `ENABLED` value is valid only for proxies with RDS for Microsoft SQL Server.", "title": "IAMAuth", "type": "string" }, @@ -225911,12 +225911,12 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "A key is the required name of the tag. The string value can be 1-128 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").", + "markdownDescription": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", "title": "Key", "type": "string" }, "Value": { - "markdownDescription": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").", + "markdownDescription": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . 
The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", "title": "Value", "type": "string" } @@ -225977,7 +225977,7 @@ "type": "array" }, "TargetRole": { - "markdownDescription": "A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.\n\nValid Values: `READ_WRITE | READ_ONLY`", + "markdownDescription": "A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.", "title": "TargetRole", "type": "string" }, @@ -226030,12 +226030,12 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").", + "markdownDescription": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", "title": "Key", "type": "string" }, "Value": { - "markdownDescription": "Metadata assigned to a DB instance consisting of a key-value pair.", + "markdownDescription": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", "title": "Value", "type": "string" } @@ -226079,7 +226079,7 @@ "properties": { "ConnectionPoolConfigurationInfo": { "$ref": "#/definitions/AWS::RDS::DBProxyTargetGroup.ConnectionPoolConfigurationInfoFormat", - "markdownDescription": "Settings that control the size and behavior of the connection pool associated with a `DBProxyTargetGroup` .", + "markdownDescription": "Displays the settings that control the size and behavior of the connection pool associated with a `DBProxyTarget` .", "title": "ConnectionPoolConfigurationInfo" }, "DBClusterIdentifiers": { @@ -226214,7 +226214,7 @@ "type": "array" }, "EC2VpcId": { - "markdownDescription": "The identifier of an Amazon VPC. This property indicates the VPC that this DB security group belongs to.\n\n> The `EC2VpcId` property is for backward compatibility with older regions, and is no longer recommended for providing security information to an RDS DB instance.", + "markdownDescription": "The identifier of an Amazon virtual private cloud (VPC). 
This property indicates the VPC that this DB security group belongs to.\n\n> This property is included for backwards compatibility and is no longer recommended for providing security information to an RDS DB instance.", "title": "EC2VpcId", "type": "string" }, @@ -226227,7 +226227,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB security group.", + "markdownDescription": "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n\nFor more information, see [Tagging Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide* or [Tagging Amazon Aurora and Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html) in the *Amazon Aurora User Guide* .", "title": "Tags", "type": "array" } @@ -226413,7 +226413,7 @@ "type": "string" }, "DBSubnetGroupName": { - "markdownDescription": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints: Must contain no more than 255 lowercase alphanumeric characters or hyphens. Must not be \"Default\".\n\nExample: `mysubnetgroup`", + "markdownDescription": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints:\n\n- Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens.\n- Must not be default.\n- First character must be a letter.\n\nExample: `mydbsubnetgroup`", "title": "DBSubnetGroupName", "type": "string" }, @@ -226429,7 +226429,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB subnet group.", + "markdownDescription": "Tags to assign to the DB subnet group.", "title": "Tags", "type": "array" } @@ -226518,12 +226518,12 @@ "items": { "type": "string" }, - "markdownDescription": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If a `SourceIds` value is supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.", + "markdownDescription": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. 
It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If `SourceIds` are supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.\n- If the source type is an RDS Proxy, a `DBProxyName` value must be supplied.", "title": "SourceIds", "type": "array" }, "SourceType": { - "markdownDescription": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, set this parameter to `db-instance` . If this value isn't specified, all events are returned.\n\nValid values: `db-instance` | `db-cluster` | `db-parameter-group` | `db-security-group` | `db-snapshot` | `db-cluster-snapshot`", + "markdownDescription": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to `db-instance` . For RDS Proxy events, specify `db-proxy` . If this value isn't specified, all events are returned.\n\nValid Values: `db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy | zero-etl | custom-engine-version | blue-green-deployment`", "title": "SourceType", "type": "string" }, @@ -226730,7 +226730,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of tags. For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide.* .", + "markdownDescription": "An optional array of key-value pairs to apply to this integration.", "title": "Tags", "type": "array" }, @@ -226816,7 +226816,7 @@ "items": { "$ref": "#/definitions/AWS::RDS::OptionGroup.OptionConfiguration" }, - "markdownDescription": "A list of options and the settings for each option.", + "markdownDescription": "A list of all available options for an option group.", "title": "OptionConfigurations", "type": "array" }, @@ -226834,7 +226834,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this option group.", + "markdownDescription": "Tags to assign to the option group.", "title": "Tags", "type": "array" } @@ -226874,7 +226874,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of DBSecurityGroupMembership name strings used for this option.", + "markdownDescription": "A list of DB security groups used for this option.", "title": "DBSecurityGroupMemberships", "type": "array" }, @@ -226905,7 +226905,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of VpcSecurityGroupMembership name strings used for this option.", + "markdownDescription": "A list of VPC security group names used for this option.", "title": "VpcSecurityGroupMemberships", "type": "array" } @@ -235666,7 +235666,7 @@ }, "VersioningConfiguration": { "$ref": "#/definitions/AWS::S3::Bucket.VersioningConfiguration", - "markdownDescription": "Enables multiple versions of all objects in this bucket. 
You might enable versioning to prevent objects from being deleted or overwritten by mistake or to archive objects so that you can retrieve previous versions of them.", + "markdownDescription": "Enables multiple versions of all objects in this bucket. You might enable versioning to prevent objects from being deleted or overwritten by mistake or to archive objects so that you can retrieve previous versions of them.\n\n> When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. We recommend that you wait for 15 minutes after enabling versioning before issuing write operations ( `PUT` or `DELETE` ) on objects in the bucket.", "title": "VersioningConfiguration" }, "WebsiteConfiguration": { @@ -242381,7 +242381,7 @@ "type": "array" }, "RejectedPatchesAction": { - "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *BLOCK* : Packages in the *Rejected patches* list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the *Rejected patches* list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as *InstalledRejected* .", + "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- **ALLOW_AS_DEPENDENCY** - *Linux and macOS* : A package in the rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `INSTALLED_OTHER` . This is the default action if no option is specified.\n\n*Windows Server* : Windows Server doesn't support the concept of package dependencies. If a package in the rejected patches list is already installed on the node, its status is reported as `INSTALLED_OTHER` . Any package not already installed on the node is skipped. This is the default action if no option is specified.\n- **BLOCK** - *All OSs* : Packages in the rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as `INSTALLED_REJECTED` .", "title": "RejectedPatchesAction", "type": "string" }, @@ -261095,7 +261095,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of Amazon CloudWatch alarms to be monitored during the deployment. The deployment fails and rolls back if any of these alarms go into the `ALARM` state.", + "markdownDescription": "A list of Amazon CloudWatch alarm names to be monitored during the deployment. The deployment fails and rolls back if any of these alarms go into the `ALARM` state.\n\n> Amazon CloudWatch considers nonexistent alarms to have an `OK` state. 
If you provide an invalid alarm name or provide the ARN of an alarm instead of its name, your deployment may not roll back correctly.", "title": "Alarms", "type": "array" }, diff --git a/schema_source/cloudformation-docs.json b/schema_source/cloudformation-docs.json index f49fa532d..735064c16 100644 --- a/schema_source/cloudformation-docs.json +++ b/schema_source/cloudformation-docs.json @@ -254,6 +254,9 @@ "Key": "The key of the tag. Must not begin with `aws:` .", "Value": "The value of the tag." }, + "AWS::ARCZonalShift::AutoshiftObserverNotificationStatus": { + "Status": "" + }, "AWS::ARCZonalShift::ZonalAutoshiftConfiguration": { "PracticeRunConfiguration": "A practice run configuration for a resource includes the Amazon CloudWatch alarms that you've specified for a practice run, as well as any blocked dates or blocked windows for the practice run. When a resource has a practice run configuration, Route 53 ARC shifts traffic for the resource weekly for practice runs.\n\nPractice runs are required for zonal autoshift. The zonal shifts that Route 53 ARC starts for practice runs help you to ensure that shifting away traffic from an Availability Zone during an autoshift is safe for your application.\n\nYou can update or delete a practice run configuration. Before you delete a practice run configuration, you must disable zonal autoshift for the resource. A practice run configuration is required when zonal autoshift is enabled.", "ResourceIdentifier": "The identifier for the resource that AWS shifts traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.\n\nAt this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.", @@ -3288,17 +3291,17 @@ "TargetLocation": "The target location of the input file." }, "AWS::AppTest::TestCase M2ManagedActionProperties": { - "ForceStop": "Force stops the AWS Mainframe Modernization managed action properties.", - "ImportDataSetLocation": "The import data set location of the AWS Mainframe Modernization managed action properties." + "ForceStop": "Force stops the Mainframe Modernization managed action properties.", + "ImportDataSetLocation": "The import data set location of the Mainframe Modernization managed action properties." }, "AWS::AppTest::TestCase M2ManagedApplicationAction": { - "ActionType": "The action type of the AWS Mainframe Modernization managed application action.", - "Properties": "The properties of the AWS Mainframe Modernization managed application action.", - "Resource": "The resource of the AWS Mainframe Modernization managed application action." + "ActionType": "The action type of the Mainframe Modernization managed application action.", + "Properties": "The properties of the Mainframe Modernization managed application action.", + "Resource": "The resource of the Mainframe Modernization managed application action." }, "AWS::AppTest::TestCase M2NonManagedApplicationAction": { - "ActionType": "The action type of the AWS Mainframe Modernization non-managed application action.", - "Resource": "The resource of the AWS Mainframe Modernization non-managed application action." + "ActionType": "The action type of the Mainframe Modernization non-managed application action.", + "Resource": "The resource of the Mainframe Modernization non-managed application action." 
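Since `AWS::ARCZonalShift::ZonalAutoshiftConfiguration` is new in this docs file and its practice-run behavior is easiest to see in a template, here is a minimal, hedged sketch. The load balancer and alarm ARNs, the blocked date, and the logical ID are all placeholders, and the property shapes should be checked against the published resource schema:

```yaml
# Hedged sketch, not taken from this PR: enables zonal autoshift for a
# placeholder Network Load Balancer, with one CloudWatch outcome alarm
# gating practice runs and one date on which practice runs are skipped.
Resources:
  DemoZonalAutoshift:
    Type: AWS::ARCZonalShift::ZonalAutoshiftConfiguration
    Properties:
      ResourceIdentifier: arn:aws:elasticloadbalancing:us-east-1:111122223333:loadbalancer/net/demo/0123456789abcdef
      ZonalAutoshiftStatus: ENABLED
      PracticeRunConfiguration:
        OutcomeAlarms:
          - Type: CLOUDWATCH
            AlarmIdentifier: arn:aws:cloudwatch:us-east-1:111122223333:alarm:demo-app-health
        BlockedDates:
          - "2025-12-31"
```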
}, "AWS::AppTest::TestCase MainframeAction": { "ActionType": "The action type of the mainframe action.", @@ -3320,8 +3323,8 @@ }, "AWS::AppTest::TestCase ResourceAction": { "CloudFormationAction": "The CloudFormation action of the resource action.", - "M2ManagedApplicationAction": "The AWS Mainframe Modernization managed application action of the resource action.", - "M2NonManagedApplicationAction": "The AWS Mainframe Modernization non-managed application action of the resource action." + "M2ManagedApplicationAction": "The Mainframe Modernization managed application action of the resource action.", + "M2NonManagedApplicationAction": "The Mainframe Modernization non-managed application action of the resource action." }, "AWS::AppTest::TestCase Script": { "ScriptLocation": "The script location of the scripts.", @@ -4327,7 +4330,7 @@ }, "AWS::Backup::BackupVault": { "AccessPolicy": "A resource-based policy that is used to manage access permissions on the target backup vault.", - "BackupVaultName": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.", + "BackupVaultName": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created.", "BackupVaultTags": "The tags to assign to the backup vault.", "EncryptionKeyArn": "A server-side encryption key you can specify to encrypt your backups from services that support full AWS Backup management; for example, `arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` . If you specify a key, you must specify its ARN, not its alias. If you do not specify a key, AWS Backup creates a KMS key for you by default.\n\nTo learn which AWS Backup services support full AWS Backup management and how AWS Backup handles encryption for backups from services that do not yet support full AWS Backup , see [Encryption for backups in AWS Backup](https://docs.aws.amazon.com/aws-backup/latest/devguide/encryption.html)", "LockConfiguration": "Configuration for [AWS Backup Vault Lock](https://docs.aws.amazon.com/aws-backup/latest/devguide/vault-lock.html) .", @@ -4663,6 +4666,7 @@ "AWS::Batch::JobDefinition NodeRangeProperty": { "Container": "The container details for the node range.", "EcsProperties": "This is an object that represents the properties of the node range for a multi-node parallel job.", + "EksProperties": "This is an object that represents the properties of the node range for a multi-node parallel job.", "InstanceTypes": "The instance types of the underlying host infrastructure of a multi-node parallel job.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources.\n> \n> In addition, this list object is currently limited to one element.", "TargetNodes": "The range of nodes, using node index values. A range of `0:3` indicates nodes with index values of `0` through `3` . If the starting range value is omitted ( `:n` ), then `0` is used to start the range. If the ending range value is omitted ( `n:` ), then the highest possible node index is used to end the range. Your accumulative node ranges must account for all nodes ( `0:n` ). You can nest node ranges (for example, `0:10` and `4:5` ). In this case, the `4:5` range properties override the `0:10` properties." 
}, @@ -4899,10 +4903,244 @@ "AWS::Bedrock::DataSource VectorIngestionConfiguration": { "ChunkingConfiguration": "Details about how to chunk the documents in the data source. A *chunk* refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried." }, + "AWS::Bedrock::Flow": { + "CustomerEncryptionKeyArn": "The Amazon Resource Name (ARN) of the KMS key that the flow is encrypted with.", + "Definition": "The definition of the nodes and connections between the nodes in the flow.", + "DefinitionS3Location": "The Amazon S3 location of the flow definition.", + "DefinitionString": "The definition of the flow as a JSON-formatted string. The string must match the format in [FlowDefinition](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-bedrock-flow-flowdefinition.html) .", + "DefinitionSubstitutions": "A map that specifies the mappings for placeholder variables in the prompt flow definition. This enables the customer to inject values obtained at runtime. Variables can be template parameter names, resource logical IDs, resource attributes, or a variable in a key-value map. Only supported with the `DefinitionString` and `DefinitionS3Location` fields.\n\nSubstitutions must follow the syntax: `${key_name}` or `${variable_1,variable_2,...}` .", + "Description": "A description of the flow.", + "ExecutionRoleArn": "The Amazon Resource Name (ARN) of the service role with permissions to create a flow. For more information, see [Create a service role for flows](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-permissions.html) in the Amazon Bedrock User Guide.", + "Name": "The name of the flow.", + "Tags": "Metadata that you can assign to a resource as key-value pairs. For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)", + "TestAliasTags": "" + }, + "AWS::Bedrock::Flow ConditionFlowNodeConfiguration": { + "Conditions": "An array of conditions. Each member contains the name of a condition and an expression that defines the condition." + }, + "AWS::Bedrock::Flow FlowCondition": { + "Expression": "Defines the condition. You must refer to at least one of the inputs in the condition. For more information, expand the Condition node section in [Node types in prompt flows](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-how-it-works.html#flows-nodes) .", + "Name": "A name for the condition that you can reference." + }, + "AWS::Bedrock::Flow FlowConditionalConnectionConfiguration": { + "Condition": "The condition that triggers this connection. For more information about how to write conditions, see the *Condition* node type in the [Node types](https://docs.aws.amazon.com/bedrock/latest/userguide/node-types.html) topic in the Amazon Bedrock User Guide." + }, + "AWS::Bedrock::Flow FlowConnection": { + "Configuration": "The configuration of the connection.", + "Name": "A name for the connection that you can reference.", + "Source": "The node that the connection starts at.", + "Target": "The node that the connection ends at.", + "Type": "Whether the source node that the connection begins from is a condition node ( `Conditional` ) or not ( `Data` )." 
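The `DefinitionSubstitutions` contract described above (literal `${key_name}` tokens in the stored definition, resolved at deployment) is easier to read in template form. In this hedged sketch the bucket, key, role ARN, model ID, and the `FlowModelId` key are all illustrative:

```yaml
# Hedged sketch: the definition JSON at the S3 key would contain the
# literal token ${FlowModelId}, which is substituted at deployment time.
DemoFlow:
  Type: AWS::Bedrock::Flow
  Properties:
    Name: demo-flow
    ExecutionRoleArn: arn:aws:iam::111122223333:role/DemoFlowServiceRole
    DefinitionS3Location:
      Bucket: demo-flow-definitions
      Key: flows/demo-flow.json
    DefinitionSubstitutions:
      FlowModelId: anthropic.claude-3-haiku-20240307-v1:0
```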
+ }, + "AWS::Bedrock::Flow FlowConnectionConfiguration": { + "Conditional": "The configuration of a connection originating from a Condition node.", + "Data": "The configuration of a connection originating from a node that isn't a Condition node." + }, + "AWS::Bedrock::Flow FlowDataConnectionConfiguration": { + "SourceOutput": "The name of the output in the source node that the connection begins from.", + "TargetInput": "The name of the input in the target node that the connection ends at." + }, + "AWS::Bedrock::Flow FlowDefinition": { + "Connections": "An array of connection definitions in the flow.", + "Nodes": "An array of node definitions in the flow." + }, + "AWS::Bedrock::Flow FlowNode": { + "Configuration": "Contains configurations for the node.", + "Inputs": "An array of objects, each of which contains information about an input into the node.", + "Name": "A name for the node.", + "Outputs": "A list of objects, each of which contains information about an output from the node.", + "Type": "The type of node. This value must match the name of the key that you provide in the configuration you provide in the `FlowNodeConfiguration` field." + }, + "AWS::Bedrock::Flow FlowNodeConfiguration": { + "Condition": "Contains configurations for a Condition node in your flow. Defines conditions that lead to different branches of the flow.", + "Input": "Contains configurations for an input flow node in your flow. The first node in the flow. `inputs` can't be specified for this node.", + "KnowledgeBase": "Contains configurations for a knowledge base node in your flow. Queries a knowledge base and returns the retrieved results or generated response.", + "LambdaFunction": "Contains configurations for a Lambda function node in your flow. Invokes an AWS Lambda function.", + "Lex": "Contains configurations for a Lex node in your flow. Invokes an Amazon Lex bot to identify the intent of the input and return the intent as the output.", + "Output": "Contains configurations for an output flow node in your flow. The last node in the flow. `outputs` can't be specified for this node.", + "Prompt": "Contains configurations for a prompt node in your flow. Runs a prompt and generates the model response as the output. You can use a prompt from Prompt management or you can configure one in this node." + }, + "AWS::Bedrock::Flow FlowNodeInput": { + "Expression": "An expression that formats the input for the node. For an explanation of how to create expressions, see [Expressions in Prompt flows in Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-expressions.html) .", + "Name": "A name for the input that you can reference.", + "Type": "The data type of the input. If the input doesn't match this type at runtime, a validation error will be thrown." + }, + "AWS::Bedrock::Flow FlowNodeOutput": { + "Name": "A name for the output that you can reference.", + "Type": "The data type of the output. If the output doesn't match this type at runtime, a validation error will be thrown." + }, + "AWS::Bedrock::Flow KnowledgeBaseFlowNodeConfiguration": { + "KnowledgeBaseId": "The unique identifier of the knowledge base to query.", + "ModelId": "The unique identifier of the model to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array." + }, + "AWS::Bedrock::Flow LambdaFunctionFlowNodeConfiguration": { + "LambdaArn": "The Amazon Resource Name (ARN) of the Lambda function to invoke." 
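The node and connection entries above compose into a graph. As a hedged sketch, the smallest useful `Definition` wires an Input node straight to an Output node with one `Data` connection; the `document` names and the `$.data` expression follow the conventions in the Bedrock flows documentation and are illustrative:

```yaml
# Hedged sketch of a minimal FlowDefinition: Input -> Output.
Definition:
  Nodes:
    - Name: FlowInput
      Type: Input                 # Type matches the Configuration key below
      Configuration:
        Input: {}
      Outputs:
        - Name: document
          Type: String
    - Name: FlowOutput
      Type: Output
      Configuration:
        Output: {}
      Inputs:
        - Name: document
          Type: String
          Expression: $.data      # pass the node input through unchanged
  Connections:
    - Name: InputToOutput
      Type: Data                  # source is not a Condition node
      Source: FlowInput
      Target: FlowOutput
      Configuration:
        Data:
          SourceOutput: document
          TargetInput: document
```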
+ }, + "AWS::Bedrock::Flow LexFlowNodeConfiguration": { + "BotAliasArn": "The Amazon Resource Name (ARN) of the Amazon Lex bot alias to invoke.", + "LocaleId": "The Region to invoke the Amazon Lex bot in." + }, + "AWS::Bedrock::Flow PromptFlowNodeConfiguration": { + "SourceConfiguration": "Specifies whether the prompt is from Prompt management or defined inline." + }, + "AWS::Bedrock::Flow PromptFlowNodeInlineConfiguration": { + "InferenceConfiguration": "Contains inference configurations for the prompt.", + "ModelId": "The unique identifier of the model to run inference with.", + "TemplateConfiguration": "Contains a prompt and variables in the prompt that can be replaced with values at runtime.", + "TemplateType": "The type of prompt template." + }, + "AWS::Bedrock::Flow PromptFlowNodeResourceConfiguration": { + "PromptArn": "The Amazon Resource Name (ARN) of the prompt from Prompt management." + }, + "AWS::Bedrock::Flow PromptFlowNodeSourceConfiguration": { + "Inline": "Contains configurations for a prompt that is defined inline.", + "Resource": "Contains configurations for a prompt from Prompt management." + }, + "AWS::Bedrock::Flow PromptInferenceConfiguration": { + "Text": "Contains inference configurations for a text prompt." + }, + "AWS::Bedrock::Flow PromptInputVariable": { + "Name": "The name of the variable." + }, + "AWS::Bedrock::Flow PromptModelInferenceConfiguration": { + "MaxTokens": "The maximum number of tokens to return in the response.", + "StopSequences": "A list of strings that define sequences after which the model will stop generating.", + "Temperature": "Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.", + "TopK": "The number of most-likely candidates that the model considers for the next token during generation.", + "TopP": "The percentage of most-likely candidates that the model considers for the next token." + }, + "AWS::Bedrock::Flow PromptTemplateConfiguration": { + "Text": "Contains configurations for the text in a message for a prompt." + }, + "AWS::Bedrock::Flow S3Location": { + "Bucket": "The S3 bucket containing the flow definition.", + "Key": "The object key for the S3 location containing the definition.", + "Version": "The Amazon S3 location from which to retrieve data for an S3 retrieve node or to which to store data for an S3 storage node." + }, + "AWS::Bedrock::Flow TextPromptTemplateConfiguration": { + "InputVariables": "An array of the variables in the prompt template.", + "Text": "The message for the prompt." + }, + "AWS::Bedrock::FlowAlias": { + "Description": "A description of the alias.", + "FlowArn": "The Amazon Resource Name (ARN) of the flow.", + "Name": "The name of the alias.", + "RoutingConfiguration": "A list of configurations about the versions that the alias maps to. Currently, you can only specify one.", + "Tags": "Metadata that you can assign to a resource as key-value pairs. For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)" + }, + "AWS::Bedrock::FlowAlias FlowAliasRoutingConfigurationListItem": { + "FlowVersion": "The version that the alias maps to."
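Tying the inline-prompt pieces above together, a Prompt node's `Configuration` might look like the following hedged sketch; the model ID, prompt text, and `{{genre}}` variable (double-brace syntax per Prompt management) are illustrative:

```yaml
# Hedged sketch of FlowNodeConfiguration.Prompt with an inline prompt.
Prompt:
  SourceConfiguration:
    Inline:
      ModelId: anthropic.claude-3-haiku-20240307-v1:0
      TemplateType: TEXT
      TemplateConfiguration:
        Text:
          Text: "Recommend three {{genre}} books, one sentence each."
          InputVariables:
            - Name: genre       # must match the {{genre}} placeholder
      InferenceConfiguration:
        Text:
          Temperature: 0.2      # lower = more predictable output
          MaxTokens: 512
```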
+ }, + "AWS::Bedrock::FlowVersion": { + "Description": "The description of the flow version.", + "FlowArn": "The Amazon Resource Name (ARN) of the flow that the version belongs to." + }, + "AWS::Bedrock::FlowVersion ConditionFlowNodeConfiguration": { + "Conditions": "An array of conditions. Each member contains the name of a condition and an expression that defines the condition." + }, + "AWS::Bedrock::FlowVersion FlowCondition": { + "Expression": "Defines the condition. You must refer to at least one of the inputs in the condition. For more information, expand the Condition node section in [Node types in prompt flows](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-how-it-works.html#flows-nodes) .", + "Name": "A name for the condition that you can reference." + }, + "AWS::Bedrock::FlowVersion FlowConditionalConnectionConfiguration": { + "Condition": "The condition that triggers this connection. For more information about how to write conditions, see the *Condition* node type in the [Node types](https://docs.aws.amazon.com/bedrock/latest/userguide/node-types.html) topic in the Amazon Bedrock User Guide." + }, + "AWS::Bedrock::FlowVersion FlowConnection": { + "Configuration": "The configuration of the connection.", + "Name": "A name for the connection that you can reference.", + "Source": "The node that the connection starts at.", + "Target": "The node that the connection ends at.", + "Type": "Whether the source node that the connection begins from is a condition node ( `Conditional` ) or not ( `Data` )." + }, + "AWS::Bedrock::FlowVersion FlowConnectionConfiguration": { + "Conditional": "The configuration of a connection originating from a Condition node.", + "Data": "The configuration of a connection originating from a node that isn't a Condition node." + }, + "AWS::Bedrock::FlowVersion FlowDataConnectionConfiguration": { + "SourceOutput": "The name of the output in the source node that the connection begins from.", + "TargetInput": "The name of the input in the target node that the connection ends at." + }, + "AWS::Bedrock::FlowVersion FlowDefinition": { + "Connections": "An array of connection definitions in the flow.", + "Nodes": "An array of node definitions in the flow." + }, + "AWS::Bedrock::FlowVersion FlowNode": { + "Configuration": "Contains configurations for the node.", + "Inputs": "An array of objects, each of which contains information about an input into the node.", + "Name": "A name for the node.", + "Outputs": "A list of objects, each of which contains information about an output from the node.", + "Type": "The type of node. This value must match the name of the key that you provide in the configuration you provide in the `FlowNodeConfiguration` field." + }, + "AWS::Bedrock::FlowVersion FlowNodeConfiguration": { + "Condition": "Contains configurations for a Condition node in your flow. Defines conditions that lead to different branches of the flow.", + "Input": "Contains configurations for an input flow node in your flow. The first node in the flow. `inputs` can't be specified for this node.", + "KnowledgeBase": "Contains configurations for a knowledge base node in your flow. Queries a knowledge base and returns the retrieved results or generated response.", + "LambdaFunction": "Contains configurations for a Lambda function node in your flow. Invokes an AWS Lambda function.", + "Lex": "Contains configurations for a Lex node in your flow. 
Invokes an Amazon Lex bot to identify the intent of the input and return the intent as the output.", + "Output": "Contains configurations for an output flow node in your flow. The last node in the flow. `outputs` can't be specified for this node.", + "Prompt": "Contains configurations for a prompt node in your flow. Runs a prompt and generates the model response as the output. You can use a prompt from Prompt management or you can configure one in this node." + }, + "AWS::Bedrock::FlowVersion FlowNodeInput": { + "Expression": "An expression that formats the input for the node. For an explanation of how to create expressions, see [Expressions in Prompt flows in Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-expressions.html) .", + "Name": "A name for the input that you can reference.", + "Type": "The data type of the input. If the input doesn't match this type at runtime, a validation error will be thrown." + }, + "AWS::Bedrock::FlowVersion FlowNodeOutput": { + "Name": "A name for the output that you can reference.", + "Type": "The data type of the output. If the output doesn't match this type at runtime, a validation error will be thrown." + }, + "AWS::Bedrock::FlowVersion KnowledgeBaseFlowNodeConfiguration": { + "KnowledgeBaseId": "The unique identifier of the knowledge base to query.", + "ModelId": "The unique identifier of the model to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array." + }, + "AWS::Bedrock::FlowVersion LambdaFunctionFlowNodeConfiguration": { + "LambdaArn": "The Amazon Resource Name (ARN) of the Lambda function to invoke." + }, + "AWS::Bedrock::FlowVersion LexFlowNodeConfiguration": { + "BotAliasArn": "The Amazon Resource Name (ARN) of the Amazon Lex bot alias to invoke.", + "LocaleId": "The Region to invoke the Amazon Lex bot in." + }, + "AWS::Bedrock::FlowVersion PromptFlowNodeConfiguration": { + "SourceConfiguration": "Specifies whether the prompt is from Prompt management or defined inline." + }, + "AWS::Bedrock::FlowVersion PromptFlowNodeInlineConfiguration": { + "InferenceConfiguration": "Contains inference configurations for the prompt.", + "ModelId": "The unique identifier of the model to run inference with.", + "TemplateConfiguration": "Contains a prompt and variables in the prompt that can be replaced with values at runtime.", + "TemplateType": "The type of prompt template." + }, + "AWS::Bedrock::FlowVersion PromptFlowNodeResourceConfiguration": { + "PromptArn": "The Amazon Resource Name (ARN) of the prompt from Prompt management." + }, + "AWS::Bedrock::FlowVersion PromptFlowNodeSourceConfiguration": { + "Inline": "Contains configurations for a prompt that is defined inline.", + "Resource": "Contains configurations for a prompt from Prompt management." + }, + "AWS::Bedrock::FlowVersion PromptInferenceConfiguration": { + "Text": "Contains inference configurations for a text prompt." + }, + "AWS::Bedrock::FlowVersion PromptInputVariable": { + "Name": "The name of the variable." + }, + "AWS::Bedrock::FlowVersion PromptModelInferenceConfiguration": { + "MaxTokens": "The maximum number of tokens to return in the response.", + "StopSequences": "A list of strings that define sequences after which the model will stop generating.", + "Temperature": "Controls the randomness of the response. 
Choose a lower value for more predictable outputs and a higher value for more surprising outputs.", + "TopK": "The number of most-likely candidates that the model considers for the next token during generation.", + "TopP": "The percentage of most-likely candidates that the model considers for the next token." + }, + "AWS::Bedrock::FlowVersion PromptTemplateConfiguration": { + "Text": "Contains configurations for the text in a message for a prompt." + }, + "AWS::Bedrock::FlowVersion TextPromptTemplateConfiguration": { + "InputVariables": "An array of the variables in the prompt template.", + "Text": "The message for the prompt." + }, "AWS::Bedrock::Guardrail": { "BlockedInputMessaging": "The message to return when the guardrail blocks a prompt.", "BlockedOutputsMessaging": "The message to return when the guardrail blocks a model response.", "ContentPolicyConfig": "The content filter policies to configure for the guardrail.", + "ContextualGroundingPolicyConfig": "", "Description": "A description of the guardrail.", "KmsKeyArn": "The ARN of the AWS KMS key that you use to encrypt the guardrail.", "Name": "The name of the guardrail.", @@ -4919,6 +5157,13 @@ "AWS::Bedrock::Guardrail ContentPolicyConfig": { "FiltersConfig": "Contains the type of the content filter and how strongly it should apply to prompts and model responses." }, + "AWS::Bedrock::Guardrail ContextualGroundingFilterConfig": { + "Threshold": "", + "Type": "" + }, + "AWS::Bedrock::Guardrail ContextualGroundingPolicyConfig": { + "FiltersConfig": "" + }, "AWS::Bedrock::Guardrail ManagedWordsConfig": { "Type": "The managed word type to configure for the guardrail." }, @@ -4968,10 +5213,30 @@ "StorageConfiguration": "Contains details about the storage configuration of the knowledge base.", "Tags": "Metadata that you can assign to a resource as key-value pairs. For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)" }, + "AWS::Bedrock::KnowledgeBase BedrockEmbeddingModelConfiguration": { + "Dimensions": "The dimensions details for the vector configuration used on the Bedrock embeddings model." + }, + "AWS::Bedrock::KnowledgeBase EmbeddingModelConfiguration": { + "BedrockEmbeddingModelConfiguration": "The vector configuration details on the Bedrock embeddings model." + }, "AWS::Bedrock::KnowledgeBase KnowledgeBaseConfiguration": { "Type": "The type of data that the data source is converted into for the knowledge base.", "VectorKnowledgeBaseConfiguration": "Contains details about the embeddings model that's used to convert the data source." 
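The new `EmbeddingModelConfiguration` / `BedrockEmbeddingModelConfiguration` nesting above reads more clearly in a template. A hedged sketch follows; the model ARN and dimension value are illustrative, and the dimension must be one the chosen model supports:

```yaml
# Hedged sketch of a vector knowledge base configuration with an
# explicit embedding dimension.
KnowledgeBaseConfiguration:
  Type: VECTOR
  VectorKnowledgeBaseConfiguration:
    EmbeddingModelArn: arn:aws:bedrock:us-east-1::foundation-model/amazon.titan-embed-text-v2:0
    EmbeddingModelConfiguration:
      BedrockEmbeddingModelConfiguration:
        Dimensions: 1024
```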
}, + "AWS::Bedrock::KnowledgeBase MongoDbAtlasConfiguration": { + "CollectionName": "The collection name of the knowledge base in MongoDB Atlas.", + "CredentialsSecretArn": "The Amazon Resource Name (ARN) of the secret that you created in AWS Secrets Manager that contains user credentials for your MongoDB Atlas cluster.", + "DatabaseName": "The database name in your MongoDB Atlas cluster for your knowledge base.", + "Endpoint": "The endpoint URL of your MongoDB Atlas cluster for your knowledge base.", + "EndpointServiceName": "The name of the VPC endpoint service in your account that is connected to your MongoDB Atlas cluster.", + "FieldMapping": "Contains the names of the fields to which to map information about the vector store.", + "VectorIndexName": "The name of the MongoDB Atlas vector search index." + }, + "AWS::Bedrock::KnowledgeBase MongoDbAtlasFieldMapping": { + "MetadataField": "The name of the field in which Amazon Bedrock stores metadata about the vector store.", + "TextField": "The name of the field in which Amazon Bedrock stores the raw text from your data. The text is split according to the chunking strategy you choose.", + "VectorField": "The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources." + }, "AWS::Bedrock::KnowledgeBase OpenSearchServerlessConfiguration": { "CollectionArn": "The Amazon Resource Name (ARN) of the OpenSearch Service vector store.", "FieldMapping": "Contains the names of the fields to which to map information about the vector store.", @@ -5006,13 +5271,87 @@ "VectorField": "The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources." }, "AWS::Bedrock::KnowledgeBase StorageConfiguration": { + "MongoDbAtlasConfiguration": "Contains the storage configuration of the knowledge base in MongoDB Atlas.", "OpensearchServerlessConfiguration": "Contains the storage configuration of the knowledge base in Amazon OpenSearch Service.", "PineconeConfiguration": "Contains the storage configuration of the knowledge base in Pinecone.", "RdsConfiguration": "Contains details about the storage configuration of the knowledge base in Amazon RDS. For more information, see [Create a vector index in Amazon RDS](https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base-setup-rds.html) .", "Type": "The vector store service in which the knowledge base is stored." }, "AWS::Bedrock::KnowledgeBase VectorKnowledgeBaseConfiguration": { - "EmbeddingModelArn": "The Amazon Resource Name (ARN) of the model used to create vector embeddings for the knowledge base." + "EmbeddingModelArn": "The Amazon Resource Name (ARN) of the model used to create vector embeddings for the knowledge base.", + "EmbeddingModelConfiguration": "The embeddings model configuration details for the vector model used in Knowledge Base." + }, + "AWS::Bedrock::Prompt": { + "CustomerEncryptionKeyArn": "The Amazon Resource Name (ARN) of the KMS key that the prompt is encrypted with.", + "DefaultVariant": "The name of the default variant for the prompt. This value must match the `name` field in the relevant [PromptVariant](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent_PromptVariant.html) object.", + "Description": "The description of the prompt.", + "Name": "The name of the prompt.", + "Tags": "Metadata that you can assign to a resource as key-value pairs. 
For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)", + "Variants": "A list of objects, each containing details about a variant of the prompt." + }, + "AWS::Bedrock::Prompt PromptInferenceConfiguration": { + "Text": "Contains inference configurations for a text prompt." + }, + "AWS::Bedrock::Prompt PromptInputVariable": { + "Name": "The name of the variable." + }, + "AWS::Bedrock::Prompt PromptModelInferenceConfiguration": { + "MaxTokens": "The maximum number of tokens to return in the response.", + "StopSequences": "A list of strings that define sequences after which the model will stop generating.", + "Temperature": "Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.", + "TopK": "The number of most-likely candidates that the model considers for the next token during generation.", + "TopP": "The percentage of most-likely candidates that the model considers for the next token." + }, + "AWS::Bedrock::Prompt PromptTemplateConfiguration": { + "Text": "Contains configurations for the text in a message for a prompt." + }, + "AWS::Bedrock::Prompt PromptVariant": { + "InferenceConfiguration": "Contains inference configurations for the prompt variant.", + "ModelId": "The unique identifier of the model with which to run inference on the prompt.", + "Name": "The name of the prompt variant.", + "TemplateConfiguration": "Contains configurations for the prompt template.", + "TemplateType": "The type of prompt template to use." + }, + "AWS::Bedrock::Prompt TextPromptTemplateConfiguration": { + "InputVariables": "An array of the variables in the prompt template.", + "Text": "The message for the prompt.", + "TextS3Location": "The Amazon S3 location of the prompt text." + }, + "AWS::Bedrock::Prompt TextS3Location": { + "Bucket": "The Amazon S3 bucket containing the prompt text.", + "Key": "The object key for the Amazon S3 location.", + "Version": "The version of the Amazon S3 location to use." + }, + "AWS::Bedrock::PromptVersion": { + "Description": "The description of the prompt version.", + "PromptArn": "The Amazon Resource Name (ARN) of the version of the prompt." + }, + "AWS::Bedrock::PromptVersion PromptInferenceConfiguration": { + "Text": "Contains inference configurations for a text prompt." + }, + "AWS::Bedrock::PromptVersion PromptInputVariable": { + "Name": "The name of the variable." + }, + "AWS::Bedrock::PromptVersion PromptModelInferenceConfiguration": { + "MaxTokens": "The maximum number of tokens to return in the response.", + "StopSequences": "A list of strings that define sequences after which the model will stop generating.", + "Temperature": "Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.", + "TopK": "The number of most-likely candidates that the model considers for the next token during generation.", + "TopP": "The percentage of most-likely candidates that the model considers for the next token." + }, + "AWS::Bedrock::PromptVersion PromptTemplateConfiguration": { + "Text": "Contains configurations for the text in a message for a prompt." 
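Since `DefaultVariant` must match a variant's `name`, a hedged sketch of the new `AWS::Bedrock::Prompt` resource may help; the names, model ID, and template text are illustrative:

```yaml
# Hedged sketch: one text variant, referenced by DefaultVariant.
DemoPrompt:
  Type: AWS::Bedrock::Prompt
  Properties:
    Name: demo-prompt
    DefaultVariant: v1            # must equal a Variants[].Name below
    Variants:
      - Name: v1
        TemplateType: TEXT
        ModelId: anthropic.claude-3-haiku-20240307-v1:0
        TemplateConfiguration:
          Text:
            Text: "Summarize the following document: {{document}}"
            InputVariables:
              - Name: document
```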
+ }, + "AWS::Bedrock::PromptVersion PromptVariant": { + "InferenceConfiguration": "Contains inference configurations for the prompt variant.", + "ModelId": "The unique identifier of the model with which to run inference on the prompt.", + "Name": "The name of the prompt variant.", + "TemplateConfiguration": "Contains configurations for the prompt template.", + "TemplateType": "The type of prompt template to use." + }, + "AWS::Bedrock::PromptVersion TextPromptTemplateConfiguration": { + "InputVariables": "An array of the variables in the prompt template.", + "Text": "The message for the prompt." }, "AWS::BillingConductor::BillingGroup": { "AccountGrouping": "The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated billing family.", @@ -5448,7 +5787,7 @@ "AWS::CleanRooms::ConfiguredTable": { "AllowedColumns": "The columns within the underlying AWS Glue table that can be utilized within collaborations.", "AnalysisMethod": "The analysis method for the configured table. The only valid value is currently `DIRECT_QUERY`.", - "AnalysisRules": "The entire created analysis rule.", + "AnalysisRules": "The analysis rule that was created for the configured table.", "Description": "A description for the configured table.", "Name": "A name for the configured table.", "TableReference": "The AWS Glue table that this configured table represents.", @@ -5468,6 +5807,7 @@ "Type": "The type of analysis rule." }, "AWS::CleanRooms::ConfiguredTable AnalysisRuleAggregation": { + "AdditionalAnalyses": "An indicator as to whether additional analyses (such as AWS Clean Rooms ML) can be applied to the output of the direct query.\n\nThe `additionalAnalyses` parameter is currently supported for the list analysis rule ( `AnalysisRuleList` ) and the custom analysis rule ( `AnalysisRuleCustom` ).", "AggregateColumns": "The columns that query runners are allowed to use in aggregation queries.", "AllowedJoinOperators": "Which logical operators (if any) are to be used in an INNER JOIN match condition. Default is `AND` .", "DimensionColumns": "The columns that query runners are allowed to select, group by, or filter by.", @@ -5477,11 +5817,14 @@ "ScalarFunctions": "Set of scalar functions that are allowed to be used on dimension columns and the output of aggregation of metrics." }, "AWS::CleanRooms::ConfiguredTable AnalysisRuleCustom": { + "AdditionalAnalyses": "An indicator as to whether additional analyses (such as AWS Clean Rooms ML) can be applied to the output of the direct query.", "AllowedAnalyses": "The ARN of the analysis templates that are allowed by the custom analysis rule.", "AllowedAnalysisProviders": "The IDs of the AWS accounts that are allowed to query by the custom analysis rule. Required when `allowedAnalyses` is `ANY_QUERY` .", - "DifferentialPrivacy": "The differential privacy configuration." + "DifferentialPrivacy": "The differential privacy configuration.", + "DisallowedOutputColumns": "A list of columns that aren't allowed to be shown in the query output." }, "AWS::CleanRooms::ConfiguredTable AnalysisRuleList": { + "AdditionalAnalyses": "An indicator as to whether additional analyses (such as AWS Clean Rooms ML) can be applied to the output of the direct query.", "AllowedJoinOperators": "The logical operators (if any) that are to be used in an INNER JOIN match condition. 
Default is `AND` .", "JoinColumns": "Columns that can be used to join a configured table with the table of the member who can query and other members' configured tables.", "ListColumns": "Columns that can be listed in the output." @@ -5512,6 +5855,7 @@ "Value": "The value of the tag." }, "AWS::CleanRooms::ConfiguredTableAssociation": { + "ConfiguredTableAssociationAnalysisRules": "An analysis rule for a configured table association. This analysis rule specifies how data from the table can be used within its associated collaboration. In the console, the `ConfiguredTableAssociationAnalysisRule` is referred to as the *collaboration analysis rule* .", "ConfiguredTableIdentifier": "A unique identifier for the configured table to be associated to. Currently accepts a configured table ID.", "Description": "A description of the configured table association.", "MembershipIdentifier": "The unique ID for the membership this configured table association belongs to.", @@ -5519,10 +5863,80 @@ "RoleArn": "The service will assume this role to access catalog metadata and query the table.", "Tags": "An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource." }, + "AWS::CleanRooms::ConfiguredTableAssociation ConfiguredTableAssociationAnalysisRule": { + "Policy": "The policy of the configured table association analysis rule.", + "Type": "The type of the configured table association analysis rule." + }, + "AWS::CleanRooms::ConfiguredTableAssociation ConfiguredTableAssociationAnalysisRuleAggregation": { + "AllowedAdditionalAnalyses": "The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output.\n\nThe `allowedAdditionalAnalyses` parameter is currently supported for the list analysis rule ( `AnalysisRuleList` ) and the custom analysis rule ( `AnalysisRuleCustom` ).", + "AllowedResultReceivers": "The list of collaboration members who are allowed to receive results of queries run with this configured table." + }, + "AWS::CleanRooms::ConfiguredTableAssociation ConfiguredTableAssociationAnalysisRuleCustom": { + "AllowedAdditionalAnalyses": "The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output.", + "AllowedResultReceivers": "The list of collaboration members who are allowed to receive results of queries run with this configured table." + }, + "AWS::CleanRooms::ConfiguredTableAssociation ConfiguredTableAssociationAnalysisRuleList": { + "AllowedAdditionalAnalyses": "The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output.", + "AllowedResultReceivers": "The list of collaboration members who are allowed to receive results of queries run with this configured table." + }, + "AWS::CleanRooms::ConfiguredTableAssociation ConfiguredTableAssociationAnalysisRulePolicy": { + "V1": "The policy for the configured table association analysis rule." + }, + "AWS::CleanRooms::ConfiguredTableAssociation ConfiguredTableAssociationAnalysisRulePolicyV1": { + "Aggregation": "Analysis rule type that enables only aggregation queries on a configured table.", + "Custom": "Analysis rule type that enables the table owner to approve custom SQL queries on their configured tables. It supports differential privacy.", + "List": "Analysis rule type that enables only list queries on a configured table." 
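The list analysis rule fields above (join columns, list columns, join operators) assemble under a versioned policy wrapper. A hedged sketch of a configured table's `AnalysisRules` property, with illustrative column names:

```yaml
# Hedged sketch: a LIST rule allowing joins on hashed_email and
# projection of two columns.
AnalysisRules:
  - Type: LIST
    Policy:
      V1:
        List:
          JoinColumns:
            - hashed_email
          AllowedJoinOperators:
            - AND
          ListColumns:
            - segment
            - city
```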
+ }, "AWS::CleanRooms::ConfiguredTableAssociation Tag": { "Key": "The key of the tag.", "Value": "The value of the tag." }, + "AWS::CleanRooms::IdMappingTable": { + "Description": "The description of the ID mapping table.", + "InputReferenceConfig": "The input reference configuration for the ID mapping table.", + "KmsKeyArn": "The Amazon Resource Name (ARN) of the AWS KMS key.", + "MembershipIdentifier": "The unique identifier of the membership resource for the ID mapping table.", + "Name": "The name of the ID mapping table.", + "Tags": "An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource." + }, + "AWS::CleanRooms::IdMappingTable IdMappingTableInputReferenceConfig": { + "InputReferenceArn": "The Amazon Resource Name (ARN) of the referenced resource in AWS Entity Resolution . Valid values are ID mapping workflow ARNs.", + "ManageResourcePolicies": "When `TRUE` , AWS Clean Rooms manages permissions for the ID mapping table resource.\n\nWhen `FALSE` , the resource owner manages permissions for the ID mapping table resource." + }, + "AWS::CleanRooms::IdMappingTable IdMappingTableInputReferenceProperties": { + "IdMappingTableInputSource": "The input source of the ID mapping table." + }, + "AWS::CleanRooms::IdMappingTable IdMappingTableInputSource": { + "IdNamespaceAssociationId": "The unique identifier of the ID namespace association.", + "Type": "The type of the input source of the ID mapping table." + }, + "AWS::CleanRooms::IdMappingTable Tag": { + "Key": "The key of the tag.", + "Value": "The value of the tag." + }, + "AWS::CleanRooms::IdNamespaceAssociation": { + "Description": "The description of the ID namespace association.", + "IdMappingConfig": "The configuration settings for the ID mapping table.", + "InputReferenceConfig": "The input reference configuration for the ID namespace association.", + "MembershipIdentifier": "The unique identifier of the membership that contains the ID namespace association.", + "Name": "The name of this ID namespace association.", + "Tags": "" + }, + "AWS::CleanRooms::IdNamespaceAssociation IdMappingConfig": { + "AllowUseAsDimensionColumn": "An indicator as to whether you can use your column as a dimension column in the ID mapping table ( `TRUE` ) or not ( `FALSE` ).\n\nDefault is `FALSE` ." + }, + "AWS::CleanRooms::IdNamespaceAssociation IdNamespaceAssociationInputReferenceConfig": { + "InputReferenceArn": "The Amazon Resource Name (ARN) of the AWS Entity Resolution resource that is being associated to the collaboration. Valid resource ARNs are from the ID namespaces that you own.", + "ManageResourcePolicies": "When `TRUE` , AWS Clean Rooms manages permissions for the ID namespace association resource.\n\nWhen `FALSE` , the resource owner manages permissions for the ID namespace association resource." + }, + "AWS::CleanRooms::IdNamespaceAssociation IdNamespaceAssociationInputReferenceProperties": { + "IdMappingWorkflowsSupported": "Defines how ID mapping workflows are supported for this ID namespace association.", + "IdNamespaceType": "The ID namespace type for this ID namespace association." + }, + "AWS::CleanRooms::IdNamespaceAssociation Tag": { + "Key": "The key of the tag.", + "Value": "The value of the tag." 
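For the new `AWS::CleanRooms::IdMappingTable` resource above, here is a hedged sketch showing `InputReferenceConfig` pointing at an AWS Entity Resolution ID mapping workflow; the ARN, membership ID, and name are placeholders:

```yaml
# Hedged sketch: Clean Rooms manages the resource policies here
# (ManageResourcePolicies: true).
DemoIdMappingTable:
  Type: AWS::CleanRooms::IdMappingTable
  Properties:
    Name: demo-id-mapping-table
    MembershipIdentifier: 11111111-2222-3333-4444-555555555555
    InputReferenceConfig:
      InputReferenceArn: arn:aws:entityresolution:us-east-1:111122223333:idmappingworkflow/demo-workflow
      ManageResourcePolicies: true
```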
+ }, "AWS::CleanRooms::Membership": { "CollaborationIdentifier": "The unique ID for the associated collaboration.", "DefaultResultConfiguration": "The default protected query result configuration as specified by the member who can receive results.", @@ -5534,7 +5948,7 @@ "QueryCompute": "The payment responsibilities accepted by the collaboration member for query compute costs." }, "AWS::CleanRooms::Membership MembershipProtectedQueryOutputConfiguration": { - "S3": "Required configuration for a protected query with an `S3` output type." + "S3": "Required configuration for a protected query with an `s3` output type." }, "AWS::CleanRooms::Membership MembershipProtectedQueryResultConfiguration": { "OutputConfiguration": "Configuration for protected query results.", @@ -5555,7 +5969,7 @@ "AWS::CleanRooms::PrivacyBudgetTemplate": { "AutoRefresh": "How often the privacy budget refreshes.\n\n> If you plan to regularly bring new data into the collaboration, use `CALENDAR_MONTH` to automatically get a new privacy budget for the collaboration every calendar month. Choosing this option allows arbitrary amounts of information to be revealed about rows of the data when repeatedly queried across refreshes. Avoid choosing this if the same rows will be repeatedly queried between privacy budget refreshes.", "MembershipIdentifier": "The identifier for a membership resource.", - "Parameters": "Specifies the epislon and noise parameters for the privacy budget template.", + "Parameters": "Specifies the epsilon and noise parameters for the privacy budget template.", "PrivacyBudgetType": "Specifies the type of the privacy budget template.", "Tags": "" }, @@ -5742,7 +6156,7 @@ "RetainStacksOnAccountRemoval": "If set to `true` , stack resources are retained when an account is removed from a target organization or OU. If set to `false` , stack resources are deleted. Specify only if `Enabled` is set to `True` ." }, "AWS::CloudFormation::StackSet DeploymentTargets": { - "AccountFilterType": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSets deploys to the accounts specified in `Accounts` parameter.\n- `DIFFERENCE` : StackSets excludes the accounts specified in `Accounts` parameter. This enables user to avoid certain accounts within an OU such as suspended accounts.\n- `UNION` : StackSets includes additional accounts deployment targets.\n\nThis is the default value if `AccountFilterType` is not provided. This enables user to update an entire OU and individual accounts from a different OU in one request, which used to be two separate requests.\n- `NONE` : Deploys to all the accounts in specified organizational units (OU).", + "AccountFilterType": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSet deploys to the accounts specified in the `Accounts` parameter.\n- `DIFFERENCE` : StackSet deploys to the OU, excluding the accounts specified in the `Accounts` parameter.\n- `UNION` StackSet deploys to the OU, and the accounts specified in the `Accounts` parameter. 
`UNION` is not supported for create operations when using StackSet as a resource.", "Accounts": "The names of one or more AWS accounts for which you want to deploy stack set updates.\n\n*Pattern* : `^[0-9]{12}$`", "AccountsUrl": "Returns the value of the `AccountsUrl` property.", "OrganizationalUnitIds": "The organization root ID or organizational unit (OU) IDs to which StackSets deploys.\n\n*Pattern* : `^(ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}|r-[a-z0-9]{4,32})$`" @@ -5850,16 +6264,16 @@ "Value": "The request header value." }, "AWS::CloudFront::ContinuousDeploymentPolicy SingleHeaderPolicyConfig": { - "Header": "", - "Value": "" + "Header": "The name of the HTTP header that CloudFront sets for the single header policy.", + "Value": "Specifies the value to assign to the header for a single header policy." }, "AWS::CloudFront::ContinuousDeploymentPolicy SingleWeightConfig": { "SessionStickinessConfig": "Session stickiness provides the ability to define multiple requests from a single viewer as a single session. This prevents the potentially inconsistent experience of sending some of a given user's requests to your staging distribution, while others are sent to your primary distribution. Define the session duration using TTL values.", "Weight": "The percentage of traffic to send to a staging distribution, expressed as a decimal number between 0 and 0.15. For example, a value of 0.10 means 10% of traffic is sent to the staging distribution." }, "AWS::CloudFront::ContinuousDeploymentPolicy SingleWeightPolicyConfig": { - "SessionStickinessConfig": "", - "Weight": "" + "SessionStickinessConfig": "Enable session stickiness for the associated origin or cache settings.", + "Weight": "The percentage of requests that CloudFront sends to an associated origin or cache settings." }, "AWS::CloudFront::ContinuousDeploymentPolicy TrafficConfig": { "SingleHeaderConfig": "Determines which HTTP requests are sent to the staging distribution.", @@ -5933,12 +6347,12 @@ }, "AWS::CloudFront::Distribution DistributionConfig": { "Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", - "CNAMEs": "", + "CNAMEs": "An alias for the CloudFront distribution's domain name.\n\n> This property is legacy. We recommend that you use [Aliases](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-distributionconfig.html#cfn-cloudfront-distribution-distributionconfig-aliases) instead.", "CacheBehaviors": "A complex type that contains zero or more `CacheBehavior` elements.", "Comment": "A comment to describe the distribution. The comment cannot be longer than 128 characters.", "ContinuousDeploymentPolicyId": "The identifier of a continuous deployment policy. For more information, see `CreateContinuousDeploymentPolicy` .", "CustomErrorResponses": "A complex type that controls the following:\n\n- Whether CloudFront replaces HTTP status codes in the 4xx and 5xx range with custom error messages before returning the response to the viewer.\n- How long CloudFront caches HTTP status codes in the 4xx and 5xx range.\n\nFor more information about custom error pages, see [Customizing Error Responses](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) in the *Amazon CloudFront Developer Guide* .", - "CustomOrigin": "", + "CustomOrigin": "The user-defined HTTP server that serves as the origin for content that CloudFront distributes.\n\n> This property is legacy.
We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", "DefaultCacheBehavior": "A complex type that describes the default cache behavior if you don't specify a `CacheBehavior` element or if files don't match any of the values of `PathPattern` in `CacheBehavior` elements. You must create exactly one default cache behavior.", "DefaultRootObject": "The object that you want CloudFront to request from your origin (for example, `index.html` ) when a viewer requests the root URL for your distribution ( `https://www.example.com` ) instead of an object in your distribution ( `https://www.example.com/product-description.html` ). Specifying a default root object avoids exposing the contents of your distribution.\n\nSpecify only the object name, for example, `index.html` . Don't add a `/` before the object name.\n\nIf you don't want to specify a default root object when you create a distribution, include an empty `DefaultRootObject` element.\n\nTo delete the default root object from an existing distribution, update the distribution configuration and include an empty `DefaultRootObject` element.\n\nTo replace the default root object, update the distribution configuration and specify the new object.\n\nFor more information about the default root object, see [Creating a Default Root Object](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/DefaultRootObject.html) in the *Amazon CloudFront Developer Guide* .", "Enabled": "From this field, you can enable or disable the selected distribution.", @@ -5949,7 +6363,7 @@ "Origins": "A complex type that contains information about origins for this distribution.\n\nSpecify a value for either the `Origins` or `OriginGroups` property.", "PriceClass": "The price class that corresponds with the maximum price that you want to pay for CloudFront service. If you specify `PriceClass_All` , CloudFront responds to requests for your objects from all CloudFront edge locations.\n\nIf you specify a price class other than `PriceClass_All` , CloudFront serves your objects from the CloudFront edge location that has the lowest latency among the edge locations in your price class. Viewers who are in or near regions that are excluded from your specified price class may encounter slower performance.\n\nFor more information about price classes, see [Choosing the Price Class for a CloudFront Distribution](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PriceClass.html) in the *Amazon CloudFront Developer Guide* . For information about CloudFront pricing, including how price classes (such as Price Class 100) map to CloudFront regions, see [Amazon CloudFront Pricing](https://docs.aws.amazon.com/cloudfront/pricing/) .", "Restrictions": "A complex type that identifies ways in which you want to restrict distribution of your content.", - "S3Origin": "", + "S3Origin": "The origin as an Amazon S3 bucket.\n\n> This property is legacy. We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", "Staging": "A Boolean that indicates whether this is a staging distribution. When this value is `true` , this is a staging distribution. 
When this value is `false` , this is not a staging distribution.", "ViewerCertificate": "A complex type that determines the distribution's SSL/TLS configuration for communicating with viewers.", "WebACLId": "A unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution. To specify a web ACL created using the latest version of AWS WAF , use the ACL ARN, for example `arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111` . To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example `a1b2c3d4-5678-90ab-cdef-EXAMPLE11111` .\n\nAWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to CloudFront, and lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, CloudFront responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). You can also configure CloudFront to return a custom error page when a request is blocked. For more information about AWS WAF , see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/what-is-aws-waf.html) ." @@ -5974,15 +6388,15 @@ "LambdaFunctionARN": "The ARN of the Lambda@Edge function. You must specify the ARN of a function version; you can't specify an alias or $LATEST." }, "AWS::CloudFront::Distribution LegacyCustomOrigin": { - "DNSName": "", - "HTTPPort": "", - "HTTPSPort": "", - "OriginProtocolPolicy": "", - "OriginSSLProtocols": "" + "DNSName": "The domain name assigned to your CloudFront distribution.", + "HTTPPort": "The HTTP port that CloudFront uses to connect to the origin. Specify the HTTP port that the origin listens on.", + "HTTPSPort": "The HTTPS port that CloudFront uses to connect to the origin. Specify the HTTPS port that the origin listens on.", + "OriginProtocolPolicy": "Specifies the protocol (HTTP or HTTPS) that CloudFront uses to connect to the origin.", + "OriginSSLProtocols": "The minimum SSL/TLS protocol version that CloudFront uses when communicating with your origin server over HTTPS.\n\nFor more information, see [Minimum Origin SSL Protocol](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginSSLProtocols) in the *Amazon CloudFront Developer Guide* ." }, "AWS::CloudFront::Distribution LegacyS3Origin": { - "DNSName": "", - "OriginAccessIdentity": "" + "DNSName": "The domain name assigned to your CloudFront distribution.", + "OriginAccessIdentity": "The CloudFront origin access identity to associate with the distribution. Use an origin access identity to configure the distribution so that end users can only access objects in an Amazon S3 bucket through CloudFront .\n\n> This property is legacy. We recommend that you use [OriginAccessControl](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudfront-originaccesscontrol.html) instead."
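Since `CNAMEs` , `CustomOrigin` , `S3Origin` , and the `Legacy*` property types above are all flagged as legacy with pointers to `Aliases` and `Origin` , a brief sketch of the recommended replacement shape may be useful. The bucket domain and IDs are placeholders, and the cache policy ID is assumed to be the managed CachingOptimized policy:

  {
    "ExampleDistribution": {
      "Type": "AWS::CloudFront::Distribution",
      "Properties": {
        "DistributionConfig": {
          "Enabled": true,
          "Origins": [
            {
              "Id": "example-s3-origin",
              "DomainName": "example-bucket.s3.amazonaws.com",
              "S3OriginConfig": { "OriginAccessIdentity": "" }
            }
          ],
          "DefaultCacheBehavior": {
            "TargetOriginId": "example-s3-origin",
            "ViewerProtocolPolicy": "redirect-to-https",
            "CachePolicyId": "658327ea-f89d-4fab-a63d-7e88639e58f6"
          }
        }
      }
    }
  }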
}, "AWS::CloudFront::Distribution Logging": { "Bucket": "The Amazon S3 bucket to store the access logs in, for example, `myawslogbucket.s3.amazonaws.com` .", @@ -6302,7 +6716,7 @@ "AWS::CloudTrail::EventDataStore AdvancedFieldSelector": { "EndsWith": "An operator that includes events that match the last few characters of the event record field specified as the value of `Field` .", "Equals": "An operator that includes events that match the exact value of the event record field specified as the value of `Field` . This is the only valid operator that you can use with the `readOnly` , `eventCategory` , and `resources.type` fields.", - "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", + "Field": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::One::UKey`\n- `AWS::One::User`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::RUM::AppMonitor`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. 
For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", "NotEndsWith": "An operator that excludes events that match the last few characters of the event record field specified as the value of `Field` .", "NotEquals": "An operator that excludes events that match the exact value of the event record field specified as the value of `Field` .", "NotStartsWith": "An operator that excludes events that match the first few characters of the event record field specified as the value of `Field` .", @@ -6344,7 +6758,7 @@ "AWS::CloudTrail::Trail AdvancedFieldSelector": { "EndsWith": "An operator that includes events that match the last few characters of the event record field specified as the value of `Field` .", "Equals": "An operator that includes events that match the exact value of the event record field specified as the value of `Field` . This is the only valid operator that you can use with the `readOnly` , `eventCategory` , and `resources.type` fields.", - "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", + "Field": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::One::UKey`\n- `AWS::One::User`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::RUM::AppMonitor`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. 
For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", "NotEndsWith": "An operator that excludes events that match the last few characters of the event record field specified as the value of `Field` .", "NotEquals": "An operator that excludes events that match the exact value of the event record field specified as the value of `Field` .", "NotStartsWith": "An operator that excludes events that match the first few characters of the event record field specified as the value of `Field` .", @@ -7153,7 +7567,7 @@ "Name": "The name of a pipeline-level variable." }, "AWS::CodePipeline::Webhook": { - "Authentication": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", + "Authentication": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be masked in the response. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", "AuthenticationConfiguration": "Properties that configure the authentication applied to incoming webhook trigger requests. The required properties depend on the authentication type. For GITHUB_HMAC, only the `SecretToken` property must be set. For IP, only the `AllowedIPRange` property must be set to a valid CIDR range. 
For UNAUTHENTICATED, no properties can be set.", "Filters": "A list of rules applied to the body/payload sent in the POST request to a webhook URL. All defined rules must pass for the request to be accepted and the pipeline started.", "Name": "The name of the webhook.", @@ -7164,7 +7578,7 @@ }, "AWS::CodePipeline::Webhook WebhookAuthConfiguration": { "AllowedIPRange": "The property used to configure acceptance of webhooks in an IP address range. For IP, only the `AllowedIPRange` property must be set. This property must be set to a valid CIDR range.", - "SecretToken": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities." + "SecretToken": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be masked in the response." }, "AWS::CodePipeline::Webhook WebhookFilterRule": { "JsonPath": "A JsonPath expression that is applied to the body/payload of the webhook. The value selected by the JsonPath expression must match the value specified in the `MatchEquals` field. Otherwise, the request is ignored. For more information, see [Java JsonPath implementation](https://docs.aws.amazon.com/https://github.com/json-path/JsonPath) in GitHub.", @@ -10299,7 +10713,7 @@ }, "AWS::Deadline::Farm": { "Description": "A description of the farm that helps identify what the farm is used for.", - "DisplayName": "The display name of the farm.", + "DisplayName": "The display name of the farm.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "KmsKeyArn": "The ARN for the KMS key.", "Tags": "The tags to add to your farm. Each tag consists of a tag key and a tag value. Tag keys and values are both required, but tag values can be empty strings." }, @@ -10310,7 +10724,7 @@ "AWS::Deadline::Fleet": { "Configuration": "The configuration details for the fleet.", "Description": "A description that helps identify what the fleet is used for.", - "DisplayName": "The display name of the fleet summary to update.", + "DisplayName": "The display name of the fleet summary to update.\n\n> This field can store any content. 
Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "FarmId": "The farm ID.", "MaxWorkerCount": "The maximum number of workers specified in the fleet.", "MinWorkerCount": "The minimum number of workers in the fleet.", @@ -10408,7 +10822,7 @@ "ProductId": "The product ID." }, "AWS::Deadline::Monitor": { - "DisplayName": "The name of the monitor that displays on the Deadline Cloud console.", + "DisplayName": "The name of the monitor that displays on the Deadline Cloud console.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "IdentityCenterInstanceArn": "The Amazon Resource Name (ARN) of the IAM Identity Center instance responsible for authenticating monitor users.", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role for the monitor. Users of the monitor use this role to access Deadline Cloud resources.", "Subdomain": "The subdomain used for the monitor URL. The full URL of the monitor is subdomain.Region.deadlinecloud.amazonaws.com." @@ -10417,7 +10831,7 @@ "AllowedStorageProfileIds": "The identifiers of the storage profiles that this queue can use to share assets between workers using different operating systems.", "DefaultBudgetAction": "The default action taken on a queue summary if a budget wasn't configured.", "Description": "A description of the queue that helps identify what the queue is used for.", - "DisplayName": "The display name of the queue summary to update.", + "DisplayName": "The display name of the queue summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "FarmId": "The farm ID.", "JobAttachmentSettings": "The job attachment settings. These are the Amazon S3 bucket name and the Amazon S3 prefix.", "JobRunAsUser": "Identifies the user for a job.", @@ -10459,7 +10873,7 @@ "QueueId": "The queue ID." }, "AWS::Deadline::StorageProfile": { - "DisplayName": "The display name of the storage profile summary to update.", + "DisplayName": "The display name of the storage profile summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "FarmId": "The unique identifier of the farm that contains the storage profile.", "FileSystemLocations": "Operating system specific file system path to the storage location.", "OsFamily": "The operating system (OS) family." @@ -11148,7 +11562,7 @@ "Placement": "The location where the instance launched, if applicable.", "Priority": "The priority for the launch template override. The highest priority is launched first.\n\nIf the On-Demand `AllocationStrategy` is set to `prioritized` , EC2 Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity.\n\nIf the Spot `AllocationStrategy` is set to `capacity-optimized-prioritized` , EC2 Fleet uses priority on a best-effort basis to determine which launch template override to use in fulfilling Spot capacity, but optimizes for capacity first.\n\nValid values are whole numbers starting at `0` . The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority. 
You can set the same priority for different launch template overrides.", "SubnetId": "The IDs of the subnets in which to launch the instances. Separate multiple subnet IDs using commas (for example, `subnet-1234abcdeexample1, subnet-0987cdef6example2` ). A request of type `instant` can have only one subnet ID.", - "WeightedCapacity": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour." + "WeightedCapacity": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour." }, "AWS::EC2::EC2Fleet FleetLaunchTemplateSpecificationRequest": { "LaunchTemplateId": "The ID of the launch template.\n\nYou must specify the `LaunchTemplateId` or the `LaunchTemplateName` , but not both.", @@ -11342,7 +11756,7 @@ "AwsService": "Limits which service in AWS that the pool can be used in. \"ec2\", for example, allows users to use space for Elastic IP addresses and VPCs.", "Description": "The description of the IPAM pool.", "IpamScopeId": "The ID of the scope in which you would like to create the IPAM pool.", - "Locale": "The locale of the IPAM pool. In IPAM, the locale is the AWS Region where you want to make an IPAM pool available for allocations. Only resources in the same Region as the locale of the pool can get IP address allocations from the pool. You can only allocate a CIDR for a VPC, for example, from an IPAM pool that shares a locale with the VPC\u2019s Region. Note that once you choose a Locale for a pool, you cannot modify it. If you choose an AWS Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.", + "Locale": "The locale of the IPAM pool.\n\nThe locale for the pool should be one of the following:\n\n- An AWS Region where you want this IPAM pool to be available for allocations.\n- The network border group for an AWS Local Zone where you want this IPAM pool to be available for allocations ( [supported Local Zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#byoip-zone-avail) ). This option is only available for IPAM IPv4 pools in the public scope.\n\nIf you choose an AWS Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.", "ProvisionedCidrs": "Information about the CIDRs provisioned to an IPAM pool.", "PublicIpSource": "The IP address source for pools in the public scope. Only used for provisioning IP address CIDRs to pools in the public scope. Default is `BYOIP` . 
For more information, see [Create IPv6 pools](https://docs.aws.amazon.com//vpc/latest/ipam/intro-create-ipv6-pools.html) in the *Amazon VPC IPAM User Guide* . By default, you can add only one Amazon-provided IPv6 CIDR block to a top-level IPv6 pool. For information on increasing the default limit, see [Quotas for your IPAM](https://docs.aws.amazon.com//vpc/latest/ipam/quotas-ipam.html) in the *Amazon VPC IPAM User Guide* .", "PubliclyAdvertisable": "Determines if a pool is publicly advertisable. This option is not available for pools with AddressFamily set to `ipv4` .", @@ -12387,7 +12801,7 @@ "Priority": "The priority for the launch template override. The highest priority is launched first.\n\nIf `OnDemandAllocationStrategy` is set to `prioritized` , Spot Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity.\n\nIf the Spot `AllocationStrategy` is set to `capacityOptimizedPrioritized` , Spot Fleet uses priority on a best-effort basis to determine which launch template override to use in fulfilling Spot capacity, but optimizes for capacity first.\n\nValid values are whole numbers starting at `0` . The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority. You can set the same priority for different launch template overrides.", "SpotPrice": "The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.\n\n> If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.", "SubnetId": "The ID of the subnet in which to launch the instances.", - "WeightedCapacity": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour." + "WeightedCapacity": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowestPrice` and `priceCapacityOptimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour." }, "AWS::EC2::SpotFleet LoadBalancersConfig": { "ClassicLoadBalancersConfig": "The Classic Load Balancers.", @@ -12435,7 +12849,7 @@ "SubnetId": "The IDs of the subnets in which to launch the instances. 
To specify multiple subnets, separate them using commas; for example, \"subnet-1234abcdeexample1, subnet-0987cdef6example2\".\n\nIf you specify a network interface, you must specify any subnets as part of the network interface instead of using this parameter.", "TagSpecifications": "The tags to apply during creation.", "UserData": "The base64-encoded user data that instances use when starting up. User data is limited to 16 KB.", - "WeightedCapacity": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1." + "WeightedCapacity": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowestPrice` and `priceCapacityOptimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour." }, "AWS::EC2::SpotFleet SpotFleetMonitoring": { "Enabled": "Enables monitoring for the instance.\n\nDefault: `false`" @@ -12500,7 +12914,7 @@ "AvailabilityZone": "The Availability Zone of the subnet.\n\nIf you update this property, you must also update the `CidrBlock` property.", "AvailabilityZoneId": "The AZ ID of the subnet.", "CidrBlock": "The IPv4 CIDR block assigned to the subnet.\n\nIf you update this property, we create a new subnet, and then delete the existing one.", - "EnableDns64": "Indicates whether DNS queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses for IPv4-only destinations. For more information, see [DNS64 and NAT64](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-nat64-dns64) in the *Amazon Virtual Private Cloud User Guide* .", + "EnableDns64": "Indicates whether DNS queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses for IPv4-only destinations.\n\n> You must first configure a NAT gateway in a public subnet (separate from the subnet containing the IPv6-only workloads). For example, the subnet containing the NAT gateway should have a `0.0.0.0/0` route pointing to the internet gateway. For more information, see [Configure DNS64 and NAT64](https://docs.aws.amazon.com/vpc/latest/userguide/nat-gateway-nat64-dns64.html#nat-gateway-nat64-dns64-walkthrough) in the *Amazon Virtual Private Cloud User Guide* .", "EnableLniAtDeviceIndex": "Indicates the device position for local network interfaces in this subnet. 
For example, `1` indicates local network interfaces in this subnet are the secondary network interface (eth1).", "Ipv4IpamPoolId": "An IPv4 IPAM pool ID for the subnet.", "Ipv4NetmaskLength": "An IPv4 netmask length for the subnet.", @@ -12798,6 +13212,7 @@ }, "AWS::EC2::VPNConnection": { "CustomerGatewayId": "The ID of the customer gateway at your end of the VPN connection.", + "EnableAcceleration": "", "StaticRoutesOnly": "Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.\n\nIf you are creating a VPN connection for a device that does not support Border Gateway Protocol (BGP), you must specify `true` .", "Tags": "Any tags assigned to the VPN connection.", "TransitGatewayId": "The ID of the transit gateway associated with the VPN connection.\n\nYou must specify either `TransitGatewayId` or `VpnGatewayId` , but not both.", @@ -13031,7 +13446,7 @@ "Tags": "An array of key-value pairs to apply to this resource." }, "AWS::ECR::Repository EncryptionConfiguration": { - "EncryptionType": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", + "EncryptionType": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", "KmsKey": "If you use the `KMS` encryption type, specify the AWS KMS key to use for encryption. The alias, key ID, or full ARN of the AWS KMS key can be specified. 
The key must exist in the same Region as the repository. If no key is specified, the default AWS managed AWS KMS key for Amazon ECR will be used." }, "AWS::ECR::Repository ImageScanningConfiguration": { @@ -13046,17 +13461,18 @@ "Value": "A `value` acts as a descriptor within a tag category (key)." }, "AWS::ECR::RepositoryCreationTemplate": { - "AppliedFor": "", - "Description": "", - "EncryptionConfiguration": "The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest.\n\nBy default, when no encryption configuration is set or the `AES256` encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts your data at rest using an AES-256 encryption algorithm. This does not require any action on your part.\n\nFor more control over the encryption of the contents of your repository, you can use server-side encryption with AWS Key Management Service key stored in AWS Key Management Service ( AWS KMS ) to encrypt your images. For more information, see [Amazon ECR encryption at rest](https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) in the *Amazon Elastic Container Registry User Guide* .", - "ImageTagMutability": "", - "LifecyclePolicy": "", - "Prefix": "", - "RepositoryPolicy": "", - "ResourceTags": "The tags attached to the resource." + "AppliedFor": "A list of enumerable Strings representing the repository creation scenarios that this template applies to. The two supported scenarios are `PULL_THROUGH_CACHE` and `REPLICATION` .", + "CustomRoleArn": "The ARN of the role to be assumed by Amazon ECR. Amazon ECR will assume your supplied role when the `customRoleArn` is specified. When this field isn't specified, Amazon ECR will use the service-linked role for the repository creation template.", + "Description": "The description associated with the repository creation template.", + "EncryptionConfiguration": "The encryption configuration associated with the repository creation template.", + "ImageTagMutability": "The tag mutability setting for the repository. If this parameter is omitted, the default setting of `MUTABLE` will be used which will allow image tags to be overwritten. If `IMMUTABLE` is specified, all image tags within the repository will be immutable which will prevent them from being overwritten.", + "LifecyclePolicy": "The lifecycle policy to use for repositories created using the template.", + "Prefix": "The repository namespace prefix associated with the repository creation template.", + "RepositoryPolicy": "The repository policy to apply to repositories created using the template. A repository policy is a permissions policy associated with a repository to control access permissions.", + "ResourceTags": "The metadata to apply to the repository to help you categorize and organize your repositories. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum length of 128 characters, and tag values can have a maximum length of 256 characters." }, "AWS::ECR::RepositoryCreationTemplate EncryptionConfiguration": { - "EncryptionType": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. 
For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", + "EncryptionType": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", "KmsKey": "If you use the `KMS` encryption type, specify the AWS KMS key to use for encryption. The alias, key ID, or full ARN of the AWS KMS key can be specified. The key must exist in the same Region as the repository. If no key is specified, the default AWS managed AWS KMS key for Amazon ECR will be used." }, "AWS::ECR::RepositoryCreationTemplate Tag": { @@ -13661,6 +14077,7 @@ "ResourcesVpcConfig": "The VPC configuration that's used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html) and [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) in the *Amazon EKS User Guide* . You must specify at least two subnets. You can specify up to five security groups, but we recommend that you use a dedicated security group for your cluster control plane.", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf. For more information, see [Amazon EKS Service IAM Role](https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html) in the **Amazon EKS User Guide** .", "Tags": "The metadata that you apply to the cluster to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. 
Cluster tags don't propagate to any other resources associated with the cluster.\n\n> You must have the `eks:TagResource` and `eks:UntagResource` permissions for your [IAM principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the AWS CloudFormation stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update.", + "UpgradePolicy": "This value indicates if extended support is enabled or disabled for the cluster.\n\n[Learn more about EKS Extended Support in the EKS User Guide.](https://docs.aws.amazon.com/eks/latest/userguide/extended-support-control.html)", "Version": "The desired Kubernetes version for your cluster. If you don't specify a value here, the default version available in Amazon EKS is used.\n\n> The default version might not be the latest version available." }, "AWS::EKS::Cluster AccessConfig": { @@ -13707,6 +14124,9 @@ "Key": "One part of a key-value pair that make up a tag. A `key` is a general label that acts like a category for more specific tag values.", "Value": "The optional part of a key-value pair that make up a tag. A `value` acts as a descriptor within a tag category (key)." }, + "AWS::EKS::Cluster UpgradePolicy": { + "SupportType": "Specify either `STANDARD` or `EXTENDED` . If `EXTENDED` is specified, the cluster enters extended support at the end of standard support. If `STANDARD` is specified, the cluster is automatically upgraded at the end of standard support." + }, "AWS::EKS::FargateProfile": { "ClusterName": "The name of your cluster.", "FargateProfileName": "The name of the Fargate profile.", @@ -14221,6 +14641,7 @@ "AutoStopConfiguration": "The configuration for an application to automatically stop after a certain amount of time being idle.", "ImageConfiguration": "The image configuration applied to all worker types.", "InitialCapacity": "The initial capacity of the application.", + "InteractiveConfiguration": "The interactive configuration object that enables the interactive use cases for an application.", "MaximumCapacity": "The maximum capacity of the application. This is cumulative across all workers at any given point in time during the lifespan of the application. No new resources will be created once any one of the defined limits is hit.", "MonitoringConfiguration": "A configuration specification to be used when provisioning an application. A configuration consists of a classification, properties, and optional nested configurations. A classification refers to an application-specific configuration file. Properties are the settings you want to change in that file.", "Name": "The name of the application.", @@ -14261,6 +14682,10 @@ "Key": "", "Value": "" }, + "AWS::EMRServerless::Application InteractiveConfiguration": { + "LivyEndpointEnabled": "Enables an Apache Livy endpoint that you can connect to and run interactive jobs.", + "StudioEnabled": "Enables you to connect an application to Amazon EMR Studio to run interactive workloads in a notebook." + }, "AWS::EMRServerless::Application LogTypeMapKeyValuePair": { "Key": "", "Value": "" }, @@ -14302,25 +14727,25 @@ }, "AWS::ElastiCache::CacheCluster": { "AZMode": "Specifies whether the nodes in this Memcached cluster are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region.\n\nThis parameter is only supported for Memcached clusters.\n\nIf the `AZMode` and `PreferredAvailabilityZones` are not specified, ElastiCache assumes `single-az` mode.", - "AutoMinorVersionUpgrade": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. 
This parameter is disabled for previous versions.", - "CacheNodeType": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . 
\n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis configuration variables `appendonly` and `appendfsync` are not supported on Redis version 2.8.22 and later.", + "AutoMinorVersionUpgrade": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "CacheNodeType": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. 
If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . \n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis OSS configuration variables `appendonly` and `appendfsync` are not supported on Redis OSS version 2.8.22 and later.", "CacheParameterGroupName": "The name of the parameter group to associate with this cluster. If this argument is omitted, the default parameter group for the specified engine is used. 
You cannot use any parameter group which has `cluster-enabled='yes'` when creating a cluster.", "CacheSecurityGroupNames": "A list of security group names to associate with this cluster.\n\nUse this parameter only when you are creating a cluster outside of an Amazon Virtual Private Cloud (Amazon VPC).", "CacheSubnetGroupName": "The name of the subnet group to be used for the cluster.\n\nUse this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC).\n\n> If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see `[AWS::ElastiCache::SubnetGroup](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-subnetgroup.html) .`", "ClusterName": "A name for the cache cluster. If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the cache cluster. For more information, see [Name Type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) .\n\nThe name must contain 1 to 50 alphanumeric characters or hyphens. The name must start with a letter and cannot end with a hyphen or contain two consecutive hyphens.", "Engine": "The name of the cache engine to be used for this cluster.\n\nValid values for this parameter are: `memcached` | `redis`", "EngineVersion": "The version number of the cache engine to be used for this cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.\n\n*Important:* You can upgrade to a newer engine version (see [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement) ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.", - "IpDiscovery": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "IpDiscovery": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "LogDeliveryConfigurations": "Specifies the destination, format and type of the logs.", - "NetworkType": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "NetworkType": "Must be either `ipv4` | `ipv6` | `dual_stack` . 
IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "NotificationTopicArn": "The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.\n\n> The Amazon SNS topic owner must be the same as the cluster owner.", "NumCacheNodes": "The number of cache nodes that the cache cluster should have.\n\n> However, if the `PreferredAvailabilityZone` and `PreferredAvailabilityZones` properties were not previously specified and you don't specify any new values, an update requires [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "Port": "The port number on which each of the cache nodes accepts connections.", "PreferredAvailabilityZone": "The EC2 Availability Zone in which the cluster is created.\n\nAll nodes belonging to this cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use `PreferredAvailabilityZones` .\n\nDefault: System chosen Availability Zone.", "PreferredAvailabilityZones": "A list of the Availability Zones in which cache nodes are created. The order of the zones in the list is not important.\n\nThis option is only supported on Memcached.\n\n> If you are creating your cluster in an Amazon VPC (recommended) you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group.\n> \n> The number of Availability Zones listed must equal the value of `NumCacheNodes` . \n\nIf you want all the nodes in the same Availability Zone, use `PreferredAvailabilityZone` instead, or repeat the Availability Zone multiple times in the list.\n\nDefault: System chosen Availability Zones.", "PreferredMaintenanceWindow": "Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.\n\nValid values for `ddd` are:\n\n- `sun`\n- `mon`\n- `tue`\n- `wed`\n- `thu`\n- `fri`\n- `sat`\n\nExample: `sun:23:00-mon:01:30`", - "SnapshotArns": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", - "SnapshotName": "The name of a Redis snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", + "SnapshotArns": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . 
\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "SnapshotName": "The name of a Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", "SnapshotRetentionLimit": "The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set `SnapshotRetentionLimit` to 5, a snapshot taken today is retained for 5 days before being deleted.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nDefault: 0 (i.e., automatic backups are disabled for this cache cluster).", "SnapshotWindow": "The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).\n\nExample: `05:00-09:00`\n\nIf you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", "Tags": "A list of tags to be added to this resource.", @@ -14348,10 +14773,10 @@ "Value": "The tag's value. May be null." }, "AWS::ElastiCache::GlobalReplicationGroup": { - "AutomaticFailoverEnabled": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis (cluster mode enabled) replication groups.", + "AutomaticFailoverEnabled": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.", "CacheNodeType": "The cache node type of the Global datastore", "CacheParameterGroupName": "The name of the cache parameter group to use with the Global datastore. It must be compatible with the major engine version used by the Global datastore.", - "EngineVersion": "The Elasticache Redis engine version.", + "EngineVersion": "The ElastiCache Redis OSS engine version.", "GlobalNodeGroupCount": "The number of node groups that comprise the Global Datastore.", "GlobalReplicationGroupDescription": "The optional description of the Global datastore", "GlobalReplicationGroupIdSuffix": "The suffix name of a Global Datastore. The suffix guarantees uniqueness of the Global Datastore name across multiple regions.", @@ -14369,7 +14794,7 @@ "ReshardingConfigurations": "A list of PreferredAvailabilityZones objects that specifies the configuration of a node group in the resharded cluster." }, "AWS::ElastiCache::GlobalReplicationGroup ReshardingConfiguration": { - "NodeGroupId": "Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "NodeGroupId": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "PreferredAvailabilityZones": "A list of preferred availability zones for the nodes in this cluster." }, "AWS::ElastiCache::ParameterGroup": { @@ -14383,28 +14808,28 @@ "Value": "The tag's value. May be null." }, "AWS::ElastiCache::ReplicationGroup": { - "AtRestEncryptionEnabled": "A flag that enables encryption at rest when set to `true` .\n\nYou cannot modify the value of `AtRestEncryptionEnabled` after the replication group is created. 
To enable encryption at rest on a replication group you must set `AtRestEncryptionEnabled` to `true` when you create the replication group.\n\n*Required:* Only available when creating a replication group in an Amazon VPC using redis version `3.2.6` or `4.x` onward.\n\nDefault: `false`", - "AuthToken": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", - "AutoMinorVersionUpgrade": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", - "AutomaticFailoverEnabled": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis (cluster mode enabled) replication groups.\n\nDefault: false", + "AtRestEncryptionEnabled": "A flag that enables encryption at rest when set to `true` .\n\nYou cannot modify the value of `AtRestEncryptionEnabled` after the replication group is created. To enable encryption at rest on a replication group you must set `AtRestEncryptionEnabled` to `true` when you create the replication group.\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`", + "AuthToken": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis OSS AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "AutoMinorVersionUpgrade": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. 
This parameter is disabled for previous versions.", + "AutomaticFailoverEnabled": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.\n\nDefault: false", "CacheNodeType": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.12xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . \n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.12xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Amazon Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)", - "CacheParameterGroupName": "The name of the parameter group to associate with this replication group. 
If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", + "CacheParameterGroupName": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis OSS (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis OSS (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", "CacheSecurityGroupNames": "A list of cache security group names to associate with this replication group.", "CacheSubnetGroupName": "The name of the cache subnet group to be used for the replication group.\n\n> If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see [AWS::ElastiCache::SubnetGroup](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-subnetgroup.html) .", - "ClusterMode": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", + "ClusterMode": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", "DataTieringEnabled": "Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see [Data tiering](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html) .", "Engine": "The name of the cache engine to be used for the clusters in this replication group. The value must be set to `Redis` .", "EngineVersion": "The version number of the cache engine to be used for the clusters in this replication group. 
To view the supported cache engine versions, use the `DescribeCacheEngineVersions` operation.\n\n*Important:* You can upgrade to a newer engine version (see [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement) ) in the *ElastiCache User Guide* , but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.", "GlobalReplicationGroupId": "The name of the Global datastore", - "IpDiscovery": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "IpDiscovery": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "KmsKeyId": "The ID of the KMS key used to encrypt the disk on the cluster.", "LogDeliveryConfigurations": "Specifies the destination, format and type of the logs.", "MultiAZEnabled": "A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see [Minimizing Downtime: Multi-AZ](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html) .", - "NetworkType": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", - "NodeGroupConfiguration": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "NetworkType": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "NodeGroupConfiguration": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis OSS cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. 
When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "NotificationTopicArn": "The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.\n\n> The Amazon SNS topic owner must be the same as the cluster owner.", "NumCacheClusters": "The number of clusters this replication group initially has.\n\nThis parameter is not used if there is more than one node group (shard). You should use `ReplicasPerNodeGroup` instead.\n\nIf `AutomaticFailoverEnabled` is `true` , the value of this parameter must be at least 2. If `AutomaticFailoverEnabled` is `false` you can omit this parameter (it will default to 1), or you can explicitly set it to a value between 2 and 6.\n\nThe maximum permitted value for `NumCacheClusters` is 6 (1 primary plus 5 replicas).", - "NumNodeGroups": "An optional parameter that specifies the number of node groups (shards) for this Redis (cluster mode enabled) replication group. For Redis (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", + "NumNodeGroups": "An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", "Port": "The port number on which each member of the replication group accepts connections.", "PreferredCacheClusterAZs": "A list of EC2 Availability Zones in which the replication group's clusters are created. The order of the Availability Zones in the list is the order in which clusters are allocated. The primary cluster is created in the first AZ in the list.\n\nThis parameter is not used if there is more than one node group (shard). You should use `NodeGroupConfiguration` instead.\n\n> If you are creating your replication group in an Amazon VPC (recommended), you can only locate clusters in Availability Zones associated with the subnets in the selected subnet group.\n> \n> The number of Availability Zones listed must equal the value of `NumCacheClusters` . \n\nDefault: system chosen Availability Zones.", "PreferredMaintenanceWindow": "Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). 
The minimum maintenance window is a 60 minute period.\n\nValid values for `ddd` are:\n\n- `sun`\n- `mon`\n- `tue`\n- `wed`\n- `thu`\n- `fri`\n- `sat`\n\nExample: `sun:23:00-mon:01:30`", @@ -14413,14 +14838,14 @@ "ReplicationGroupDescription": "A user-created description for the replication group.", "ReplicationGroupId": "The replication group identifier. This parameter is stored as a lowercase string.\n\nConstraints:\n\n- A name must contain from 1 to 40 alphanumeric characters or hyphens.\n- The first character must be a letter.\n- A name cannot end with a hyphen or contain two consecutive hyphens.", "SecurityGroupIds": "One or more Amazon VPC security groups associated with this replication group.\n\nUse this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud (Amazon VPC).", - "SnapshotArns": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "SnapshotArns": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "SnapshotName": "The name of a snapshot from which to restore data into the new replication group. The snapshot status changes to `restoring` while the new replication group is being created.", "SnapshotRetentionLimit": "The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set `SnapshotRetentionLimit` to 5, a snapshot that was taken today is retained for 5 days before being deleted.\n\nDefault: 0 (i.e., automatic backups are disabled for this cluster).", "SnapshotWindow": "The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).\n\nExample: `05:00-09:00`\n\nIf you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.", - "SnapshottingClusterId": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups.", + "SnapshottingClusterId": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups.", "Tags": "A list of tags to be added to this resource. Tags are comma-separated key,value pairs (e.g. Key= `myKey` , Value= `myKeyValue` . You can include multiple tags as shown following: Key= `myKey` , Value= `myKeyValue` Key= `mySecondKey` , Value= `mySecondKeyValue` . 
Tags on replication groups will be replicated to all nodes.", - "TransitEncryptionEnabled": "A flag that enables in-transit encryption when set to `true` .\n\nYou cannot modify the value of `TransitEncryptionEnabled` after the cluster is created. To enable in-transit encryption on a cluster you must set `TransitEncryptionEnabled` to `true` when you create a cluster.\n\nThis parameter is valid only if the `Engine` parameter is `redis` , the `EngineVersion` parameter is `3.2.6` or `4.x` onward, and the cluster is being created in an Amazon VPC.\n\nIf you enable in-transit encryption, you must also specify a value for `CacheSubnetGroup` .\n\n*Required:* Only available when creating a replication group in an Amazon VPC using redis version `3.2.6` or `4.x` onward.\n\nDefault: `false`\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` .", - "TransitEncryptionMode": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", + "TransitEncryptionEnabled": "A flag that enables in-transit encryption when set to `true` .\n\nYou cannot modify the value of `TransitEncryptionEnabled` after the cluster is created. To enable in-transit encryption on a cluster you must set `TransitEncryptionEnabled` to `true` when you create a cluster.\n\nThis parameter is valid only if the `Engine` parameter is `redis` , the `EngineVersion` parameter is `3.2.6` or `4.x` onward, and the cluster is being created in an Amazon VPC.\n\nIf you enable in-transit encryption, you must also specify a value for `CacheSubnetGroup` .\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` .", + "TransitEncryptionMode": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis OSS clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", "UserGroupIds": "The IDs of the user groups to associate with the replication group."
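The `AWS::ElastiCache::ReplicationGroup` properties documented above constrain one another: `AutomaticFailoverEnabled` needs at least two clusters, `AtRestEncryptionEnabled` is immutable after creation, and the HIPAA note requires `TransitEncryptionEnabled`, an `AuthToken`, and a cache subnet group together. A minimal YAML sketch of one consistent combination; the logical IDs and the referenced subnet group are illustrative and not part of this schema:

```yaml
Resources:
  # Hypothetical logical IDs; only the property names and value constraints
  # come from the descriptions documented above.
  DemoReplicationGroup:
    Type: AWS::ElastiCache::ReplicationGroup
    Properties:
      ReplicationGroupDescription: Encrypted Redis OSS group, cluster mode disabled
      Engine: redis
      CacheNodeType: cache.t4g.medium
      NumCacheClusters: 2               # must be >= 2 because AutomaticFailoverEnabled is true
      AutomaticFailoverEnabled: true
      AtRestEncryptionEnabled: true     # immutable after creation, so set it at create time
      TransitEncryptionEnabled: true
      TransitEncryptionMode: required   # an existing group must pass through "preferred" first
      CacheSubnetGroupName: !Ref DemoSubnetGroup   # assumed AWS::ElastiCache::SubnetGroup defined elsewhere
```

Per the `TransitEncryptionMode` description, a group created with `preferred` can later be updated to `required` without triggering replacement.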
}, "AWS::ElastiCache::ReplicationGroup CloudWatchLogsDestinationDetails": { @@ -14440,7 +14865,7 @@ "LogType": "Valid value is either `slow-log` , which refers to [slow-log](https://docs.aws.amazon.com/https://redis.io/commands/slowlog) or `engine-log` ." }, "AWS::ElastiCache::ReplicationGroup NodeGroupConfiguration": { - "NodeGroupId": "Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "NodeGroupId": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "PrimaryAvailabilityZone": "The Availability Zone where the primary node of this node group (shard) is launched.", "ReplicaAvailabilityZones": "A list of Availability Zones to be used for the read replicas. The number of Availability Zones in this list must match the value of `ReplicaCount` or `ReplicasPerNodeGroup` if not specified.", "ReplicaCount": "The number of read replica nodes in this node group (shard).", @@ -14465,7 +14890,7 @@ }, "AWS::ElastiCache::ServerlessCache": { "CacheUsageLimits": "The cache usage limit for the serverless cache.", - "DailySnapshotTime": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis and Serverless Memcached only.", + "DailySnapshotTime": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis OSS and Serverless Memcached only.", "Description": "A description of the serverless cache.", "Endpoint": "Represents the information required for client programs to connect to a cache node. This value is read-only.", "Engine": "The engine the serverless cache is compatible with.", @@ -14476,10 +14901,10 @@ "SecurityGroupIds": "The IDs of the EC2 security groups associated with the serverless cache.", "ServerlessCacheName": "The unique identifier of the serverless cache.", "SnapshotArnsToRestore": "The ARN of the snapshot from which to restore data into the new cache.", - "SnapshotRetentionLimit": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis and Serverless Memcached only.", + "SnapshotRetentionLimit": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis OSS and Serverless Memcached only.", "SubnetIds": "If no subnet IDs are given and your VPC is in us-west-1, then ElastiCache will select 2 default subnets across AZs in your VPC. For all other Regions, if no subnet IDs are given then ElastiCache will select 3 default subnets across AZs in your default VPC.", "Tags": "A list of tags to be added to this resource.", - "UserGroupId": "The identifier of the user group associated with the serverless cache. Available for Redis only. Default is NULL." + "UserGroupId": "The identifier of the user group associated with the serverless cache. Available for Redis OSS only. Default is NULL." 
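For `AWS::ElastiCache::ServerlessCache`, the snapshot fields above (`DailySnapshotTime`, `SnapshotRetentionLimit`) apply only to Redis OSS and Serverless Memcached. A hedged YAML sketch; the names and limit values are illustrative, and the `DailySnapshotTime` format is assumed to be an `hh:mm` UTC string:

```yaml
Resources:
  DemoServerlessCache:                 # hypothetical logical ID
    Type: AWS::ElastiCache::ServerlessCache
    Properties:
      ServerlessCacheName: demo-cache
      Engine: redis
      DailySnapshotTime: "04:00"       # assumed hh:mm (UTC); Redis OSS / Serverless Memcached only
      SnapshotRetentionLimit: 5
      CacheUsageLimits:
        DataStorage:
          Maximum: 10                  # gigabytes, per the DataStorage description just below
          Unit: GB
        ECPUPerSecond:
          Maximum: 5000
```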
}, "AWS::ElastiCache::ServerlessCache CacheUsageLimits": { "DataStorage": "The maximum data storage limit in the cache, expressed in Gigabytes.", @@ -15048,7 +15473,7 @@ }, "AWS::EntityResolution::IdMappingWorkflow": { "Description": "A description of the workflow.", - "IdMappingTechniques": "An object which defines the `idMappingType` and the `providerProperties` .", + "IdMappingTechniques": "An object which defines the ID mapping technique and any additional configurations.", "InputSourceConfig": "A list of `InputSource` objects, which have the fields `InputSourceARN` and `SchemaName` .", "OutputSourceConfig": "A list of `IdMappingWorkflowOutputSource` objects, each of which contains fields `OutputS3Path` and `Output` .", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to create resources on your behalf as part of workflow execution.", @@ -15056,21 +15481,20 @@ "WorkflowName": "The name of the workflow. There can't be multiple `IdMappingWorkflows` with the same name." }, "AWS::EntityResolution::IdMappingWorkflow IdMappingRuleBasedProperties": { - "AttributeMatchingModel": "", - "RecordMatchingModel": "", - "RuleDefinitionType": "", - "Rules": "" + "AttributeMatchingModel": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the `attributeMatchingModel` .\n\nIf you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A matches the value of the `BusinessEmail` field of Profile B, the two profiles are matched on the `Email` attribute type.\n\nIf you choose `ONE_TO_ONE` , the system can only match attributes if the sub-types are an exact match. For example, for the `Email` attribute type, the system will only consider it a match if the value of the `Email` field of Profile A matches the value of the `Email` field of Profile B.", + "RecordMatchingModel": "The type of matching record that is allowed to be used in an ID mapping workflow.\n\nIf the value is set to `ONE_SOURCE_TO_ONE_TARGET` , only one record in the source can be matched to the same record in the target.\n\nIf the value is set to `MANY_SOURCE_TO_ONE_TARGET` , multiple records in the source can be matched to one record in the target.", + "RuleDefinitionType": "The set of rules you can use in an ID mapping workflow. The limitations specified for the source or target to define the match rules must be compatible.", + "Rules": "The rules that can be used for ID mapping." }, "AWS::EntityResolution::IdMappingWorkflow IdMappingTechniques": { "IdMappingType": "The type of ID mapping.", - "NormalizationVersion": "", "ProviderProperties": "An object which defines any additional configurations required by the provider service.", - "RuleBasedProperties": "" + "RuleBasedProperties": "An object which defines any additional configurations required by rule-based matching." }, "AWS::EntityResolution::IdMappingWorkflow IdMappingWorkflowInputSource": { - "InputSourceARN": "An AWS Glue table ARN for the input source table.", + "InputSourceARN": "An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.", "SchemaArn": "The ARN (Amazon Resource Name) that AWS Entity Resolution generated for the `SchemaMapping` .", - "Type": "The type of ID namespace. 
There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve to." + "Type": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` which all `sourceIds` will resolve to." }, "AWS::EntityResolution::IdMappingWorkflow IdMappingWorkflowOutputSource": { "KMSArn": "Customer AWS KMS ARN for encryption at rest. If not provided, system will use an AWS Entity Resolution managed KMS key.", @@ -15085,8 +15509,8 @@ "ProviderServiceArn": "The ARN of the provider service." }, "AWS::EntityResolution::IdMappingWorkflow Rule": { - "MatchingKeys": "", - "RuleName": "" + "MatchingKeys": "A list of `MatchingKeys` . The `MatchingKeys` must have been defined in the `SchemaMapping` . Two records are considered to match according to this rule if all of the `MatchingKeys` match.", + "RuleName": "A name for the matching rule." }, "AWS::EntityResolution::IdMappingWorkflow Tag": { "Key": "The key of the tag.", @@ -15099,15 +15523,15 @@ "InputSourceConfig": "A list of `InputSource` objects, which have the fields `InputSourceARN` and `SchemaName` .", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to access the resources defined in this `IdNamespace` on your behalf as part of the workflow run.", "Tags": "The tags used to organize, track, or control access for this resource.", - "Type": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve to." + "Type": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` which all `sourceIds` will resolve to." }, "AWS::EntityResolution::IdNamespace IdNamespaceIdMappingWorkflowProperties": { "IdMappingType": "The type of ID mapping.", "ProviderProperties": "An object which defines any additional configurations required by the provider service.", - "RuleBasedProperties": "" + "RuleBasedProperties": "An object which defines any additional configurations required by rule-based matching." }, "AWS::EntityResolution::IdNamespace IdNamespaceInputSource": { - "InputSourceARN": "An AWS Glue table ARN for the input source table.", + "InputSourceARN": "An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.", "SchemaName": "The name of the schema." }, "AWS::EntityResolution::IdNamespace NamespaceProviderProperties": { @@ -15115,14 +15539,14 @@ "ProviderServiceArn": "The Amazon Resource Name (ARN) of the provider service." }, "AWS::EntityResolution::IdNamespace NamespaceRuleBasedProperties": { - "AttributeMatchingModel": "", - "RecordMatchingModels": "", - "RuleDefinitionTypes": "", - "Rules": "" + "AttributeMatchingModel": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the `attributeMatchingModel` .\n\nIf you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. 
For example, if the value of the `Email` field of Profile A matches the value of the `BusinessEmail` field of Profile B, the two profiles are matched on the `Email` attribute type.\n\nIf you choose `ONE_TO_ONE` , the system can only match attributes if the sub-types are an exact match. For example, for the `Email` attribute type, the system will only consider it a match if the value of the `Email` field of Profile A matches the value of the `Email` field of Profile B.", + "RecordMatchingModels": "The type of matching record that is allowed to be used in an ID mapping workflow.\n\nIf the value is set to `ONE_SOURCE_TO_ONE_TARGET` , only one record in the source can be matched to the same record in the target.\n\nIf the value is set to `MANY_SOURCE_TO_ONE_TARGET` , multiple records in the source can be matched to one record in the target.", + "RuleDefinitionTypes": "The sets of rules you can use in an ID mapping workflow. The limitations specified for the source and target must be compatible.", + "Rules": "The rules for the ID namespace." }, "AWS::EntityResolution::IdNamespace Rule": { - "MatchingKeys": "", - "RuleName": "" + "MatchingKeys": "A list of `MatchingKeys` . The `MatchingKeys` must have been defined in the `SchemaMapping` . Two records are considered to match according to this rule if all of the `MatchingKeys` match.", + "RuleName": "A name for the matching rule." }, "AWS::EntityResolution::IdNamespace Tag": { "Key": "The key of the tag.", @@ -15170,8 +15594,8 @@ "RuleName": "A name for the matching rule." }, "AWS::EntityResolution::MatchingWorkflow RuleBasedProperties": { - "AttributeMatchingModel": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the AttributeMatchingModel. When choosing `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of `BusinessEmail` field of Profile B matches, the two profiles are matched on the `Email` type. When choosing `ONE_TO_ONE` ,the system can only match if the sub-types are exact matches. For example, only when the value of the `Email` field of Profile A and the value of the `Email` field of Profile B matches, the two profiles are matched on the `Email` type.", - "MatchPurpose": "", + "AttributeMatchingModel": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the `attributeMatchingModel` .\n\nIf you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of the `BusinessEmail` field of Profile B match, the two profiles are matched on the `Email` attribute type.\n\nIf you choose `ONE_TO_ONE` , the system can only match attributes if the sub-types are an exact match.
For example, for the `Email` attribute type, the system will only consider it a match if the value of the `Email` field of Profile A matches the value of the `Email` field of Profile B.", + "MatchPurpose": "An indicator of whether to generate IDs and index the data or not.\n\nIf you choose `IDENTIFIER_GENERATION` , the process generates IDs and indexes the data.\n\nIf you choose `INDEXING` , the process indexes the data without generating IDs.", "Rules": "A list of `Rule` objects, each of which have fields `RuleName` and `MatchingKeys` ." }, "AWS::EntityResolution::MatchingWorkflow Tag": { @@ -15195,8 +15619,8 @@ "AWS::EntityResolution::SchemaMapping SchemaInputAttribute": { "FieldName": "A string containing the field name.", "GroupName": "A string that instructs AWS Entity Resolution to combine several columns into a unified column with the identical attribute type.\n\nFor example, when working with columns such as `first_name` , `middle_name` , and `last_name` , assigning them a common `groupName` will prompt AWS Entity Resolution to concatenate them into a single value.", - "Hashed": "", - "MatchKey": "A key that allows grouping of multiple input attributes into a unified matching group. For example, consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . By assigning a `matchKey` called `address` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group. If no `matchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.", + "Hashed": "Indicates if the column values are hashed in the schema input. If the value is set to `TRUE` , the column values are hashed. If the value is set to `FALSE` , the column values are cleartext.", + "MatchKey": "A key that allows grouping of multiple input attributes into a unified matching group.\n\nFor example, consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . By assigning a `matchKey` called `address` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group.\n\nIf no `matchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.", "SubType": "The subtype of the attribute, selected from a list of values.", "Type": "The type of the attribute, selected from a list of values." }, @@ -16363,7 +16787,7 @@ "FleetType": "Indicates whether to use On-Demand or Spot instances for this fleet. By default, this property is set to `ON_DEMAND` . Learn more about when to use [On-Demand versus Spot Instances](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-ec2-instances.html#gamelift-ec2-instances-spot) . This fleet property can't be changed after the fleet is created.", "InstanceRoleARN": "A unique identifier for an IAM role with access permissions to other AWS services. Any application that runs on an instance in the fleet--including install scripts, server processes, and other processes--can use these permissions to interact with AWS resources that you own or have access to. For more information about using the role with your game server builds, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . 
This attribute is used with fleets where `ComputeType` is \"EC2\" or \"Container\".", "InstanceRoleCredentialsProvider": "Indicates that fleet instances maintain a shared credentials file for the IAM role defined in `InstanceRoleArn` . Shared credentials allow applications that are deployed with the game server executable to communicate with other AWS resources. This property is used only when the game server is integrated with the server SDK version 5.x. For more information about using shared credentials, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is \"EC2\" or \"Container\".", - "Locations": "A set of remote locations to deploy additional instances to and manage as part of the fleet. This parameter can only be used when creating fleets in AWS Regions that support multiple locations. You can add any Amazon GameLift-supported AWS Region as a remote location, in the form of an AWS Region code, such as `us-west-2` or Local Zone code. To create a fleet with instances in the home Region only, don't set this parameter.\n\nWhen using this parameter, Amazon GameLift requires you to include your home location in the request.", + "Locations": "A set of remote locations to deploy additional instances to and manage as a multi-location fleet. Use this parameter when creating a fleet in AWS Regions that support multiple locations. You can add any AWS Region or Local Zone that's supported by Amazon GameLift. Provide a list of one or more AWS Region codes, such as `us-west-2` , or Local Zone names. When using this parameter, Amazon GameLift requires you to include your home location in the request. For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "MaxSize": "The maximum number of instances that are allowed in the specified fleet location. If this parameter is not set, the default is 1.", "MetricGroups": "The name of an AWS CloudWatch metric group to add this fleet to. A metric group is used to aggregate the metrics for multiple fleets. You can specify an existing metric group name or set a new name to create a new metric group. A fleet can be included in only one metric group at a time.", "MinSize": "The minimum number of instances that are allowed in the specified fleet location. If this parameter is not set, the default is 0.", @@ -16407,7 +16831,7 @@ "MinSize": "The minimum number of instances that are allowed in the specified fleet location. If this parameter is not set, the default is 0." }, "AWS::GameLift::Fleet LocationConfiguration": { - "Location": "An AWS Region code, such as `us-west-2` .", + "Location": "An AWS Region code, such as `us-west-2` . For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "LocationCapacity": "Current resource capacity settings for managed EC2 fleets and container fleets. 
For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)" }, "AWS::GameLift::Fleet ResourceCreationLimitPolicy": { @@ -16506,7 +16930,7 @@ }, "AWS::GameLift::Location": { "LocationName": "A descriptive name for the custom location.", - "Tags": "A list of labels to assign to the new matchmaking configuration resource. Tags are developer-defined key-value pairs. Tagging AWS resources are useful for resource management, access management and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Rareference* ." + "Tags": "A list of labels to assign to the new resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management, and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference* ." }, "AWS::GameLift::Location Tag": { "Key": "The key for a developer-defined key value pair for tagging an AWS resource.", @@ -16582,6 +17006,7 @@ "Tags": "Add tags for a cross-account attachment.\n\nFor more information, see [Tagging in AWS Global Accelerator](https://docs.aws.amazon.com/global-accelerator/latest/dg/tagging-in-global-accelerator.html) in the *AWS Global Accelerator Developer Guide* ." }, "AWS::GlobalAccelerator::CrossAccountAttachment Resource": { + "Cidr": "An IP address range, in CIDR format, that is specified as a resource. The address must be provisioned and advertised in AWS Global Accelerator by following the bring your own IP address (BYOIP) process for Global Accelerator.\n\nFor more information, see [Bring your own IP addresses (BYOIP)](https://docs.aws.amazon.com/global-accelerator/latest/dg/using-byoip.html) in the *AWS Global Accelerator Developer Guide* .", "EndpointId": "The endpoint ID for the endpoint that is specified as a AWS resource.\n\nAn endpoint ID for the cross-account feature is the ARN of an AWS resource, such as a Network Load Balancer, that Global Accelerator supports as an endpoint for an accelerator.", "Region": "The AWS Region where a shared endpoint resource is located." }, @@ -18025,7 +18450,7 @@ "AWS::IAM::OIDCProvider": { "ClientIdList": "A list of client IDs (also known as audiences) that are associated with the specified IAM OIDC provider resource object. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .", "Tags": "A list of tags that are attached to the specified IAM OIDC provider. The returned list of tags is sorted by tag key. For more information about tagging, see [Tagging IAM resources](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) in the *IAM User Guide* .", - "ThumbprintList": "A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object.
For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .", + "ThumbprintList": "A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .\n\nThis property is optional. If it is not included, IAM will retrieve and use the top intermediate certificate authority (CA) thumbprint of the OpenID Connect identity provider server certificate.", "Url": "The URL that the IAM OIDC provider resource object is associated with. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) ." }, "AWS::IAM::OIDCProvider Tag": { @@ -20348,7 +20773,7 @@ "AssetModelDescription": "A description for the asset model.", "AssetModelExternalId": "The external ID of the asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", "AssetModelHierarchies": "The hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see [Asset hierarchies](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 10 hierarchies per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", - "AssetModelName": "A unique, friendly name for the asset model.", + "AssetModelName": "A unique name for the asset model.", "AssetModelProperties": "The property definitions of the asset model. For more information, see [Asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-properties.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 200 properties per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", "AssetModelType": "The type of asset model.\n\n- *ASSET_MODEL* \u2013 (default) An asset model that you can use to create assets. Can't be included as a component in another asset model.\n- *COMPONENT_MODEL* \u2013 A reusable component that you can include in the composite models of other asset models. You can't create assets directly from this type of asset model.", "Tags": "A list of key-value pairs that contain metadata for the asset. For more information, see [Tagging your AWS IoT SiteWise resources](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/tag-resources.html) in the *AWS IoT SiteWise User Guide* ." @@ -20439,7 +20864,7 @@ }, "AWS::IoTSiteWise::Gateway": { "GatewayCapabilitySummaries": "A list of gateway capability summaries that each contain a namespace and status. Each gateway capability defines data sources for the gateway. 
To retrieve a capability configuration's definition, use [DescribeGatewayCapabilityConfiguration](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_DescribeGatewayCapabilityConfiguration.html) .", - "GatewayName": "A unique, friendly name for the gateway.", + "GatewayName": "A unique name for the gateway.", "GatewayPlatform": "The gateway's platform. You can only specify one platform in a gateway.", "Tags": "A list of key-value pairs that contain metadata for the gateway. For more information, see [Tagging your AWS IoT SiteWise resources](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/tag-resources.html) in the *AWS IoT SiteWise User Guide* ." }, @@ -21904,6 +22329,7 @@ "ElasticsearchDestinationConfiguration": "An Amazon ES destination for the delivery stream.\n\nConditional. You must specify only one destination configuration.\n\nIf you change the delivery stream destination from an Amazon ES destination to an Amazon S3 or Amazon Redshift destination, update requires [some interruptions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-some-interrupt) .", "ExtendedS3DestinationConfiguration": "An Amazon S3 destination for the delivery stream.\n\nConditional. You must specify only one destination configuration.\n\nIf you change the delivery stream destination from an Amazon Extended S3 destination to an Amazon ES destination, update requires [some interruptions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-some-interrupt) .", "HttpEndpointDestinationConfiguration": "Enables configuring Kinesis Firehose to deliver data to any HTTP endpoint destination. You can specify only one destination.", + "IcebergDestinationConfiguration": "Specifies the destination configuration settings for Apache Iceberg Tables.\n\nAmazon Data Firehose is in preview release and is subject to change.", "KinesisStreamSourceConfiguration": "When a Kinesis stream is used as the source for the delivery stream, a [KinesisStreamSourceConfiguration](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-kinesisstreamsourceconfiguration.html) containing the Kinesis stream ARN and the role ARN for the source stream.", "MSKSourceConfiguration": "The configuration for the Amazon MSK cluster to be used as the source for a delivery stream.", "RedshiftDestinationConfiguration": "An Amazon Redshift destination for the delivery stream.\n\nConditional. You must specify only one destination configuration.\n\nIf you change the delivery stream destination from an Amazon Redshift destination to an Amazon ES destination, update requires [some interruptions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-some-interrupt) .", @@ -21962,6 +22388,9 @@ "IntervalInSeconds": "The length of time, in seconds, that Kinesis Data Firehose buffers incoming data before delivering it to the destination. For valid values, see the `IntervalInSeconds` content for the [BufferingHints](https://docs.aws.amazon.com/firehose/latest/APIReference/API_BufferingHints.html) data type in the *Amazon Kinesis Data Firehose API Reference* .", "SizeInMBs": "The size of the buffer, in MBs, that Kinesis Data Firehose uses for incoming data before delivering it to the destination.
For valid values, see the `SizeInMBs` content for the [BufferingHints](https://docs.aws.amazon.com/firehose/latest/APIReference/API_BufferingHints.html) data type in the *Amazon Kinesis Data Firehose API Reference* ." }, + "AWS::KinesisFirehose::DeliveryStream CatalogConfiguration": { + "CatalogArn": "Specifies the Glue catalog ARN identifier of the destination Apache Iceberg Tables. You must specify the ARN in the format `arn:aws:glue:region:account-id:catalog` .\n\nAmazon Data Firehose is in preview release and is subject to change." + }, "AWS::KinesisFirehose::DeliveryStream CloudWatchLoggingOptions": { "Enabled": "Indicates whether CloudWatch Logs logging is enabled.", "LogGroupName": "The name of the CloudWatch Logs log group that contains the log stream that Kinesis Data Firehose will use.\n\nConditional. If you enable logging, you must specify this property.", @@ -21986,6 +22415,12 @@ "HiveJsonSerDe": "The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.", "OpenXJsonSerDe": "The OpenX SerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe." }, + "AWS::KinesisFirehose::DeliveryStream DestinationTableConfiguration": { + "DestinationDatabaseName": "The name of the Apache Iceberg database.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "DestinationTableName": "Specifies the name of the Apache Iceberg Table.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "S3ErrorOutputPrefix": "The table-specific S3 error output prefix. All the errors that occurred while delivering to this table will be prefixed with this value in the S3 destination.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "UniqueKeys": "A list of unique keys for a given Apache Iceberg table. Firehose will use these for running Create, Update, or Delete operations on the given Iceberg table.\n\nAmazon Data Firehose is in preview release and is subject to change." + }, "AWS::KinesisFirehose::DeliveryStream DocumentIdOptions": { "DefaultDocumentIdFormat": "When the `FIREHOSE_DEFAULT` option is chosen, Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs.\n\nWhen the `NO_DOCUMENT_ID` option is chosen, Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume less resources in the Amazon OpenSearch Service domain, resulting in improved performance."
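A minimal sketch of how `CatalogConfiguration` and a `DestinationTableConfiguration` entry might sit together inside the `IcebergDestinationConfiguration` shown below; the ARN, database, table, prefix, and key names are hypothetical:

    {
      "CatalogConfiguration": {
        "CatalogArn": "arn:aws:glue:us-east-1:111122223333:catalog"
      },
      "DestinationTableConfigurationList": [
        {
          "DestinationDatabaseName": "example_db",
          "DestinationTableName": "example_table",
          "S3ErrorOutputPrefix": "iceberg-errors/",
          "UniqueKeys": ["example_key"]
        }
      ]
    }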
}, @@ -22065,6 +22500,17 @@ "CommonAttributes": "Describes the metadata sent to the HTTP endpoint destination.", "ContentEncoding": "Kinesis Data Firehose uses the content encoding to compress the body of a request before sending the request to the destination. For more information, see Content-Encoding in MDN Web Docs, the official Mozilla documentation." }, + "AWS::KinesisFirehose::DeliveryStream IcebergDestinationConfiguration": { + "BufferingHints": "", + "CatalogConfiguration": "Configuration describing where the destination Apache Iceberg Tables are persisted.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "CloudWatchLoggingOptions": "", + "DestinationTableConfigurationList": "Provides a list of `DestinationTableConfigurations` which Firehose uses to deliver data to Apache Iceberg Tables. Firehose will write data with insert if table-specific configuration is not provided here.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "ProcessingConfiguration": "", + "RetryOptions": "", + "RoleARN": "The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling Apache Iceberg Tables.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "S3Configuration": "", + "s3BackupMode": "Describes how Firehose will back up records. Currently, S3 backup only supports `FailedDataOnly` for preview.\n\nAmazon Data Firehose is in preview release and is subject to change." + }, "AWS::KinesisFirehose::DeliveryStream InputFormatConfiguration": { "Deserializer": "Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. If both are non-null, the server rejects the request." }, @@ -22167,8 +22613,13 @@ "OrcSerDe": "A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see [Apache ORC](https://docs.aws.amazon.com/https://orc.apache.org/docs/) .", "ParquetSerDe": "A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see [Apache Parquet](https://docs.aws.amazon.com/https://parquet.apache.org/documentation/latest/) ." }, + "AWS::KinesisFirehose::DeliveryStream SnowflakeBufferingHints": { + "IntervalInSeconds": "Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 0.", + "SizeInMBs": "Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 1." + }, "AWS::KinesisFirehose::DeliveryStream SnowflakeDestinationConfiguration": { "AccountUrl": "URL for accessing your Snowflake account. This URL must include your [account identifier](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/admin-account-identifier) . Note that the protocol (https://) and port number are optional.", + "BufferingHints": "Describes the buffering to perform before delivering data to the Snowflake destination. If you do not specify any value, Firehose uses the default values.", "CloudWatchLoggingOptions": "", "ContentColumnName": "The name of the record content column", "DataLoadingOption": "Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.", @@ -22176,7 +22627,7 @@ "KeyPassphrase": "Passphrase to decrypt the private key when the key is encrypted.
For information, see [Using Key Pair Authentication & Key Rotation](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) .", "MetaDataColumnName": "The name of the record metadata column", "PrivateKey": "The private key used to encrypt your Snowflake client. For information, see [Using Key Pair Authentication & Key Rotation](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) .", - "ProcessingConfiguration": "", + "ProcessingConfiguration": "Specifies the data processing configuration for the Snowflake destination.", "RetryOptions": "The time period where Firehose will retry sending data to the chosen HTTP endpoint.", "RoleARN": "The Amazon Resource Name (ARN) of the Snowflake role", "S3BackupMode": "Choose an S3 backup mode", @@ -24142,7 +24593,7 @@ }, "AWS::MWAA::Environment": { "AirflowConfigurationOptions": "A list of key-value pairs containing the Airflow configuration options for your environment. For example, `core.default_timezone: utc` . To learn more, see [Apache Airflow configuration options](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html) .", - "AirflowVersion": "The version of Apache Airflow to use for the environment. If no value is specified, defaults to the latest version.\n\nIf you specify a newer version number for an existing environment, the version update requires some service interruption before taking effect.\n\n*Allowed Values* : `1.10.12` | `2.0.2` | `2.2.2` | `2.4.3` | `2.5.1` | `2.6.3` | `2.7.2` (latest)", + "AirflowVersion": "The version of Apache Airflow to use for the environment. If no value is specified, defaults to the latest version.\n\nIf you specify a newer version number for an existing environment, the version update requires some service interruption before taking effect.\n\n*Allowed Values* : `1.10.12` | `2.0.2` | `2.2.2` | `2.4.3` | `2.5.1` | `2.6.3` | `2.7.2` | `2.8.1` | `2.9.2` (latest)", "DagS3Path": "The relative path to the DAGs folder on your Amazon S3 bucket. For example, `dags` . To learn more, see [Adding or updating DAGs](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-folder.html) .", "EndpointManagement": "Defines whether the VPC endpoints configured for the environment are created, and managed, by the customer or by Amazon MWAA. If set to `SERVICE` , Amazon MWAA will create and manage the required VPC endpoints in your VPC. If set to `CUSTOMER` , you must create, and manage, the VPC endpoints in your VPC.", "EnvironmentClass": "The environment class type. Valid values: `mw1.small` , `mw1.medium` , `mw1.large` . To learn more, see [Amazon MWAA environment class](https://docs.aws.amazon.com/mwaa/latest/userguide/environment-class.html) .", @@ -24521,6 +24972,7 @@ "MediaStreamOutputConfigurations": "The definition for each media stream that is associated with the output.", "MinLatency": "The minimum latency in milliseconds for SRT-based streams. In streams that use the SRT protocol, this value that you set on your MediaConnect source or output represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender\u2019s minimum latency and the receiver\u2019s minimum latency.", "Name": "The name of the output.
This value must be unique within the current flow.", + "OutputStatus": "", "Port": "The port to use when MediaConnect distributes content to the output.", "Protocol": "The protocol to use for the output.", "RemoteId": "The identifier that is assigned to the Zixi receiver. This parameter applies only to outputs that use Zixi pull.", @@ -26039,6 +26491,7 @@ "ChannelGroupName": "The name of the channel group associated with the channel configuration.", "ChannelName": "The name of the channel.", "Description": "The description of the channel.", + "InputType": "", "Tags": "The tags associated with the channel." }, "AWS::MediaPackageV2::Channel IngestEndpoint": { @@ -26069,6 +26522,7 @@ "ContainerType": "The container type associated with the origin endpoint configuration.", "DashManifests": "A DASH manifest configuration.", "Description": "The description associated with the origin endpoint.", + "ForceEndpointErrorConfiguration": "", "HlsManifests": "The HLS manfiests associated with the origin endpoint configuration.", "LowLatencyHlsManifests": "The low-latency HLS (LL-HLS) manifests associated with the origin endpoint.", "OriginEndpointName": "The name of the origin endpoint associated with the origin endpoint configuration.", @@ -26113,6 +26567,9 @@ "Start": "Optionally specify the start time for all of your manifest egress requests. When you include start time, note that you cannot use start time query parameters for this manifest's endpoint URL.", "TimeDelaySeconds": "Optionally specify the time delay for all of your manifest egress requests. Enter a value that is smaller than your endpoint's startover window. When you include time delay, note that you cannot use time delay query parameters for this manifest's endpoint URL." }, + "AWS::MediaPackageV2::OriginEndpoint ForceEndpointErrorConfiguration": { + "EndpointErrorConditions": "" + }, "AWS::MediaPackageV2::OriginEndpoint HlsManifestConfiguration": { "ChildManifestName": "The name of the child manifest associated with the HLS manifest configuration.", "FilterConfiguration": "", @@ -26630,7 +27087,7 @@ "LoggingConfiguration": "Defines how AWS Network Firewall performs logging for a `Firewall` ." }, "AWS::NetworkFirewall::LoggingConfiguration LogDestinationConfig": { - "LogDestination": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` . The following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . 
The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", + "LogDestination": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` .\n\nThe following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", "LogDestinationType": "The type of storage destination to send these logs to. You can send logs to an Amazon S3 bucket, a CloudWatch log group, or a Firehose delivery stream.", "LogType": "The type of log to send. Alert logs report traffic that matches a stateful rule with an action setting that sends an alert log message. Flow logs are standard network traffic flow logs." }, @@ -27140,6 +27597,10 @@ "Key": "The tag key. Tag keys must be unique for the pipeline to which they are attached.", "Value": "The value assigned to the corresponding tag key. Tag values can be null and don't have to be unique in a tag set. For example, you can have a key value pair in a tag set of `project : Trinity` and `cost-center : Trinity`" }, + "AWS::OSIS::Pipeline VpcAttachmentOptions": { + "AttachToVpc": "Whether a VPC is attached to the pipeline.", + "CidrBlock": "The CIDR block to be reserved for OpenSearch Ingestion to create elastic network interfaces (ENIs)." + }, "AWS::OSIS::Pipeline VpcEndpoint": { "VpcEndpointId": "The unique identifier of the endpoint.", "VpcId": "The ID for your VPC. AWS PrivateLink generates this value when you create a VPC.", @@ -27148,6 +27609,7 @@ "AWS::OSIS::Pipeline VpcOptions": { "SecurityGroupIds": "A list of security groups associated with the VPC endpoint.", "SubnetIds": "A list of subnet IDs associated with the VPC endpoint.", + "VpcAttachmentOptions": "Options for attaching a VPC to a pipeline.", "VpcEndpointManagement": "Defines whether you or Amazon OpenSearch Ingestion service create and manage the VPC endpoint configured for the pipeline." }, "AWS::Oam::Link": { @@ -28937,7 +29399,7 @@ "SaslScram512Auth": "The ARN of the Secrets Manager secret." }, "AWS::Pipes::Pipe SelfManagedKafkaAccessConfigurationVpc": { - "SecurityGroup": "Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.", + "SecurityGroup": "Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups.", "Subnets": "Specifies the subnets associated with the stream. These subnets must all be in the same VPC. You can specify as many as 16 subnets." 
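To make the `LogDestination` key:value mappings described above concrete, here is one possible `LogDestinationConfig` entry for S3 alert logging; it reuses the documentation's own placeholder bucket and prefix, so no new names are introduced:

    {
      "LogType": "ALERT",
      "LogDestinationType": "S3",
      "LogDestination": {
        "bucketName": "DOC-EXAMPLE-BUCKET",
        "prefix": "alerts"
      }
    }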
}, "AWS::Pipes::Pipe SingleMeasureMapping": { @@ -37774,7 +38236,7 @@ "AssociatedRoles": "Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the DB cluster. IAM roles that are associated with a DB cluster grant permission for the DB cluster to access other Amazon Web Services on your behalf.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "AutoMinorVersionUpgrade": "Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically.\n\nValid for Cluster Type: Multi-AZ DB clusters only", "AvailabilityZones": "A list of Availability Zones (AZs) where instances in the DB cluster can be created. For information on AWS Regions and Availability Zones, see [Choosing the Regions and Availability Zones](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters only", - "BacktrackWindow": "The target backtrack window, in seconds. To disable backtracking, set this value to 0.\n\n> Currently, Backtrack is only supported for Aurora MySQL DB clusters. \n\nDefault: 0\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).\n\nValid for: Aurora MySQL DB clusters only", + "BacktrackWindow": "The target backtrack window, in seconds. To disable backtracking, set this value to `0` .\n\nValid for Cluster Type: Aurora MySQL DB clusters only\n\nDefault: `0`\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).", "BackupRetentionPeriod": "The number of days for which automated backups are retained.\n\nDefault: 1\n\nConstraints:\n\n- Must be a value from 1 to 35\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "CopyTagsToSnapshot": "A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "DBClusterIdentifier": "The DB cluster identifier. This parameter is stored as a lowercase string.\n\nConstraints:\n\n- Must contain from 1 to 63 letters, numbers, or hyphens.\n- First character must be a letter.\n- Can't end with a hyphen or contain two consecutive hyphens.\n\nExample: `my-cluster1`\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", @@ -37812,7 +38274,7 @@ "Port": "The port number on which the DB instances in the DB cluster accept connections.\n\nDefault:\n\n- When `EngineMode` is `provisioned` , `3306` (for both Aurora MySQL and Aurora PostgreSQL)\n- When `EngineMode` is `serverless` :\n\n- `3306` when `Engine` is `aurora` or `aurora-mysql`\n- `5432` when `Engine` is `aurora-postgresql`\n\n> The `No interruption` on update behavior only applies to DB clusters. If you are updating a DB instance, see [Port](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html#cfn-rds-dbinstance-port) for the AWS::RDS::DBInstance resource. \n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "PreferredBackupWindow": "The daily time range during which automated backups are created. 
For more information, see [Backup Window](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.Backups.BackupWindow) in the *Amazon Aurora User Guide.*\n\nConstraints:\n\n- Must be in the format `hh24:mi-hh24:mi` .\n- Must be in Universal Coordinated Time (UTC).\n- Must not conflict with the preferred maintenance window.\n- Must be at least 30 minutes.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "PreferredMaintenanceWindow": "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n\nFormat: `ddd:hh24:mi-ddd:hh24:mi`\n\nThe default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. To see the time blocks available, see [Adjusting the Preferred DB Cluster Maintenance Window](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) in the *Amazon Aurora User Guide.*\n\nValid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.\n\nConstraints: Minimum 30-minute window.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", - "PubliclyAccessible": "Specifies whether the DB cluster is publicly accessible.\n\nWhen the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.\n\nWhen the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nDefault: The default behavior varies depending on whether `DBSubnetGroupName` is specified.\n\nIf `DBSubnetGroupName` isn't specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the default VPC in the target Region doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.\n\nIf `DBSubnetGroupName` is specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the subnets are part of a VPC that doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.", + "PubliclyAccessible": "Specifies whether the DB cluster is publicly accessible.\n\nWhen the DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB cluster, the endpoint resolves to the private IP address. Access to the DB cluster is ultimately controlled by the security group it uses. 
That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.\n\nWhen the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nDefault: The default behavior varies depending on whether `DBSubnetGroupName` is specified.\n\nIf `DBSubnetGroupName` isn't specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the default VPC in the target Region doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.\n\nIf `DBSubnetGroupName` is specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the subnets are part of a VPC that doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.", "ReadEndpoint": "This data type represents the information you need to connect to an Amazon RDS DB instance. This data type is used as a response element in the following actions:\n\n- `CreateDBInstance`\n- `DescribeDBInstances`\n- `DeleteDBInstance`\n\nFor the data structure that represents Amazon Aurora DB cluster endpoints, see `DBClusterEndpoint` .", "ReplicationSourceIdentifier": "The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read replica.\n\nValid for: Aurora DB clusters only", "RestoreToTime": "The date and time to restore the DB cluster to.\n\nValid Values: Value must be a time in Universal Coordinated Time (UTC) format\n\nConstraints:\n\n- Must be before the latest restorable time for the DB instance\n- Must be specified if `UseLatestRestorableTime` parameter isn't provided\n- Can't be specified if the `UseLatestRestorableTime` parameter is enabled\n- Can't be specified if the `RestoreType` parameter is `copy-on-write`\n\nThis property must be used with `SourceDBClusterIdentifier` property. The resulting cluster will have the identifier that matches the value of the `DBclusterIdentifier` property.\n\nExample: `2015-03-07T23:45:00Z`\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", @@ -37824,7 +38286,7 @@ "SourceRegion": "The AWS Region which contains the source DB cluster when replicating a DB cluster. For example, `us-east-1` .\n\nValid for: Aurora DB clusters only", "StorageEncrypted": "Indicates whether the DB cluster is encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption.\n\nIf you specify the `SourceDBClusterIdentifier` property, don't specify this property. The value is inherited from the source DB cluster, and if the DB cluster is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify the `SnapshotIdentifier` and the specified snapshot is encrypted, don't specify this property. The value is inherited from the snapshot, and the specified `KmsKeyId` property is used.\n\nIf you specify the `SnapshotIdentifier` and the specified snapshot isn't encrypted, you can use this property to specify that the restored DB cluster is encrypted. Specify the `KmsKeyId` property for the KMS key to use for encryption. 
If you don't want the restored DB cluster to be encrypted, then don't set this property or set it to `false` .\n\n> If you specify both the `StorageEncrypted` and `SnapshotIdentifier` properties without specifying the `KmsKeyId` property, then the restored DB cluster inherits the encryption settings from the DB snapshot that provide. \n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "StorageType": "The storage type to associate with the DB cluster.\n\nFor information on storage types for Aurora DB clusters, see [Storage configurations for Amazon Aurora DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type) . For information on storage types for Multi-AZ DB clusters, see [Settings for creating Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings) .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nWhen specified for a Multi-AZ DB cluster, a value for the `Iops` parameter is required.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values:\n\n- Aurora DB clusters - `aurora | aurora-iopt1`\n- Multi-AZ DB clusters - `io1 | io2 | gp3`\n\nDefault:\n\n- Aurora DB clusters - `aurora`\n- Multi-AZ DB clusters - `io1`\n\n> When you create an Aurora DB cluster with the storage type set to `aurora-iopt1` , the storage type is returned in the response. The storage type isn't returned when you set it to `aurora` .", - "Tags": "An optional array of key-value pairs to apply to this DB cluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", + "Tags": "Tags to assign to the DB cluster.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", "UseLatestRestorableTime": "A value that indicates whether to restore the DB cluster to the latest restorable backup time. By default, the DB cluster is not restored to the latest restorable backup time.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "VpcSecurityGroupIds": "A list of EC2 VPC security groups to associate with this DB cluster.\n\nIf you plan to update the resource, don't specify VPC security groups in a shared VPC.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" }, @@ -37838,7 +38300,7 @@ }, "AWS::RDS::DBCluster MasterUserSecret": { "KmsKeyId": "The AWS KMS key identifier that is used to encrypt the secret.", - "SecretArn": "The Amazon Resource Name (ARN) of the secret." + "SecretArn": "The Amazon Resource Name (ARN) of the secret. This parameter is a return value that you can retrieve using the `Fn::GetAtt` intrinsic function. For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html#aws-resource-rds-dbcluster-return-values) ." }, "AWS::RDS::DBCluster ReadEndpoint": { "Address": "The host address of the reader endpoint." @@ -37860,11 +38322,11 @@ "Value": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\")." 
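Given the tag key and value constraints spelled out above, a `Tags` array for a DB cluster might look like the following sketch; the key-value pairs reuse the `project : Trinity` and `cost-center : Trinity` examples that appear earlier in this schema:

    {
      "Tags": [
        { "Key": "project", "Value": "Trinity" },
        { "Key": "cost-center", "Value": "Trinity" }
      ]
    }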
}, "AWS::RDS::DBClusterParameterGroup": { - "DBClusterParameterGroupName": "The name of the DB cluster parameter group.\n\nConstraints:\n\n- Must not match the name of an existing DB cluster parameter group.\n\nIf you don't specify a value for `DBClusterParameterGroupName` property, a name is automatically created for the DB cluster parameter group.\n\n> This value is stored as a lowercase string.", - "Description": "A friendly description for this DB cluster parameter group.", - "Family": "The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a DB engine and engine version compatible with that DB cluster parameter group family.\n\n> The DB cluster parameter group family can't be changed when updating a DB cluster parameter group. \n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBClusterParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBClusterParameterGroup.html)` .", + "DBClusterParameterGroupName": "The name of the DB cluster parameter group.\n\nConstraints:\n\n- Must not match the name of an existing DB cluster parameter group.\n\n> This value is stored as a lowercase string.", + "Description": "The description for the DB cluster parameter group.", + "Family": "The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.\n\n*Aurora MySQL*\n\nExample: `aurora-mysql5.7` , `aurora-mysql8.0`\n\n*Aurora PostgreSQL*\n\nExample: `aurora-postgresql14`\n\n*RDS for MySQL*\n\nExample: `mysql8.0`\n\n*RDS for PostgreSQL*\n\nExample: `postgres13`\n\nTo list all of the available parameter group families for a DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine `\n\nFor example, to list all of the available parameter group families for the Aurora PostgreSQL DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine aurora-postgresql`\n\n> The output contains duplicates. \n\nThe following are the valid DB engine values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `mysql`\n- `postgres`", "Parameters": "Provides a list of parameters for the DB cluster parameter group.", - "Tags": "An optional array of key-value pairs to apply to this DB cluster parameter group." + "Tags": "Tags to assign to the DB cluster parameter group." }, "AWS::RDS::DBClusterParameterGroup Tag": { "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", @@ -37876,7 +38338,7 @@ "AssociatedRoles": "The AWS Identity and Access Management (IAM) roles associated with the DB instance.\n\n*Amazon Aurora*\n\nNot applicable. 
The associated roles are managed by the DB cluster.", "AutoMinorVersionUpgrade": "A value that indicates whether minor engine upgrades are applied automatically to the DB instance during the maintenance window. By default, minor engine upgrades are applied automatically.", "AutomaticBackupReplicationKmsKeyId": "The AWS KMS key identifier for encryption of the replicated automated backups. The KMS key ID is the Amazon Resource Name (ARN) for the KMS encryption key in the destination AWS Region , for example, `arn:aws:kms:us-east-1:123456789012:key/AKIAIOSFODNN7EXAMPLE` .", - "AutomaticBackupReplicationRegion": "", + "AutomaticBackupReplicationRegion": "The AWS Region associated with the automated backup.", "AvailabilityZone": "The Availability Zone (AZ) where the database will be created. For information on AWS Regions and Availability Zones, see [Regions and Availability Zones](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html) .\n\nFor Amazon Aurora, each Aurora DB cluster hosts copies of its storage in three separate Availability Zones. Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one.\n\nDefault: A random, system-chosen Availability Zone in the endpoint's AWS Region .\n\nConstraints:\n\n- The `AvailabilityZone` parameter can't be specified if the DB instance is a Multi-AZ deployment.\n- The specified Availability Zone must be in the same AWS Region as the current endpoint.\n\nExample: `us-east-1d`", "BackupRetentionPeriod": "The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.\n\n*Amazon Aurora*\n\nNot applicable. The retention period for automated backups is managed by the DB cluster.\n\nDefault: 1\n\nConstraints:\n\n- Must be a value from 0 to 35\n- Can't be set to 0 if the DB instance is a source to read replicas", "CACertificateIdentifier": "The identifier of the CA certificate for this DB instance.\n\nFor more information, see [Using SSL/TLS to encrypt a connection to a DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html) in the *Amazon RDS User Guide* and [Using SSL/TLS to encrypt a connection to a DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL.html) in the *Amazon Aurora User Guide* .", @@ -37885,19 +38347,19 @@ "CharacterSetName": "For supported engines, indicates that the DB instance should be associated with the specified character set.\n\n*Amazon Aurora*\n\nNot applicable. The character set is managed by the DB cluster. For more information, see [AWS::RDS::DBCluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html) .", "CopyTagsToSnapshot": "Specifies whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.\n\nThis setting doesn't apply to Amazon Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. 
Setting this value for an Aurora DB instance has no effect on the DB cluster setting.", "CustomIAMInstanceProfile": "The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance.\n\nThis setting is required for RDS Custom.\n\nConstraints:\n\n- The profile must exist in your account.\n- The profile must have an IAM role that Amazon EC2 has permissions to assume.\n- The instance profile name and the associated IAM role name must start with the prefix `AWSRDSCustom` .\n\nFor the list of permissions required for the IAM role, see [Configure IAM and your VPC](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/custom-setup-orcl.html#custom-setup-orcl.iam-vpc) in the *Amazon RDS User Guide* .", - "DBClusterIdentifier": "The identifier of the DB cluster that the instance will belong to.", + "DBClusterIdentifier": "The identifier of the DB cluster that this DB instance will belong to.\n\nThis setting doesn't apply to RDS Custom DB instances.", "DBClusterSnapshotIdentifier": "The identifier for the Multi-AZ DB cluster snapshot to restore from.\n\nFor more information on Multi-AZ DB clusters, see [Multi-AZ DB cluster deployments](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Must match the identifier of an existing Multi-AZ DB cluster snapshot.\n- Can't be specified when `DBSnapshotIdentifier` is specified.\n- Must be specified when `DBSnapshotIdentifier` isn't specified.\n- If you are restoring from a shared manual Multi-AZ DB cluster snapshot, the `DBClusterSnapshotIdentifier` must be the ARN of the shared snapshot.\n- Can't be the identifier of an Aurora DB cluster snapshot.", "DBInstanceClass": "The compute and memory capacity of the DB instance, for example `db.m5.large` . Not all DB instance classes are available in all AWS Regions , or for all database engines. For the full list of DB instance classes, and availability for your engine, see [DB instance classes](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the *Amazon RDS User Guide* or [Aurora DB instance classes](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html) in the *Amazon Aurora User Guide* .", "DBInstanceIdentifier": "A name for the DB instance. If you specify a name, AWS CloudFormation converts it to lowercase. If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the DB instance. For more information, see [Name Type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) .\n\nFor information about constraints that apply to DB instance identifiers, see [Naming constraints in Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Limits.html#RDS_Limits.Constraints) in the *Amazon RDS User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.", "DBName": "The meaning of this parameter differs according to the database engine you use.\n\n> If you specify the `[DBSnapshotIdentifier](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html#cfn-rds-dbinstance-dbsnapshotidentifier)` property, this property only applies to RDS for Oracle. \n\n*Amazon Aurora*\n\nNot applicable. 
The database name is managed by the DB cluster.\n\n*Db2*\n\nThe name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.\n\nConstraints:\n\n- Must contain 1 to 64 letters or numbers.\n- Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).\n- Can't be a word reserved by the specified database engine.\n\n*MySQL*\n\nThe name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.\n\nConstraints:\n\n- Must contain 1 to 64 letters or numbers.\n- Can't be a word reserved by the specified database engine\n\n*MariaDB*\n\nThe name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.\n\nConstraints:\n\n- Must contain 1 to 64 letters or numbers.\n- Can't be a word reserved by the specified database engine\n\n*PostgreSQL*\n\nThe name of the database to create when the DB instance is created. If this parameter is not specified, the default `postgres` database is created in the DB instance.\n\nConstraints:\n\n- Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).\n- Must contain 1 to 63 characters.\n- Can't be a word reserved by the specified database engine\n\n*Oracle*\n\nThe Oracle System ID (SID) of the created DB instance. If you specify `null` , the default value `ORCL` is used. You can't specify the string NULL, or any other reserved word, for `DBName` .\n\nDefault: `ORCL`\n\nConstraints:\n\n- Can't be longer than 8 characters\n\n*SQL Server*\n\nNot applicable. Must be null.", "DBParameterGroupName": "The name of an existing DB parameter group or a reference to an [AWS::RDS::DBParameterGroup](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-dbparametergroup.html) resource created in the template.\n\nTo list all of the available DB parameter group names, use the following command:\n\n`aws rds describe-db-parameter-groups --query \"DBParameterGroups[].DBParameterGroupName\" --output text`\n\n> If any of the data members of the referenced parameter group are changed during an update, the DB instance might need to be restarted, which causes some interruption. If the parameter group contains static parameters, whether they were changed or not, an update triggers a reboot. \n\nIf you don't specify a value for `DBParameterGroupName` property, the default DB parameter group for the specified engine and engine version is used.", "DBSecurityGroups": "A list of the DB security groups to assign to the DB instance. The list can include both the name of existing DB security groups or references to AWS::RDS::DBSecurityGroup resources created in the template.\n\nIf you set DBSecurityGroups, you must not set VPCSecurityGroups, and vice versa. Also, note that the DBSecurityGroups property exists only for backwards compatibility with older regions and is no longer recommended for providing security information to an RDS DB instance. 
Instead, use VPCSecurityGroups.\n\n> If you specify this property, AWS CloudFormation sends only the following properties (if specified) to Amazon RDS during create operations:\n> \n> - `AllocatedStorage`\n> - `AutoMinorVersionUpgrade`\n> - `AvailabilityZone`\n> - `BackupRetentionPeriod`\n> - `CharacterSetName`\n> - `DBInstanceClass`\n> - `DBName`\n> - `DBParameterGroupName`\n> - `DBSecurityGroups`\n> - `DBSubnetGroupName`\n> - `Engine`\n> - `EngineVersion`\n> - `Iops`\n> - `LicenseModel`\n> - `MasterUsername`\n> - `MasterUserPassword`\n> - `MultiAZ`\n> - `OptionGroupName`\n> - `PreferredBackupWindow`\n> - `PreferredMaintenanceWindow`\n> \n> All other properties are ignored. Specify a virtual private cloud (VPC) security group if you want to submit other properties, such as `StorageType` , `StorageEncrypted` , or `KmsKeyId` . If you're already using the `DBSecurityGroups` property, you can't use these other properties by updating your DB instance to use a VPC security group. You must recreate the DB instance.", - "DBSnapshotIdentifier": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n\nBy specifying this property, you can create a DB instance from the specified DB snapshot. If the `DBSnapshotIdentifier` property is an empty string or the `AWS::RDS::DBInstance` declaration has no `DBSnapshotIdentifier` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n\nSome DB instance properties aren't valid when you restore from a snapshot, such as the `MasterUsername` and `MasterUserPassword` properties. For information about the properties that you can specify, see the `RestoreDBInstanceFromDBSnapshot` action in the *Amazon RDS API Reference* .\n\nAfter you restore a DB instance with a `DBSnapshotIdentifier` property, you must specify the same `DBSnapshotIdentifier` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the `DBSnapshotIdentifier` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified `DBSnapshotIdentifier` property, and the original DB instance is deleted.\n\nIf you specify the `DBSnapshotIdentifier` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n\n- `CharacterSetName`\n- `DBClusterIdentifier`\n- `DBName`\n- `DeleteAutomatedBackups`\n- `EnablePerformanceInsights`\n- `KmsKeyId`\n- `MasterUsername`\n- `MasterUserPassword`\n- `PerformanceInsightsKMSKeyId`\n- `PerformanceInsightsRetentionPeriod`\n- `PromotionTier`\n- `SourceDBInstanceIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an encrypted snapshot)\n- `Timezone`\n\n*Amazon Aurora*\n\nNot applicable. Snapshot restore is managed by the DB cluster.", - "DBSubnetGroupName": "A DB subnet group to associate with the DB instance. 
If you update this value, the new subnet group must be a subnet group in a new VPC.\n\nIf there's no DB subnet group, then the DB instance isn't a VPC DB instance.\n\nFor more information about using Amazon RDS in a VPC, see [Using Amazon RDS with Amazon Virtual Private Cloud (VPC)](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. The DB subnet group is managed by the DB cluster. If specified, the setting must match the DB cluster setting.", + "DBSnapshotIdentifier": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n\nBy specifying this property, you can create a DB instance from the specified DB snapshot. If the `DBSnapshotIdentifier` property is an empty string or the `AWS::RDS::DBInstance` declaration has no `DBSnapshotIdentifier` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n\nSome DB instance properties aren't valid when you restore from a snapshot, such as the `MasterUsername` and `MasterUserPassword` properties. For information about the properties that you can specify, see the `RestoreDBInstanceFromDBSnapshot` action in the *Amazon RDS API Reference* .\n\nAfter you restore a DB instance with a `DBSnapshotIdentifier` property, you must specify the same `DBSnapshotIdentifier` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the `DBSnapshotIdentifier` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified `DBSnapshotIdentifier` property, and the original DB instance is deleted.\n\nIf you specify the `DBSnapshotIdentifier` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n\n- `CharacterSetName`\n- `DBClusterIdentifier`\n- `DBName`\n- `DeleteAutomatedBackups`\n- `KmsKeyId`\n- `MasterUsername`\n- `MasterUserPassword`\n- `PerformanceInsightsKMSKeyId`\n- `PerformanceInsightsRetentionPeriod`\n- `PromotionTier`\n- `SourceDBInstanceIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an encrypted snapshot)\n- `Timezone`\n\n*Amazon Aurora*\n\nNot applicable. Snapshot restore is managed by the DB cluster.", + "DBSubnetGroupName": "A DB subnet group to associate with the DB instance. If you update this value, the new subnet group must be a subnet group in a new VPC.\n\nIf there's no DB subnet group, then the DB instance isn't a VPC DB instance.\n\nFor more information about using Amazon RDS in a VPC, see [Amazon VPC and Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\nThis setting doesn't apply to Amazon Aurora DB instances. The DB subnet group is managed by the DB cluster. 
If specified, the setting must match the DB cluster setting.", "DBSystemId": "The Oracle system identifier (SID), which is the name of the Oracle database instance that manages your database files. In this context, the term \"Oracle database instance\" refers exclusively to the system global area (SGA) and Oracle background processes. If you don't specify a SID, the value defaults to `RDSCDB` . The Oracle SID is also the name of your CDB.", "DedicatedLogVolume": "Indicates whether the DB instance has a dedicated log volume (DLV) enabled.", "DeleteAutomatedBackups": "A value that indicates whether to remove automated backups immediately after the DB instance is deleted. This parameter isn't case-sensitive. The default is to remove automated backups immediately after the DB instance is deleted.\n\n*Amazon Aurora*\n\nNot applicable. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. Manual DB cluster snapshots of the DB cluster are not deleted.", - "DeletionProtection": "A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. For more information, see [Deleting a DB Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html) .\n\n*Amazon Aurora*\n\nNot applicable. You can enable or disable deletion protection for the DB cluster. For more information, see `CreateDBCluster` . DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.", + "DeletionProtection": "Specifies whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see [Deleting a DB Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html) .\n\nThis setting doesn't apply to Amazon Aurora DB instances. You can enable or disable deletion protection for the DB cluster. For more information, see `CreateDBCluster` . DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.", "Domain": "The Active Directory directory ID to create the DB instance in. Currently, only Db2, MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.\n\nFor more information, see [Kerberos Authentication](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html) in the *Amazon RDS User Guide* .", "DomainAuthSecretArn": "The ARN for the Secrets Manager secret with the credentials for the user joining the domain.\n\nExample: `arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456`", "DomainDnsIps": "The IPv4 DNS IP addresses of your primary and secondary Active Directory domain controllers.\n\nConstraints:\n\n- Two IP addresses must be provided. 
If there isn't a secondary domain controller, use the IP address of the primary domain controller for both entries in the list.\n\nExample: `123.124.125.126,234.235.236.237`", @@ -37919,15 +38381,15 @@ "MasterUserSecret": "The secret managed by RDS in AWS Secrets Manager for the master user password.\n\nFor more information, see [Password management with AWS Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the *Amazon RDS User Guide.*", "MasterUsername": "The master user name for the DB instance.\n\n> If you specify the `SourceDBInstanceIdentifier` or `DBSnapshotIdentifier` property, don't specify this property. The value is inherited from the source DB instance or snapshot.\n> \n> When migrating a self-managed Db2 database, we recommend that you use the same master username as your self-managed Db2 instance name. \n\n*Amazon Aurora*\n\nNot applicable. The name for the master user is managed by the DB cluster.\n\n*RDS for Db2*\n\nConstraints:\n\n- Must be 1 to 16 letters or numbers.\n- First character must be a letter.\n- Can't be a reserved word for the chosen database engine.\n\n*RDS for MariaDB*\n\nConstraints:\n\n- Must be 1 to 16 letters or numbers.\n- Can't be a reserved word for the chosen database engine.\n\n*RDS for Microsoft SQL Server*\n\nConstraints:\n\n- Must be 1 to 128 letters or numbers.\n- First character must be a letter.\n- Can't be a reserved word for the chosen database engine.\n\n*RDS for MySQL*\n\nConstraints:\n\n- Must be 1 to 16 letters or numbers.\n- First character must be a letter.\n- Can't be a reserved word for the chosen database engine.\n\n*RDS for Oracle*\n\nConstraints:\n\n- Must be 1 to 30 letters or numbers.\n- First character must be a letter.\n- Can't be a reserved word for the chosen database engine.\n\n*RDS for PostgreSQL*\n\nConstraints:\n\n- Must be 1 to 63 letters or numbers.\n- First character must be a letter.\n- Can't be a reserved word for the chosen database engine.", "MaxAllocatedStorage": "The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.\n\nFor more information about this setting, including limitations that apply to it, see [Managing capacity automatically with Amazon RDS storage autoscaling](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIOPS.StorageTypes.html#USER_PIOPS.Autoscaling) in the *Amazon RDS User Guide* .\n\nThis setting doesn't apply to the following DB instances:\n\n- Amazon Aurora (Storage is managed by the DB cluster.)\n- RDS Custom", - "MonitoringInterval": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify 0. The default is 0.\n\nIf `MonitoringRoleArn` is specified, then you must set `MonitoringInterval` to a value other than 0.\n\nThis setting doesn't apply to RDS Custom.\n\nValid Values: `0, 1, 5, 10, 15, 30, 60`", + "MonitoringInterval": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify `0` .\n\nIf `MonitoringRoleArn` is specified, then you must set `MonitoringInterval` to a value other than `0` .\n\nThis setting doesn't apply to RDS Custom DB instances.\n\nValid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60`\n\nDefault: `0`", "MonitoringRoleArn": "The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. 
For example, `arn:aws:iam::123456789012:role/emaccess` . For information on creating a monitoring role, see [Setting Up and Enabling Enhanced Monitoring](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling) in the *Amazon RDS User Guide* .\n\nIf `MonitoringInterval` is set to a value other than `0` , then you must supply a `MonitoringRoleArn` value.\n\nThis setting doesn't apply to RDS Custom DB instances.", - "MultiAZ": "Specifies whether the database instance is a Multi-AZ DB instance deployment. You can't set the `AvailabilityZone` parameter if the `MultiAZ` parameter is set to true.\n\nFor more information, see [Multi-AZ deployments for high availability](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZ.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. Amazon Aurora storage is replicated across all of the Availability Zones and doesn't require the `MultiAZ` option to be set.", + "MultiAZ": "Specifies whether the DB instance is a Multi-AZ deployment. You can't set the `AvailabilityZone` parameter if the DB instance is a Multi-AZ deployment.\n\nThis setting doesn't apply to the following DB instances:\n\n- Amazon Aurora (DB instance Availability Zones (AZs) are managed by the DB cluster.)\n- RDS Custom", "NcharCharacterSetName": "The name of the NCHAR character set for the Oracle DB instance.\n\nThis setting doesn't apply to RDS Custom DB instances.", "NetworkType": "The network type of the DB instance.\n\nValid values:\n\n- `IPV4`\n- `DUAL`\n\nThe network type is determined by the `DBSubnetGroup` specified for the DB instance. A `DBSubnetGroup` can support only the IPv4 protocol or the IPv4 and IPv6 protocols ( `DUAL` ).\n\nFor more information, see [Working with a DB instance in a VPC](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) in the *Amazon RDS User Guide.*", "OptionGroupName": "Indicates that the DB instance should be associated with the specified option group.\n\nPermanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group. Also, that option group can't be removed from a DB instance once it is associated with a DB instance.", "PerformanceInsightsKMSKeyId": "The AWS KMS key identifier for encryption of Performance Insights data.\n\nThe KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.\n\nIf you do not specify a value for `PerformanceInsightsKMSKeyId` , then Amazon RDS uses your default KMS key. There is a default KMS key for your AWS account. Your AWS account has a different default KMS key for each AWS Region.\n\nFor information about enabling Performance Insights, see [EnablePerformanceInsights](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html#cfn-rds-dbinstance-enableperformanceinsights) .", "PerformanceInsightsRetentionPeriod": "The number of days to retain Performance Insights data.\n\nThis setting doesn't apply to RDS Custom DB instances.\n\nValid Values:\n\n- `7`\n- *month* * 31, where *month* is a number of months from 1-23. Examples: `93` (3 months * 31), `341` (11 months * 31), `589` (19 months * 31)\n- `731`\n\nDefault: `7` days\n\nIf you specify a retention period that isn't valid, such as `94` , Amazon RDS returns an error.", - "Port": "The port number on which the database accepts connections.\n\n*Amazon Aurora*\n\nNot applicable.
The port number is managed by the DB cluster.\n\n*Db2*\n\nDefault value: `50000`", + "Port": "The port number on which the database accepts connections.\n\nThis setting doesn't apply to Aurora DB instances. The port number is managed by the cluster.\n\nValid Values: `1150-65535`\n\nDefault:\n\n- RDS for Db2 - `50000`\n- RDS for MariaDB - `3306`\n- RDS for Microsoft SQL Server - `1433`\n- RDS for MySQL - `3306`\n- RDS for Oracle - `1521`\n- RDS for PostgreSQL - `5432`\n\nConstraints:\n\n- For RDS for Microsoft SQL Server, the value can't be `1234` , `1434` , `3260` , `3343` , `3389` , `47001` , or `49152-49156` .", "PreferredBackupWindow": "The daily time range during which automated backups are created if automated backups are enabled, using the `BackupRetentionPeriod` parameter. For more information, see [Backup Window](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html#USER_WorkingWithAutomatedBackups.BackupWindow) in the *Amazon RDS User Guide.*\n\nConstraints:\n\n- Must be in the format `hh24:mi-hh24:mi` .\n- Must be in Universal Coordinated Time (UTC).\n- Must not conflict with the preferred maintenance window.\n- Must be at least 30 minutes.\n\n*Amazon Aurora*\n\nNot applicable. The daily time range for creating automated backups is managed by the DB cluster.", "PreferredMaintenanceWindow": "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n\nFormat: `ddd:hh24:mi-ddd:hh24:mi`\n\nThe default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. To see the time blocks available, see [Adjusting the Preferred DB Instance Maintenance Window](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow) in the *Amazon RDS User Guide.*\n\n> This property applies when AWS CloudFormation initially creates the DB instance. If you use AWS CloudFormation to update the DB instance, those updates are applied immediately. \n\nConstraints: Minimum 30-minute window.", "ProcessorFeatures": "The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.\n\nThis setting doesn't apply to Amazon Aurora or RDS Custom DB instances.", @@ -37943,7 +38405,7 @@ "StorageEncrypted": "A value that indicates whether the DB instance is encrypted. By default, it isn't encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption.\n\nIf you specify the `SourceDBInstanceIdentifier` or `SourceDbiResourceId` property, don't specify this property. The value is inherited from the source DB instance, and if the DB instance is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify the `SourceDBInstanceAutomatedBackupsArn` property, don't specify this property. The value is inherited from the source DB instance automated backup.\n\nIf you specify `DBSnapshotIdentifier` property, don't specify this property. The value is inherited from the snapshot.\n\n*Amazon Aurora*\n\nNot applicable. The encryption for DB instances is managed by the DB cluster.", "StorageThroughput": "Specifies the storage throughput value for the DB instance. 
This setting applies only to the `gp3` storage type.\n\nThis setting doesn't apply to RDS Custom or Amazon Aurora.", "StorageType": "The storage type to associate with the DB instance.\n\nIf you specify `io1` , `io2` , or `gp3` , you must also include a value for the `Iops` parameter.\n\nThis setting doesn't apply to Amazon Aurora DB instances. Storage is managed by the DB cluster.\n\nValid Values: `gp2 | gp3 | io1 | io2 | standard`\n\nDefault: `io1` , if the `Iops` parameter is specified. Otherwise, `gp2` .", - "Tags": "An optional array of key-value pairs to apply to this DB instance.", + "Tags": "Tags to assign to the DB instance.", "Timezone": "The time zone of the DB instance. The time zone parameter is currently supported only by [RDS for Db2](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/db2-time-zone) and [RDS for SQL Server](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone) .", "UseDefaultProcessorFeatures": "Specifies whether the DB instance class of the DB instance uses its default processor features.\n\nThis setting doesn't apply to RDS Custom DB instances.", "UseLatestRestorableTime": "Specifies whether the DB instance is restored from the latest backup time. By default, the DB instance isn't restored from the latest backup time.\n\nConstraints:\n\n- Can't be specified if the `RestoreTime` parameter is provided.", @@ -37964,7 +38426,7 @@ }, "AWS::RDS::DBInstance MasterUserSecret": { "KmsKeyId": "The AWS KMS key identifier that is used to encrypt the secret.", - "SecretArn": "The Amazon Resource Name (ARN) of the secret." + "SecretArn": "The Amazon Resource Name (ARN) of the secret. This parameter is a return value that you can retrieve using the `Fn::GetAtt` intrinsic function. For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html#aws-resource-rds-dbinstance-return-values) ." }, "AWS::RDS::DBInstance ProcessorFeature": { "Name": "The name of the processor feature. Valid names are `coreCount` and `threadsPerCore` .", @@ -37977,9 +38439,9 @@ "AWS::RDS::DBParameterGroup": { "DBParameterGroupName": "The name of the DB parameter group.\n\nConstraints:\n\n- Must be 1 to 255 letters, numbers, or hyphens.\n- First character must be a letter\n- Can't end with a hyphen or contain two consecutive hyphens\n\nIf you don't specify a value for `DBParameterGroupName` property, a name is automatically created for the DB parameter group.\n\n> This value is stored as a lowercase string.", "Description": "Provides the customer-specified description for this DB parameter group.", - "Family": "The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a DB engine and engine version compatible with that DB parameter group family.\n\n> The DB parameter group family can't be changed when updating a DB parameter group. \n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBParameterGroup.html)` .", - "Parameters": "An array of parameter names and values for the parameter update. At least one parameter name and value must be supplied. 
Subsequent arguments are optional.\n\nRDS for Db2 requires you to bring your own Db2 license. You must enter your IBM customer ID ( `rds.ibm_customer_id` ) and site number ( `rds.ibm_site_id` ) before starting a Db2 instance.\n\nFor more information about DB parameters and DB parameter groups for Amazon RDS DB engines, see [Working with DB Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* .\n\nFor more information about DB cluster and DB instance parameters and parameter groups for Amazon Aurora DB engines, see [Working with DB Parameter Groups and DB Cluster Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", - "Tags": "An optional array of key-value pairs to apply to this DB parameter group.\n\n> Currently, this is the only property that supports drift detection." + "Family": "The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.\n\nTo list all of the available parameter group families for a DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine `\n\nFor example, to list all of the available parameter group families for the MySQL DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine mysql`\n\n> The output contains duplicates. \n\nThe following are the valid DB engine values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `db2-ae`\n- `db2-se`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", + "Parameters": "An array of parameter names and values for the parameter update. You must specify at least one parameter name and value.\n\nFor more information about parameter groups, see [Working with parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* , or [Working with parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", + "Tags": "Tags to assign to the DB parameter group." }, "AWS::RDS::DBParameterGroup Tag": { "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", @@ -37989,7 +38451,7 @@ "Auth": "The authorization mechanism that the proxy uses.", "DBProxyName": "The identifier for the proxy. This name must be unique for all proxies owned by your AWS account in the specified AWS Region . 
An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens.", "DebugLogging": "Specifies whether the proxy includes detailed information about SQL statements in its logs. This information helps you to debug issues involving SQL behavior or the performance and scalability of the proxy connections. The debug information includes the text of SQL statements that you submit through the proxy. Thus, only enable this setting when needed for debugging, and only when you have security measures in place to safeguard any sensitive information that appears in the logs.", - "EngineFamily": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL` . For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL` . For RDS for Microsoft SQL Server, specify `SQLSERVER` .\n\n*Valid Values* : `MYSQL` | `POSTGRESQL` | `SQLSERVER`", + "EngineFamily": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL` . For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL` . For RDS for Microsoft SQL Server, specify `SQLSERVER` .", "IdleClientTimeout": "The number of seconds that a connection to the proxy can be inactive before the proxy disconnects it. You can set this value higher or lower than the connection timeout limit for the associated database.", "RequireTLS": "Specifies whether Transport Layer Security (TLS) encryption is required for connections to the proxy. By enabling this setting, you can enforce encrypted TLS connections to the proxy.", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role that the proxy uses to access secrets in AWS Secrets Manager.", @@ -37998,30 +38460,30 @@ "VpcSubnetIds": "One or more VPC subnet IDs to associate with the new proxy." }, "AWS::RDS::DBProxy AuthFormat": { - "AuthScheme": "The type of authentication that the proxy uses for connections from the proxy to the underlying database.\n\nValid Values: `SECRETS`", + "AuthScheme": "The type of authentication that the proxy uses for connections from the proxy to the underlying database.", "ClientPasswordAuthType": "Specifies the details of authentication used by a proxy to log in as a specific database user.", "Description": "A user-specified description about the authentication used by a proxy to log in as a specific database user.", - "IAMAuth": "Whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. The `ENABLED` value is valid only for proxies with RDS for Microsoft SQL Server.\n\nValid Values: `ENABLED | DISABLED | REQUIRED`", + "IAMAuth": "A value that indicates whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. The `ENABLED` value is valid only for proxies with RDS for Microsoft SQL Server.", "SecretArn": "The Amazon Resource Name (ARN) representing the secret that the proxy uses to authenticate to the RDS DB instance or Aurora DB cluster. These secrets are stored within Amazon Secrets Manager." 
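Taken together, the `Auth` properties above describe a list of `AuthFormat` objects on the proxy. A minimal sketch of a proxy that authenticates with a Secrets Manager secret, using only properties named in the surrounding descriptions; the proxy name, ARNs, and subnet IDs are hypothetical placeholders:

```json
{
  "Resources": {
    "ExampleProxy": {
      "Type": "AWS::RDS::DBProxy",
      "Properties": {
        "DBProxyName": "example-proxy",
        "EngineFamily": "POSTGRESQL",
        "RequireTLS": true,
        "Auth": [
          {
            "AuthScheme": "SECRETS",
            "IAMAuth": "DISABLED",
            "SecretArn": "arn:aws:secretsmanager:us-east-1:123456789012:secret:example-db-secret"
          }
        ],
        "RoleArn": "arn:aws:iam::123456789012:role/example-proxy-secrets-role",
        "VpcSubnetIds": ["subnet-0123456789abcdef0", "subnet-0fedcba9876543210"]
      }
    }
  }
}
```

Per the `RoleArn` description above, the referenced role must be able to read the secret in AWS Secrets Manager.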
}, "AWS::RDS::DBProxy TagFormat": { - "Key": "A key is the required name of the tag. The string value can be 1-128 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").", - "Value": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\")." + "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", + "Value": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\")." }, "AWS::RDS::DBProxyEndpoint": { "DBProxyEndpointName": "The name of the DB proxy endpoint to create.", "DBProxyName": "The name of the DB proxy associated with the DB proxy endpoint that you create.", "Tags": "An optional set of key-value pairs to associate arbitrary data of your choosing with the proxy.", - "TargetRole": "A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.\n\nValid Values: `READ_WRITE | READ_ONLY`", + "TargetRole": "A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.", "VpcSecurityGroupIds": "The VPC security group IDs for the DB proxy endpoint that you create. You can specify a different set of security group IDs than for the original DB proxy. The default is the default security group for the VPC.", "VpcSubnetIds": "The VPC subnet IDs for the DB proxy endpoint that you create. You can specify a different set of subnet IDs than for the original DB proxy." }, "AWS::RDS::DBProxyEndpoint TagFormat": { - "Key": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").", - "Value": "Metadata assigned to a DB instance consisting of a key-value pair." + "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", + "Value": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\")." 
}, "AWS::RDS::DBProxyTargetGroup": { - "ConnectionPoolConfigurationInfo": "Settings that control the size and behavior of the connection pool associated with a `DBProxyTargetGroup` .", + "ConnectionPoolConfigurationInfo": "Displays the settings that control the size and behavior of the connection pool associated with a `DBProxyTarget` .", "DBClusterIdentifiers": "One or more DB cluster identifiers.", "DBInstanceIdentifiers": "One or more DB instance identifiers.", "DBProxyName": "The identifier of the `DBProxy` that is associated with the `DBProxyTargetGroup` .", @@ -38036,9 +38498,9 @@ }, "AWS::RDS::DBSecurityGroup": { "DBSecurityGroupIngress": "Ingress rules to be applied to the DB security group.", - "EC2VpcId": "The identifier of an Amazon VPC. This property indicates the VPC that this DB security group belongs to.\n\n> The `EC2VpcId` property is for backward compatibility with older regions, and is no longer recommended for providing security information to an RDS DB instance.", + "EC2VpcId": "The identifier of an Amazon virtual private cloud (VPC). This property indicates the VPC that this DB security group belongs to.\n\n> This property is included for backwards compatibility and is no longer recommended for providing security information to an RDS DB instance.", "GroupDescription": "Provides the description of the DB security group.", - "Tags": "An optional array of key-value pairs to apply to this DB security group." + "Tags": "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n\nFor more information, see [Tagging Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide* or [Tagging Amazon Aurora and Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html) in the *Amazon Aurora User Guide* ." }, "AWS::RDS::DBSecurityGroup Ingress": { "CIDRIP": "The IP range to authorize.", @@ -38059,9 +38521,9 @@ }, "AWS::RDS::DBSubnetGroup": { "DBSubnetGroupDescription": "The description for the DB subnet group.", - "DBSubnetGroupName": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints: Must contain no more than 255 lowercase alphanumeric characters or hyphens. Must not be \"Default\".\n\nExample: `mysubnetgroup`", + "DBSubnetGroupName": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints:\n\n- Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens.\n- Must not be default.\n- First character must be a letter.\n\nExample: `mydbsubnetgroup`", "SubnetIds": "The EC2 Subnet IDs for the DB subnet group.", - "Tags": "An optional array of key-value pairs to apply to this DB subnet group." + "Tags": "Tags to assign to the DB subnet group." }, "AWS::RDS::DBSubnetGroup Tag": { "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", @@ -38071,8 +38533,8 @@ "Enabled": "Specifies whether to activate the subscription. If the event notification subscription isn't activated, the subscription is created but not active.", "EventCategories": "A list of event categories for a particular source type ( `SourceType` ) that you want to subscribe to. 
You can see a list of the categories for a given source type in the \"Amazon RDS event categories and event messages\" section of the [*Amazon RDS User Guide*](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.Messages.html) or the [*Amazon Aurora User Guide*](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Events.Messages.html) . You can also see this list by using the `DescribeEventCategories` operation.", "SnsTopicArn": "The Amazon Resource Name (ARN) of the SNS topic created for event notification. SNS automatically creates the ARN when you create a topic and subscribe to it.\n\n> RDS doesn't support FIFO (first in, first out) topics. For more information, see [Message ordering and deduplication (FIFO topics)](https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html) in the *Amazon Simple Notification Service Developer Guide* .", - "SourceIds": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If a `SourceIds` value is supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.", - "SourceType": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, set this parameter to `db-instance` . If this value isn't specified, all events are returned.\n\nValid values: `db-instance` | `db-cluster` | `db-parameter-group` | `db-security-group` | `db-snapshot` | `db-cluster-snapshot`", + "SourceIds": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If `SourceIds` are supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.\n- If the source type is an RDS Proxy, a `DBProxyName` value must be supplied.", + "SourceType": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to `db-instance` . For RDS Proxy events, specify `db-proxy` . 
If this value isn't specified, all events are returned.\n\nValid Values: `db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy | zero-etl | custom-engine-version | blue-green-deployment`", "SubscriptionName": "The name of the subscription.\n\nConstraints: The name must be less than 255 characters.", "Tags": "An optional array of key-value pairs to apply to this subscription." }, @@ -38096,7 +38558,7 @@ "IntegrationName": "The name of the integration.", "KMSKeyId": "The AWS Key Management System ( AWS KMS) key identifier for the key to use to encrypt the integration. If you don't specify an encryption key, RDS uses a default AWS owned key.", "SourceArn": "The Amazon Resource Name (ARN) of the database to use as the source for replication.", - "Tags": "A list of tags. For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide.* .", + "Tags": "An optional array of key-value pairs to apply to this integration.", "TargetArn": "The ARN of the Redshift data warehouse to use as the target for replication." }, "AWS::RDS::Integration Tag": { @@ -38106,18 +38568,18 @@ "AWS::RDS::OptionGroup": { "EngineName": "Specifies the name of the engine that this option group should be associated with.\n\nValid Values:\n\n- `mariadb`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", "MajorEngineVersion": "Specifies the major version of the engine that this option group should be associated with.", - "OptionConfigurations": "A list of options and the settings for each option.", + "OptionConfigurations": "A list of all available options for an option group.", "OptionGroupDescription": "The description of the option group.", "OptionGroupName": "The name of the option group to be created.\n\nConstraints:\n\n- Must be 1 to 255 letters, numbers, or hyphens\n- First character must be a letter\n- Can't end with a hyphen or contain two consecutive hyphens\n\nExample: `myoptiongroup`\n\nIf you don't specify a value for `OptionGroupName` property, a name is automatically created for the option group.\n\n> This value is stored as a lowercase string.", - "Tags": "An optional array of key-value pairs to apply to this option group." + "Tags": "Tags to assign to the option group." }, "AWS::RDS::OptionGroup OptionConfiguration": { - "DBSecurityGroupMemberships": "A list of DBSecurityGroupMembership name strings used for this option.", + "DBSecurityGroupMemberships": "A list of DB security groups used for this option.", "OptionName": "The configuration of options to include in a group.", "OptionSettings": "The option settings to include in an option group.", "OptionVersion": "The version for the option.", "Port": "The optional port for the option.", - "VpcSecurityGroupMemberships": "A list of VpcSecurityGroupMembership name strings used for this option." + "VpcSecurityGroupMemberships": "A list of VPC security group names used for this option." }, "AWS::RDS::OptionGroup OptionSetting": { "Name": "The name of the option that has settings that you can set.", @@ -39281,7 +39743,7 @@ "PublicAccessBlockConfiguration": "Configuration that defines how Amazon S3 handles public access.", "ReplicationConfiguration": "Configuration for replicating objects in an S3 bucket. 
To enable replication, you must also enable versioning by using the `VersioningConfiguration` property.\n\nAmazon S3 can store replicated objects in a single destination bucket or multiple destination buckets. The destination bucket or buckets must already exist.", "Tags": "An arbitrary set of tags (key-value pairs) for this S3 bucket.", - "VersioningConfiguration": "Enables multiple versions of all objects in this bucket. You might enable versioning to prevent objects from being deleted or overwritten by mistake or to archive objects so that you can retrieve previous versions of them.", + "VersioningConfiguration": "Enables multiple versions of all objects in this bucket. You might enable versioning to prevent objects from being deleted or overwritten by mistake or to archive objects so that you can retrieve previous versions of them.\n\n> When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. We recommend that you wait for 15 minutes after enabling versioning before issuing write operations ( `PUT` or `DELETE` ) on objects in the bucket.", "WebsiteConfiguration": "Information used to configure the bucket as a static website. For more information, see [Hosting Websites on Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) ." }, "AWS::S3::Bucket AbortIncompleteMultipartUpload": { @@ -40514,7 +40976,7 @@ "OperatingSystem": "Defines the operating system the patch baseline applies to. The default value is `WINDOWS` .", "PatchGroups": "The name of the patch group to be registered with the patch baseline.", "RejectedPatches": "A list of explicitly rejected patches for the baseline.\n\nFor information about accepted formats for lists of approved patches and rejected patches, see [About package name formats for approved and rejected patch lists](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) in the *AWS Systems Manager User Guide* .", - "RejectedPatchesAction": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *BLOCK* : Packages in the *Rejected patches* list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the *Rejected patches* list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as *InstalledRejected* .", + "RejectedPatchesAction": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- **ALLOW_AS_DEPENDENCY** - *Linux and macOS* : A package in the rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `INSTALLED_OTHER` . This is the default action if no option is specified.\n\n*Windows Server* : Windows Server doesn't support the concept of package dependencies. If a package in the rejected patches list is already installed on the node, its status is reported as `INSTALLED_OTHER` . Any package not already installed on the node is skipped.
This is the default action if no option is specified.\n- **BLOCK** - *All OSs* : Packages in the rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as `INSTALLED_REJECTED` .", "Sources": "Information about the patches to use to update the managed nodes, including target operating systems and source repositories. Applies to Linux managed nodes only.", "Tags": "Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a patch baseline to identify the severity level of patches it specifies and the operating system family it applies to." }, @@ -40827,6 +41289,7 @@ }, "AWS::SageMaker::App ResourceSpec": { "InstanceType": "The instance type that the image version runs on.\n\n> *JupyterServer apps* only support the `system` value.\n> \n> For *KernelGateway apps* , the `system` value is translated to `ml.t3.medium` . KernelGateway apps also support all other values for available instance types.", + "LifecycleConfigArn": "The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource.", "SageMakerImageArn": "The ARN of the SageMaker image that the image version belongs to.", "SageMakerImageVersionArn": "The ARN of the image version created on the instance." }, @@ -41085,11 +41548,13 @@ "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the lifecycle configurations attached to the user profile or domain. To remove a lifecycle config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::Domain JupyterServerAppSettings": { - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app.", + "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the JupyterServerApp. If you use this parameter, the `DefaultResourceSpec` parameter is also required.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::Domain KernelGatewayAppSettings": { "CustomImages": "A list of custom SageMaker images that are configured to run as a KernelGateway app.", - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. 
The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", + "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the user profile or domain.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::Domain RSessionAppSettings": { "CustomImages": "A list of custom SageMaker images that are configured to run as an RSession app.", @@ -41471,6 +41936,20 @@ "Key": "The tag key. Tag keys must be unique per resource.", "Value": "The tag value." }, + "AWS::SageMaker::MlflowTrackingServer": { + "ArtifactStoreUri": "", + "AutomaticModelRegistration": "", + "MlflowVersion": "", + "RoleArn": "", + "Tags": "", + "TrackingServerName": "", + "TrackingServerSize": "", + "WeeklyMaintenanceWindowStart": "" + }, + "AWS::SageMaker::MlflowTrackingServer Tag": { + "Key": "The tag key. Tag keys must be unique per resource.", + "Value": "The tag value." + }, "AWS::SageMaker::Model": { "Containers": "Specifies the containers in the inference pipeline.", "EnableNetworkIsolation": "Isolates the model container. No inbound or outbound network calls can be made to or from the model container.", @@ -41493,6 +41972,9 @@ "ModelPackageName": "The name or Amazon Resource Name (ARN) of the model package to use to create the model.", "MultiModelConfig": "Specifies additional configuration for multi-model endpoints." }, + "AWS::SageMaker::Model HubAccessConfig": { + "HubContentArn": "" + }, "AWS::SageMaker::Model ImageConfig": { "RepositoryAccessMode": "Set this to one of the following values:\n\n- `Platform` - The model image is hosted in Amazon ECR.\n- `Vpc` - The model image is hosted in a private Docker registry in your VPC.", "RepositoryAuthConfig": "(Optional) Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified `Vpc` as the value for the `RepositoryAccessMode` field, and the private Docker registry where the model image is hosted requires authentication." @@ -41514,6 +41996,7 @@ }, "AWS::SageMaker::Model S3DataSource": { "CompressionType": "", + "HubAccessConfig": "", "ModelAccessConfig": "", "S3DataType": "If you choose `S3Prefix` , `S3Uri` identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix for model training.\n\nIf you choose `ManifestFile` , `S3Uri` identifies an object that is a manifest file containing a list of object keys that you want SageMaker to use for model training.\n\nIf you choose `AugmentedManifestFile` , S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. `AugmentedManifestFile` can only be used if the Channel's input mode is `Pipe` .", "S3Uri": "Depending on the value specified for the `S3DataType` , identifies either a key name prefix or a manifest. For example:\n\n- A key name prefix might look like this: `s3://bucketname/exampleprefix/`\n- A manifest might look like this: `s3://bucketname/example.manifest`\n\nA manifest is an S3 object which is a JSON file consisting of an array of elements. The first element is a prefix which is followed by one or more suffixes. SageMaker appends the suffix elements to the prefix to get a full set of `S3Uri` . 
Note that the prefix must be a valid non-empty `S3Uri` that precludes users from specifying a manifest whose individual `S3Uri` is sourced from different S3 buckets.\n\nThe following code example shows a valid manifest format:\n\n`[ {\"prefix\": \"s3://customer_bucket/some/prefix/\"},`\n\n`\"relative/path/to/custdata-1\",`\n\n`\"relative/path/custdata-2\",`\n\n`...`\n\n`\"relative/path/custdata-N\"`\n\n`]`\n\nThis JSON is equivalent to the following `S3Uri` list:\n\n`s3://customer_bucket/some/prefix/relative/path/to/custdata-1`\n\n`s3://customer_bucket/some/prefix/relative/path/custdata-2`\n\n`...`\n\n`s3://customer_bucket/some/prefix/relative/path/custdata-N`\n\nThe complete set of `S3Uri` in this manifest is the input data for the channel for this data source. The object that each `S3Uri` points to must be readable by the IAM role that SageMaker uses to perform tasks on your behalf.\n\nYour input bucket must be located in the same AWS Region as your training job." @@ -42409,17 +42892,20 @@ "EbsVolumeSizeInGb": "The size of an EBS storage volume for a space." }, "AWS::SageMaker::Space JupyterServerAppSettings": { - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app. If you use the `LifecycleConfigArns` parameter, then this parameter is also required." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app. If you use the `LifecycleConfigArns` parameter, then this parameter is also required.", + "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the JupyterServerApp. If you use this parameter, the `DefaultResourceSpec` parameter is also required.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::Space KernelGatewayAppSettings": { "CustomImages": "A list of custom SageMaker images that are configured to run as a KernelGateway app.", - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", + "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the user profile or domain.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::Space OwnershipSettings": { "OwnerUserProfileName": "The user profile who is the owner of the space." }, "AWS::SageMaker::Space ResourceSpec": { "InstanceType": "The instance type that the image version runs on.\n\n> *JupyterServer apps* only support the `system` value.\n> \n> For *KernelGateway apps* , the `system` value is translated to `ml.t3.medium` . 
KernelGateway apps also support all other values for available instance types.", + "LifecycleConfigArn": "The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource.", "SageMakerImageArn": "The ARN of the SageMaker image that the image version belongs to.", "SageMakerImageVersionArn": "The ARN of the image version created on the instance." }, @@ -42449,6 +42935,16 @@ "Key": "The tag key. Tag keys must be unique per resource.", "Value": "The tag value." }, + "AWS::SageMaker::StudioLifecycleConfig": { + "StudioLifecycleConfigAppType": "The App type to which the Lifecycle Configuration is attached.", + "StudioLifecycleConfigContent": "", + "StudioLifecycleConfigName": "The name of the Amazon SageMaker Studio Lifecycle Configuration.", + "Tags": "" + }, + "AWS::SageMaker::StudioLifecycleConfig Tag": { + "Key": "The tag key. Tag keys must be unique per resource.", + "Value": "The tag value." + }, "AWS::SageMaker::UserProfile": { "DomainId": "The domain ID.", "SingleSignOnUserIdentifier": "A specifier for the type of value specified in SingleSignOnUserValue. Currently, the only supported value is \"UserName\". If the Domain's AuthMode is IAM Identity Center , this field is required. If the Domain's AuthMode is not IAM Identity Center , this field cannot be specified.", @@ -42495,11 +42991,13 @@ "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the lifecycle configurations attached to the user profile or domain. To remove a lifecycle config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::UserProfile JupyterServerAppSettings": { - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app.", + "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the JupyterServerApp. If you use this parameter, the `DefaultResourceSpec` parameter is also required.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::UserProfile KernelGatewayAppSettings": { "CustomImages": "A list of custom SageMaker images that are configured to run as a KernelGateway app.", - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", + "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the user profile or domain.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list." 
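Taken together, the `LifecycleConfigArns` descriptions added for the Domain, Space, and UserProfile app settings share one contract: the parameter requires a `DefaultResourceSpec`, and detaching a lifecycle config means writing an empty list rather than omitting the property. The CloudFormation fragment below is a minimal, illustrative sketch of that shape; the domain ID, account ID, and lifecycle-config ARN are placeholder values, not taken from this diff:

```json
{
  "ExampleUserProfile": {
    "Type": "AWS::SageMaker::UserProfile",
    "Properties": {
      "DomainId": "d-examplexxxx",
      "UserProfileName": "example-user",
      "UserSettings": {
        "JupyterServerAppSettings": {
          "DefaultResourceSpec": {
            "InstanceType": "system"
          },
          "LifecycleConfigArns": [
            "arn:aws:sagemaker:us-east-1:111122223333:studio-lifecycle-config/example-config"
          ]
        }
      }
    }
  }
}
```

To detach the config later, update the same path to `"LifecycleConfigArns": []`; per the descriptions above, simply removing the property is not sufficient.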
}, "AWS::SageMaker::UserProfile RStudioServerProAppSettings": { "AccessStatus": "Indicates whether the current user has access to the `RStudioServerPro` app.", @@ -42507,6 +43005,7 @@ }, "AWS::SageMaker::UserProfile ResourceSpec": { "InstanceType": "The instance type that the image version runs on.\n\n> *JupyterServer apps* only support the `system` value.\n> \n> For *KernelGateway apps* , the `system` value is translated to `ml.t3.medium` . KernelGateway apps also support all other values for available instance types.", + "LifecycleConfigArn": "The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource.", "SageMakerImageArn": "The ARN of the SageMaker image that the image version belongs to.", "SageMakerImageVersionArn": "The ARN of the image version created on the instance." }, @@ -42870,8 +43369,8 @@ "AdminAccountId": "The AWS account identifier of the account to designate as the Security Hub administrator account." }, "AWS::SecurityHub::FindingAggregator": { - "RegionLinkingMode": "Indicates whether to aggregate findings from all of the available Regions in the current partition. Also determines whether to automatically aggregate findings from new Regions as Security Hub supports them and you opt into them.\n\nThe selected option also determines how to use the Regions provided in the Regions list.\n\nThe options are as follows:\n\n- `ALL_REGIONS` - Indicates to aggregate findings from all of the Regions where Security Hub is enabled. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them.\n- `ALL_REGIONS_EXCEPT_SPECIFIED` - Indicates to aggregate findings from all of the Regions where Security Hub is enabled, except for the Regions listed in the `Regions` parameter. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them.\n- `SPECIFIED_REGIONS` - Indicates to aggregate findings only from the Regions listed in the `Regions` parameter. Security Hub does not automatically aggregate findings from new Regions.", - "Regions": "If `RegionLinkingMode` is `ALL_REGIONS_EXCEPT_SPECIFIED` , then this is a space-separated list of Regions that do not aggregate findings to the aggregation Region.\n\nIf `RegionLinkingMode` is `SPECIFIED_REGIONS` , then this is a space-separated list of Regions that do aggregate findings to the aggregation Region." + "RegionLinkingMode": "Indicates whether to aggregate findings from all of the available Regions in the current partition. Also determines whether to automatically aggregate findings from new Regions as Security Hub supports them and you opt into them.\n\nThe selected option also determines how to use the Regions provided in the Regions list.\n\nThe options are as follows:\n\n- `ALL_REGIONS` - Aggregates findings from all of the Regions where Security Hub is enabled. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them.\n- `ALL_REGIONS_EXCEPT_SPECIFIED` - Aggregates findings from all of the Regions where Security Hub is enabled, except for the Regions listed in the `Regions` parameter. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them.\n- `SPECIFIED_REGIONS` - Aggregates findings only from the Regions listed in the `Regions` parameter. 
Security Hub does not automatically aggregate findings from new Regions.\n- `NO_REGIONS` - Aggregates no data because no Regions are selected as linked Regions.", + "Regions": "If `RegionLinkingMode` is `ALL_REGIONS_EXCEPT_SPECIFIED` , then this is a space-separated list of Regions that do not aggregate findings to the aggregation Region.\n\nIf `RegionLinkingMode` is `SPECIFIED_REGIONS` , then this is a space-separated list of Regions that do aggregate findings to the aggregation Region.\n\nAn `InvalidInputException` error results if you populate this field while `RegionLinkingMode` is `NO_REGIONS` ." }, "AWS::SecurityHub::Hub": { "AutoEnableControls": "Whether to automatically enable new controls when they are added to standards that are enabled.\n\nBy default, this is set to `true` , and new controls are enabled automatically. To not automatically enable new controls, set this to `false` .", @@ -43491,14 +43990,14 @@ "ObjectKey": "The key name of an object in Amazon S3. For more information about Amazon S3 objects and object keys, see [Uploading, downloading, and working with objects in Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/uploading-downloading-objects.html) in the *Amazon Simple Storage Service User Guide* ." }, "AWS::StepFunctions::Activity": { - "EncryptionConfiguration": "", + "EncryptionConfiguration": "Encryption configuration for the activity.\n\nActivity configuration is immutable, and resource names must be unique. To set customer managed keys for encryption, you must create a *new Activity* . If you attempt to change the configuration in your CFN template for an existing activity, you will receive an `ActivityAlreadyExists` exception.\n\nTo update your activity to include customer managed keys, set a new activity name within your AWS CloudFormation template.", "Name": "The name of the activity.\n\nA name must *not* contain:\n\n- white space\n- brackets `< > { } [ ]`\n- wildcard characters `? *`\n- special characters `\" # % \\ ^ | ~ ` $ & , ; : /`\n- control characters ( `U+0000-001F` , `U+007F-009F` )\n\nTo enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _.", "Tags": "The list of tags to add to a resource.\n\nTags may only contain Unicode letters, digits, white space, or these symbols: `_ . : / = + - @` ." }, "AWS::StepFunctions::Activity EncryptionConfiguration": { - "KmsDataKeyReusePeriodSeconds": "", - "KmsKeyId": "", - "Type": "" + "KmsDataKeyReusePeriodSeconds": "Maximum duration that Step Functions will reuse data keys. When the period expires, Step Functions will call `GenerateDataKey` . Only applies to customer managed keys.", + "KmsKeyId": "An alias, alias ARN, key ID, or key ARN of a symmetric encryption AWS KMS key to encrypt data. To specify an AWS KMS key in a different AWS account, you must use the key ARN or alias ARN.", + "Type": "Encryption option for an activity." }, "AWS::StepFunctions::Activity TagsEntry": { "Key": "The `key` for a key-value pair in a tag entry.", @@ -43509,7 +44008,7 @@ "DefinitionS3Location": "The name of the S3 bucket where the state machine definition is stored. The state machine definition must be a JSON or YAML file.", "DefinitionString": "The Amazon States Language definition of the state machine. The state machine definition must be in JSON. 
See [Amazon States Language](https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html) .", "DefinitionSubstitutions": "A map (string to string) that specifies the mappings for placeholder variables in the state machine definition. This enables the customer to inject values obtained at runtime, for example from intrinsic functions, in the state machine definition. Variables can be template parameter names, resource logical IDs, resource attributes, or a variable in a key-value map.\n\nSubstitutions must follow the syntax: `${key_name}` or `${variable_1,variable_2,...}` .", - "EncryptionConfiguration": "", + "EncryptionConfiguration": "Encryption configuration for the state machine.", "LoggingConfiguration": "Defines what execution history events are logged and where they are logged.\n\n> By default, the `level` is set to `OFF` . For more information see [Log Levels](https://docs.aws.amazon.com/step-functions/latest/dg/cloudwatch-log-level.html) in the AWS Step Functions User Guide.", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role to use for this state machine.", "StateMachineName": "The name of the state machine.\n\nA name must *not* contain:\n\n- white space\n- brackets `< > { } [ ]`\n- wildcard characters `? *`\n- special characters `\" # % \\ ^ | ~ ` $ & , ; : /`\n- control characters ( `U+0000-001F` , `U+007F-009F` )\n\n> If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.", @@ -43521,9 +44020,9 @@ "LogGroupArn": "The ARN of the CloudWatch log group to which you want your logs emitted. The ARN must end with `:*`" }, "AWS::StepFunctions::StateMachine EncryptionConfiguration": { - "KmsDataKeyReusePeriodSeconds": "", - "KmsKeyId": "", - "Type": "" + "KmsDataKeyReusePeriodSeconds": "Maximum duration that Step Functions will reuse data keys. When the period expires, Step Functions will call `GenerateDataKey` . Only applies to customer managed keys.", + "KmsKeyId": "An alias, alias ARN, key ID, or key ARN of a symmetric encryption AWS KMS key to encrypt data. To specify an AWS KMS key in a different AWS account, you must use the key ARN or alias ARN.", + "Type": "Encryption option for a state machine." }, "AWS::StepFunctions::StateMachine LogDestination": { "CloudWatchLogsLogGroup": "An object describing a CloudWatch log group. For more information, see [AWS::Logs::LogGroup](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-loggroup.html) in the AWS CloudFormation User Guide." @@ -43552,7 +44051,7 @@ "RoutingConfiguration": "The routing configuration of an alias. Routing configuration splits [StartExecution](https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html) requests between one or two versions of the same state machine.\n\nUse `RoutingConfiguration` if you want to explicitly set the alias [weights](https://docs.aws.amazon.com/step-functions/latest/apireference/API_RoutingConfigurationListItem.html#StepFunctions-Type-RoutingConfigurationListItem-weight) . Weight is the percentage of traffic you want to route to a state machine version.\n\n> `RoutingConfiguration` and `DeploymentPreference` are mutually exclusive properties. You must define only one of these properties." }, "AWS::StepFunctions::StateMachineAlias DeploymentPreference": { - "Alarms": "A list of Amazon CloudWatch alarms to be monitored during the deployment. 
The deployment fails and rolls back if any of these alarms go into the `ALARM` state.", + "Alarms": "A list of Amazon CloudWatch alarm names to be monitored during the deployment. The deployment fails and rolls back if any of these alarms go into the `ALARM` state.\n\n> Amazon CloudWatch considers nonexistent alarms to have an `OK` state. If you provide an invalid alarm name or provide the ARN of an alarm instead of its name, your deployment may not roll back correctly.", "Interval": "The time in minutes between each traffic shifting increment.", "Percentage": "The percentage of traffic to shift to the new version in each increment.", "StateMachineVersionArn": "The Amazon Resource Name (ARN) of the [`AWS::StepFunctions::StateMachineVersion`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachineversion.html) resource that will be the final version to which the alias points when the traffic shifting is complete.\n\nWhile performing gradual deployments, you can only provide a single state machine version ARN. To explicitly set version weights in a CloudFormation template, use `RoutingConfiguration` instead.", @@ -45291,7 +45790,7 @@ "DesiredSoftwareSetId": "The ID of the software set to apply.", "DesktopArn": "The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Web, or AppStream 2.0.", "DesktopEndpoint": "The URL for the identity provider login (only for environments that use AppStream 2.0).", - "DeviceCreationTags": "\"The tag keys and optional values for the newly created devices for this environment.\"", + "DeviceCreationTags": "The tag keys and optional values for the newly created devices for this environment.", "KmsKeyArn": "The Amazon Resource Name (ARN) of the AWS Key Management Service key used to encrypt the environment.", "MaintenanceWindow": "A specification for a time window to apply software updates.", "Name": "The name of the environment.", @@ -45394,6 +45893,7 @@ "CookieSynchronizationConfiguration": "The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.", "CopyAllowed": "Specifies whether the user can copy text from the streaming session to the local device.", "CustomerManagedKey": "The customer managed key used to encrypt sensitive information in the user settings.", + "DeepLinkAllowed": "Specifies whether the user can use deep links that open automatically when connecting to a session.", "DisconnectTimeoutInMinutes": "The amount of time that a streaming session remains active after users disconnect.", "DownloadAllowed": "Specifies whether the user can download files from the streaming session to the local device.", "IdleDisconnectTimeoutInMinutes": "The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the disconnect timeout interval begins.", diff --git a/schema_source/cloudformation.schema.json b/schema_source/cloudformation.schema.json index 2eb1dfd67..3bc8a4533 100644 --- a/schema_source/cloudformation.schema.json +++ b/schema_source/cloudformation.schema.json @@ -26156,7 +26156,7 @@ "type": "object" }, "BackupVaultName": { - "markdownDescription": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. 
They consist of lowercase letters, numbers, and hyphens.", + "markdownDescription": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created.", "title": "BackupVaultName", "type": "string" }, @@ -33226,7 +33226,7 @@ "items": { "$ref": "#/definitions/AWS::CleanRooms::ConfiguredTable.AnalysisRule" }, - "markdownDescription": "The entire created analysis rule.", + "markdownDescription": "The analysis rule that was created for the configured table.", "title": "AnalysisRules", "type": "array" }, @@ -33790,7 +33790,7 @@ "properties": { "S3": { "$ref": "#/definitions/AWS::CleanRooms::Membership.ProtectedQueryS3OutputConfiguration", - "markdownDescription": "Required configuration for a protected query with an `S3` output type.", + "markdownDescription": "Required configuration for a protected query with an `s3` output type.", "title": "S3" } }, @@ -33904,7 +33904,7 @@ }, "Parameters": { "$ref": "#/definitions/AWS::CleanRooms::PrivacyBudgetTemplate.Parameters", - "markdownDescription": "Specifies the epislon and noise parameters for the privacy budget template.", + "markdownDescription": "Specifies the epsilon and noise parameters for the privacy budget template.", "title": "Parameters" }, "PrivacyBudgetType": { @@ -35460,7 +35460,7 @@ "additionalProperties": false, "properties": { "AccountFilterType": { - "markdownDescription": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSets deploys to the accounts specified in `Accounts` parameter.\n- `DIFFERENCE` : StackSets excludes the accounts specified in `Accounts` parameter. This enables user to avoid certain accounts within an OU such as suspended accounts.\n- `UNION` : StackSets includes additional accounts deployment targets.\n\nThis is the default value if `AccountFilterType` is not provided. This enables user to update an entire OU and individual accounts from a different OU in one request, which used to be two separate requests.\n- `NONE` : Deploys to all the accounts in specified organizational units (OU).", + "markdownDescription": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSet deploys to the accounts specified in the `Accounts` parameter.\n- `DIFFERENCE` : StackSet deploys to the OU, excluding the accounts specified in the `Accounts` parameter.\n- `UNION` : StackSet deploys to the OU, and the accounts specified in the `Accounts` parameter. 
`UNION` is not supported for create operations when using StackSet as a resource.", "title": "AccountFilterType", "type": "string" }, @@ -36294,12 +36294,12 @@ "additionalProperties": false, "properties": { "Header": { - "markdownDescription": "", + "markdownDescription": "The name of the HTTP header that CloudFront uses for the single header policy.", "title": "Header", "type": "string" }, "Value": { - "markdownDescription": "", + "markdownDescription": "Specifies the value to assign to the header for a single header policy.", "title": "Value", "type": "string" } @@ -36334,11 +36334,11 @@ "properties": { "SessionStickinessConfig": { "$ref": "#/definitions/AWS::CloudFront::ContinuousDeploymentPolicy.SessionStickinessConfig", - "markdownDescription": "", + "markdownDescription": "Enable session stickiness for the associated origin or cache settings.", "title": "SessionStickinessConfig" }, "Weight": { - "markdownDescription": "", + "markdownDescription": "The percentage of requests that CloudFront sends to an associated origin or cache settings.", "title": "Weight", "type": "number" } @@ -36807,7 +36807,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "An alias for the CloudFront distribution's domain name.\n\n> This property is legacy. We recommend that you use [Aliases](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-distributionconfig.html#cfn-cloudfront-distribution-distributionconfig-aliases) instead.", "title": "CNAMEs", "type": "array" }, @@ -36839,7 +36839,7 @@ }, "CustomOrigin": { "$ref": "#/definitions/AWS::CloudFront::Distribution.LegacyCustomOrigin", - "markdownDescription": "", + "markdownDescription": "The user-defined HTTP server that serves as the origin for content that CloudFront distributes.\n\n> This property is legacy. We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", "title": "CustomOrigin" }, "DefaultCacheBehavior": { @@ -36897,7 +36897,7 @@ }, "S3Origin": { "$ref": "#/definitions/AWS::CloudFront::Distribution.LegacyS3Origin", - "markdownDescription": "", + "markdownDescription": "The origin as an Amazon S3 bucket.\n\n> This property is legacy. We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", "title": "S3Origin" }, "Staging": { @@ -37020,22 +37020,22 @@ "additionalProperties": false, "properties": { "DNSName": { - "markdownDescription": "", + "markdownDescription": "The domain name assigned to your CloudFront distribution.", "title": "DNSName", "type": "string" }, "HTTPPort": { - "markdownDescription": "", + "markdownDescription": "The HTTP port that CloudFront uses to connect to the origin. Specify the HTTP port that the origin listens on.", "title": "HTTPPort", "type": "number" }, "HTTPSPort": { - "markdownDescription": "", + "markdownDescription": "The HTTPS port that CloudFront uses to connect to the origin. 
Specify the HTTPS port that the origin listens on.", "title": "HTTPSPort", "type": "number" }, "OriginProtocolPolicy": { - "markdownDescription": "", + "markdownDescription": "Specifies the protocol (HTTP or HTTPS) that CloudFront uses to connect to the origin.", "title": "OriginProtocolPolicy", "type": "string" }, @@ -37043,7 +37043,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "The minimum SSL/TLS protocol version that CloudFront uses when communicating with your origin server over HTTPS.\n\nFor more information, see [Minimum Origin SSL Protocol](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginSSLProtocols) in the *Amazon CloudFront Developer Guide* .", "title": "OriginSSLProtocols", "type": "array" } @@ -37059,12 +37059,12 @@ "additionalProperties": false, "properties": { "DNSName": { - "markdownDescription": "", + "markdownDescription": "The domain name assigned to your CloudFront distribution.", "title": "DNSName", "type": "string" }, "OriginAccessIdentity": { - "markdownDescription": "", + "markdownDescription": "The CloudFront origin access identity to associate with the distribution. Use an origin access identity to configure the distribution so that end users can only access objects in an Amazon S3 bucket through CloudFront .\n\n> This property is legacy. We recommend that you use [OriginAccessControl](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudfront-originaccesscontrol.html) instead.", "title": "OriginAccessIdentity", "type": "string" } @@ -39277,7 +39277,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . 
You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. 
For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::One::UKey`\n- `AWS::One::User`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::RUM::AppMonitor`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", "title": "Field", "type": "string" }, @@ -39600,7 +39600,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::One::UKey`\n- `AWS::One::User`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::RUM::AppMonitor`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. 
For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", "title": "Field", "type": "string" }, @@ -44503,7 +44503,7 @@ "additionalProperties": false, "properties": { "Authentication": { - "markdownDescription": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", + "markdownDescription": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities.\n\n> If a secret token was provided, it will be masked in the response. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhook trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", "title": "Authentication", "type": "string" }, @@ -44586,7 +44586,7 @@ "type": "string" }, "SecretToken": { - "markdownDescription": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. 
Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities.", + "markdownDescription": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities.\n\n> If a secret token was provided, it will be masked in the response.", "title": "SecretToken", "type": "string" } @@ -64622,7 +64622,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name of the farm.", + "markdownDescription": "The display name of the farm.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -64704,7 +64704,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name of the fleet summary to update.", + "markdownDescription": "The display name of the fleet summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -65351,7 +65351,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name of the queue summary to update.", + "markdownDescription": "The display name of the queue summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -65700,7 +65700,7 @@ "additionalProperties": false, "properties": { "DisplayName": { - "markdownDescription": "The display name of the storage profile summary to update.", + "markdownDescription": "The display name of the storage profile summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -69794,7 +69794,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", + "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. 
If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -71163,7 +71163,7 @@ "type": "string" }, "Locale": { - "markdownDescription": "The locale of the IPAM pool. In IPAM, the locale is the AWS Region where you want to make an IPAM pool available for allocations. Only resources in the same Region as the locale of the pool can get IP address allocations from the pool. You can only allocate a CIDR for a VPC, for example, from an IPAM pool that shares a locale with the VPC\u2019s Region. Note that once you choose a Locale for a pool, you cannot modify it. If you choose an AWS Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.", + "markdownDescription": "The locale of the IPAM pool.\n\nThe locale for the pool should be one of the following:\n\n- An AWS Region where you want this IPAM pool to be available for allocations.\n- The network border group for an AWS Local Zone where you want this IPAM pool to be available for allocations ( [supported Local Zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#byoip-zone-avail) ). This option is only available for IPAM IPv4 pools in the public scope.\n\nIf you choose an AWS Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.", "title": "Locale", "type": "string" }, @@ -77649,7 +77649,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", + "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowestPrice` and `priceCapacityOptimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -77872,7 +77872,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type. 
These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.", + "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowestPrice` and `priceCapacityOptimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -78197,7 +78197,7 @@ "type": "string" }, "EnableDns64": { - "markdownDescription": "Indicates whether DNS queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses for IPv4-only destinations. For more information, see [DNS64 and NAT64](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-nat64-dns64) in the *Amazon Virtual Private Cloud User Guide* .", + "markdownDescription": "Indicates whether DNS queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses for IPv4-only destinations.\n\n> You must first configure a NAT gateway in a public subnet (separate from the subnet containing the IPv6-only workloads). For example, the subnet containing the NAT gateway should have a `0.0.0.0/0` route pointing to the internet gateway. For more information, see [Configure DNS64 and NAT64](https://docs.aws.amazon.com/vpc/latest/userguide/nat-gateway-nat64-dns64.html#nat-gateway-nat64-dns64-walkthrough) in the *Amazon Virtual Private Cloud User Guide* .", "title": "EnableDns64", "type": "boolean" }, @@ -82778,7 +82778,7 @@ "additionalProperties": false, "properties": { "EncryptionType": { - "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. 
For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", + "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", "title": "EncryptionType", "type": "string" }, @@ -82859,37 +82859,37 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "A list of enumerable strings representing the repository creation scenarios that this template will apply towards. The two supported scenarios are `PULL_THROUGH_CACHE` and `REPLICATION` .", "title": "AppliedFor", "type": "array" }, "Description": { - "markdownDescription": "", + "markdownDescription": "The description associated with the repository creation template.", "title": "Description", "type": "string" }, "EncryptionConfiguration": { "$ref": "#/definitions/AWS::ECR::RepositoryCreationTemplate.EncryptionConfiguration", - "markdownDescription": "The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest.\n\nBy default, when no encryption configuration is set or the `AES256` encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts your data at rest using an AES-256 encryption algorithm. This does not require any action on your part.\n\nFor more control over the encryption of the contents of your repository, you can use server-side encryption with AWS Key Management Service key stored in AWS Key Management Service ( AWS KMS ) to encrypt your images. For more information, see [Amazon ECR encryption at rest](https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) in the *Amazon Elastic Container Registry User Guide* .", + "markdownDescription": "The encryption configuration associated with the repository creation template.", "title": "EncryptionConfiguration" }, "ImageTagMutability": { - "markdownDescription": "", + "markdownDescription": "The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. 
If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten.", "title": "ImageTagMutability", "type": "string" }, "LifecyclePolicy": { - "markdownDescription": "", + "markdownDescription": "The lifecycle policy to use for repositories created using the template.", "title": "LifecyclePolicy", "type": "string" }, "Prefix": { - "markdownDescription": "", + "markdownDescription": "The repository namespace prefix associated with the repository creation template.", "title": "Prefix", "type": "string" }, "RepositoryPolicy": { - "markdownDescription": "", + "markdownDescription": "The repository policy to apply to repositories created using the template. A repository policy is a permissions policy associated with a repository to control access permissions.", "title": "RepositoryPolicy", "type": "string" }, @@ -82897,7 +82897,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The tags attached to the resource.", + "markdownDescription": "The metadata to apply to the repository to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.", "title": "ResourceTags", "type": "array" } @@ -82933,7 +82933,7 @@ "additionalProperties": false, "properties": { "EncryptionType": { - "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", + "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm. 
For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", "title": "EncryptionType", "type": "string" }, @@ -90340,12 +90340,12 @@ "type": "string" }, "AutoMinorVersionUpgrade": { - "markdownDescription": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "markdownDescription": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", "title": "AutoMinorVersionUpgrade", "type": "boolean" }, "CacheNodeType": { - "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . 
\n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis configuration variables `appendonly` and `appendfsync` are not supported on Redis version 2.8.22 and later.", + "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . 
\n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis OSS configuration variables `appendonly` and `appendfsync` are not supported on Redis OSS version 2.8.22 and later.", "title": "CacheNodeType", "type": "string" }, @@ -90383,7 +90383,7 @@ "type": "string" }, "IpDiscovery": { - "markdownDescription": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "IpDiscovery", "type": "string" }, @@ -90396,7 +90396,7 @@ "type": "array" }, "NetworkType": { - "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "NetworkType", "type": "string" }, @@ -90437,12 +90437,12 @@ "items": { "type": "string" }, - "markdownDescription": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "markdownDescription": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). 
The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "title": "SnapshotArns", "type": "array" }, "SnapshotName": { - "markdownDescription": "The name of a Redis snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", + "markdownDescription": "The name of a Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", "title": "SnapshotName", "type": "string" }, @@ -90618,7 +90618,7 @@ "additionalProperties": false, "properties": { "AutomaticFailoverEnabled": { - "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis (cluster mode enabled) replication groups.", + "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.", "title": "AutomaticFailoverEnabled", "type": "boolean" }, @@ -90633,7 +90633,7 @@ "type": "string" }, "EngineVersion": { - "markdownDescription": "The Elasticache Redis engine version.", + "markdownDescription": "The ElastiCache Redis OSS engine version.", "title": "EngineVersion", "type": "string" }, @@ -90744,7 +90744,7 @@ "additionalProperties": false, "properties": { "NodeGroupId": { - "markdownDescription": "Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "markdownDescription": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "title": "NodeGroupId", "type": "string" }, @@ -90887,22 +90887,22 @@ "additionalProperties": false, "properties": { "AtRestEncryptionEnabled": { - "markdownDescription": "A flag that enables encryption at rest when set to `true` .\n\nYou cannot modify the value of `AtRestEncryptionEnabled` after the replication group is created. 
To enable encryption at rest on a replication group you must set `AtRestEncryptionEnabled` to `true` when you create the replication group.\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`", "title": "AtRestEncryptionEnabled", "type": "boolean" }, "AuthToken": { - "markdownDescription": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "markdownDescription": "*Reserved parameter.* The password used to access a password-protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis OSS AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "title": "AuthToken", "type": "string" }, "AutoMinorVersionUpgrade": { - "markdownDescription": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "markdownDescription": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. 
This parameter is disabled for previous versions.", "title": "AutoMinorVersionUpgrade", "type": "boolean" }, "AutomaticFailoverEnabled": { - "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis (cluster mode enabled) replication groups.\n\nDefault: false", + "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.\n\nDefault: false", "title": "AutomaticFailoverEnabled", "type": "boolean" }, @@ -90912,7 +90912,7 @@ "type": "string" }, "CacheParameterGroupName": { - "markdownDescription": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", + "markdownDescription": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis OSS version 3.2.4 or later, have only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis OSS (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis OSS (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", "title": "CacheParameterGroupName", "type": "string" }, @@ -90930,7 +90930,7 @@ "type": "string" }, "ClusterMode": { - "markdownDescription": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", + "markdownDescription": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", "title": "ClusterMode", "type": "string" }, @@ -90955,7 +90955,7 @@ "type": "string" }, "IpDiscovery": { - "markdownDescription": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . 
IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "IpDiscovery", "type": "string" }, @@ -90978,7 +90978,7 @@ "type": "boolean" }, "NetworkType": { - "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "NetworkType", "type": "string" }, @@ -90986,7 +90986,7 @@ "items": { "$ref": "#/definitions/AWS::ElastiCache::ReplicationGroup.NodeGroupConfiguration" }, - "markdownDescription": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "markdownDescription": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis OSS cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "title": "NodeGroupConfiguration", "type": "array" }, @@ -91001,7 +91001,7 @@ "type": "number" }, "NumNodeGroups": { - "markdownDescription": "An optional parameter that specifies the number of node groups (shards) for this Redis (cluster mode enabled) replication group. For Redis (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. 
When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", + "markdownDescription": "An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", "title": "NumNodeGroups", "type": "number" }, @@ -91055,7 +91055,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "markdownDescription": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "title": "SnapshotArns", "type": "array" }, @@ -91075,7 +91075,7 @@ "type": "string" }, "SnapshottingClusterId": { - "markdownDescription": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups.", + "markdownDescription": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups.", "title": "SnapshottingClusterId", "type": "string" }, @@ -91088,12 +91088,12 @@ "type": "array" }, "TransitEncryptionEnabled": { - "markdownDescription": "A flag that enables in-transit encryption when set to `true` .\n\nYou cannot modify the value of `TransitEncryptionEnabled` after the cluster is created. 
To enable in-transit encryption on a cluster you must set `TransitEncryptionEnabled` to `true` when you create a cluster.\n\nThis parameter is valid only if the `Engine` parameter is `redis` , the `EngineVersion` parameter is `3.2.6` or `4.x` onward, and the cluster is being created in an Amazon VPC.\n\nIf you enable in-transit encryption, you must also specify a value for `CacheSubnetGroup` .\n\n*Required:* Only available when creating a replication group in an Amazon VPC using redis version `3.2.6` or `4.x` onward.\n\nDefault: `false`\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` .", + "markdownDescription": "A flag that enables in-transit encryption when set to `true` .\n\nYou cannot modify the value of `TransitEncryptionEnabled` after the cluster is created. To enable in-transit encryption on a cluster you must set `TransitEncryptionEnabled` to `true` when you create a cluster.\n\nThis parameter is valid only if the `Engine` parameter is `redis` , the `EngineVersion` parameter is `3.2.6` or `4.x` onward, and the cluster is being created in an Amazon VPC.\n\nIf you enable in-transit encryption, you must also specify a value for `CacheSubnetGroup` .\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` .", "title": "TransitEncryptionEnabled", "type": "boolean" }, "TransitEncryptionMode": { - "markdownDescription": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", + "markdownDescription": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. 
Once you migrate all your Redis OSS clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", "title": "TransitEncryptionMode", "type": "string" }, @@ -91212,7 +91212,7 @@ "additionalProperties": false, "properties": { "NodeGroupId": { - "markdownDescription": "Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "markdownDescription": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "title": "NodeGroupId", "type": "string" }, @@ -91436,7 +91436,7 @@ "title": "CacheUsageLimits" }, "DailySnapshotTime": { - "markdownDescription": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis and Serverless Memcached only.", + "markdownDescription": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis OSS and Serverless Memcached only.", "title": "DailySnapshotTime", "type": "string" }, @@ -91497,7 +91497,7 @@ "type": "array" }, "SnapshotRetentionLimit": { - "markdownDescription": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis and Serverless Memcached only.", + "markdownDescription": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis OSS and Serverless Memcached only.", "title": "SnapshotRetentionLimit", "type": "number" }, @@ -91518,7 +91518,7 @@ "type": "array" }, "UserGroupId": { - "markdownDescription": "The identifier of the user group associated with the serverless cache. Available for Redis only. Default is NULL.", + "markdownDescription": "The identifier of the user group associated with the serverless cache. Available for Redis OSS only. Default is NULL.", "title": "UserGroupId", "type": "string" } @@ -95049,7 +95049,7 @@ }, "IdMappingTechniques": { "$ref": "#/definitions/AWS::EntityResolution::IdMappingWorkflow.IdMappingTechniques", - "markdownDescription": "An object which defines the `idMappingType` and the `providerProperties` .", + "markdownDescription": "An object which defines the ID mapping technique and any additional configurations.", "title": "IdMappingTechniques" }, "InputSourceConfig": { @@ -95136,7 +95136,7 @@ "additionalProperties": false, "properties": { "InputSourceARN": { - "markdownDescription": "An AWS Glue table ARN for the input source table.", + "markdownDescription": "An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.", "title": "InputSourceARN", "type": "string" }, @@ -95146,7 +95146,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve to.", + "markdownDescription": "The type of ID namespace. 
There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` that all `sourceIds` will resolve to.", "title": "Type", "type": "string" } @@ -95294,7 +95294,7 @@ "type": "array" }, "Type": { - "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve to.", + "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` that all `sourceIds` will resolve to.", "title": "Type", "type": "string" } @@ -95349,7 +95349,7 @@ "additionalProperties": false, "properties": { "InputSourceARN": { - "markdownDescription": "An AWS Glue table ARN for the input source table.", + "markdownDescription": "An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.", "title": "InputSourceARN", "type": "string" }, @@ -95668,7 +95668,7 @@ "additionalProperties": false, "properties": { "AttributeMatchingModel": { - "markdownDescription": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the AttributeMatchingModel. When choosing `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of `BusinessEmail` field of Profile B matches, the two profiles are matched on the `Email` type. When choosing `ONE_TO_ONE` ,the system can only match if the sub-types are exact matches. For example, only when the value of the `Email` field of Profile A and the value of the `Email` field of Profile B matches, the two profiles are matched on the `Email` type.", + "markdownDescription": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the `attributeMatchingModel` .\n\nIf you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of `BusinessEmail` field of Profile B matches, the two profiles are matched on the `Email` attribute type.\n\nIf you choose `ONE_TO_ONE` , the system can only match attributes if the sub-types are an exact match. For example, for the `Email` attribute type, the system will only consider it a match if the value of the `Email` field of Profile A matches the value of the `Email` field of Profile B.", "title": "AttributeMatchingModel", "type": "string" }, @@ -95889,7 +95889,7 @@ "type": "string" }, "MatchKey": { - "markdownDescription": "A key that allows grouping of multiple input attributes into a unified matching group. For example, consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . By assigning a `matchKey` called `address` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group. 
If no `matchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.", + "markdownDescription": "A key that allows grouping of multiple input attributes into a unified matching group.\n\nFor example, consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . By assigning a `matchKey` called `address` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group.\n\nIf no `matchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.", "title": "MatchKey", "type": "string" }, @@ -103339,7 +103339,7 @@ "items": { "$ref": "#/definitions/AWS::GameLift::Fleet.LocationConfiguration" }, - "markdownDescription": "A set of remote locations to deploy additional instances to and manage as part of the fleet. This parameter can only be used when creating fleets in AWS Regions that support multiple locations. You can add any Amazon GameLift-supported AWS Region as a remote location, in the form of an AWS Region code, such as `us-west-2` or Local Zone code. To create a fleet with instances in the home Region only, don't set this parameter.\n\nWhen using this parameter, Amazon GameLift requires you to include your home location in the request.", + "markdownDescription": "A set of remote locations to deploy additional instances to and manage as a multi-location fleet. Use this parameter when creating a fleet in AWS Regions that support multiple locations. You can add any AWS Region or Local Zone that's supported by Amazon GameLift. Provide a list of one or more AWS Region codes, such as `us-west-2` , or Local Zone names. When using this parameter, Amazon GameLift requires you to include your home location in the request. For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "title": "Locations", "type": "array" }, @@ -103585,7 +103585,7 @@ "additionalProperties": false, "properties": { "Location": { - "markdownDescription": "An AWS Region code, such as `us-west-2` .", + "markdownDescription": "An AWS Region code, such as `us-west-2` . For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "title": "Location", "type": "string" }, @@ -104178,7 +104178,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of labels to assign to the new matchmaking configuration resource. Tags are developer-defined key-value pairs. Tagging AWS resources are useful for resource management, access management and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Rareference* .", + "markdownDescription": "A list of labels to assign to the new resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management, and cost allocation. 
For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference* .", "title": "Tags", "type": "array" } @@ -114591,7 +114591,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .", + "markdownDescription": "A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .\n\nThis property is optional. If it is not included, IAM will retrieve and use the top intermediate certificate authority (CA) thumbprint of the OpenID Connect identity provider server certificate.", "title": "ThumbprintList", "type": "array" }, @@ -129666,7 +129666,7 @@ "type": "array" }, "AssetModelName": { - "markdownDescription": "A unique, friendly name for the asset model.", + "markdownDescription": "A unique name for the asset model.", "title": "AssetModelName", "type": "string" }, @@ -130201,7 +130201,7 @@ "type": "array" }, "GatewayName": { - "markdownDescription": "A unique, friendly name for the gateway.", + "markdownDescription": "A unique name for the gateway.", "title": "GatewayName", "type": "string" }, @@ -140464,7 +140464,7 @@ }, "ProcessingConfiguration": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.ProcessingConfiguration", - "markdownDescription": "", + "markdownDescription": "Specifies configuration for Snowflake.", "title": "ProcessingConfiguration" }, "RetryOptions": { @@ -153407,7 +153407,7 @@ "type": "object" }, "AirflowVersion": { - "markdownDescription": "The version of Apache Airflow to use for the environment. If no value is specified, defaults to the latest version.\n\nIf you specify a newer version number for an existing environment, the version update requires some service interruption before taking effect.\n\n*Allowed Values* : `1.10.12` | `2.0.2` | `2.2.2` | `2.4.3` | `2.5.1` | `2.6.3` | `2.7.2` (latest)", + "markdownDescription": "The version of Apache Airflow to use for the environment. If no value is specified, defaults to the latest version.\n\nIf you specify a newer version number for an existing environment, the version update requires some service interruption before taking effect.\n\n*Allowed Values* : `1.10.12` | `2.0.2` | `2.2.2` | `2.4.3` | `2.5.1` | `2.6.3` | `2.7.2` | `2.8.1` | `2.9.2` (latest)", "title": "AirflowVersion", "type": "string" }, @@ -167328,7 +167328,7 @@ "properties": { "LogDestination": { "additionalProperties": true, - "markdownDescription": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` . The following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . 
The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", + "markdownDescription": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` .\n\nThe following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -182758,7 +182758,7 @@ "items": { "type": "string" }, - "markdownDescription": "Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.", + "markdownDescription": "Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups.", "title": "SecurityGroup", "type": "array" }, @@ -224578,7 +224578,7 @@ "type": "array" }, "BacktrackWindow": { - "markdownDescription": "The target backtrack window, in seconds. To disable backtracking, set this value to 0.\n\n> Currently, Backtrack is only supported for Aurora MySQL DB clusters. \n\nDefault: 0\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).\n\nValid for: Aurora MySQL DB clusters only", + "markdownDescription": "The target backtrack window, in seconds. To disable backtracking, set this value to `0` .\n\nValid for Cluster Type: Aurora MySQL DB clusters only\n\nDefault: `0`\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).", "title": "BacktrackWindow", "type": "number" }, @@ -224761,7 +224761,7 @@ "type": "string" }, "PubliclyAccessible": { - "markdownDescription": "Specifies whether the DB cluster is publicly accessible.\n\nWhen the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. 
That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.\n\nWhen the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nDefault: The default behavior varies depending on whether `DBSubnetGroupName` is specified.\n\nIf `DBSubnetGroupName` isn't specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the default VPC in the target Region doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.\n\nIf `DBSubnetGroupName` is specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the subnets are part of a VPC that doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.", + "markdownDescription": "Specifies whether the DB cluster is publicly accessible.\n\nWhen the DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB cluster, the endpoint resolves to the private IP address. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.\n\nWhen the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nDefault: The default behavior varies depending on whether `DBSubnetGroupName` is specified.\n\nIf `DBSubnetGroupName` isn't specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the default VPC in the target Region doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.\n\nIf `DBSubnetGroupName` is specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the subnets are part of a VPC that doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.", "title": "PubliclyAccessible", "type": "boolean" }, @@ -224819,7 +224819,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB cluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", + "markdownDescription": "Tags to assign to the DB cluster.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", "title": "Tags", "type": "array" }, @@ -224903,7 +224903,7 @@ "type": "string" }, "SecretArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the secret.", + "markdownDescription": "The Amazon Resource Name (ARN) of the secret. This parameter is a return value that you can retrieve using the `Fn::GetAtt` intrinsic function. 
For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html#aws-resource-rds-dbcluster-return-values) .", "title": "SecretArn", "type": "string" } @@ -225009,17 +225009,17 @@ "additionalProperties": false, "properties": { "DBClusterParameterGroupName": { - "markdownDescription": "The name of the DB cluster parameter group.\n\nConstraints:\n\n- Must not match the name of an existing DB cluster parameter group.\n\nIf you don't specify a value for `DBClusterParameterGroupName` property, a name is automatically created for the DB cluster parameter group.\n\n> This value is stored as a lowercase string.", + "markdownDescription": "The name of the DB cluster parameter group.\n\nConstraints:\n\n- Must not match the name of an existing DB cluster parameter group.\n\n> This value is stored as a lowercase string.", "title": "DBClusterParameterGroupName", "type": "string" }, "Description": { - "markdownDescription": "A friendly description for this DB cluster parameter group.", + "markdownDescription": "The description for the DB cluster parameter group.", "title": "Description", "type": "string" }, "Family": { - "markdownDescription": "The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a DB engine and engine version compatible with that DB cluster parameter group family.\n\n> The DB cluster parameter group family can't be changed when updating a DB cluster parameter group. \n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBClusterParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBClusterParameterGroup.html)` .", + "markdownDescription": "The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.\n\n*Aurora MySQL*\n\nExample: `aurora-mysql5.7` , `aurora-mysql8.0`\n\n*Aurora PostgreSQL*\n\nExample: `aurora-postgresql14`\n\n*RDS for MySQL*\n\nExample: `mysql8.0`\n\n*RDS for PostgreSQL*\n\nExample: `postgres13`\n\nTo list all of the available parameter group families for a DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine <engine>`\n\nFor example, to list all of the available parameter group families for the Aurora PostgreSQL DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine aurora-postgresql`\n\n> The output contains duplicates. 
\n\nThe following are the valid DB engine values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `mysql`\n- `postgres`", "title": "Family", "type": "string" }, @@ -225032,7 +225032,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB cluster parameter group.", + "markdownDescription": "Tags to assign to the DB cluster parameter group.", "title": "Tags", "type": "array" } @@ -225129,7 +225129,7 @@ "type": "string" }, "AutomaticBackupReplicationRegion": { - "markdownDescription": "", + "markdownDescription": "The AWS Region associated with the automated backup.", "title": "AutomaticBackupReplicationRegion", "type": "string" }, @@ -225174,7 +225174,7 @@ "type": "string" }, "DBClusterIdentifier": { - "markdownDescription": "The identifier of the DB cluster that the instance will belong to.", + "markdownDescription": "The identifier of the DB cluster that this DB instance will belong to.\n\nThis setting doesn't apply to RDS Custom DB instances.", "title": "DBClusterIdentifier", "type": "string" }, @@ -225212,12 +225212,12 @@ "type": "array" }, "DBSnapshotIdentifier": { - "markdownDescription": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n\nBy specifying this property, you can create a DB instance from the specified DB snapshot. If the `DBSnapshotIdentifier` property is an empty string or the `AWS::RDS::DBInstance` declaration has no `DBSnapshotIdentifier` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n\nSome DB instance properties aren't valid when you restore from a snapshot, such as the `MasterUsername` and `MasterUserPassword` properties. For information about the properties that you can specify, see the `RestoreDBInstanceFromDBSnapshot` action in the *Amazon RDS API Reference* .\n\nAfter you restore a DB instance with a `DBSnapshotIdentifier` property, you must specify the same `DBSnapshotIdentifier` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the `DBSnapshotIdentifier` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified `DBSnapshotIdentifier` property, and the original DB instance is deleted.\n\nIf you specify the `DBSnapshotIdentifier` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n\n- `CharacterSetName`\n- `DBClusterIdentifier`\n- `DBName`\n- `DeleteAutomatedBackups`\n- `EnablePerformanceInsights`\n- `KmsKeyId`\n- `MasterUsername`\n- `MasterUserPassword`\n- `PerformanceInsightsKMSKeyId`\n- `PerformanceInsightsRetentionPeriod`\n- `PromotionTier`\n- `SourceDBInstanceIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an encrypted snapshot)\n- `Timezone`\n\n*Amazon Aurora*\n\nNot applicable. 
Snapshot restore is managed by the DB cluster.", + "markdownDescription": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n\nBy specifying this property, you can create a DB instance from the specified DB snapshot. If the `DBSnapshotIdentifier` property is an empty string or the `AWS::RDS::DBInstance` declaration has no `DBSnapshotIdentifier` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n\nSome DB instance properties aren't valid when you restore from a snapshot, such as the `MasterUsername` and `MasterUserPassword` properties. For information about the properties that you can specify, see the `RestoreDBInstanceFromDBSnapshot` action in the *Amazon RDS API Reference* .\n\nAfter you restore a DB instance with a `DBSnapshotIdentifier` property, you must specify the same `DBSnapshotIdentifier` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the `DBSnapshotIdentifier` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified `DBSnapshotIdentifier` property, and the original DB instance is deleted.\n\nIf you specify the `DBSnapshotIdentifier` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n\n- `CharacterSetName`\n- `DBClusterIdentifier`\n- `DBName`\n- `DeleteAutomatedBackups`\n- `KmsKeyId`\n- `MasterUsername`\n- `MasterUserPassword`\n- `PerformanceInsightsKMSKeyId`\n- `PerformanceInsightsRetentionPeriod`\n- `PromotionTier`\n- `SourceDBInstanceIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an encrypted snapshot)\n- `Timezone`\n\n*Amazon Aurora*\n\nNot applicable. Snapshot restore is managed by the DB cluster.", "title": "DBSnapshotIdentifier", "type": "string" }, "DBSubnetGroupName": { - "markdownDescription": "A DB subnet group to associate with the DB instance. If you update this value, the new subnet group must be a subnet group in a new VPC.\n\nIf there's no DB subnet group, then the DB instance isn't a VPC DB instance.\n\nFor more information about using Amazon RDS in a VPC, see [Using Amazon RDS with Amazon Virtual Private Cloud (VPC)](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. The DB subnet group is managed by the DB cluster. If specified, the setting must match the DB cluster setting.", + "markdownDescription": "A DB subnet group to associate with the DB instance. 
If you update this value, the new subnet group must be a subnet group in a new VPC.\n\nIf there's no DB subnet group, then the DB instance isn't a VPC DB instance.\n\nFor more information about using Amazon RDS in a VPC, see [Amazon VPC and Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\nThis setting doesn't apply to Amazon Aurora DB instances. The DB subnet group is managed by the DB cluster. If specified, the setting must match the DB cluster setting.", "title": "DBSubnetGroupName", "type": "string" }, @@ -225232,7 +225232,7 @@ "type": "boolean" }, "DeletionProtection": { - "markdownDescription": "A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. For more information, see [Deleting a DB Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html) .\n\n*Amazon Aurora*\n\nNot applicable. You can enable or disable deletion protection for the DB cluster. For more information, see `CreateDBCluster` . DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.", + "markdownDescription": "Specifies whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see [Deleting a DB Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html) .\n\nThis setting doesn't apply to Amazon Aurora DB instances. You can enable or disable deletion protection for the DB cluster. For more information, see `CreateDBCluster` . DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.", "title": "DeletionProtection", "type": "boolean" }, @@ -225343,7 +225343,7 @@ "type": "number" }, "MonitoringInterval": { - "markdownDescription": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify 0. The default is 0.\n\nIf `MonitoringRoleArn` is specified, then you must set `MonitoringInterval` to a value other than 0.\n\nThis setting doesn't apply to RDS Custom.\n\nValid Values: `0, 1, 5, 10, 15, 30, 60`", + "markdownDescription": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify `0` .\n\nIf `MonitoringRoleArn` is specified, then you must set `MonitoringInterval` to a value other than `0` .\n\nThis setting doesn't apply to RDS Custom DB instances.\n\nValid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60`\n\nDefault: `0`", "title": "MonitoringInterval", "type": "number" }, @@ -225353,7 +225353,7 @@ "type": "string" }, "MultiAZ": { - "markdownDescription": "Specifies whether the database instance is a Multi-AZ DB instance deployment. You can't set the `AvailabilityZone` parameter if the `MultiAZ` parameter is set to true.\n\nFor more information, see [Multi-AZ deployments for high availability](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZ.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. 
Amazon Aurora storage is replicated across all of the Availability Zones and doesn't require the `MultiAZ` option to be set.", + "markdownDescription": "Specifies whether the DB instance is a Multi-AZ deployment. You can't set the `AvailabilityZone` parameter if the DB instance is a Multi-AZ deployment.\n\nThis setting doesn't apply to the following DB instances:\n\n- Amazon Aurora (DB instance Availability Zones (AZs) are managed by the DB cluster.)\n- RDS Custom", "title": "MultiAZ", "type": "boolean" }, @@ -225383,7 +225383,7 @@ "type": "number" }, "Port": { - "markdownDescription": "The port number on which the database accepts connections.\n\n*Amazon Aurora*\n\nNot applicable. The port number is managed by the DB cluster.\n\n*Db2*\n\nDefault value: `50000`", + "markdownDescription": "The port number on which the database accepts connections.\n\nThis setting doesn't apply to Aurora DB instances. The port number is managed by the cluster.\n\nValid Values: `1150-65535`\n\nDefault:\n\n- RDS for Db2 - `50000`\n- RDS for MariaDB - `3306`\n- RDS for Microsoft SQL Server - `1433`\n- RDS for MySQL - `3306`\n- RDS for Oracle - `1521`\n- RDS for PostgreSQL - `5432`\n\nConstraints:\n\n- For RDS for Microsoft SQL Server, the value can't be `1234` , `1434` , `3260` , `3343` , `3389` , `47001` , or `49152-49156` .", "title": "Port", "type": "string" }, @@ -225469,7 +225469,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB instance.", + "markdownDescription": "Tags to assign to the DB instance.", "title": "Tags", "type": "array" }, @@ -225585,7 +225585,7 @@ "type": "string" }, "SecretArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the secret.", + "markdownDescription": "The Amazon Resource Name (ARN) of the secret. This parameter is a return value that you can retrieve using the `Fn::GetAtt` intrinsic function. For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html#aws-resource-rds-dbinstance-return-values) .", "title": "SecretArn", "type": "string" } @@ -225654,12 +225654,12 @@ "type": "string" }, "Family": { - "markdownDescription": "The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a DB engine and engine version compatible with that DB parameter group family.\n\n> The DB parameter group family can't be changed when updating a DB parameter group. \n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBParameterGroup.html)` .", + "markdownDescription": "The DB parameter group family name. 
A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.\n\nTo list all of the available parameter group families for a DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine <engine>`\n\nFor example, to list all of the available parameter group families for the MySQL DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine mysql`\n\n> The output contains duplicates. \n\nThe following are the valid DB engine values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `db2-ae`\n- `db2-se`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", "title": "Family", "type": "string" }, "Parameters": { - "markdownDescription": "An array of parameter names and values for the parameter update. At least one parameter name and value must be supplied. Subsequent arguments are optional.\n\nRDS for Db2 requires you to bring your own Db2 license. You must enter your IBM customer ID ( `rds.ibm_customer_id` ) and site number ( `rds.ibm_site_id` ) before starting a Db2 instance.\n\nFor more information about DB parameters and DB parameter groups for Amazon RDS DB engines, see [Working with DB Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* .\n\nFor more information about DB cluster and DB instance parameters and parameter groups for Amazon Aurora DB engines, see [Working with DB Parameter Groups and DB Cluster Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", + "markdownDescription": "An array of parameter names and values for the parameter update. You must specify at least one parameter name and value.\n\nFor more information about parameter groups, see [Working with parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* , or [Working with parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", "title": "Parameters", "type": "object" }, @@ -225667,7 +225667,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB parameter group.\n\n> Currently, this is the only property that supports drift detection.", + "markdownDescription": "Tags to assign to the DB parameter group.", "title": "Tags", "type": "array" } @@ -225753,7 +225753,7 @@ "type": "boolean" }, "EngineFamily": { - "markdownDescription": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. 
For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL` . For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL` . For RDS for Microsoft SQL Server, specify `SQLSERVER` .\n\n*Valid Values* : `MYSQL` | `POSTGRESQL` | `SQLSERVER`", + "markdownDescription": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL` . For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL` . For RDS for Microsoft SQL Server, specify `SQLSERVER` .", "title": "EngineFamily", "type": "string" }, @@ -225831,7 +225831,7 @@ "additionalProperties": false, "properties": { "AuthScheme": { - "markdownDescription": "The type of authentication that the proxy uses for connections from the proxy to the underlying database.\n\nValid Values: `SECRETS`", + "markdownDescription": "The type of authentication that the proxy uses for connections from the proxy to the underlying database.", "title": "AuthScheme", "type": "string" }, @@ -225846,7 +225846,7 @@ "type": "string" }, "IAMAuth": { - "markdownDescription": "Whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. The `ENABLED` value is valid only for proxies with RDS for Microsoft SQL Server.\n\nValid Values: `ENABLED | DISABLED | REQUIRED`", + "markdownDescription": "A value that indicates whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. The `ENABLED` value is valid only for proxies with RDS for Microsoft SQL Server.", "title": "IAMAuth", "type": "string" }, @@ -225862,12 +225862,12 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "A key is the required name of the tag. The string value can be 1-128 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").", + "markdownDescription": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", "title": "Key", "type": "string" }, "Value": { - "markdownDescription": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").", + "markdownDescription": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . 
The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", "title": "Value", "type": "string" } @@ -225928,7 +225928,7 @@ "type": "array" }, "TargetRole": { - "markdownDescription": "A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.\n\nValid Values: `READ_WRITE | READ_ONLY`", + "markdownDescription": "A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.", "title": "TargetRole", "type": "string" }, @@ -225981,12 +225981,12 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").", + "markdownDescription": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", "title": "Key", "type": "string" }, "Value": { - "markdownDescription": "Metadata assigned to a DB instance consisting of a key-value pair.", + "markdownDescription": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", "title": "Value", "type": "string" } @@ -226030,7 +226030,7 @@ "properties": { "ConnectionPoolConfigurationInfo": { "$ref": "#/definitions/AWS::RDS::DBProxyTargetGroup.ConnectionPoolConfigurationInfoFormat", - "markdownDescription": "Settings that control the size and behavior of the connection pool associated with a `DBProxyTargetGroup` .", + "markdownDescription": "Displays the settings that control the size and behavior of the connection pool associated with a `DBProxyTarget` .", "title": "ConnectionPoolConfigurationInfo" }, "DBClusterIdentifiers": { @@ -226165,7 +226165,7 @@ "type": "array" }, "EC2VpcId": { - "markdownDescription": "The identifier of an Amazon VPC. This property indicates the VPC that this DB security group belongs to.\n\n> The `EC2VpcId` property is for backward compatibility with older regions, and is no longer recommended for providing security information to an RDS DB instance.", + "markdownDescription": "The identifier of an Amazon virtual private cloud (VPC). 
This property indicates the VPC that this DB security group belongs to.\n\n> This property is included for backwards compatibility and is no longer recommended for providing security information to an RDS DB instance.", "title": "EC2VpcId", "type": "string" }, @@ -226178,7 +226178,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB security group.", + "markdownDescription": "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n\nFor more information, see [Tagging Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide* or [Tagging Amazon Aurora and Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html) in the *Amazon Aurora User Guide* .", "title": "Tags", "type": "array" } @@ -226364,7 +226364,7 @@ "type": "string" }, "DBSubnetGroupName": { - "markdownDescription": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints: Must contain no more than 255 lowercase alphanumeric characters or hyphens. Must not be \"Default\".\n\nExample: `mysubnetgroup`", + "markdownDescription": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints:\n\n- Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens.\n- Must not be default.\n- First character must be a letter.\n\nExample: `mydbsubnetgroup`", "title": "DBSubnetGroupName", "type": "string" }, @@ -226380,7 +226380,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB subnet group.", + "markdownDescription": "Tags to assign to the DB subnet group.", "title": "Tags", "type": "array" } @@ -226469,12 +226469,12 @@ "items": { "type": "string" }, - "markdownDescription": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If a `SourceIds` value is supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.", + "markdownDescription": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. 
It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If `SourceIds` are supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.\n- If the source type is an RDS Proxy, a `DBProxyName` value must be supplied.", "title": "SourceIds", "type": "array" }, "SourceType": { - "markdownDescription": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, set this parameter to `db-instance` . If this value isn't specified, all events are returned.\n\nValid values: `db-instance` | `db-cluster` | `db-parameter-group` | `db-security-group` | `db-snapshot` | `db-cluster-snapshot`", + "markdownDescription": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to `db-instance` . For RDS Proxy events, specify `db-proxy` . If this value isn't specified, all events are returned.\n\nValid Values: `db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy | zero-etl | custom-engine-version | blue-green-deployment`", "title": "SourceType", "type": "string" }, @@ -226681,7 +226681,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of tags. For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide.* .", + "markdownDescription": "An optional array of key-value pairs to apply to this integration.", "title": "Tags", "type": "array" }, @@ -226767,7 +226767,7 @@ "items": { "$ref": "#/definitions/AWS::RDS::OptionGroup.OptionConfiguration" }, - "markdownDescription": "A list of options and the settings for each option.", + "markdownDescription": "A list of all available options for an option group.", "title": "OptionConfigurations", "type": "array" }, @@ -226785,7 +226785,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this option group.", + "markdownDescription": "Tags to assign to the option group.", "title": "Tags", "type": "array" } @@ -226825,7 +226825,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of DBSecurityGroupMembership name strings used for this option.", + "markdownDescription": "A list of DB security groups used for this option.", "title": "DBSecurityGroupMemberships", "type": "array" }, @@ -226856,7 +226856,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of VpcSecurityGroupMembership name strings used for this option.", + "markdownDescription": "A list of VPC security group names used for this option.", "title": "VpcSecurityGroupMemberships", "type": "array" } @@ -235610,7 +235610,7 @@ }, "VersioningConfiguration": { "$ref": "#/definitions/AWS::S3::Bucket.VersioningConfiguration", - "markdownDescription": "Enables multiple versions of all objects in this bucket. 
You might enable versioning to prevent objects from being deleted or overwritten by mistake or to archive objects so that you can retrieve previous versions of them.", + "markdownDescription": "Enables multiple versions of all objects in this bucket. You might enable versioning to prevent objects from being deleted or overwritten by mistake or to archive objects so that you can retrieve previous versions of them.\n\n> When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. We recommend that you wait for 15 minutes after enabling versioning before issuing write operations ( `PUT` or `DELETE` ) on objects in the bucket.", "title": "VersioningConfiguration" }, "WebsiteConfiguration": { @@ -242311,7 +242311,7 @@ "type": "array" }, "RejectedPatchesAction": { - "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *BLOCK* : Packages in the *Rejected patches* list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the *Rejected patches* list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as *InstalledRejected* .", + "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- **ALLOW_AS_DEPENDENCY** - *Linux and macOS* : A package in the rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `INSTALLED_OTHER` . This is the default action if no option is specified.\n\n*Windows Server* : Windows Server doesn't support the concept of package dependencies. If a package in the rejected patches list is already installed on the node, its status is reported as `INSTALLED_OTHER` . Any package not already installed on the node is skipped. This is the default action if no option is specified.\n- **BLOCK** - *All OSs* : Packages in the rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as `INSTALLED_REJECTED` .", "title": "RejectedPatchesAction", "type": "string" }, @@ -261018,7 +261018,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of Amazon CloudWatch alarms to be monitored during the deployment. The deployment fails and rolls back if any of these alarms go into the `ALARM` state.", + "markdownDescription": "A list of Amazon CloudWatch alarm names to be monitored during the deployment. The deployment fails and rolls back if any of these alarms go into the `ALARM` state.\n\n> Amazon CloudWatch considers nonexistent alarms to have an `OK` state. If you provide an invalid alarm name or provide the ARN of an alarm instead of its name, your deployment may not roll back correctly.", "title": "Alarms", "type": "array" },
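To ground the revised GameLift `Locations` description from this section, here is a minimal CloudFormation template sketch of a multi-location fleet. It is illustrative only: the resource and fleet names are hypothetical, the home Region is assumed to be `us-east-1`, and other required fleet properties (such as a build or script reference) are omitted.

```yaml
Resources:
  ExampleFleet:                          # hypothetical resource name
    Type: AWS::GameLift::Fleet
    Properties:
      Name: example-multi-location-fleet # hypothetical fleet name
      EC2InstanceType: c5.large
      # Per the description, the home location must be included alongside
      # any remote locations.
      Locations:
        - Location: us-east-1            # assumed home Region of the stack
        - Location: us-west-2            # remote location, as in the description's example
```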
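The reflowed `LogDestination` description embeds its examples as inline JSON fragments; the sketch below shows where the S3 fragment sits in a full `AWS::NetworkFirewall::LoggingConfiguration` resource. The firewall reference is hypothetical, and the bucket name and prefix reuse the `DOC-EXAMPLE-BUCKET` / `alerts` values from the description.

```yaml
Resources:
  ExampleFirewallLogging:                 # hypothetical resource name
    Type: AWS::NetworkFirewall::LoggingConfiguration
    Properties:
      FirewallArn: !Ref ExampleFirewall   # assumes a firewall defined elsewhere in the template
      LoggingConfiguration:
        LogDestinationConfigs:
          - LogType: ALERT
            LogDestinationType: S3
            LogDestination:               # the key:value mapping documented above
              bucketName: DOC-EXAMPLE-BUCKET
              prefix: alerts
```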
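Several of the RDS description updates in this section (`Description`, `Family`, `Parameters`, `Tags`) belong to `AWS::RDS::DBParameterGroup`; a minimal sketch tying them together follows. The group name and parameter value are illustrative; `mysql8.0` is one of the families returned by the `describe-db-engine-versions` command quoted in the description.

```yaml
Resources:
  ExampleDBParameterGroup:               # hypothetical resource name
    Type: AWS::RDS::DBParameterGroup
    Properties:
      Description: Example parameter group for MySQL 8.0
      Family: mysql8.0                   # must be compatible with the target engine and version
      Parameters:
        max_connections: "250"           # illustrative parameter choice
      Tags:
        - Key: environment
          Value: test
```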