diff --git a/.all-contributorsrc b/.all-contributorsrc index 0cd6820d705c..c1799ef26828 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -895,6 +895,69 @@ "contributions": [ "code" ] + }, + { + "login": "faucct", + "name": "Nikita Sokolov", + "avatar_url": "https://avatars.githubusercontent.com/u/5202503?v=4", + "profile": "https://github.com/faucct", + "contributions": [ + "code" + ] + }, + { + "login": "sugmanue", + "name": "Manuel Sugawara", + "avatar_url": "https://avatars.githubusercontent.com/u/108146565?v=4", + "profile": "https://github.com/sugmanue", + "contributions": [ + "code" + ] + }, + { + "login": "anirudh9391", + "name": "Anirudh", + "avatar_url": "https://avatars.githubusercontent.com/u/15699250?v=4", + "profile": "https://github.com/anirudh9391", + "contributions": [ + "code" + ] + }, + { + "login": "haydenbaker", + "name": "Hayden Baker", + "avatar_url": "https://avatars.githubusercontent.com/u/26096419?v=4", + "profile": "https://github.com/haydenbaker", + "contributions": [ + "code" + ] + }, + { + "login": "gosar", + "name": "Jaykumar Gosar", + "avatar_url": "https://avatars.githubusercontent.com/u/5666661?v=4", + "profile": "https://github.com/gosar", + "contributions": [ + "code" + ] + }, + { + "login": "graebm", + "name": "Michael Graeb", + "avatar_url": "https://avatars.githubusercontent.com/u/24399397?v=4", + "profile": "https://github.com/graebm", + "contributions": [ + "code" + ] + }, + { + "login": "mgrundie-r7", + "name": "Michael Grundie", + "avatar_url": "https://avatars.githubusercontent.com/u/103498312?v=4", + "profile": "https://github.com/mgrundie-r7", + "contributions": [ + "code" + ] } ], "contributorsPerLine": 7, diff --git a/.brazil.json b/.brazil.json index 9d9a3f897c06..e554078b10e0 100644 --- a/.brazil.json +++ b/.brazil.json @@ -101,7 +101,8 @@ "test-utils": { "skipImport": true }, "tests-coverage-reporting": { "skipImport": true }, "third-party": { "skipImport": true }, - "third-party-slf4j-api": { 
"skipImport": true } + "third-party-slf4j-api": { "skipImport": true }, + "crt-unavailable-tests": { "skipImport": true } }, "dependencies": { diff --git a/.changes/2.23.0.json b/.changes/2.23.x/2.23.0.json similarity index 100% rename from .changes/2.23.0.json rename to .changes/2.23.x/2.23.0.json diff --git a/.changes/2.23.x/2.23.1.json b/.changes/2.23.x/2.23.1.json new file mode 100644 index 000000000000..d659e4256df5 --- /dev/null +++ b/.changes/2.23.x/2.23.1.json @@ -0,0 +1,60 @@ +{ + "version": "2.23.1", + "date": "2024-01-11", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fix IllegalArgumentException in FullJitterBackoffStrategy when base delay and max backoff time are zero." + }, + { + "type": "feature", + "category": "AWS IoT", + "contributor": "", + "description": "Add ConflictException to Update APIs of AWS IoT Software Package Catalog" + }, + { + "type": "feature", + "category": "AWS IoT FleetWise", + "contributor": "", + "description": "The following dataTypes have been removed: CUSTOMER_DECODED_INTERFACE in NetworkInterfaceType; CUSTOMER_DECODED_SIGNAL_INFO_IS_NULL in SignalDecoderFailureReason; CUSTOMER_DECODED_SIGNAL_NETWORK_INTERFACE_INFO_IS_NULL in NetworkInterfaceFailureReason; CUSTOMER_DECODED_SIGNAL in SignalDecoderType" + }, + { + "type": "feature", + "category": "AWS Secrets Manager", + "contributor": "", + "description": "Doc only update for Secrets Manager" + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "This release adds support for adding an ElasticBlockStorage volume configurations in ECS RunTask/StartTask/CreateService/UpdateService APIs. The configuration allows for attaching EBS volumes to ECS Tasks." 
+ }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds support for adding an ElasticBlockStorage volume configurations in ECS RunTask/StartTask/CreateService/UpdateService APIs. The configuration allows for attaching EBS volumes to ECS Tasks." + }, + { + "type": "feature", + "category": "Amazon EventBridge", + "contributor": "", + "description": "Adding AppSync as an EventBridge Target" + }, + { + "type": "feature", + "category": "Amazon WorkSpaces", + "contributor": "", + "description": "Added AWS Workspaces RebootWorkspaces API - Extended Reboot documentation update" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.10.json b/.changes/2.23.x/2.23.10.json new file mode 100644 index 000000000000..275399877c37 --- /dev/null +++ b/.changes/2.23.x/2.23.10.json @@ -0,0 +1,36 @@ +{ + "version": "2.23.10", + "date": "2024-01-24", + "entries": [ + { + "type": "feature", + "category": "AWS Outposts", + "contributor": "", + "description": "DeviceSerialNumber parameter is now optional in StartConnection API" + }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "contributor": "", + "description": "Add DeprecationDate and SoftwareVersion to response of ListGateways." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "Documentation updates for Amazon ECS." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Introduced a new clientToken request parameter on CreateNetworkAcl and CreateRouteTable APIs. The clientToken parameter allows idempotent operations on the APIs." 
+ }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "This release adds support for Aurora Limitless Database." + } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.11.json b/.changes/2.23.x/2.23.11.json new file mode 100644 index 000000000000..6feccf4f3018 --- /dev/null +++ b/.changes/2.23.x/2.23.11.json @@ -0,0 +1,18 @@ +{ + "version": "2.23.11", + "date": "2024-01-25", + "entries": [ + { + "type": "feature", + "category": "AWS Certificate Manager Private Certificate Authority", + "contributor": "", + "description": "AWS Private CA now supports an option to omit the CDP extension from issued certificates, when CRL revocation is enabled." + }, + { + "type": "feature", + "category": "Amazon Lightsail", + "contributor": "", + "description": "This release adds support for IPv6-only instance plans." + } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.12.json b/.changes/2.23.x/2.23.12.json new file mode 100644 index 000000000000..bfd14cd9caff --- /dev/null +++ b/.changes/2.23.x/2.23.12.json @@ -0,0 +1,36 @@ +{ + "version": "2.23.12", + "date": "2024-01-26", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fixed bug where the ProfileCredentialsProvider would re-read the credentials file with each request by default." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Improved performance of chunk-encoded streaming uploads, like S3's PutObject." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "Update list and string length limits for predefined attributes." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Amazon SageMaker Automatic Model Tuning now provides an API to programmatically delete tuning jobs." 
+ }, + { + "type": "feature", + "category": "Inspector2", + "contributor": "", + "description": "This release adds ECR container image scanning based on their lastRecordedPullTime." + } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.13.json b/.changes/2.23.x/2.23.13.json new file mode 100644 index 000000000000..8d54dd73b598 --- /dev/null +++ b/.changes/2.23.x/2.23.13.json @@ -0,0 +1,54 @@ +{ + "version": "2.23.13", + "date": "2024-01-29", + "entries": [ + { + "type": "bugfix", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "S3 client configured with crossRegionEnabled(true) will now use us-east-1 regional endpoint instead of the global endpoint. See [#4720](https://github.com/aws/aws-sdk-java-v2/issues/4720)." + }, + { + "type": "feature", + "category": "Amazon Comprehend", + "contributor": "", + "description": "Comprehend PII analysis now supports Spanish input documents." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "EC2 Fleet customers who use attribute based instance-type selection can now intuitively define their Spot instances price protection limit as a percentage of the lowest priced On-Demand instance type." + }, + { + "type": "feature", + "category": "Amazon Import/Export Snowball", + "contributor": "", + "description": "Modified description of createaddress to include direction to add path when providing a JSON file." + }, + { + "type": "feature", + "category": "AmazonMWAA", + "contributor": "", + "description": "This release adds MAINTENANCE environment status for Amazon MWAA environments." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Introduced support for the InsufficientDBInstanceCapacityFault error in the RDS RestoreDBClusterFromSnapshot and RestoreDBClusterToPointInTime API methods. 
This provides enhanced error handling, ensuring a more robust experience." + }, + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "EC2 Auto Scaling customers who use attribute based instance-type selection can now intuitively define their Spot instances price protection limit as a percentage of the lowest priced On-Demand instance type." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.14.json b/.changes/2.23.x/2.23.14.json new file mode 100644 index 000000000000..b47f5e067ec8 --- /dev/null +++ b/.changes/2.23.x/2.23.14.json @@ -0,0 +1,24 @@ +{ + "version": "2.23.14", + "date": "2024-01-30", + "entries": [ + { + "type": "feature", + "category": "Amazon DataZone", + "contributor": "", + "description": "Add new skipDeletionCheck to DeleteDomain. Add new skipDeletionCheck to DeleteProject which also automatically deletes dependent objects" + }, + { + "type": "feature", + "category": "Amazon Route 53", + "contributor": "", + "description": "Update the SDKs for text changes in the APIs." + }, + { + "type": "feature", + "category": "Amazon S3", + "contributor": "", + "description": "Reduce memory usage when request-level plugins aren't used." + } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.15.json b/.changes/2.23.x/2.23.15.json new file mode 100644 index 000000000000..82b690010410 --- /dev/null +++ b/.changes/2.23.x/2.23.15.json @@ -0,0 +1,42 @@ +{ + "version": "2.23.15", + "date": "2024-01-31", + "entries": [ + { + "type": "feature", + "category": "AWS CloudFormation", + "contributor": "", + "description": "CloudFormation IaC generator allows you to scan existing resources in your account and select resources to generate a template for a new or existing CloudFormation stack." 
+ }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Update page size limits for GetJobRuns and GetTriggers APIs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Reduce how many times input data is copied when writing to chunked encoded operations, like S3's PutObject." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "contributor": "", + "description": "This release adds an optional Duration parameter to StateManager Associations. This allows customers to specify how long an apply-only-on-cron association execution should run. Once the specified Duration is out all the ongoing cancellable commands or automations are cancelled." + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "contributor": "", + "description": "This release enables unhealthy target draining intervals for Network Load Balancers." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.16.json b/.changes/2.23.x/2.23.16.json new file mode 100644 index 000000000000..3ecf6a199421 --- /dev/null +++ b/.changes/2.23.x/2.23.16.json @@ -0,0 +1,48 @@ +{ + "version": "2.23.16", + "date": "2024-02-01", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "This release includes support for broadcast-mixed audio description tracks." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Switching a set of services onto the new SRA (Smithy Reference Architecture) identity and auth logic that was released in v2.21.0. For a list of individual services affected, please check the committed files." 
+ }, + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "contributor": "", + "description": "Added CreateIdentityProvider and UpdateIdentityProvider details for new SAML IdP features" + }, + { + "type": "feature", + "category": "Amazon Interactive Video Service", + "contributor": "", + "description": "This release introduces a new resource Playback Restriction Policy which can be used to geo-restrict or domain-restrict channel stream playback when associated with a channel. New APIs to support this resource were introduced in the form of Create/Delete/Get/Update/List." + }, + { + "type": "feature", + "category": "Amazon Managed Blockchain Query", + "contributor": "", + "description": "This release adds support for transactions that have not reached finality. It also removes support for the status property from the response of the GetTransaction operation. You can use the confirmationStatus and executionStatus properties to determine the status of the transaction." + }, + { + "type": "feature", + "category": "Amazon Neptune Graph", + "contributor": "", + "description": "Adding new APIs in SDK for Amazon Neptune Analytics. These APIs include operations to execute, cancel, list queries and get the graph summary." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.17.json b/.changes/2.23.x/2.23.17.json new file mode 100644 index 000000000000..1dbeba63d1ca --- /dev/null +++ b/.changes/2.23.x/2.23.17.json @@ -0,0 +1,36 @@ +{ + "version": "2.23.17", + "date": "2024-02-02", + "entries": [ + { + "type": "feature", + "category": "AWS CRT-based S3 Client", + "contributor": "", + "description": "Allow users to configure future completion executor on the AWS CRT-based S3 client via `S3CrtAsyncClientBuilder#futureCompletionExecutor`. 
See [#4879](https://github.com/aws/aws-sdk-java-v2/issues/4879)" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Adds setting to disable making EC2 Instance Metadata Service (IMDS) calls without a token header when prefetching a token does not work. This feature can be configured through environment variables (AWS_EC2_METADATA_V1_DISABLED), system property (aws.disableEc2MetadataV1) or AWS config file (ec2_metadata_v1_disabled). When you configure this setting to true, no calls without token headers will be made to IMDS." + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "contributor": "", + "description": "Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Amazon SageMaker Canvas adds GenerativeAiSettings support for CanvasAppSettings." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.18.json b/.changes/2.23.x/2.23.18.json new file mode 100644 index 000000000000..a1ebc6bf59f4 --- /dev/null +++ b/.changes/2.23.x/2.23.18.json @@ -0,0 +1,18 @@ +{ + "version": "2.23.18", + "date": "2024-02-05", + "entries": [ + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Introduce Catalog Encryption Role within Glue Data Catalog Settings. 
Introduce SASL/PLAIN as an authentication method for Glue Kafka connections" + }, + { + "type": "feature", + "category": "Amazon WorkSpaces", + "contributor": "", + "description": "Added definitions of various WorkSpace states" + } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.19.json b/.changes/2.23.x/2.23.19.json new file mode 100644 index 000000000000..f2dc9e342a49 --- /dev/null +++ b/.changes/2.23.x/2.23.19.json @@ -0,0 +1,42 @@ +{ + "version": "2.23.19", + "date": "2024-02-06", + "entries": [ + { + "type": "feature", + "category": "AWS AppSync", + "contributor": "", + "description": "Support for environment variables in AppSync GraphQL APIs" + }, + { + "type": "feature", + "category": "AWS WAFV2", + "contributor": "", + "description": "You can now delete an API key that you've created for use with your CAPTCHA JavaScript integration API." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Logs", + "contributor": "", + "description": "This release adds a new field, logGroupArn, to the response of the logs:DescribeLogGroups action." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "This release is a documentation only update to address customer issues." + }, + { + "type": "feature", + "category": "Amazon Elasticsearch Service", + "contributor": "", + "description": "This release adds clear visibility to the customers on the changes that they make on the domain." + }, + { + "type": "feature", + "category": "Amazon OpenSearch Service", + "contributor": "", + "description": "This release adds clear visibility to the customers on the changes that they make on the domain." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.2.json b/.changes/2.23.x/2.23.2.json new file mode 100644 index 000000000000..3b3fc6570f88 --- /dev/null +++ b/.changes/2.23.x/2.23.2.json @@ -0,0 +1,60 @@ +{ + "version": "2.23.2", + "date": "2024-01-12", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fix shading of artifacts in the `bundle` by not `org.apache.log4j.*` packages. This allows proper binding of `commons-logging` to Log4J and enables dependencies that use commons logging (e.g. Apache HTTP Client) to properly bind to Log4j." + }, + { + "type": "feature", + "category": "AWS S3 Control", + "contributor": "", + "description": "S3 On Outposts team adds dualstack endpoints support for S3Control and S3Outposts API calls." + }, + { + "type": "feature", + "category": "AWS Supply Chain", + "contributor": "", + "description": "This release includes APIs CreateBillOfMaterialsImportJob and GetBillOfMaterialsImportJob." + }, + { + "type": "feature", + "category": "AWS Transfer Family", + "contributor": "", + "description": "AWS Transfer Family now supports static IP addresses for SFTP & AS2 connectors and for async MDNs on AS2 servers." + }, + { + "type": "feature", + "category": "Amazon Connect Participant Service", + "contributor": "", + "description": "Introduce new Supervisor participant role" + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "Supervisor Barge for Chat is now supported through the MonitorContact API." + }, + { + "type": "feature", + "category": "Amazon Location Service", + "contributor": "", + "description": "Location SDK documentation update. Added missing fonts to the MapConfiguration data type. Updated note for the SubMunicipality property in the place data type." 
+ }, + { + "type": "feature", + "category": "AmazonMWAA", + "contributor": "", + "description": "This Amazon MWAA feature release includes new fields in CreateWebLoginToken response model. The new fields IamIdentity and AirflowIdentity will let you match identifications, as the Airflow identity length is currently hashed to 64 characters." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.20.json b/.changes/2.23.x/2.23.20.json new file mode 100644 index 000000000000..27973be63027 --- /dev/null +++ b/.changes/2.23.x/2.23.20.json @@ -0,0 +1,54 @@ +{ + "version": "2.23.20", + "date": "2024-02-07", + "entries": [ + { + "type": "bugfix", + "category": "AWS CRT Sync HTTP Client", + "contributor": "", + "description": "Fixed an issue where `CancellationException` was thrown incorrectly from AWS CRT Sync HTTP client when execution time exceeded the total configured API call attempt timeout or API call timeout. Now it throws `ApiCallAttemptTimeoutException`/`ApiCallTimeoutException` accordingly. See [#4820](https://github.com/aws/aws-sdk-java-v2/issues/4820)" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Bump `aws-crt` version to `0.29.9`" + }, + { + "type": "feature", + "category": "AWS DataSync", + "contributor": "", + "description": "AWS DataSync now supports manifests for specifying files or objects to transfer." + }, + { + "type": "feature", + "category": "Amazon CloudFront KeyValueStore", + "contributor": "", + "description": "Switching CloudFront KeyValueStore onto the new SRA (Smithy Reference Architecture) identity and auth logic that was released in v2.21.0." 
+ }, + { + "type": "feature", + "category": "Amazon Lex Model Building V2", + "contributor": "", + "description": "This release introduces a new bot replication feature as part of Lex Global Resiliency offering. This feature leverages a new set of APIs that allow customers to create bot replicas and replicate changes to bots across regions." + }, + { + "type": "feature", + "category": "Amazon Redshift", + "contributor": "", + "description": "LisRecommendations API to fetch Amazon Redshift Advisor recommendations." + }, + { + "type": "feature", + "category": "AWS CRT-based S3 Client", + "contributor": "", + "description": "Exposes a setting to set the memory limit when making asynchronous calls with the CRT-based S3 client" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.21.json b/.changes/2.23.x/2.23.21.json new file mode 100644 index 000000000000..9dfcec53758d --- /dev/null +++ b/.changes/2.23.x/2.23.21.json @@ -0,0 +1,24 @@ +{ + "version": "2.23.21", + "date": "2024-02-08", + "entries": [ + { + "type": "feature", + "category": "AWS CodePipeline", + "contributor": "", + "description": "Add ability to execute pipelines with new parallel & queued execution modes and add support for triggers with filtering on branches and file paths." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "contributor": "", + "description": "General Interactions for Visuals; Waterfall Chart Color Configuration; Documentation Update" + }, + { + "type": "feature", + "category": "Amazon WorkSpaces", + "contributor": "", + "description": "This release introduces User-Decoupling feature. This feature allows Workspaces Core customers to provision workspaces without providing users. CreateWorkspaces and DescribeWorkspaces APIs will now take a new optional parameter \"WorkspaceName\"." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.3.json b/.changes/2.23.x/2.23.3.json new file mode 100644 index 000000000000..e5182afe5abc --- /dev/null +++ b/.changes/2.23.x/2.23.3.json @@ -0,0 +1,12 @@ +{ + "version": "2.23.3", + "date": "2024-01-13", + "entries": [ + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "This release will have ValidationException thrown if certain invalid app types are provided. The release will also throw ValidationException if more than 10 account ids are provided in VpcOnlyTrustedAccounts." + } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.4.json b/.changes/2.23.x/2.23.4.json new file mode 100644 index 000000000000..e8d459b39140 --- /dev/null +++ b/.changes/2.23.x/2.23.4.json @@ -0,0 +1,66 @@ +{ + "version": "2.23.4", + "date": "2024-01-16", + "entries": [ + { + "type": "feature", + "category": "AWS IoT", + "contributor": "", + "description": "Revert release of LogTargetTypes" + }, + { + "type": "feature", + "category": "AWS IoT FleetWise", + "contributor": "", + "description": "Updated APIs: SignalNodeType query parameter has been added to ListSignalCatalogNodesRequest and ListVehiclesResponse has been extended with attributes field." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "contributor": "", + "description": "Documentation updates for AWS Security Hub" + }, + { + "type": "feature", + "category": "Amazon Macie 2", + "contributor": "", + "description": "This release adds support for analyzing Amazon S3 objects that are encrypted using dual-layer server-side encryption with AWS KMS keys (DSSE-KMS). It also adds support for reporting DSSE-KMS details in statistics and metadata about encryption settings for S3 buckets and objects." + }, + { + "type": "feature", + "category": "Amazon Personalize", + "contributor": "", + "description": "Documentation updates for Amazon Personalize." 
+ }, + { + "type": "feature", + "category": "Amazon Personalize Runtime", + "contributor": "", + "description": "Documentation updates for Amazon Personalize" + }, + { + "type": "feature", + "category": "Amazon Rekognition", + "contributor": "", + "description": "This release adds ContentType and TaxonomyLevel attributes to DetectModerationLabels and GetMediaAnalysisJob API responses." + }, + { + "type": "feature", + "category": "Amazon S3", + "contributor": "", + "description": "Propagating client apiCallTimeout values to S3Express createSession calls. If existing, this value overrides the default timeout value of 10s when making the nested S3Express session credentials call." + }, + { + "type": "feature", + "category": "Payment Cryptography Control Plane", + "contributor": "", + "description": "Provide an additional option for key exchange using RSA wrap/unwrap in addition to tr-34/tr-31 in ImportKey and ExportKey operations. Added new key usage (type) TR31_M1_ISO_9797_1_MAC_KEY, for use with Generate/VerifyMac dataplane operations with ISO9797 Algorithm 1 MAC calculations." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.5.json b/.changes/2.23.x/2.23.5.json new file mode 100644 index 000000000000..279c0fabd431 --- /dev/null +++ b/.changes/2.23.x/2.23.5.json @@ -0,0 +1,36 @@ +{ + "version": "2.23.5", + "date": "2024-01-17", + "entries": [ + { + "type": "bugfix", + "category": "AWS CRT HTTP Client", + "contributor": "", + "description": "Fixed the issue in the AWS CRT HTTP client where the application could crash if stream.incrementWindow was invoked on a closed stream" + }, + { + "type": "feature", + "category": "AWS Backup Storage, Amazon CodeCatalyst, Amazon Cognito Identity, Amazon Cognito Identity Provider, AWS Identity and Access Management (IAM), Amazon Kinesis, AWS Elemental MediaStore Data Plane, Amazon Transcribe Service, Amazon Transcribe Streaming Service", + "contributor": "", + "description": "Switching a set of services onto the new SRA (Smithy Reference Architecture) identity and auth logic that was released in v2.21.0." + }, + { + "type": "feature", + "category": "AWS DynamoDB Enhanced Client", + "contributor": "", + "description": "Added support for `@DynamoDBAutoGeneratedUUID` to facilitate the automatic updating of DynamoDB attributes with random UUID." + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "contributor": "", + "description": "Updating note for enabling streams for UpdateTable." 
+ }, + { + "type": "feature", + "category": "Amazon Keyspaces", + "contributor": "", + "description": "This release adds support for Multi-Region Replication with provisioned tables, and Keyspaces auto scaling APIs" + } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.6.json b/.changes/2.23.x/2.23.6.json new file mode 100644 index 000000000000..d222dbdbb2ff --- /dev/null +++ b/.changes/2.23.x/2.23.6.json @@ -0,0 +1,54 @@ +{ + "version": "2.23.6", + "date": "2024-01-18", + "entries": [ + { + "type": "bugfix", + "category": "AWS CRT HTTP Client", + "contributor": "", + "description": "Fixed the issue in the AWS CRT sync HTTP client where the connection was left open after the stream was aborted." + }, + { + "type": "feature", + "category": "AWS B2B Data Interchange", + "contributor": "", + "description": "Increasing TestMapping inputFileContent file size limit to 5MB and adding file size limit 250KB for TestParsing input file. This release also includes exposing InternalServerException for Tag APIs." + }, + { + "type": "feature", + "category": "AWS CloudTrail", + "contributor": "", + "description": "This release adds a new API ListInsightsMetricData to retrieve metric data from CloudTrail Insights." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "GetMetricDataV2 now supports 3 groupings" + }, + { + "type": "feature", + "category": "Amazon Kinesis Firehose", + "contributor": "", + "description": "Allow support for Snowflake as a Kinesis Data Firehose delivery destination." + }, + { + "type": "feature", + "category": "Amazon SageMaker Feature Store Runtime", + "contributor": "", + "description": "Increase BatchGetRecord limits from 10 items to 100 items" + }, + { + "type": "feature", + "category": "Elastic Disaster Recovery Service", + "contributor": "", + "description": "Removed invalid and unnecessary default values." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.7.json b/.changes/2.23.x/2.23.7.json new file mode 100644 index 000000000000..4542440c718e --- /dev/null +++ b/.changes/2.23.x/2.23.7.json @@ -0,0 +1,42 @@ +{ + "version": "2.23.7", + "date": "2024-01-19", + "entries": [ + { + "type": "feature", + "category": "AWS CodeBuild", + "contributor": "", + "description": "Release CodeBuild Reserved Capacity feature" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "anirudh9391", + "description": "Allowing SDK plugins to read and modify S3's crossRegionEnabled and SQS's checksumValidationEnabled" + }, + { + "type": "feature", + "category": "Amazon Athena", + "contributor": "", + "description": "Introducing new NotebookS3LocationUri parameter to Athena ImportNotebook API. Payload is no longer required and either Payload or NotebookS3LocationUri needs to be provided (not both) for a successful ImportNotebook API call. If both are provided, an InvalidRequestException will be thrown." + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "contributor": "", + "description": "This release adds support for including ApproximateCreationDateTimePrecision configurations in EnableKinesisStreamingDestination API, adds the same as an optional field in the response of DescribeKinesisStreamingDestination, and adds support for a new UpdateKinesisStreamingDestination API." + }, + { + "type": "feature", + "category": "Amazon Q Connect", + "contributor": "", + "description": "Increased Quick Response name max length to 100" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.8.json b/.changes/2.23.x/2.23.8.json new file mode 100644 index 000000000000..6b1e50098a71 --- /dev/null +++ b/.changes/2.23.x/2.23.8.json @@ -0,0 +1,66 @@ +{ + "version": "2.23.8", + "date": "2024-01-22", + "entries": [ + { + "type": "bugfix", + "category": "AWS CRT HTTP Client", + "contributor": "", + "description": "Fixed a thread safety issue that could cause application to crash in the edge case where the SDK attempted to invoke `incrementWindow` after the stream is closed in AWS CRT HTTP Client." + }, + { + "type": "feature", + "category": "AWS AppConfig Data", + "contributor": "", + "description": "Fix FIPS Endpoints in aws-us-gov." + }, + { + "type": "feature", + "category": "AWS Cloud9", + "contributor": "", + "description": "Doc-only update around removing AL1 from list of available AMIs for Cloud9" + }, + { + "type": "feature", + "category": "AWS Organizations", + "contributor": "", + "description": "Doc only update for quota increase change" + }, + { + "type": "feature", + "category": "Amazon CloudFront KeyValueStore", + "contributor": "", + "description": "This release improves upon the DescribeKeyValueStore API by returning two additional fields, Status of the KeyValueStore and the FailureReason in case of failures during creation of KeyValueStore." + }, + { + "type": "feature", + "category": "Amazon Connect Cases", + "contributor": "", + "description": "This release adds the ability to view audit history on a case and introduces a new parameter, performedBy, for CreateCase and UpdateCase API's." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "This release adds support for Transport Layer Security (TLS) and Configurable Timeout to ECS Service Connect. 
TLS facilitates privacy and data security for inter-service communications, while Configurable Timeout allows customized per-request timeout and idle timeout for Service Connect services." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Documentation updates for Amazon EC2." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Introduced support for the InsufficientDBInstanceCapacityFault error in the RDS CreateDBCluster API method. This provides enhanced error handling, ensuring a more robust experience when creating database clusters with insufficient instance capacity." + }, + { + "type": "feature", + "category": "FinSpace User Environment Management service", + "contributor": "", + "description": "Allow customer to set zip default through command line arguments." + } + ] +} \ No newline at end of file diff --git a/.changes/2.23.x/2.23.9.json b/.changes/2.23.x/2.23.9.json new file mode 100644 index 000000000000..22699a5db363 --- /dev/null +++ b/.changes/2.23.x/2.23.9.json @@ -0,0 +1,12 @@ +{ + "version": "2.23.9", + "date": "2024-01-23", + "entries": [ + { + "type": "feature", + "category": "Inspector2", + "contributor": "", + "description": "This release adds support for CIS scans on EC2 instances." + } + ] +} \ No newline at end of file diff --git a/.changes/2.24.0.json b/.changes/2.24.0.json new file mode 100644 index 000000000000..6fe6e6c07f8d --- /dev/null +++ b/.changes/2.24.0.json @@ -0,0 +1,72 @@ +{ + "version": "2.24.0", + "date": "2024-02-09", + "entries": [ + { + "type": "bugfix", + "category": "Amazon S3", + "contributor": "", + "description": "Fix bug where PUT fails when using SSE-C with Checksum when using S3AsyncClient with multipart enabled. 
Enable CRC32 for putObject when using multipart client if checksum validation is not disabled and checksum is not set by user" + }, + { + "type": "feature", + "category": "AWS Batch", + "contributor": "", + "description": "This feature allows Batch to support configuration of repository credentials for jobs running on ECS" + }, + { + "type": "feature", + "category": "AWS IoT", + "contributor": "", + "description": "This release allows AWS IoT Core users to enable Online Certificate Status Protocol (OCSP) Stapling for TLS X.509 Server Certificates when creating and updating AWS IoT Domain Configurations with Custom Domain." + }, + { + "type": "feature", + "category": "AWS Price List Service", + "contributor": "", + "description": "Add Throttling Exception to all APIs." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "Documentation only update for Amazon ECS." + }, + { + "type": "feature", + "category": "Amazon Prometheus Service", + "contributor": "", + "description": "Overall documentation updates." + }, + { + "type": "feature", + "category": "Braket", + "contributor": "", + "description": "Creating a job will result in DeviceOfflineException when using an offline device, and DeviceRetiredException when using a retired device." + }, + { + "type": "feature", + "category": "Cost Optimization Hub", + "contributor": "", + "description": "Adding includeMemberAccounts field to the response of ListEnrollmentStatuses API." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated internal core logic for signing properties with non-default values to be codegen based instead of set at runtime." 
+ }, + { + "type": "feature", + "category": "Amazon S3", + "contributor": "", + "description": "Overriding signer properties for S3 through the deprecated non-public execution attributes in S3SignerExecutionAttribute no longer works with this release. The recommended approach is to use plugins in order to change these settings." + } + ] +} \ No newline at end of file diff --git a/.changes/2.24.1.json b/.changes/2.24.1.json new file mode 100644 index 000000000000..0cbdcd7f967f --- /dev/null +++ b/.changes/2.24.1.json @@ -0,0 +1,30 @@ +{ + "version": "2.24.1", + "date": "2024-02-12", + "entries": [ + { + "type": "feature", + "category": "AWS AppSync", + "contributor": "", + "description": "Adds support for new options on GraphqlAPIs, Resolvers and Data Sources for emitting Amazon CloudWatch metrics for enhanced monitoring of AppSync APIs." + }, + { + "type": "feature", + "category": "Amazon CloudWatch", + "contributor": "", + "description": "This release enables PutMetricData API request payload compression by default." + }, + { + "type": "feature", + "category": "Amazon Neptune Graph", + "contributor": "", + "description": "Adding a new option \"parameters\" for data plane api ExecuteQuery to support running parameterized query via SDK." + }, + { + "type": "feature", + "category": "Amazon Route 53 Domains", + "contributor": "", + "description": "This release adds bill contact support for RegisterDomain, TransferDomain, UpdateDomainContact and GetDomainDetail API." + } + ] +} \ No newline at end of file diff --git a/.changes/2.24.2.json b/.changes/2.24.2.json new file mode 100644 index 000000000000..8afef3107c35 --- /dev/null +++ b/.changes/2.24.2.json @@ -0,0 +1,54 @@ +{ + "version": "2.24.2", + "date": "2024-02-13", + "entries": [ + { + "type": "bugfix", + "category": "URL Connection Client", + "contributor": "", + "description": "Fix a bug where headers with multiple values don't have all values for that header sent on the wire. 
This leads to signature mismatch exceptions.\n\nFixes [#4746](https://github.com/aws/aws-sdk-java-v2/issues/4746)." + }, + { + "type": "feature", + "category": "AWS Marketplace Catalog Service", + "contributor": "", + "description": "AWS Marketplace Catalog API now supports setting intent on requests" + }, + { + "type": "feature", + "category": "AWS Resource Explorer", + "contributor": "", + "description": "Resource Explorer now uses newly supported IPv4 'amazonaws.com' endpoints by default." + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "contributor": "akiesler", + "description": "Add additional logical operator ('and' and 'or') methods to DynamoDB Expression" + }, + { + "type": "feature", + "category": "Amazon Lightsail", + "contributor": "", + "description": "This release adds support to upgrade the major version of a database." + }, + { + "type": "feature", + "category": "Amazon S3", + "contributor": "", + "description": "Automatically trim object metadata keys of whitespace for `PutObject` and `CreateMultipartUpload`." + }, + { + "type": "feature", + "category": "Amazon Security Lake", + "contributor": "", + "description": "Documentation updates for Security Lake" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.24.3.json b/.changes/2.24.3.json new file mode 100644 index 000000000000..3200357f20cb --- /dev/null +++ b/.changes/2.24.3.json @@ -0,0 +1,48 @@ +{ + "version": "2.24.3", + "date": "2024-02-14", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fixed an issue where NPE would be thrown if there was an empty event in the input for an event streaming operation." 
+ }, + { + "type": "bugfix", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "Fix for Issue [#4912](https://github.com/aws/aws-sdk-java-v2/issues/4912) where client region with AWS_GLOBAL calls failed for cross region access." + }, + { + "type": "feature", + "category": "AWS Control Tower", + "contributor": "", + "description": "Adds support for new Baseline and EnabledBaseline APIs for automating multi-account governance." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Switching half of the AWS service clients onto the new SRA (Smithy Reference Architecture) identity and auth logic that was released in v2.21.0. For a list of individual services affected, please check the committed files." + }, + { + "type": "feature", + "category": "Amazon Lookout for Equipment", + "contributor": "", + "description": "This feature allows customers to see pointwise model diagnostics results for their models." + }, + { + "type": "feature", + "category": "QBusiness", + "contributor": "", + "description": "This release adds the metadata-boosting feature, which allows customers to easily fine-tune the underlying ranking of retrieved RAG passages in order to optimize Q&A answer relevance. It also adds new feedback reasons for the PutFeedback API." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.24.4.json b/.changes/2.24.4.json new file mode 100644 index 000000000000..5aa3ce143562 --- /dev/null +++ b/.changes/2.24.4.json @@ -0,0 +1,66 @@ +{ + "version": "2.24.4", + "date": "2024-02-15", + "entries": [ + { + "type": "feature", + "category": "AWS Artifact", + "contributor": "", + "description": "This is the initial SDK release for AWS Artifact. AWS Artifact provides on-demand access to compliance and third-party compliance reports. 
This release includes access to List and Get reports, along with their metadata. This release also includes access to AWS Artifact notifications settings." + }, + { + "type": "feature", + "category": "AWS CodePipeline", + "contributor": "", + "description": "Add ability to override timeout on action level." + }, + { + "type": "feature", + "category": "AWS Secrets Manager", + "contributor": "", + "description": "Doc only update for Secrets Manager" + }, + { + "type": "feature", + "category": "Amazon Detective", + "contributor": "", + "description": "Doc only updates for content enhancement" + }, + { + "type": "feature", + "category": "Amazon GuardDuty", + "contributor": "", + "description": "Marked fields IpAddressV4, PrivateIpAddress, Email as Sensitive." + }, + { + "type": "feature", + "category": "Amazon HealthLake", + "contributor": "", + "description": "This release adds a new response parameter, JobProgressReport, to the DescribeFHIRImportJob and ListFHIRImportJobs API operation. JobProgressReport provides details on the progress of the import job on the server." + }, + { + "type": "feature", + "category": "Amazon OpenSearch Service", + "contributor": "", + "description": "Adds additional supported instance types." + }, + { + "type": "feature", + "category": "Amazon Polly", + "contributor": "", + "description": "Amazon Polly adds 1 new voice - Burcu (tr-TR)" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "This release adds a new API UpdateClusterSoftware for SageMaker HyperPod. This API allows users to patch HyperPod clusters with latest platform softwares." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.24.5.json b/.changes/2.24.5.json new file mode 100644 index 000000000000..dddde424c866 --- /dev/null +++ b/.changes/2.24.5.json @@ -0,0 +1,48 @@ +{ + "version": "2.24.5", + "date": "2024-02-16", + "entries": [ + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "Documentation-only updates for Lambda to clarify a number of existing actions and properties." + }, + { + "type": "feature", + "category": "Amazon Connect Participant Service", + "contributor": "", + "description": "Doc only update to GetTranscript API reference guide to inform users about presence of events in the chat transcript." + }, + { + "type": "feature", + "category": "Amazon EMR", + "contributor": "", + "description": "adds fine grained control over Unhealthy Node Replacement to Amazon ElasticMapReduce" + }, + { + "type": "feature", + "category": "Amazon Kinesis Firehose", + "contributor": "", + "description": "This release adds support for Data Message Extraction for decompressed CloudWatch logs, and to use a custom file extension or time zone for S3 destinations." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Doc only update for a valid option in DB parameter group" + }, + { + "type": "feature", + "category": "Amazon Simple Notification Service", + "contributor": "", + "description": "This release marks phone numbers as sensitive inputs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.24.6.json b/.changes/2.24.6.json new file mode 100644 index 000000000000..6e695f57af45 --- /dev/null +++ b/.changes/2.24.6.json @@ -0,0 +1,48 @@ +{ + "version": "2.24.6", + "date": "2024-02-19", + "entries": [ + { + "type": "feature", + "category": "AWS Amplify", + "contributor": "", + "description": "This release contains API changes that enable users to configure their Amplify domains with their own custom SSL/TLS certificate." + }, + { + "type": "feature", + "category": "AWS Config", + "contributor": "", + "description": "Documentation updates for the AWS Config CLI" + }, + { + "type": "feature", + "category": "AWS MediaTailor", + "contributor": "", + "description": "MediaTailor: marking #AdBreak.OffsetMillis as required." + }, + { + "type": "feature", + "category": "Amazon Interactive Video Service", + "contributor": "", + "description": "Changed description for latencyMode in Create/UpdateChannel and Channel/ChannelSummary." + }, + { + "type": "feature", + "category": "Amazon Keyspaces", + "contributor": "", + "description": "Documentation updates for Amazon Keyspaces" + }, + { + "type": "feature", + "category": "Amazon S3", + "contributor": "", + "description": "Add support for pause/resume upload for TransferManager with Java-based S3Client that has multipart enabled" + }, + { + "type": "feature", + "category": "chatbot", + "contributor": "", + "description": "This release adds support for AWS Chatbot. You can now monitor, operate, and troubleshoot your AWS resources with interactive ChatOps using the AWS SDK." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.24.7.json b/.changes/2.24.7.json new file mode 100644 index 000000000000..c039dbcc2fff --- /dev/null +++ b/.changes/2.24.7.json @@ -0,0 +1,24 @@ +{ + "version": "2.24.7", + "date": "2024-02-20", + "entries": [ + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "Add .NET 8 (dotnet8) Runtime support to AWS Lambda." + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "contributor": "", + "description": "Publishing quick fix for doc only update." + }, + { + "type": "feature", + "category": "Amazon Kinesis Firehose", + "contributor": "", + "description": "This release updates a few Firehose related APIs." + } + ] +} \ No newline at end of file diff --git a/.changes/2.24.8.json b/.changes/2.24.8.json new file mode 100644 index 000000000000..b1fbf8334e5d --- /dev/null +++ b/.changes/2.24.8.json @@ -0,0 +1,36 @@ +{ + "version": "2.24.8", + "date": "2024-02-21", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Add content-length header in Json and Xml Protocol Marshaller for String and Binary explicit Payloads." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "contributor": "", + "description": "MediaLive now supports the ability to restart pipelines in a running channel." + }, + { + "type": "feature", + "category": "AWS IoT Events", + "contributor": "", + "description": "Increase the maximum length of descriptions for Inputs, Detector Models, and Alarm Models" + }, + { + "type": "feature", + "category": "Amazon Lookout for Equipment", + "contributor": "", + "description": "This release adds a field exposing model quality to read APIs for models. It also adds a model quality field to the API response when creating an inference scheduler." 
+ }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "contributor": "", + "description": "This release adds support for sharing Systems Manager parameters with other AWS accounts." + } + ] +} \ No newline at end of file diff --git a/.changes/2.24.9.json b/.changes/2.24.9.json new file mode 100644 index 000000000000..329912dd615e --- /dev/null +++ b/.changes/2.24.9.json @@ -0,0 +1,24 @@ +{ + "version": "2.24.9", + "date": "2024-02-22", + "entries": [ + { + "type": "bugfix", + "category": "AWS CRT-based S3 client", + "contributor": "", + "description": "Fixed memory leak issue when a request was cancelled in the AWS CRT-based S3 client." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Internet Monitor", + "contributor": "", + "description": "This release adds IPv4 prefixes to health events" + }, + { + "type": "feature", + "category": "Amazon Kinesis Video Streams", + "contributor": "", + "description": "Increasing NextToken parameter length restriction for List APIs from 512 to 1024." + } + ] +} \ No newline at end of file diff --git a/.changes/next-release/feature-AWSSDKforJavav2-787c575.json b/.changes/next-release/feature-AWSSDKforJavav2-787c575.json new file mode 100644 index 000000000000..840c545b4531 --- /dev/null +++ b/.changes/next-release/feature-AWSSDKforJavav2-787c575.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "S3 Transfer Manager", + "contributor": "", + "description": "Make Transfer Manager work by default with S3AsyncClient when multipart configuration is enabled." 
+} diff --git a/.changes/next-release/feature-S3TransferManager-2e987ba.json b/.changes/next-release/feature-S3TransferManager-2e987ba.json new file mode 100644 index 000000000000..72f2cadc85c3 --- /dev/null +++ b/.changes/next-release/feature-S3TransferManager-2e987ba.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "S3 Transfer Manager", + "contributor": "", + "description": "Enable multipart configuration by default when creating a new S3TranferManager instance using the .create() method" +} diff --git a/.github/workflows/codebuild-ci.yml b/.github/workflows/codebuild-ci.yml index 13089a44cede..44f75f5f1698 100644 --- a/.github/workflows/codebuild-ci.yml +++ b/.github/workflows/codebuild-ci.yml @@ -1,10 +1,6 @@ name: AWS CodeBuild CI on: pull_request: - paths-ignore: - - '**.md' - - '.all-contributorsrc' - - 'docs/**' push: branches: - master @@ -133,4 +129,4 @@ jobs: - name: Run endpoints test uses: aws-actions/aws-codebuild-run-build@v1 with: - project-name: aws-sdk-java-v2-endpoints-test \ No newline at end of file + project-name: aws-sdk-java-v2-endpoints-test diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a78d7a27527..f3188a6da72d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,513 +1,270 @@ #### 👋 _Looking for changelogs for older versions? You can find them in the [changelogs](./changelogs) directory._ -# __2.23.20__ __2024-02-07__ -## __AWS CRT Sync HTTP Client__ +# __2.24.9__ __2024-02-22__ +## __AWS CRT-based S3 client__ - ### Bugfixes - - Fixed an issue where `CancellationException` was thrown incorrectly from AWS CRT Sync HTTP client when execution time exceeded the total configured API call attempt timeout or API call timeout. Now it throws `ApiCallAttemptTimeoutException`/`ApiCallTimeoutException` accordingly. See [#4820](https://github.com/aws/aws-sdk-java-v2/issues/4820) + - Fixed memory leak issue when a request was cancelled in the AWS CRT-based S3 client. 
-## __AWS CRT-based S3 Client__ +## __Amazon CloudWatch Internet Monitor__ - ### Features - - Exposes a setting to set the memory limit when making asynchronous calls with the CRT-based S3 client + - This release adds IPv4 prefixes to health events -## __AWS DataSync__ +## __Amazon Kinesis Video Streams__ - ### Features - - AWS DataSync now supports manifests for specifying files or objects to transfer. + - Increasing NextToken parameter length restriction for List APIs from 512 to 1024. -## __AWS SDK for Java v2__ - - ### Features - - Bump `aws-crt` version to `0.29.9` - - Updated endpoint and partition metadata. - -## __Amazon CloudFront KeyValueStore__ - - ### Features - - Switching CloudFront KeyValueStore onto the new SRA (Smithy Reference Architecture) identity and auth logic that was released in v2.21.0. - -## __Amazon Lex Model Building V2__ +# __2.24.8__ __2024-02-21__ +## __AWS Elemental MediaLive__ - ### Features - - This release introduces a new bot replication feature as part of Lex Global Resiliency offering. This feature leverages a new set of APIs that allow customers to create bot replicas and replicate changes to bots across regions. + - MediaLive now supports the ability to restart pipelines in a running channel. -## __Amazon Redshift__ +## __AWS IoT Events__ - ### Features - - LisRecommendations API to fetch Amazon Redshift Advisor recommendations. + - Increase the maximum length of descriptions for Inputs, Detector Models, and Alarm Models -# __2.23.19__ __2024-02-06__ -## __AWS AppSync__ - - ### Features - - Support for environment variables in AppSync GraphQL APIs - -## __AWS WAFV2__ - - ### Features - - You can now delete an API key that you've created for use with your CAPTCHA JavaScript integration API. - -## __Amazon CloudWatch Logs__ - - ### Features - - This release adds a new field, logGroupArn, to the response of the logs:DescribeLogGroups action. 
- -## __Amazon EC2 Container Service__ - - ### Features - - This release is a documentation only update to address customer issues. - -## __Amazon Elasticsearch Service__ - - ### Features - - This release adds clear visibility to the customers on the changes that they make on the domain. - -## __Amazon OpenSearch Service__ - - ### Features - - This release adds clear visibility to the customers on the changes that they make on the domain. - -# __2.23.18__ __2024-02-05__ -## __AWS Glue__ - - ### Features - - Introduce Catalog Encryption Role within Glue Data Catalog Settings. Introduce SASL/PLAIN as an authentication method for Glue Kafka connections +## __AWS SDK for Java v2__ + - ### Bugfixes + - Add content-length header in Json and Xml Protocol Marshaller for String and Binary explicit Payloads. -## __Amazon WorkSpaces__ +## __Amazon Lookout for Equipment__ - ### Features - - Added definitions of various WorkSpace states + - This release adds a field exposing model quality to read APIs for models. It also adds a model quality field to the API response when creating an inference scheduler. -# __2.23.17__ __2024-02-02__ -## __AWS CRT-based S3 Client__ +## __Amazon Simple Systems Manager (SSM)__ - ### Features - - Allow users to configure future completion executor on the AWS CRT-based S3 client via `S3CrtAsyncClientBuilder#futureCompletionExecutor`. See [#4879](https://github.com/aws/aws-sdk-java-v2/issues/4879) + - This release adds support for sharing Systems Manager parameters with other AWS accounts. -## __AWS SDK for Java v2__ +# __2.24.7__ __2024-02-20__ +## __AWS Lambda__ - ### Features - - Adds setting to disable making EC2 Instance Metadata Service (IMDS) calls without a token header when prefetching a token does not work. This feature can be configured through environment variables (AWS_EC2_METADATA_V1_DISABLED), system property (aws.disableEc2MetadataV1) or AWS config file (ec2_metadata_v1_disabled). 
When you configure this setting to true, no calls without token headers will be made to IMDS. - - Updated endpoint and partition metadata. + - Add .NET 8 (dotnet8) Runtime support to AWS Lambda. ## __Amazon DynamoDB__ - ### Features - - Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account. + - Publishing quick fix for doc only update. -## __Amazon SageMaker Service__ +## __Amazon Kinesis Firehose__ - ### Features - - Amazon SageMaker Canvas adds GenerativeAiSettings support for CanvasAppSettings. + - This release updates a few Firehose related APIs. -# __2.23.16__ __2024-02-01__ -## __AWS Elemental MediaConvert__ +# __2.24.6__ __2024-02-19__ +## __AWS Amplify__ - ### Features - - This release includes support for broadcast-mixed audio description tracks. + - This release contains API changes that enable users to configure their Amplify domains with their own custom SSL/TLS certificate. -## __AWS SDK for Java v2__ +## __AWS Config__ - ### Features - - Switching a set of services onto the new SRA (Smithy Reference Architecture) identity and auth logic that was released in v2.21.0. For a list of individual services affected, please check the committed files. - - Updated endpoint and partition metadata. + - Documentation updates for the AWS Config CLI -## __Amazon Cognito Identity Provider__ +## __AWS MediaTailor__ - ### Features - - Added CreateIdentityProvider and UpdateIdentityProvider details for new SAML IdP features + - MediaTailor: marking #AdBreak.OffsetMillis as required. ## __Amazon Interactive Video Service__ - ### Features - - This release introduces a new resource Playback Restriction Policy which can be used to geo-restrict or domain-restrict channel stream playback when associated with a channel. New APIs to support this resource were introduced in the form of Create/Delete/Get/Update/List. 
- -## __Amazon Managed Blockchain Query__ - - ### Features - - This release adds support for transactions that have not reached finality. It also removes support for the status property from the response of the GetTransaction operation. You can use the confirmationStatus and executionStatus properties to determine the status of the transaction. + - Changed description for latencyMode in Create/UpdateChannel and Channel/ChannelSummary. -## __Amazon Neptune Graph__ - - ### Features - - Adding new APIs in SDK for Amazon Neptune Analytics. These APIs include operations to execute, cancel, list queries and get the graph summary. - -# __2.23.15__ __2024-01-31__ -## __AWS CloudFormation__ - - ### Features - - CloudFormation IaC generator allows you to scan existing resources in your account and select resources to generate a template for a new or existing CloudFormation stack. - -## __AWS Glue__ - - ### Features - - Update page size limits for GetJobRuns and GetTriggers APIs. - -## __AWS SDK for Java v2__ - - ### Features - - Reduce how many times input data is copied when writing to chunked encoded operations, like S3's PutObject. - - Updated endpoint and partition metadata. - -## __Amazon Simple Systems Manager (SSM)__ - - ### Features - - This release adds an optional Duration parameter to StateManager Associations. This allows customers to specify how long an apply-only-on-cron association execution should run. Once the specified Duration is out all the ongoing cancellable commands or automations are cancelled. - -## __Elastic Load Balancing__ +## __Amazon Keyspaces__ - ### Features - - This release enables unhealthy target draining intervals for Network Load Balancers. + - Documentation updates for Amazon Keyspaces -# __2.23.14__ __2024-01-30__ -## __Amazon DataZone__ +## __Amazon S3__ - ### Features - - Add new skipDeletionCheck to DeleteDomain. 
Add new skipDeletionCheck to DeleteProject which also automatically deletes dependent objects + - Add support for pause/resume upload for TransferManager with Java-based S3Client that has multipart enabled -## __Amazon Route 53__ +## __chatbot__ - ### Features - - Update the SDKs for text changes in the APIs. + - This release adds support for AWS Chatbot. You can now monitor, operate, and troubleshoot your AWS resources with interactive ChatOps using the AWS SDK. -## __Amazon S3__ +# __2.24.5__ __2024-02-16__ +## __AWS Lambda__ - ### Features - - Reduce memory usage when request-level plugins aren't used. + - Documentation-only updates for Lambda to clarify a number of existing actions and properties. -# __2.23.13__ __2024-01-29__ ## __AWS SDK for Java v2__ - ### Features - Updated endpoint and partition metadata. -## __Amazon Comprehend__ +## __Amazon Connect Participant Service__ - ### Features - - Comprehend PII analysis now supports Spanish input documents. + - Doc only update to GetTranscript API reference guide to inform users about presence of events in the chat transcript. -## __Amazon Elastic Compute Cloud__ +## __Amazon EMR__ - ### Features - - EC2 Fleet customers who use attribute based instance-type selection can now intuitively define their Spot instances price protection limit as a percentage of the lowest priced On-Demand instance type. + - adds fine grained control over Unhealthy Node Replacement to Amazon ElasticMapReduce -## __Amazon Import/Export Snowball__ +## __Amazon Kinesis Firehose__ - ### Features - - Modified description of createaddress to include direction to add path when providing a JSON file. + - This release adds support for Data Message Extraction for decompressed CloudWatch logs, and to use a custom file extension or time zone for S3 destinations. 
## __Amazon Relational Database Service__ - ### Features - - Introduced support for the InsufficientDBInstanceCapacityFault error in the RDS RestoreDBClusterFromSnapshot and RestoreDBClusterToPointInTime API methods. This provides enhanced error handling, ensuring a more robust experience. + - Doc only update for a valid option in DB parameter group -## __Amazon Simple Storage Service__ - - ### Bugfixes - - S3 client configured with crossRegionEnabled(true) will now use us-east-1 regional endpoint instead of the global endpoint. See [#4720](https://github.com/aws/aws-sdk-java-v2/issues/4720). - -## __AmazonMWAA__ +## __Amazon Simple Notification Service__ - ### Features - - This release adds MAINTENANCE environment status for Amazon MWAA environments. + - This release marks phone numbers as sensitive inputs. -## __Auto Scaling__ +# __2.24.4__ __2024-02-15__ +## __AWS Artifact__ - ### Features - - EC2 Auto Scaling customers who use attribute based instance-type selection can now intuitively define their Spot instances price protection limit as a percentage of the lowest priced On-Demand instance type. + - This is the initial SDK release for AWS Artifact. AWS Artifact provides on-demand access to compliance and third-party compliance reports. This release includes access to List and Get reports, along with their metadata. This release also includes access to AWS Artifact notifications settings. -# __2.23.12__ __2024-01-26__ -## __AWS SDK for Java v2__ +## __AWS CodePipeline__ - ### Features - - Improved performance of chunk-encoded streaming uploads, like S3's PutObject. - - - ### Bugfixes - - Fixed bug where the ProfileCredentialsProvider would re-read the credentials file with each request by default. + - Add ability to override timeout on action level. -## __Amazon Connect Service__ - - ### Features - - Update list and string length limits for predefined attributes. 
- -## __Amazon SageMaker Service__ - - ### Features - - Amazon SageMaker Automatic Model Tuning now provides an API to programmatically delete tuning jobs. - -## __Inspector2__ - - ### Features - - This release adds ECR container image scanning based on their lastRecordedPullTime. - -# __2.23.11__ __2024-01-25__ -## __AWS Certificate Manager Private Certificate Authority__ - - ### Features - - AWS Private CA now supports an option to omit the CDP extension from issued certificates, when CRL revocation is enabled. - -## __Amazon Lightsail__ - - ### Features - - This release adds support for IPv6-only instance plans. - -# __2.23.10__ __2024-01-24__ -## __AWS Outposts__ - - ### Features - - DeviceSerialNumber parameter is now optional in StartConnection API - -## __AWS Storage Gateway__ - - ### Features - - Add DeprecationDate and SoftwareVersion to response of ListGateways. - -## __Amazon EC2 Container Service__ - - ### Features - - Documentation updates for Amazon ECS. - -## __Amazon Elastic Compute Cloud__ - - ### Features - - Introduced a new clientToken request parameter on CreateNetworkAcl and CreateRouteTable APIs. The clientToken parameter allows idempotent operations on the APIs. - -## __Amazon Relational Database Service__ - - ### Features - - This release adds support for Aurora Limitless Database. - -# __2.23.9__ __2024-01-23__ -## __Inspector2__ - - ### Features - - This release adds support for CIS scans on EC2 instances. - -# __2.23.8__ __2024-01-22__ -## __AWS AppConfig Data__ - - ### Features - - Fix FIPS Endpoints in aws-us-gov. - -## __AWS CRT HTTP Client__ - - ### Bugfixes - - Fixed a thread safety issue that could cause application to crash in the edge case where the SDK attempted to invoke `incrementWindow` after the stream is closed in AWS CRT HTTP Client. - -## __AWS Cloud9__ +## __AWS SDK for Java v2__ - ### Features - - Doc-only update around removing AL1 from list of available AMIs for Cloud9 + - Updated endpoint and partition metadata. 
-## __AWS Organizations__ +## __AWS Secrets Manager__ - ### Features - - Doc only update for quota increase change + - Doc only update for Secrets Manager -## __Amazon CloudFront KeyValueStore__ +## __Amazon Detective__ - ### Features - - This release improves upon the DescribeKeyValueStore API by returning two additional fields, Status of the KeyValueStore and the FailureReason in case of failures during creation of KeyValueStore. + - Doc only updates for content enhancement -## __Amazon Connect Cases__ +## __Amazon GuardDuty__ - ### Features - - This release adds the ability to view audit history on a case and introduces a new parameter, performedBy, for CreateCase and UpdateCase API's. + - Marked fields IpAddressV4, PrivateIpAddress, Email as Sensitive. -## __Amazon EC2 Container Service__ +## __Amazon HealthLake__ - ### Features - - This release adds support for Transport Layer Security (TLS) and Configurable Timeout to ECS Service Connect. TLS facilitates privacy and data security for inter-service communications, while Configurable Timeout allows customized per-request timeout and idle timeout for Service Connect services. + - This release adds a new response parameter, JobProgressReport, to the DescribeFHIRImportJob and ListFHIRImportJobs API operation. JobProgressReport provides details on the progress of the import job on the server. -## __Amazon Elastic Compute Cloud__ +## __Amazon OpenSearch Service__ - ### Features - - Documentation updates for Amazon EC2. + - Adds additional supported instance types. -## __Amazon Relational Database Service__ +## __Amazon Polly__ - ### Features - - Introduced support for the InsufficientDBInstanceCapacityFault error in the RDS CreateDBCluster API method. This provides enhanced error handling, ensuring a more robust experience when creating database clusters with insufficient instance capacity. 
+ - Amazon Polly adds 1 new voice - Burcu (tr-TR) -## __FinSpace User Environment Management service__ +## __Amazon SageMaker Service__ - ### Features - - Allow customer to set zip default through command line arguments. + - This release adds a new API UpdateClusterSoftware for SageMaker HyperPod. This API allows users to patch HyperPod clusters with latest platform softwares. -# __2.23.7__ __2024-01-19__ -## __AWS CodeBuild__ +# __2.24.3__ __2024-02-14__ +## __AWS Control Tower__ - ### Features - - Release CodeBuild Reserved Capacity feature + - Adds support for new Baseline and EnabledBaseline APIs for automating multi-account governance. ## __AWS SDK for Java v2__ - ### Features - - Allowing SDK plugins to read and modify S3's crossRegionEnabled and SQS's checksumValidationEnabled - - Contributed by: [@anirudh9391](https://github.com/anirudh9391) + - Switching half of the AWS service clients onto the new SRA (Smithy Reference Architecture) identity and auth logic that was released in v2.21.0. For a list of individual services affected, please check the committed files. - Updated endpoint and partition metadata. -## __Amazon Athena__ - - ### Features - - Introducing new NotebookS3LocationUri parameter to Athena ImportNotebook API. Payload is no longer required and either Payload or NotebookS3LocationUri needs to be provided (not both) for a successful ImportNotebook API call. If both are provided, an InvalidRequestException will be thrown. - -## __Amazon DynamoDB__ - - ### Features - - This release adds support for including ApproximateCreationDateTimePrecision configurations in EnableKinesisStreamingDestination API, adds the same as an optional field in the response of DescribeKinesisStreamingDestination, and adds support for a new UpdateKinesisStreamingDestination API. 
- -## __Amazon Q Connect__ - - ### Features - - Increased Quick Response name max length to 100 - -## __Contributors__ -Special thanks to the following contributors to this release: - -[@anirudh9391](https://github.com/anirudh9391) -# __2.23.6__ __2024-01-18__ -## __AWS B2B Data Interchange__ - - ### Features - - Increasing TestMapping inputFileContent file size limit to 5MB and adding file size limit 250KB for TestParsing input file. This release also includes exposing InternalServerException for Tag APIs. - -## __AWS CRT HTTP Client__ - ### Bugfixes - - Fixed the issue in the AWS CRT sync HTTP client where the connection was left open after the stream was aborted. - -## __AWS CloudTrail__ - - ### Features - - This release adds a new API ListInsightsMetricData to retrieve metric data from CloudTrail Insights. - -## __AWS SDK for Java v2__ - - ### Features - - Updated endpoint and partition metadata. + - Fixed an issue where NPE would be thrown if there was an empty event in the input for an event streaming operation. -## __Amazon Connect Service__ +## __Amazon Lookout for Equipment__ - ### Features - - GetMetricDataV2 now supports 3 groupings + - This feature allows customers to see pointwise model diagnostics results for their models. -## __Amazon Kinesis Firehose__ - - ### Features - - Allow support for Snowflake as a Kinesis Data Firehose delivery destination. - -## __Amazon SageMaker Feature Store Runtime__ - - ### Features - - Increase BatchGetRecord limits from 10 items to 100 items - -## __Elastic Disaster Recovery Service__ - - ### Features - - Removed invalid and unnecessary default values. 
- -# __2.23.5__ __2024-01-17__ -## __AWS Backup Storage, Amazon CodeCatalyst, Amazon Cognito Identity, Amazon Cognito Identity Provider, AWS Identity and Access Management (IAM), Amazon Kinesis, AWS Elemental MediaStore Data Plane, Amazon Transcribe Service, Amazon Transcribe Streaming Service__ - - ### Features - - Switching a set of services onto the new SRA (Smithy Reference Architecture) identity and auth logic that was released in v2.21.0. - -## __AWS CRT HTTP Client__ +## __Amazon Simple Storage Service__ - ### Bugfixes - - Fixed the issue in the AWS CRT HTTP client where the application could crash if stream.incrementWindow was invoked on a closed stream - -## __AWS DynamoDB Enhanced Client__ - - ### Features - - Added support for `@DynamoDBAutoGeneratedUUID` to facilitate the automatic updating of DynamoDB attributes with random UUID. - -## __Amazon DynamoDB__ - - ### Features - - Updating note for enabling streams for UpdateTable. + - Fix for Issue [#4912](https://github.com/aws/aws-sdk-java-v2/issues/4912) where client region with AWS_GLOBAL calls failed for cross region access. -## __Amazon Keyspaces__ +## __QBusiness__ - ### Features - - This release adds support for Multi-Region Replication with provisioned tables, and Keyspaces auto scaling APIs + - This release adds the metadata-boosting feature, which allows customers to easily fine-tune the underlying ranking of retrieved RAG passages in order to optimize Q&A answer relevance. It also adds new feedback reasons for the PutFeedback API. -# __2.23.4__ __2024-01-16__ -## __AWS IoT__ +# __2.24.2__ __2024-02-13__ +## __AWS Marketplace Catalog Service__ - ### Features - - Revert release of LogTargetTypes + - AWS Marketplace Catalog API now supports setting intent on requests -## __AWS IoT FleetWise__ +## __AWS Resource Explorer__ - ### Features - - Updated APIs: SignalNodeType query parameter has been added to ListSignalCatalogNodesRequest and ListVehiclesResponse has been extended with attributes field. 
+ - Resource Explorer now uses newly supported IPv4 'amazonaws.com' endpoints by default. ## __AWS SDK for Java v2__ - ### Features - Updated endpoint and partition metadata. -## __AWS SecurityHub__ - - ### Features - - Documentation updates for AWS Security Hub - -## __Amazon Macie 2__ - - ### Features - - This release adds support for analyzing Amazon S3 objects that are encrypted using dual-layer server-side encryption with AWS KMS keys (DSSE-KMS). It also adds support for reporting DSSE-KMS details in statistics and metadata about encryption settings for S3 buckets and objects. - -## __Amazon Personalize__ - - ### Features - - Documentation updates for Amazon Personalize. - -## __Amazon Personalize Runtime__ +## __Amazon DynamoDB__ - ### Features - - Documentation updates for Amazon Personalize + - Add additional logical operator ('and' and 'or') methods to DynamoDB Expression + - Contributed by: [@akiesler](https://github.com/akiesler) -## __Amazon Rekognition__ +## __Amazon Lightsail__ - ### Features - - This release adds ContentType and TaxonomyLevel attributes to DetectModerationLabels and GetMediaAnalysisJob API responses. + - This release adds support to upgrade the major version of a database. ## __Amazon S3__ - ### Features - - Propagating client apiCallTimeout values to S3Express createSession calls. If existing, this value overrides the default timeout value of 10s when making the nested S3Express session credentials call. - -## __Payment Cryptography Control Plane__ - - ### Features - - Provide an additional option for key exchange using RSA wrap/unwrap in addition to tr-34/tr-31 in ImportKey and ExportKey operations. Added new key usage (type) TR31_M1_ISO_9797_1_MAC_KEY, for use with Generate/VerifyMac dataplane operations with ISO9797 Algorithm 1 MAC calculations. - -# __2.23.3__ __2024-01-13__ -## __Amazon SageMaker Service__ - - ### Features - - This release will have ValidationException thrown if certain invalid app types are provided. 
The release will also throw ValidationException if more than 10 account ids are provided in VpcOnlyTrustedAccounts. - -# __2.23.2__ __2024-01-12__ -## __AWS S3 Control__ - - ### Features - - S3 On Outposts team adds dualstack endpoints support for S3Control and S3Outposts API calls. + - Automatically trim object metadata keys of whitespace for `PutObject` and `CreateMultipartUpload`. -## __AWS SDK for Java v2__ +## __Amazon Security Lake__ - ### Features - - Updated endpoint and partition metadata. + - Documentation updates for Security Lake +## __URL Connection Client__ - ### Bugfixes - - Fix shading of artifacts in the `bundle` by not `org.apache.log4j.*` packages. This allows proper binding of `commons-logging` to Log4J and enables dependencies that use commons logging (e.g. Apache HTTP Client) to properly bind to Log4j. + - Fix a bug where headers with multiple values don't have all values for that header sent on the wire. This leads to signature mismatch exceptions. -## __AWS Supply Chain__ - - ### Features - - This release includes APIs CreateBillOfMaterialsImportJob and GetBillOfMaterialsImportJob. + Fixes [#4746](https://github.com/aws/aws-sdk-java-v2/issues/4746). + +## __Contributors__ +Special thanks to the following contributors to this release: -## __AWS Transfer Family__ +[@akiesler](https://github.com/akiesler) +# __2.24.1__ __2024-02-12__ +## __AWS AppSync__ - ### Features - - AWS Transfer Family now supports static IP addresses for SFTP & AS2 connectors and for async MDNs on AS2 servers. + - Adds support for new options on GraphqlAPIs, Resolvers and Data Sources for emitting Amazon CloudWatch metrics for enhanced monitoring of AppSync APIs. -## __Amazon Connect Participant Service__ +## __Amazon CloudWatch__ - ### Features - - Introduce new Supervisor participant role + - This release enables PutMetricData API request payload compression by default. 
-## __Amazon Connect Service__ +## __Amazon Neptune Graph__ - ### Features - - Supervisor Barge for Chat is now supported through the MonitorContact API. + - Adding a new option "parameters" for data plane api ExecuteQuery to support running parameterized query via SDK. -## __Amazon Location Service__ +## __Amazon Route 53 Domains__ - ### Features - - Location SDK documentation update. Added missing fonts to the MapConfiguration data type. Updated note for the SubMunicipality property in the place data type. + - This release adds bill contact support for RegisterDomain, TransferDomain, UpdateDomainContact and GetDomainDetail API. -## __AmazonMWAA__ +# __2.24.0__ __2024-02-09__ +## __AWS Batch__ - ### Features - - This Amazon MWAA feature release includes new fields in CreateWebLoginToken response model. The new fields IamIdentity and AirflowIdentity will let you match identifications, as the Airflow identity length is currently hashed to 64 characters. + - This feature allows Batch to support configuration of repository credentials for jobs running on ECS -# __2.23.1__ __2024-01-11__ ## __AWS IoT__ - ### Features - - Add ConflictException to Update APIs of AWS IoT Software Package Catalog + - This release allows AWS IoT Core users to enable Online Certificate Status Protocol (OCSP) Stapling for TLS X.509 Server Certificates when creating and updating AWS IoT Domain Configurations with Custom Domain. -## __AWS IoT FleetWise__ +## __AWS Price List Service__ - ### Features - - The following dataTypes have been removed: CUSTOMER_DECODED_INTERFACE in NetworkInterfaceType; CUSTOMER_DECODED_SIGNAL_INFO_IS_NULL in SignalDecoderFailureReason; CUSTOMER_DECODED_SIGNAL_NETWORK_INTERFACE_INFO_IS_NULL in NetworkInterfaceFailureReason; CUSTOMER_DECODED_SIGNAL in SignalDecoderType + - Add Throttling Exception to all APIs. ## __AWS SDK for Java v2__ - ### Features - Updated endpoint and partition metadata. 
- - - ### Bugfixes - - Fix IllegalArgumentException in FullJitterBackoffStrategy when base delay and max backoff time are zero. - -## __AWS Secrets Manager__ - - ### Features - - Doc only update for Secrets Manager + - Updated internal core logic for signing properties with non-default values to be codegen based instead of set at runtime. ## __Amazon EC2 Container Service__ - ### Features - - This release adds support for adding an ElasticBlockStorage volume configurations in ECS RunTask/StartTask/CreateService/UpdateService APIs. The configuration allows for attaching EBS volumes to ECS Tasks. - -## __Amazon Elastic Compute Cloud__ - - ### Features - - This release adds support for adding an ElasticBlockStorage volume configurations in ECS RunTask/StartTask/CreateService/UpdateService APIs. The configuration allows for attaching EBS volumes to ECS Tasks. - -## __Amazon EventBridge__ - - ### Features - - Adding AppSync as an EventBridge Target - -## __Amazon WorkSpaces__ - - ### Features - - Added AWS Workspaces RebootWorkspaces API - Extended Reboot documentation update - -# __2.23.0__ __2024-01-10__ -## __AWS CRT HTTP Client__ - - ### Bugfixes - - Fixed the issue where `AWS_ERROR_HTTP_CONNECTION_CLOSED` was not retried by the SDK. - -## __AWS SDK for Java v2__ - - ### Features - - Updated endpoint and partition metadata. - -## __Amazon CloudWatch Logs__ - - ### Features - - Add support for account level subscription filter policies to PutAccountPolicy, DescribeAccountPolicies, and DeleteAccountPolicy APIs. Additionally, PutAccountPolicy has been modified with new optional "selectionCriteria" parameter for resource selection. + - Documentation only update for Amazon ECS. -## __Amazon Connect Wisdom Service__ +## __Amazon Prometheus Service__ - ### Features - - QueryAssistant and GetRecommendations will be discontinued starting June 1, 2024. 
To receive generative responses after March 1, 2024 you will need to create a new Assistant in the Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications. + - Overall documentation updates. -## __Amazon Location Service__ - - ### Features - - This release adds API support for custom layers for the maps service APIs: CreateMap, UpdateMap, DescribeMap. - -## __Amazon Q Connect__ - - ### Features - - QueryAssistant and GetRecommendations will be discontinued starting June 1, 2024. To receive generative responses after March 1, 2024 you will need to create a new Assistant in the Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications. - -## __Amazon Route 53__ +## __Amazon S3__ - ### Features - - Route53 now supports geoproximity routing in AWS regions + - Overriding signer properties for S3 through the deprecated non-public execution attributes in S3SignerExecutionAttribute no longer works with this release. The recommended approach is to use plugins in order to change these settings. -## __Amazon S3__ - ### Bugfixes - - Fixes a bug in DeleteObjects to properly encode the key in the request. + - Fix bug where PUT fails when using SSE-C with Checksum when using S3AsyncClient with multipart enabled. Enable CRC32 for putObject when using multipart client if checksum validation is not disabled and checksum is not set by user -## __AmazonConnectCampaignService__ +## __Braket__ - ### Features - - Minor pattern updates for Campaign and Dial Request API fields. + - Creating a job will result in DeviceOfflineException when using an offline device, and DeviceRetiredException when using a retired device. -## __Redshift Serverless__ +## __Cost Optimization Hub__ - ### Features - - Updates to ConfigParameter for RSS workgroup, removal of use_fips_ssl + - Adding includeMemberAccounts field to the response of ListEnrollmentStatuses API. 
diff --git a/README.md b/README.md index 48edb2281f46..74da76c4de48 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ [![Gitter](https://badges.gitter.im/aws/aws-sdk-java-v2.svg)](https://gitter.im/aws/aws-sdk-java-v2?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![codecov](https://codecov.io/gh/aws/aws-sdk-java-v2/branch/master/graph/badge.svg)](https://codecov.io/gh/aws/aws-sdk-java-v2) -[![All Contributors](https://img.shields.io/badge/all_contributors-99-orange.svg?style=flat-square)](#contributors-) +[![All Contributors](https://img.shields.io/badge/all_contributors-106-orange.svg?style=flat-square)](#contributors-) The **AWS SDK for Java 2.0** is a rewrite of 1.0 with some great new features. As with version 1.0, @@ -52,7 +52,7 @@ To automatically manage module versions (currently all modules have the same ver software.amazon.awssdk bom - 2.23.20 + 2.24.9 pom import @@ -86,12 +86,12 @@ Alternatively you can add dependencies for the specific services you use only: software.amazon.awssdk ec2 - 2.23.20 + 2.24.9 software.amazon.awssdk s3 - 2.23.20 + 2.24.9 ``` @@ -103,7 +103,7 @@ You can import the whole SDK into your project (includes *ALL* services). Please software.amazon.awssdk aws-sdk-java - 2.23.20 + 2.24.9 ``` @@ -319,6 +319,15 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d Michael Dimchuk
Michael Dimchuk

💻 + Nikita Sokolov
Nikita Sokolov

💻 + Anirudh
Anirudh

💻 + Manuel Sugawara
Manuel Sugawara

💻 + Hayden Baker
Hayden Baker

💻 + Jaykumar Gosar
Jaykumar Gosar

💻 + Michael Graeb
Michael Graeb

💻 + + + Michael Grundie
Michael Grundie

💻 diff --git a/archetypes/archetype-app-quickstart/pom.xml b/archetypes/archetype-app-quickstart/pom.xml index 318add245879..11f5e9515d8d 100644 --- a/archetypes/archetype-app-quickstart/pom.xml +++ b/archetypes/archetype-app-quickstart/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/archetypes/archetype-lambda/pom.xml b/archetypes/archetype-lambda/pom.xml index cda189bf07f1..9a8f18979371 100644 --- a/archetypes/archetype-lambda/pom.xml +++ b/archetypes/archetype-lambda/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 archetype-lambda diff --git a/archetypes/archetype-tools/pom.xml b/archetypes/archetype-tools/pom.xml index 2501196d933d..f3dcadd6ee8c 100644 --- a/archetypes/archetype-tools/pom.xml +++ b/archetypes/archetype-tools/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/archetypes/pom.xml b/archetypes/pom.xml index 34c705134ca7..aaf82a78fd20 100644 --- a/archetypes/pom.xml +++ b/archetypes/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 archetypes diff --git a/aws-sdk-java/pom.xml b/aws-sdk-java/pom.xml index 9031355fe360..52b2e388f8b3 100644 --- a/aws-sdk-java/pom.xml +++ b/aws-sdk-java/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../pom.xml aws-sdk-java @@ -1923,6 +1923,16 @@ Amazon AutoScaling, etc). 
supplychain ${awsjavasdk.version} + + software.amazon.awssdk + artifact + ${awsjavasdk.version} + + + software.amazon.awssdk + chatbot + ${awsjavasdk.version} + ${project.artifactId}-${project.version} diff --git a/bom-internal/pom.xml b/bom-internal/pom.xml index a4838c688a72..fc642c695d58 100644 --- a/bom-internal/pom.xml +++ b/bom-internal/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/bom/pom.xml b/bom/pom.xml index 98f46b7202f0..206dedad342c 100644 --- a/bom/pom.xml +++ b/bom/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../pom.xml bom @@ -2103,6 +2103,16 @@ supplychain ${awsjavasdk.version} + + software.amazon.awssdk + artifact + ${awsjavasdk.version} + + + software.amazon.awssdk + chatbot + ${awsjavasdk.version} + diff --git a/build-tools/src/main/resources/software/amazon/awssdk/checkstyle-suppressions.xml b/build-tools/src/main/resources/software/amazon/awssdk/checkstyle-suppressions.xml index 7d0333b86eac..17c617c0d12b 100644 --- a/build-tools/src/main/resources/software/amazon/awssdk/checkstyle-suppressions.xml +++ b/build-tools/src/main/resources/software/amazon/awssdk/checkstyle-suppressions.xml @@ -53,4 +53,15 @@ + + + + + + + + diff --git a/build-tools/src/main/resources/software/amazon/awssdk/checkstyle.xml b/build-tools/src/main/resources/software/amazon/awssdk/checkstyle.xml index e06660009921..532429b1cfd2 100644 --- a/build-tools/src/main/resources/software/amazon/awssdk/checkstyle.xml +++ b/build-tools/src/main/resources/software/amazon/awssdk/checkstyle.xml @@ -359,6 +359,14 @@ + + + + + + + + diff --git a/buildspecs/release-javadoc.yml b/buildspecs/release-javadoc.yml index a5dc4b3157d4..62132efd1cca 100644 --- a/buildspecs/release-javadoc.yml +++ b/buildspecs/release-javadoc.yml @@ -18,7 +18,7 @@ phases: commands: - python ./scripts/doc_crosslinks/generate_cross_link_data.py --apiDefinitionsBasePath 
./services/ --apiDefinitionsRelativeFilePath src/main/resources/codegen-resources/service-2.json --templateFilePath ./scripts/doc_crosslinks/crosslink_redirect.html --outputFilePath ./scripts/crosslink_redirect.html - mvn install -P quick -T1C - - mvn clean install javadoc:aggregate -B -Ppublic-javadoc -Dcheckstyle.skip -Dspotbugs.skip -DskipTests -Ddoclint=none -pl '!:protocol-tests,!:protocol-tests-core,!:codegen-generated-classes-test,!:sdk-benchmarks,!:s3-benchmarks,!:module-path-tests,!:test-utils,!:http-client-tests,!:tests-coverage-reporting,!:sdk-native-image-test,!:ruleset-testing-core,!:old-client-version-compatibility-test' + - mvn clean install javadoc:aggregate -B -Ppublic-javadoc -Dcheckstyle.skip -Dspotbugs.skip -DskipTests -Ddoclint=none -pl '!:protocol-tests,!:protocol-tests-core,!:codegen-generated-classes-test,!:sdk-benchmarks,!:s3-benchmarks,!:module-path-tests,!:test-utils,!:http-client-tests,!:tests-coverage-reporting,!:sdk-native-image-test,!:ruleset-testing-core,!:old-client-version-compatibility-test,!:crt-unavailable-tests' - RELEASE_VERSION=`mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec` - - aws s3 sync target/site/apidocs/ $DOC_PATH/$RELEASE_VERSION/ --acl="public-read" diff --git a/buildspecs/release-to-github.yml b/buildspecs/release-to-github.yml index 36cbaaebe466..f0b04621129a 100644 --- a/buildspecs/release-to-github.yml +++ b/buildspecs/release-to-github.yml @@ -24,6 +24,7 @@ phases: - | if [ `git ls-remote --tags public | grep refs/tags/$RELEASE_VERSION | wc -l` -ge "1" ]; then git push public :refs/tags/$RELEASE_VERSION + git tag --delete $RELEASE_VERSION fi - git tag -a $RELEASE_VERSION -m "AWS SDK for Java v2 ($RELEASE_VERSION)" - diff --git a/buildspecs/release-to-maven.yml b/buildspecs/release-to-maven.yml index 66a9fcde6525..061b1737de35 100644 --- a/buildspecs/release-to-maven.yml +++ b/buildspecs/release-to-maven.yml @@ -34,7 +34,7 @@ phases: awk 'BEGIN { 
var=ENVIRON["SDK_SIGNING_GPG_KEYNAME"] } { gsub("\\$SDK_SIGNING_GPG_KEYNAME", var, $0); print }' > \ $SETTINGS_XML - mvn clean deploy -B -s $SETTINGS_XML -Ppublishing -DperformRelease -Dspotbugs.skip -DskipTests -Dcheckstyle.skip -Djapicmp.skip -Ddoclint=none -pl !:protocol-tests,!:protocol-tests-core,!:codegen-generated-classes-test,!:sdk-benchmarks,!:module-path-tests,!:tests-coverage-reporting,!:stability-tests,!:sdk-native-image-test,!:auth-tests,!:s3-benchmarks,!:region-testing,!:old-client-version-compatibility-test -DautoReleaseAfterClose=true -DstagingProgressTimeoutMinutes=30 -Dmaven.wagon.httpconnectionManager.ttlSeconds=120 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true + mvn clean deploy -B -s $SETTINGS_XML -Ppublishing -DperformRelease -Dspotbugs.skip -DskipTests -Dcheckstyle.skip -Djapicmp.skip -Ddoclint=none -pl !:protocol-tests,!:protocol-tests-core,!:codegen-generated-classes-test,!:sdk-benchmarks,!:module-path-tests,!:tests-coverage-reporting,!:stability-tests,!:sdk-native-image-test,!:auth-tests,!:s3-benchmarks,!:region-testing,!:old-client-version-compatibility-test,!:crt-unavailable-tests -DautoReleaseAfterClose=true -DstagingProgressTimeoutMinutes=30 -Dmaven.wagon.httpconnectionManager.ttlSeconds=120 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true else echo "This version was already released." 
fi diff --git a/bundle-logging-bridge/pom.xml b/bundle-logging-bridge/pom.xml index 878d647f3fbc..c11ea5e3a3ad 100644 --- a/bundle-logging-bridge/pom.xml +++ b/bundle-logging-bridge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT bundle-logging-bridge jar diff --git a/bundle-sdk/pom.xml b/bundle-sdk/pom.xml index afec05ab42ed..bc10495df65a 100644 --- a/bundle-sdk/pom.xml +++ b/bundle-sdk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT bundle-sdk jar diff --git a/bundle/pom.xml b/bundle/pom.xml index 161864a7f212..3e22aba47bff 100644 --- a/bundle/pom.xml +++ b/bundle/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT bundle jar diff --git a/changelogs/2.23.x-CHANGELOG.md b/changelogs/2.23.x-CHANGELOG.md new file mode 100644 index 000000000000..b86babc7694b --- /dev/null +++ b/changelogs/2.23.x-CHANGELOG.md @@ -0,0 +1,525 @@ +# __2.23.21__ __2024-02-08__ +## __AWS CodePipeline__ + - ### Features + - Add ability to execute pipelines with new parallel & queued execution modes and add support for triggers with filtering on branches and file paths. + +## __Amazon QuickSight__ + - ### Features + - General Interactions for Visuals; Waterfall Chart Color Configuration; Documentation Update + +## __Amazon WorkSpaces__ + - ### Features + - This release introduces User-Decoupling feature. This feature allows Workspaces Core customers to provision workspaces without providing users. CreateWorkspaces and DescribeWorkspaces APIs will now take a new optional parameter "WorkspaceName". + +# __2.23.20__ __2024-02-07__ +## __AWS CRT Sync HTTP Client__ + - ### Bugfixes + - Fixed an issue where `CancellationException` was thrown incorrectly from AWS CRT Sync HTTP client when execution time exceeded the total configured API call attempt timeout or API call timeout. 
Now it throws `ApiCallAttemptTimeoutException`/`ApiCallTimeoutException` accordingly. See [#4820](https://github.com/aws/aws-sdk-java-v2/issues/4820) + +## __AWS CRT-based S3 Client__ + - ### Features + - Exposes a setting to set the memory limit when making asynchronous calls with the CRT-based S3 client + +## __AWS DataSync__ + - ### Features + - AWS DataSync now supports manifests for specifying files or objects to transfer. + +## __AWS SDK for Java v2__ + - ### Features + - Bump `aws-crt` version to `0.29.9` + - Updated endpoint and partition metadata. + +## __Amazon CloudFront KeyValueStore__ + - ### Features + - Switching CloudFront KeyValueStore onto the new SRA (Smithy Reference Architecture) identity and auth logic that was released in v2.21.0. + +## __Amazon Lex Model Building V2__ + - ### Features + - This release introduces a new bot replication feature as part of Lex Global Resiliency offering. This feature leverages a new set of APIs that allow customers to create bot replicas and replicate changes to bots across regions. + +## __Amazon Redshift__ + - ### Features + - ListRecommendations API to fetch Amazon Redshift Advisor recommendations. + +# __2.23.19__ __2024-02-06__ +## __AWS AppSync__ + - ### Features + - Support for environment variables in AppSync GraphQL APIs + +## __AWS WAFV2__ + - ### Features + - You can now delete an API key that you've created for use with your CAPTCHA JavaScript integration API. + +## __Amazon CloudWatch Logs__ + - ### Features + - This release adds a new field, logGroupArn, to the response of the logs:DescribeLogGroups action. + +## __Amazon EC2 Container Service__ + - ### Features + - This release is a documentation only update to address customer issues. + +## __Amazon Elasticsearch Service__ + - ### Features + - This release adds clear visibility to the customers on the changes that they make on the domain. 
+ +## __Amazon OpenSearch Service__ + - ### Features + - This release adds clear visibility to the customers on the changes that they make on the domain. + +# __2.23.18__ __2024-02-05__ +## __AWS Glue__ + - ### Features + - Introduce Catalog Encryption Role within Glue Data Catalog Settings. Introduce SASL/PLAIN as an authentication method for Glue Kafka connections + +## __Amazon WorkSpaces__ + - ### Features + - Added definitions of various WorkSpace states + +# __2.23.17__ __2024-02-02__ +## __AWS CRT-based S3 Client__ + - ### Features + - Allow users to configure future completion executor on the AWS CRT-based S3 client via `S3CrtAsyncClientBuilder#futureCompletionExecutor`. See [#4879](https://github.com/aws/aws-sdk-java-v2/issues/4879) + +## __AWS SDK for Java v2__ + - ### Features + - Adds setting to disable making EC2 Instance Metadata Service (IMDS) calls without a token header when prefetching a token does not work. This feature can be configured through environment variables (AWS_EC2_METADATA_V1_DISABLED), system property (aws.disableEc2MetadataV1) or AWS config file (ec2_metadata_v1_disabled). When you configure this setting to true, no calls without token headers will be made to IMDS. + - Updated endpoint and partition metadata. + +## __Amazon DynamoDB__ + - ### Features + - Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account. + +## __Amazon SageMaker Service__ + - ### Features + - Amazon SageMaker Canvas adds GenerativeAiSettings support for CanvasAppSettings. + +# __2.23.16__ __2024-02-01__ +## __AWS Elemental MediaConvert__ + - ### Features + - This release includes support for broadcast-mixed audio description tracks. + +## __AWS SDK for Java v2__ + - ### Features + - Switching a set of services onto the new SRA (Smithy Reference Architecture) identity and auth logic that was released in v2.21.0. For a list of individual services affected, please check the committed files. 
+ - Updated endpoint and partition metadata. + +## __Amazon Cognito Identity Provider__ + - ### Features + - Added CreateIdentityProvider and UpdateIdentityProvider details for new SAML IdP features + +## __Amazon Interactive Video Service__ + - ### Features + - This release introduces a new resource Playback Restriction Policy which can be used to geo-restrict or domain-restrict channel stream playback when associated with a channel. New APIs to support this resource were introduced in the form of Create/Delete/Get/Update/List. + +## __Amazon Managed Blockchain Query__ + - ### Features + - This release adds support for transactions that have not reached finality. It also removes support for the status property from the response of the GetTransaction operation. You can use the confirmationStatus and executionStatus properties to determine the status of the transaction. + +## __Amazon Neptune Graph__ + - ### Features + - Adding new APIs in SDK for Amazon Neptune Analytics. These APIs include operations to execute, cancel, list queries and get the graph summary. + +# __2.23.15__ __2024-01-31__ +## __AWS CloudFormation__ + - ### Features + - CloudFormation IaC generator allows you to scan existing resources in your account and select resources to generate a template for a new or existing CloudFormation stack. + +## __AWS Glue__ + - ### Features + - Update page size limits for GetJobRuns and GetTriggers APIs. + +## __AWS SDK for Java v2__ + - ### Features + - Reduce how many times input data is copied when writing to chunked encoded operations, like S3's PutObject. + - Updated endpoint and partition metadata. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - This release adds an optional Duration parameter to StateManager Associations. This allows customers to specify how long an apply-only-on-cron association execution should run. Once the specified Duration is out all the ongoing cancellable commands or automations are cancelled. 
+ +## __Elastic Load Balancing__ + - ### Features + - This release enables unhealthy target draining intervals for Network Load Balancers. + +# __2.23.14__ __2024-01-30__ +## __Amazon DataZone__ + - ### Features + - Add new skipDeletionCheck to DeleteDomain. Add new skipDeletionCheck to DeleteProject which also automatically deletes dependent objects + +## __Amazon Route 53__ + - ### Features + - Update the SDKs for text changes in the APIs. + +## __Amazon S3__ + - ### Features + - Reduce memory usage when request-level plugins aren't used. + +# __2.23.13__ __2024-01-29__ +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Comprehend__ + - ### Features + - Comprehend PII analysis now supports Spanish input documents. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - EC2 Fleet customers who use attribute based instance-type selection can now intuitively define their Spot instances price protection limit as a percentage of the lowest priced On-Demand instance type. + +## __Amazon Import/Export Snowball__ + - ### Features + - Modified description of createaddress to include direction to add path when providing a JSON file. + +## __Amazon Relational Database Service__ + - ### Features + - Introduced support for the InsufficientDBInstanceCapacityFault error in the RDS RestoreDBClusterFromSnapshot and RestoreDBClusterToPointInTime API methods. This provides enhanced error handling, ensuring a more robust experience. + +## __Amazon Simple Storage Service__ + - ### Bugfixes + - S3 client configured with crossRegionEnabled(true) will now use us-east-1 regional endpoint instead of the global endpoint. See [#4720](https://github.com/aws/aws-sdk-java-v2/issues/4720). + +## __AmazonMWAA__ + - ### Features + - This release adds MAINTENANCE environment status for Amazon MWAA environments. 
+ +## __Auto Scaling__ + - ### Features + - EC2 Auto Scaling customers who use attribute based instance-type selection can now intuitively define their Spot instances price protection limit as a percentage of the lowest priced On-Demand instance type. + +# __2.23.12__ __2024-01-26__ +## __AWS SDK for Java v2__ + - ### Features + - Improved performance of chunk-encoded streaming uploads, like S3's PutObject. + + - ### Bugfixes + - Fixed bug where the ProfileCredentialsProvider would re-read the credentials file with each request by default. + +## __Amazon Connect Service__ + - ### Features + - Update list and string length limits for predefined attributes. + +## __Amazon SageMaker Service__ + - ### Features + - Amazon SageMaker Automatic Model Tuning now provides an API to programmatically delete tuning jobs. + +## __Inspector2__ + - ### Features + - This release adds ECR container image scanning based on their lastRecordedPullTime. + +# __2.23.11__ __2024-01-25__ +## __AWS Certificate Manager Private Certificate Authority__ + - ### Features + - AWS Private CA now supports an option to omit the CDP extension from issued certificates, when CRL revocation is enabled. + +## __Amazon Lightsail__ + - ### Features + - This release adds support for IPv6-only instance plans. + +# __2.23.10__ __2024-01-24__ +## __AWS Outposts__ + - ### Features + - DeviceSerialNumber parameter is now optional in StartConnection API + +## __AWS Storage Gateway__ + - ### Features + - Add DeprecationDate and SoftwareVersion to response of ListGateways. + +## __Amazon EC2 Container Service__ + - ### Features + - Documentation updates for Amazon ECS. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Introduced a new clientToken request parameter on CreateNetworkAcl and CreateRouteTable APIs. The clientToken parameter allows idempotent operations on the APIs. + +## __Amazon Relational Database Service__ + - ### Features + - This release adds support for Aurora Limitless Database. 
+ +# __2.23.9__ __2024-01-23__ +## __Inspector2__ + - ### Features + - This release adds support for CIS scans on EC2 instances. + +# __2.23.8__ __2024-01-22__ +## __AWS AppConfig Data__ + - ### Features + - Fix FIPS Endpoints in aws-us-gov. + +## __AWS CRT HTTP Client__ + - ### Bugfixes + - Fixed a thread safety issue that could cause application to crash in the edge case where the SDK attempted to invoke `incrementWindow` after the stream is closed in AWS CRT HTTP Client. + +## __AWS Cloud9__ + - ### Features + - Doc-only update around removing AL1 from list of available AMIs for Cloud9 + +## __AWS Organizations__ + - ### Features + - Doc only update for quota increase change + +## __Amazon CloudFront KeyValueStore__ + - ### Features + - This release improves upon the DescribeKeyValueStore API by returning two additional fields, Status of the KeyValueStore and the FailureReason in case of failures during creation of KeyValueStore. + +## __Amazon Connect Cases__ + - ### Features + - This release adds the ability to view audit history on a case and introduces a new parameter, performedBy, for CreateCase and UpdateCase API's. + +## __Amazon EC2 Container Service__ + - ### Features + - This release adds support for Transport Layer Security (TLS) and Configurable Timeout to ECS Service Connect. TLS facilitates privacy and data security for inter-service communications, while Configurable Timeout allows customized per-request timeout and idle timeout for Service Connect services. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Documentation updates for Amazon EC2. + +## __Amazon Relational Database Service__ + - ### Features + - Introduced support for the InsufficientDBInstanceCapacityFault error in the RDS CreateDBCluster API method. This provides enhanced error handling, ensuring a more robust experience when creating database clusters with insufficient instance capacity. 
+ +## __FinSpace User Environment Management service__ + - ### Features + - Allow customer to set zip default through command line arguments. + +# __2.23.7__ __2024-01-19__ +## __AWS CodeBuild__ + - ### Features + - Release CodeBuild Reserved Capacity feature + +## __AWS SDK for Java v2__ + - ### Features + - Allowing SDK plugins to read and modify S3's crossRegionEnabled and SQS's checksumValidationEnabled + - Contributed by: [@anirudh9391](https://github.com/anirudh9391) + - Updated endpoint and partition metadata. + +## __Amazon Athena__ + - ### Features + - Introducing new NotebookS3LocationUri parameter to Athena ImportNotebook API. Payload is no longer required and either Payload or NotebookS3LocationUri needs to be provided (not both) for a successful ImportNotebook API call. If both are provided, an InvalidRequestException will be thrown. + +## __Amazon DynamoDB__ + - ### Features + - This release adds support for including ApproximateCreationDateTimePrecision configurations in EnableKinesisStreamingDestination API, adds the same as an optional field in the response of DescribeKinesisStreamingDestination, and adds support for a new UpdateKinesisStreamingDestination API. + +## __Amazon Q Connect__ + - ### Features + - Increased Quick Response name max length to 100 + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@anirudh9391](https://github.com/anirudh9391) +# __2.23.6__ __2024-01-18__ +## __AWS B2B Data Interchange__ + - ### Features + - Increasing TestMapping inputFileContent file size limit to 5MB and adding file size limit 250KB for TestParsing input file. This release also includes exposing InternalServerException for Tag APIs. + +## __AWS CRT HTTP Client__ + - ### Bugfixes + - Fixed the issue in the AWS CRT sync HTTP client where the connection was left open after the stream was aborted. 
+ +## __AWS CloudTrail__ + - ### Features + - This release adds a new API ListInsightsMetricData to retrieve metric data from CloudTrail Insights. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Connect Service__ + - ### Features + - GetMetricDataV2 now supports 3 groupings + +## __Amazon Kinesis Firehose__ + - ### Features + - Allow support for Snowflake as a Kinesis Data Firehose delivery destination. + +## __Amazon SageMaker Feature Store Runtime__ + - ### Features + - Increase BatchGetRecord limits from 10 items to 100 items + +## __Elastic Disaster Recovery Service__ + - ### Features + - Removed invalid and unnecessary default values. + +# __2.23.5__ __2024-01-17__ +## __AWS Backup Storage, Amazon CodeCatalyst, Amazon Cognito Identity, Amazon Cognito Identity Provider, AWS Identity and Access Management (IAM), Amazon Kinesis, AWS Elemental MediaStore Data Plane, Amazon Transcribe Service, Amazon Transcribe Streaming Service__ + - ### Features + - Switching a set of services onto the new SRA (Smithy Reference Architecture) identity and auth logic that was released in v2.21.0. + +## __AWS CRT HTTP Client__ + - ### Bugfixes + - Fixed the issue in the AWS CRT HTTP client where the application could crash if stream.incrementWindow was invoked on a closed stream + +## __AWS DynamoDB Enhanced Client__ + - ### Features + - Added support for `@DynamoDBAutoGeneratedUUID` to facilitate the automatic updating of DynamoDB attributes with random UUID. + +## __Amazon DynamoDB__ + - ### Features + - Updating note for enabling streams for UpdateTable. 
+ +## __Amazon Keyspaces__ + - ### Features + - This release adds support for Multi-Region Replication with provisioned tables, and Keyspaces auto scaling APIs + +# __2.23.4__ __2024-01-16__ +## __AWS IoT__ + - ### Features + - Revert release of LogTargetTypes + +## __AWS IoT FleetWise__ + - ### Features + - Updated APIs: SignalNodeType query parameter has been added to ListSignalCatalogNodesRequest and ListVehiclesResponse has been extended with attributes field. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __AWS SecurityHub__ + - ### Features + - Documentation updates for AWS Security Hub + +## __Amazon Macie 2__ + - ### Features + - This release adds support for analyzing Amazon S3 objects that are encrypted using dual-layer server-side encryption with AWS KMS keys (DSSE-KMS). It also adds support for reporting DSSE-KMS details in statistics and metadata about encryption settings for S3 buckets and objects. + +## __Amazon Personalize__ + - ### Features + - Documentation updates for Amazon Personalize. + +## __Amazon Personalize Runtime__ + - ### Features + - Documentation updates for Amazon Personalize + +## __Amazon Rekognition__ + - ### Features + - This release adds ContentType and TaxonomyLevel attributes to DetectModerationLabels and GetMediaAnalysisJob API responses. + +## __Amazon S3__ + - ### Features + - Propagating client apiCallTimeout values to S3Express createSession calls. If existing, this value overrides the default timeout value of 10s when making the nested S3Express session credentials call. + +## __Payment Cryptography Control Plane__ + - ### Features + - Provide an additional option for key exchange using RSA wrap/unwrap in addition to tr-34/tr-31 in ImportKey and ExportKey operations. Added new key usage (type) TR31_M1_ISO_9797_1_MAC_KEY, for use with Generate/VerifyMac dataplane operations with ISO9797 Algorithm 1 MAC calculations. 
+ +# __2.23.3__ __2024-01-13__ +## __Amazon SageMaker Service__ + - ### Features + - This release will have ValidationException thrown if certain invalid app types are provided. The release will also throw ValidationException if more than 10 account ids are provided in VpcOnlyTrustedAccounts. + +# __2.23.2__ __2024-01-12__ +## __AWS S3 Control__ + - ### Features + - S3 On Outposts team adds dualstack endpoints support for S3Control and S3Outposts API calls. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + + - ### Bugfixes + - Fix shading of artifacts in the `bundle` by not `org.apache.log4j.*` packages. This allows proper binding of `commons-logging` to Log4J and enables dependencies that use commons logging (e.g. Apache HTTP Client) to properly bind to Log4j. + +## __AWS Supply Chain__ + - ### Features + - This release includes APIs CreateBillOfMaterialsImportJob and GetBillOfMaterialsImportJob. + +## __AWS Transfer Family__ + - ### Features + - AWS Transfer Family now supports static IP addresses for SFTP & AS2 connectors and for async MDNs on AS2 servers. + +## __Amazon Connect Participant Service__ + - ### Features + - Introduce new Supervisor participant role + +## __Amazon Connect Service__ + - ### Features + - Supervisor Barge for Chat is now supported through the MonitorContact API. + +## __Amazon Location Service__ + - ### Features + - Location SDK documentation update. Added missing fonts to the MapConfiguration data type. Updated note for the SubMunicipality property in the place data type. + +## __AmazonMWAA__ + - ### Features + - This Amazon MWAA feature release includes new fields in CreateWebLoginToken response model. The new fields IamIdentity and AirflowIdentity will let you match identifications, as the Airflow identity length is currently hashed to 64 characters. 
+ +# __2.23.1__ __2024-01-11__ +## __AWS IoT__ + - ### Features + - Add ConflictException to Update APIs of AWS IoT Software Package Catalog + +## __AWS IoT FleetWise__ + - ### Features + - The following dataTypes have been removed: CUSTOMER_DECODED_INTERFACE in NetworkInterfaceType; CUSTOMER_DECODED_SIGNAL_INFO_IS_NULL in SignalDecoderFailureReason; CUSTOMER_DECODED_SIGNAL_NETWORK_INTERFACE_INFO_IS_NULL in NetworkInterfaceFailureReason; CUSTOMER_DECODED_SIGNAL in SignalDecoderType + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + + - ### Bugfixes + - Fix IllegalArgumentException in FullJitterBackoffStrategy when base delay and max backoff time are zero. + +## __AWS Secrets Manager__ + - ### Features + - Doc only update for Secrets Manager + +## __Amazon EC2 Container Service__ + - ### Features + - This release adds support for adding an ElasticBlockStorage volume configurations in ECS RunTask/StartTask/CreateService/UpdateService APIs. The configuration allows for attaching EBS volumes to ECS Tasks. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for adding an ElasticBlockStorage volume configurations in ECS RunTask/StartTask/CreateService/UpdateService APIs. The configuration allows for attaching EBS volumes to ECS Tasks. + +## __Amazon EventBridge__ + - ### Features + - Adding AppSync as an EventBridge Target + +## __Amazon WorkSpaces__ + - ### Features + - Added AWS Workspaces RebootWorkspaces API - Extended Reboot documentation update + +# __2.23.0__ __2024-01-10__ +## __AWS CRT HTTP Client__ + - ### Bugfixes + - Fixed the issue where `AWS_ERROR_HTTP_CONNECTION_CLOSED` was not retried by the SDK. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon CloudWatch Logs__ + - ### Features + - Add support for account level subscription filter policies to PutAccountPolicy, DescribeAccountPolicies, and DeleteAccountPolicy APIs. 
Additionally, PutAccountPolicy has been modified with new optional "selectionCriteria" parameter for resource selection. + +## __Amazon Connect Wisdom Service__ + - ### Features + - QueryAssistant and GetRecommendations will be discontinued starting June 1, 2024. To receive generative responses after March 1, 2024 you will need to create a new Assistant in the Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications. + +## __Amazon Location Service__ + - ### Features + - This release adds API support for custom layers for the maps service APIs: CreateMap, UpdateMap, DescribeMap. + +## __Amazon Q Connect__ + - ### Features + - QueryAssistant and GetRecommendations will be discontinued starting June 1, 2024. To receive generative responses after March 1, 2024 you will need to create a new Assistant in the Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications. + +## __Amazon Route 53__ + - ### Features + - Route53 now supports geoproximity routing in AWS regions + +## __Amazon S3__ + - ### Bugfixes + - Fixes a bug in DeleteObjects to properly encode the key in the request. + +## __AmazonConnectCampaignService__ + - ### Features + - Minor pattern updates for Campaign and Dial Request API fields. 
+ +## __Redshift Serverless__ + - ### Features + - Updates to ConfigParameter for RSS workgroup, removal of use_fips_ssl + diff --git a/codegen-lite-maven-plugin/pom.xml b/codegen-lite-maven-plugin/pom.xml index d6e727db4892..3a100a349f71 100644 --- a/codegen-lite-maven-plugin/pom.xml +++ b/codegen-lite-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../pom.xml codegen-lite-maven-plugin diff --git a/codegen-lite/pom.xml b/codegen-lite/pom.xml index 185b447cc3e6..55c2de9946c8 100644 --- a/codegen-lite/pom.xml +++ b/codegen-lite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT codegen-lite AWS Java SDK :: Code Generator Lite diff --git a/codegen-maven-plugin/pom.xml b/codegen-maven-plugin/pom.xml index 60334be30802..5d3146bef495 100644 --- a/codegen-maven-plugin/pom.xml +++ b/codegen-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../pom.xml codegen-maven-plugin diff --git a/codegen/pom.xml b/codegen/pom.xml index b82a84271d81..412b5b6f2caa 100644 --- a/codegen/pom.xml +++ b/codegen/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT codegen AWS Java SDK :: Code Generator diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/AuthType.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/AuthType.java index 1b1a73d5728f..b7cd768456c6 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/AuthType.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/AuthType.java @@ -36,6 +36,10 @@ public enum AuthType { this.value = value; } + public String value() { + return value; + } + public static AuthType fromValue(String value) { String normalizedValue = StringUtils.lowerCase(value); return Arrays.stream(values()) diff --git 
a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeCodegenKnowledgeIndex.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeCodegenKnowledgeIndex.java new file mode 100644 index 000000000000..d8bf80517ca1 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeCodegenKnowledgeIndex.java @@ -0,0 +1,80 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.auth.scheme; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.BiConsumer; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; + +/** + * Knowledge index to get access to the configured service auth schemes and operations overrides. This index is optimized for + * code generation of switch statements therefore the data is grouped by operations that share the same auth schemes. + */ +public final class AuthSchemeCodegenKnowledgeIndex { + /** + * We delegate this value to {@link ModelAuthSchemeKnowledgeIndex#operationsToMetadata()}. We just wrap the results in an + * interface that easier to use for the layer that does the code generation. 
+ */ + private final Map, List> operationsToAuthSchemes; + + private AuthSchemeCodegenKnowledgeIndex(IntermediateModel intermediateModel) { + this.operationsToAuthSchemes = ModelAuthSchemeKnowledgeIndex.of(intermediateModel).operationsToMetadata(); + } + + /** + * Creates a new {@link AuthSchemeCodegenKnowledgeIndex} using the given {@code intermediateModel}.. + */ + public static AuthSchemeCodegenKnowledgeIndex of(IntermediateModel intermediateModel) { + return new AuthSchemeCodegenKnowledgeIndex(intermediateModel); + } + + /** + * Returns the service defaults auth schemes. These can be overridden by operation. + * + * @return the service defaults auth schemes. + */ + public List serviceDefaultAuthSchemes() { + return operationsToAuthSchemes.get(Collections.emptyList()); + } + + /** + * Returns true if there are auth scheme overrides per operation. + * + * @return true if there are auth scheme overrides per operation + */ + public boolean hasPerOperationAuthSchemesOverrides() { + // The map at least contains one key-value pair (keyed with Collections.emptyList()). + // If we have more than that then we have at least one override. + return operationsToAuthSchemes.size() > 1; + } + + /** + * Traverses each group of operations with the same set of auth schemes. + * + * @param consumer The consumer to call for each group of operations with the same set of auth schemes. + */ + public void forEachOperationsOverridesGroup(BiConsumer, List> consumer) { + for (Map.Entry, List> kvp : operationsToAuthSchemes.entrySet()) { + if (kvp.getKey().isEmpty()) { + // We are traversing operation groups, ignore service wide defaults. 
+ continue; + } + consumer.accept(kvp.getKey(), kvp.getValue()); + } + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeCodegenMetadata.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeCodegenMetadata.java index 90149c984fa5..158e70dc0bba 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeCodegenMetadata.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeCodegenMetadata.java @@ -15,87 +15,18 @@ package software.amazon.awssdk.codegen.poet.auth.scheme; -import com.squareup.javapoet.MethodSpec; +import com.squareup.javapoet.CodeBlock; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.function.BiConsumer; -import software.amazon.awssdk.codegen.model.service.AuthType; -import software.amazon.awssdk.http.auth.aws.scheme.AwsV4AuthScheme; -import software.amazon.awssdk.http.auth.aws.signer.AwsV4HttpSigner; -import software.amazon.awssdk.http.auth.scheme.BearerAuthScheme; -import software.amazon.awssdk.http.auth.scheme.NoAuthAuthScheme; +import java.util.function.Supplier; import software.amazon.awssdk.utils.Validate; +/** + * Represents a modeled auth scheme option. 
+ */ public final class AuthSchemeCodegenMetadata { - - static final AuthSchemeCodegenMetadata SIGV4 = builder() - .schemeId(AwsV4AuthScheme.SCHEME_ID) - .authSchemeClass(AwsV4AuthScheme.class) - .addProperty(SignerPropertyValueProvider.builder() - .containingClass(AwsV4HttpSigner.class) - .fieldName("SERVICE_SIGNING_NAME") - .valueEmitter((spec, utils) -> spec.addCode("$S", utils.signingName())) - .build()) - .addProperty(SignerPropertyValueProvider.builder() - .containingClass(AwsV4HttpSigner.class) - .fieldName("REGION_NAME") - .valueEmitter((spec, utils) -> spec.addCode("$L", "params.region().id()")) - .build()) - .build(); - - static final AuthSchemeCodegenMetadata SIGV4_UNSIGNED_BODY = - SIGV4.toBuilder() - .addProperty(SignerPropertyValueProvider.builder() - .containingClass(AwsV4HttpSigner.class) - .fieldName("PAYLOAD_SIGNING_ENABLED") - .valueEmitter((spec, utils) -> spec.addCode("$L", false)) - .build()) - .build(); - - static final AuthSchemeCodegenMetadata S3 = - SIGV4.toBuilder() - .addProperty(SignerPropertyValueProvider.builder() - .containingClass(AwsV4HttpSigner.class) - .fieldName("DOUBLE_URL_ENCODE") - .valueEmitter((spec, utils) -> spec.addCode("$L", "false")) - .build()) - .addProperty(SignerPropertyValueProvider.builder() - .containingClass(AwsV4HttpSigner.class) - .fieldName("NORMALIZE_PATH") - .valueEmitter((spec, utils) -> spec.addCode("$L", "false")) - .build()) - .addProperty(SignerPropertyValueProvider.builder() - .containingClass(AwsV4HttpSigner.class) - .fieldName("PAYLOAD_SIGNING_ENABLED") - .valueEmitter((spec, utils) -> spec.addCode("$L", false)) - .build()) - .build(); - - static final AuthSchemeCodegenMetadata S3V4 = - SIGV4.toBuilder() - .addProperty(SignerPropertyValueProvider.builder() - .containingClass(AwsV4HttpSigner.class) - .fieldName("DOUBLE_URL_ENCODE") - .valueEmitter((spec, utils) -> spec.addCode("$L", "false")) - .build()) - .addProperty(SignerPropertyValueProvider.builder() - .containingClass(AwsV4HttpSigner.class) - 
.fieldName("NORMALIZE_PATH") - .valueEmitter((spec, utils) -> spec.addCode("$L", "false")) - .build()) - .build(); - - static final AuthSchemeCodegenMetadata BEARER = builder() - .schemeId(BearerAuthScheme.SCHEME_ID) - .authSchemeClass(BearerAuthScheme.class) - .build(); - - static final AuthSchemeCodegenMetadata NO_AUTH = builder() - .schemeId(NoAuthAuthScheme.SCHEME_ID) - .authSchemeClass(NoAuthAuthScheme.class) - .build(); - private final String schemeId; private final List properties; private final Class authSchemeClass; @@ -122,30 +53,11 @@ public Builder toBuilder() { return new Builder(this); } - private static Builder builder() { + public static Builder builder() { return new Builder(); } - public static AuthSchemeCodegenMetadata fromAuthType(AuthType type) { - switch (type) { - case BEARER: - return BEARER; - case NONE: - return NO_AUTH; - case V4: - return SIGV4; - case V4_UNSIGNED_BODY: - return SIGV4_UNSIGNED_BODY; - case S3: - return S3; - case S3V4: - return S3V4; - default: - throw new IllegalArgumentException("Unknown auth type: " + type); - } - } - - private static class Builder { + public static class Builder { private String schemeId; private List properties = new ArrayList<>(); private Class authSchemeClass; @@ -169,6 +81,12 @@ public Builder addProperty(SignerPropertyValueProvider property) { return this; } + public Builder properties(List properties) { + this.properties.clear(); + this.properties.addAll(properties); + return this; + } + public Builder authSchemeClass(Class authSchemeClass) { this.authSchemeClass = authSchemeClass; return this; @@ -182,12 +100,14 @@ public AuthSchemeCodegenMetadata build() { static class SignerPropertyValueProvider { private final Class containingClass; private final String fieldName; - private final BiConsumer valueEmitter; + private final BiConsumer valueEmitter; + private final Supplier valueSupplier; SignerPropertyValueProvider(Builder builder) { this.containingClass = 
Validate.paramNotNull(builder.containingClass, "containingClass"); this.valueEmitter = Validate.paramNotNull(builder.valueEmitter, "valueEmitter"); this.fieldName = Validate.paramNotNull(builder.fieldName, "fieldName"); + this.valueSupplier = builder.valueSupplier; } public Class containingClass() { @@ -198,18 +118,28 @@ public String fieldName() { return fieldName; } - public void emitValue(MethodSpec.Builder spec, AuthSchemeSpecUtils utils) { + public boolean isConstant() { + return valueSupplier != null; + } + + public Object value() { + return valueSupplier.get(); + } + + public void emitValue(CodeBlock.Builder spec, AuthSchemeSpecUtils utils) { valueEmitter.accept(spec, utils); } - private static Builder builder() { + + public static Builder builder() { return new Builder(); } static class Builder { private Class containingClass; private String fieldName; - private BiConsumer valueEmitter; + private BiConsumer valueEmitter; + private Supplier valueSupplier; public Builder containingClass(Class containingClass) { this.containingClass = containingClass; @@ -221,11 +151,19 @@ public Builder fieldName(String fieldName) { return this; } - public Builder valueEmitter(BiConsumer valueEmitter) { + public Builder valueEmitter(BiConsumer valueEmitter) { this.valueEmitter = valueEmitter; return this; } + public Builder constantValueSupplier(Supplier valueSupplier) { + this.valueSupplier = valueSupplier; + if (valueEmitter == null) { + valueEmitter = (spec, utils) -> spec.add("$L", valueSupplier.get()); + } + return this; + } + public SignerPropertyValueProvider build() { return new SignerPropertyValueProvider(this); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeCodegenMetadataExt.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeCodegenMetadataExt.java new file mode 100644 index 000000000000..2063aa506771 --- /dev/null +++ 
b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeCodegenMetadataExt.java @@ -0,0 +1,173 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.auth.scheme; + +import static software.amazon.awssdk.codegen.poet.auth.scheme.AuthSchemeCodegenMetadata.Builder; +import static software.amazon.awssdk.codegen.poet.auth.scheme.AuthSchemeCodegenMetadata.builder; + +import com.squareup.javapoet.CodeBlock; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; +import software.amazon.awssdk.codegen.model.service.AuthType; +import software.amazon.awssdk.codegen.poet.auth.scheme.AuthSchemeCodegenMetadata.SignerPropertyValueProvider; +import software.amazon.awssdk.http.auth.aws.scheme.AwsV4AuthScheme; +import software.amazon.awssdk.http.auth.aws.signer.AwsV4HttpSigner; +import software.amazon.awssdk.http.auth.scheme.BearerAuthScheme; +import software.amazon.awssdk.http.auth.scheme.NoAuthAuthScheme; +import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; +import software.amazon.awssdk.http.auth.spi.signer.SignerProperty; + +/** + * Extension and utility methods for the {@link AuthSchemeCodegenMetadata} class. 
+ */ +public final class AuthSchemeCodegenMetadataExt { + + static final AuthSchemeCodegenMetadata SIGV4 = + builder() + .schemeId(AwsV4AuthScheme.SCHEME_ID) + .authSchemeClass(AwsV4AuthScheme.class) + .addProperty(SignerPropertyValueProvider.builder() + .containingClass(AwsV4HttpSigner.class) + .fieldName( + "SERVICE_SIGNING_NAME") + .valueEmitter((spec, utils) -> spec.add("$S", utils.signingName())) + .build()) + .addProperty(SignerPropertyValueProvider.builder() + .containingClass(AwsV4HttpSigner.class) + .fieldName( + "REGION_NAME") + .valueEmitter((spec, utils) -> spec.add("$L", "params.region().id()")) + .build()) + .build(); + + static final AuthSchemeCodegenMetadata BEARER = builder() + .schemeId(BearerAuthScheme.SCHEME_ID) + .authSchemeClass(BearerAuthScheme.class) + .build(); + + static final AuthSchemeCodegenMetadata NO_AUTH = builder() + .schemeId(NoAuthAuthScheme.SCHEME_ID) + .authSchemeClass(NoAuthAuthScheme.class) + .build(); + + + private AuthSchemeCodegenMetadataExt() { + } + + /** + * Creates a new auth scheme codegen metadata instance using the defaults for the given {@link AuthType} defaults. + */ + public static AuthSchemeCodegenMetadata fromAuthType(AuthType type) { + switch (type) { + case BEARER: + return BEARER; + case NONE: + return NO_AUTH; + default: + String authTypeName = type.value(); + SigV4SignerDefaults defaults = AuthTypeToSigV4Default.authTypeToDefaults().get(authTypeName); + if (defaults == null) { + throw new IllegalArgumentException("Unknown auth type: " + type); + } + return fromConstants(defaults); + } + } + + /** + * Transforms a {@link SigV4SignerDefaults} instance to an {@link AuthSchemeCodegenMetadata} instance. 
+ */ + public static AuthSchemeCodegenMetadata fromConstants(SigV4SignerDefaults constants) { + Builder builder = SIGV4.toBuilder(); + for (SignerPropertyValueProvider property : propertiesFromConstants(constants)) { + builder.addProperty(property); + } + return builder.build(); + } + + /** + * Renders the AuthSchemeCodegenMetadata as to create a new {@link AuthSchemeOption} using the configured values. + */ + public static CodeBlock codegenNewAuthOption( + AuthSchemeCodegenMetadata metadata, + AuthSchemeSpecUtils authSchemeSpecUtils + ) { + CodeBlock.Builder builder = CodeBlock.builder(); + builder.add("$T.builder().schemeId($S)", + AuthSchemeOption.class, metadata.schemeId()); + builder.add(codegenSignerProperties(authSchemeSpecUtils, metadata.properties())); + return builder.build(); + } + + /** + * Renders a chain of calls to {@link AuthSchemeOption.Builder#putSignerProperty(SignerProperty, Object)} for each of the + * given properties. + */ + public static CodeBlock codegenSignerProperties( + AuthSchemeSpecUtils authSchemeSpecUtils, + List properties + ) { + CodeBlock.Builder builder = CodeBlock.builder(); + for (SignerPropertyValueProvider property : properties) { + builder.add("\n.putSignerProperty($T.$N, ", property.containingClass(), property.fieldName()); + property.emitValue(builder, authSchemeSpecUtils); + builder.add(")"); + } + return builder.build(); + } + + /** + * Renders a chain of calls to {@link AuthSchemeOption.Builder#putSignerPropertyIfAbsent(SignerProperty, Object)} for each of + * the given properties. 
+ */ + public static CodeBlock codegenSignerPropertiesIfAbsent( + AuthSchemeSpecUtils authSchemeSpecUtils, + List properties + ) { + CodeBlock.Builder builder = CodeBlock.builder(); + for (SignerPropertyValueProvider property : properties) { + builder.add("\n.putSignerPropertyIfAbsent($T.$N, ", property.containingClass(), property.fieldName()); + property.emitValue(builder, authSchemeSpecUtils); + builder.add(")"); + } + return builder.build(); + } + + private static List propertiesFromConstants(SigV4SignerDefaults constants) { + List properties = new ArrayList<>(); + if (constants.payloadSigningEnabled() != null) { + properties.add(from("PAYLOAD_SIGNING_ENABLED", constants::payloadSigningEnabled)); + } + if (constants.doubleUrlEncode() != null) { + properties.add(from("DOUBLE_URL_ENCODE", constants::doubleUrlEncode)); + } + if (constants.normalizePath() != null) { + properties.add(from("NORMALIZE_PATH", constants::normalizePath)); + } + if (constants.chunkEncodingEnabled() != null) { + properties.add(from("CHUNK_ENCODING_ENABLED", constants::chunkEncodingEnabled)); + } + return properties; + } + + private static SignerPropertyValueProvider from(String name, Supplier valueSupplier) { + return SignerPropertyValueProvider.builder() + .containingClass(AwsV4HttpSigner.class) + .fieldName(name) + .constantValueSupplier(valueSupplier) + .build(); + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeInterceptorSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeInterceptorSpec.java index 026e91a36742..6ab69cb24a92 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeInterceptorSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeInterceptorSpec.java @@ -52,6 +52,7 @@ import software.amazon.awssdk.endpoints.EndpointProvider; import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme; import 
software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; +import software.amazon.awssdk.http.auth.spi.signer.HttpSigner; import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; import software.amazon.awssdk.identity.spi.Identity; import software.amazon.awssdk.identity.spi.IdentityProvider; @@ -274,6 +275,19 @@ private MethodSpec generateTrySelectAuthScheme() { .endControlFlow(); } + builder.addStatement("$T signer", + ParameterizedTypeName.get(ClassName.get(HttpSigner.class), TypeVariableName.get("T"))); + builder.beginControlFlow("try"); + { + builder.addStatement("signer = authScheme.signer()"); + builder.nextControlFlow("catch (RuntimeException e)"); + builder.addStatement("discardedReasons.add(() -> String.format($S, authOption.schemeId(), e.getMessage()))", + "'%s' signer could not be retrieved: %s") + .addStatement("return null") + .endControlFlow(); + } + + builder.addStatement("$T.Builder identityRequestBuilder = $T.builder()", ResolveIdentityRequest.class, ResolveIdentityRequest.class); @@ -294,7 +308,7 @@ private MethodSpec generateTrySelectAuthScheme() { MetricUtils.class) .endControlFlow(); - builder.addStatement("return new $T<>(identity, authScheme.signer(), authOption)", SelectedAuthScheme.class); + builder.addStatement("return new $T<>(identity, signer, authOption)", SelectedAuthScheme.class); return builder.build(); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeProviderSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeProviderSpec.java index 3b767b1c2235..bc5255695ad1 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeProviderSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeProviderSpec.java @@ -62,7 +62,7 @@ private MethodSpec resolveAuthSchemeMethod() { b.addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT); 
b.addParameter(authSchemeSpecUtils.parametersInterfaceName(), "authSchemeParams"); b.returns(authSchemeSpecUtils.resolverReturnType()); - b.addJavadoc(resolveMethodJavadoc()); + b.addJavadoc("Resolve the auth schemes based on the given set of parameters."); return b.build(); } @@ -75,7 +75,7 @@ private MethodSpec resolveAuthSchemeConsumerBuilderMethod() { b.addModifiers(Modifier.PUBLIC, Modifier.DEFAULT); b.addParameter(consumerType, "consumer"); b.returns(authSchemeSpecUtils.resolverReturnType()); - b.addJavadoc(resolveMethodJavadoc()); + b.addJavadoc("Resolve the auth schemes based on the given set of parameters."); b.addStatement("$T builder = $T.builder()", parametersBuilderInterface, parametersInterface); b.addStatement("consumer.accept(builder)"); @@ -104,12 +104,4 @@ private CodeBlock interfaceJavadoc() { return b.build(); } - - private CodeBlock resolveMethodJavadoc() { - CodeBlock.Builder b = CodeBlock.builder(); - - b.add("Resolve the auth schemes based on the given set of parameters."); - - return b.build(); - } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecUtils.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecUtils.java index 4c997473960f..5724e2b78f57 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecUtils.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecUtils.java @@ -19,27 +19,17 @@ import com.squareup.javapoet.ParameterizedTypeName; import com.squareup.javapoet.TypeName; import java.util.Collections; -import java.util.Comparator; import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; -import java.util.Map; import java.util.Set; -import java.util.TreeSet; -import java.util.stream.Collectors; -import java.util.stream.Stream; import software.amazon.awssdk.codegen.model.config.customization.CustomizationConfig; 
import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; -import software.amazon.awssdk.codegen.model.intermediate.OperationModel; -import software.amazon.awssdk.codegen.model.service.AuthType; import software.amazon.awssdk.codegen.utils.AuthUtils; -import software.amazon.awssdk.http.auth.aws.scheme.AwsV4aAuthScheme; -import software.amazon.awssdk.http.auth.scheme.NoAuthAuthScheme; import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; public final class AuthSchemeSpecUtils { - private static final Set DEFAULT_AUTH_SCHEME_PARAMS = Collections.unmodifiableSet(setOf("region", "operation")); + private static final Set DEFAULT_AUTH_SCHEME_PARAMS = setOf("region", "operation"); private final IntermediateModel intermediateModel; private final boolean useSraAuth; private final Set allowedEndpointAuthSchemeParams; @@ -161,66 +151,10 @@ public String signingName() { return intermediateModel.getMetadata().getSigningName(); } - public Map, List> operationsToAuthType() { - Map, List> authSchemesToOperations = - intermediateModel.getOperations() - .entrySet() - .stream() - .filter(kvp -> !kvp.getValue().getAuth().isEmpty()) - .collect(Collectors.groupingBy(kvp -> kvp.getValue().getAuth(), - Collectors.mapping(Map.Entry::getKey, Collectors.toList()))); - - Map, List> operationsToAuthType = authSchemesToOperations - .entrySet() - .stream() - .sorted(Comparator.comparing(left -> left.getValue().get(0))) - .collect(Collectors.toMap(Map.Entry::getValue, - Map.Entry::getKey, (a, b) -> b, - LinkedHashMap::new)); - - List serviceDefaults = serviceDefaultAuthTypes(); - - // Get the list of operations that share the same auth schemes as the system defaults and remove it from the result. We - // will take care of all of these in the fallback `default` case. 
- List operationsWithDefaults = authSchemesToOperations.remove(serviceDefaults); - operationsToAuthType.remove(operationsWithDefaults); - operationsToAuthType.put(Collections.emptyList(), serviceDefaults); - return operationsToAuthType; - } - - public List serviceDefaultAuthTypes() { - List modeled = intermediateModel.getMetadata().getAuth(); - if (!modeled.isEmpty()) { - return modeled; - } - return Collections.singletonList(intermediateModel.getMetadata().getAuthType()); - } - - public Set> allServiceConcreteAuthSchemeClasses() { - Set> result = - Stream.concat(intermediateModel.getOperations() - .values() - .stream() - .map(OperationModel::getAuth) - .flatMap(List::stream), - intermediateModel.getMetadata().getAuth().stream()) - .map(AuthSchemeCodegenMetadata::fromAuthType) - .map(AuthSchemeCodegenMetadata::authSchemeClass) - .collect(Collectors.toCollection(() -> new TreeSet<>(Comparator.comparing(Class::getSimpleName)))); - - if (useEndpointBasedAuthProvider()) { - // sigv4a is not modeled but needed for the endpoints based auth-scheme cases. - result.add(AwsV4aAuthScheme.class); - } - // Make the no-auth scheme available. - result.add(NoAuthAuthScheme.class); - return result; - } - - private static Set setOf(String v1, String v2) { - Set set = new HashSet<>(); - set.add(v1); - set.add(v2); - return set; + private static Set setOf(String val1, String val2) { + Set result = new HashSet<>(); + result.add(val1); + result.add(val2); + return Collections.unmodifiableSet(result); } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthTypeToSigV4Default.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthTypeToSigV4Default.java new file mode 100644 index 000000000000..02f6a682e3f4 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthTypeToSigV4Default.java @@ -0,0 +1,157 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.auth.scheme; + +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.codegen.model.service.AuthType; +import software.amazon.awssdk.http.auth.aws.scheme.AwsV4AuthScheme; +import software.amazon.awssdk.utils.Lazy; + +/** + * Contains maps from all sigv4 based {@link AuthType} to {@link SigV4SignerDefaults} that we can then transform for use in + * codegen. + */ +public final class AuthTypeToSigV4Default { + + public static final SigV4SignerDefaults SIGV4_DEFAULT = SigV4SignerDefaults + .builder() + .authType("v4") + .schemeId(AwsV4AuthScheme.SCHEME_ID) + .build(); + + private static final Lazy> AUTH_TYPE_TO_DEFAULTS = new Lazy<>( + () -> { + Map map = new LinkedHashMap<>(); + for (SigV4SignerDefaults sigv4FamilySignerConstants : knownAuthTypes()) { + if (map.put(sigv4FamilySignerConstants.authType(), sigv4FamilySignerConstants) != null) { + throw new IllegalStateException("Duplicate key: " + sigv4FamilySignerConstants.authType()); + } + } + return map; + }); + + private AuthTypeToSigV4Default() { + } + + /** + * Returns a mapping from an auth-type name to a set of AWS sigV4 default values.The auth-type names are the same as the + * {@link AuthType} enum values. 
+ * + * @see SigV4SignerDefaults + */ + public static Map authTypeToDefaults() { + return AUTH_TYPE_TO_DEFAULTS.getValue(); + } + + /** + * Returns the list fo all known auth types to s3v4Defaults instances. + * + * @return + */ + public static List knownAuthTypes() { + return Arrays.asList( + sigv4Default(), + s3Defaults(), + s3v4Defaults(), + sigv4UnsignedPayload() + ); + } + + /** + * Set of default signer defaults. None is set by default. + */ + private static SigV4SignerDefaults sigv4Default() { + return SIGV4_DEFAULT; + } + + /** + * Set of default signer defaults for S3. Sets the following defaults signer properties + * + *
    + *
  • {@code doubleUrlEncode(false)} + *
  • {@code normalizePath(false)} + *
  • {@code payloadSigningEnabled(false)} + *
+ *

+ * Also overrides for the following operations + * + *

    + *
  • {@code UploadParts} Sets the defaults and also {@code chunkEncodingEnabled(true)}
  • + *
  • {@code PutObject} Sets the defaults and also {@code chunkEncodingEnabled(true)}
  • + *
+ */ + private static SigV4SignerDefaults s3Defaults() { + return sigv4Default() + .toBuilder() + .authType("s3") + .service("S3") + .doubleUrlEncode(Boolean.FALSE) + .normalizePath(Boolean.FALSE) + .payloadSigningEnabled(Boolean.FALSE) + .putOperation("UploadPart", + sigv4Default() + .toBuilder() + // Default S3 signer properties + .doubleUrlEncode(Boolean.FALSE) + .normalizePath(Boolean.FALSE) + .payloadSigningEnabled(Boolean.FALSE) + // Including chunkEncodingEnabled TRUE + .chunkEncodingEnabled(Boolean.TRUE) + .build()) + .putOperation("PutObject", + sigv4Default() + .toBuilder() + // Default S3 signer properties + .doubleUrlEncode(Boolean.FALSE) + .normalizePath(Boolean.FALSE) + .payloadSigningEnabled(Boolean.FALSE) + // Including chunkEncodingEnabled TRUE + .chunkEncodingEnabled(Boolean.TRUE) + .build()) + .build(); + } + + + /** + * Set of default signer defaults for auth-type s3v4. Currently only used by S3Control. + */ + private static SigV4SignerDefaults s3v4Defaults() { + return sigv4Default().toBuilder() + .authType("s3v4") + .doubleUrlEncode(false) + .normalizePath(false) + .build(); + } + + + /** + * Set of default signer defaults for auth-type s3v4. Currently only used by disable payload signing for some operations. Sets + * the following default signer property + * + *
    + *
  • {@code payloadSigningEnabled(false)} + *
+ */ + private static SigV4SignerDefaults sigv4UnsignedPayload() { + return sigv4Default().toBuilder() + .authType("v4-unsigned-body") + .payloadSigningEnabled(false) + .build(); + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/EndpointBasedAuthSchemeProviderSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/EndpointBasedAuthSchemeProviderSpec.java index f680c8307936..c921153ac395 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/EndpointBasedAuthSchemeProviderSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/EndpointBasedAuthSchemeProviderSpec.java @@ -16,6 +16,7 @@ package software.amazon.awssdk.codegen.poet.auth.scheme; import com.squareup.javapoet.ClassName; +import com.squareup.javapoet.CodeBlock; import com.squareup.javapoet.FieldSpec; import com.squareup.javapoet.MethodSpec; import com.squareup.javapoet.ParameterizedTypeName; @@ -49,10 +50,12 @@ public class EndpointBasedAuthSchemeProviderSpec implements ClassSpec { private final AuthSchemeSpecUtils authSchemeSpecUtils; private final EndpointRulesSpecUtils endpointRulesSpecUtils; + private final SigV4AuthSchemeCodegenKnowledgeIndex sigV4AuthSchemeCodegenKnowledgeIndex; public EndpointBasedAuthSchemeProviderSpec(IntermediateModel intermediateModel) { this.authSchemeSpecUtils = new AuthSchemeSpecUtils(intermediateModel); this.endpointRulesSpecUtils = new EndpointRulesSpecUtils(intermediateModel); + this.sigV4AuthSchemeCodegenKnowledgeIndex = SigV4AuthSchemeCodegenKnowledgeIndex.of(intermediateModel); } @Override @@ -62,18 +65,23 @@ public ClassName className() { @Override public TypeSpec poetSpec() { - return PoetUtils.createClassBuilder(className()) - .addModifiers(Modifier.PUBLIC, Modifier.FINAL) - .addAnnotation(SdkInternalApi.class) - .addSuperinterface(authSchemeSpecUtils.providerInterfaceName()) - .addMethod(constructor()) - .addField(defaultInstance()) - 
.addField(modeledResolverInstance()) - .addField(endpointDelegateInstance()) - .addMethod(createMethod()) - .addMethod(resolveAuthSchemeMethod()) - .addMethod(endpointProvider()) - .build(); + TypeSpec.Builder builder = PoetUtils.createClassBuilder(className()) + .addModifiers(Modifier.PUBLIC, Modifier.FINAL) + .addAnnotation(SdkInternalApi.class) + .addSuperinterface(authSchemeSpecUtils.providerInterfaceName()) + .addMethod(constructor()) + .addField(defaultInstance()) + .addField(modeledResolverInstance()) + .addField(endpointDelegateInstance()) + .addMethod(createMethod()) + .addMethod(resolveAuthSchemeMethod()) + .addMethod(endpointProvider()); + + boolean applyServiceDefaults = sigV4AuthSchemeCodegenKnowledgeIndex.hasSigV4Overrides(); + if (applyServiceDefaults) { + builder.addMethod(addV4Defaults()); + } + return builder.build(); } private MethodSpec constructor() { @@ -180,12 +188,25 @@ private void addAuthSchemeSwitchSigV4Case(MethodSpec.Builder spec) { SigV4AuthScheme.class, Validate.class, SigV4AuthScheme.class, "Expecting auth scheme of class SigV4AuthScheme, got instead object of class %s"); - spec.addCode("options.add($T.builder().schemeId($S)", AuthSchemeOption.class, AwsV4AuthScheme.SCHEME_ID) - .addCode(".putSignerProperty($T.SERVICE_SIGNING_NAME, sigv4AuthScheme.signingName())", AwsV4HttpSigner.class) - .addCode(".putSignerProperty($T.REGION_NAME, sigv4AuthScheme.signingRegion())", AwsV4HttpSigner.class) - .addCode(".putSignerProperty($T.DOUBLE_URL_ENCODE, !sigv4AuthScheme.disableDoubleEncoding())", - AwsV4HttpSigner.class) - .addCode(".build());"); + CodeBlock.Builder block = CodeBlock.builder(); + block.add("$T.builder()", AuthSchemeOption.class) + .add("\n.schemeId($T.SCHEME_ID)", AwsV4AuthScheme.class) + .add("\n.putSignerProperty($T.SERVICE_SIGNING_NAME, sigv4AuthScheme.signingName())", AwsV4HttpSigner.class) + .add("\n.putSignerProperty($T.REGION_NAME, sigv4AuthScheme.signingRegion())", AwsV4HttpSigner.class) + 
.add("\n.putSignerProperty($T.DOUBLE_URL_ENCODE, !sigv4AuthScheme.disableDoubleEncoding())", + AwsV4HttpSigner.class); + + if (sigV4AuthSchemeCodegenKnowledgeIndex.hasSigV4Overrides()) { + spec.addCode("$1T sigv4AuthSchemeOption = applySigV4FamilyDefaults(", AuthSchemeOption.class) + .addCode(block.build()) + .addCode(", params)") + .addStatement(".build()"); + } else { + spec.addCode("$1T sigv4AuthSchemeOption = ", AuthSchemeOption.class) + .addCode(block.build()) + .addStatement(".build()"); + } + spec.addStatement("options.add(sigv4AuthSchemeOption)"); spec.addStatement("break"); } @@ -198,12 +219,25 @@ private void addAuthSchemeSwitchSigV4aCase(MethodSpec.Builder spec) { spec.addStatement("$1T regionSet = $1T.create(sigv4aAuthScheme.signingRegionSet())", RegionSet.class); - spec.addCode("options.add($T.builder().schemeId($S)", AuthSchemeOption.class, AwsV4aAuthScheme.SCHEME_ID) - .addCode(".putSignerProperty($T.SERVICE_SIGNING_NAME, sigv4aAuthScheme.signingName())", AwsV4aHttpSigner.class) - .addCode(".putSignerProperty($T.REGION_SET, regionSet)", AwsV4aHttpSigner.class) - .addCode(".putSignerProperty($T.DOUBLE_URL_ENCODE, !sigv4aAuthScheme.disableDoubleEncoding())", - AwsV4aHttpSigner.class) - .addCode(".build());"); + CodeBlock.Builder block = CodeBlock.builder(); + block.add("$1T.builder().schemeId($2T.SCHEME_ID)", AuthSchemeOption.class, + AwsV4aAuthScheme.class) + .add("\n.putSignerProperty($T.SERVICE_SIGNING_NAME, sigv4aAuthScheme.signingName())", AwsV4HttpSigner.class) + .add("\n.putSignerProperty($T.REGION_SET, regionSet)", AwsV4aHttpSigner.class) + .add("\n.putSignerProperty($T.DOUBLE_URL_ENCODE, !sigv4aAuthScheme.disableDoubleEncoding())", AwsV4HttpSigner.class); + + if (sigV4AuthSchemeCodegenKnowledgeIndex.hasSigV4Overrides()) { + spec.addCode("$1T sigv4aAuthSchemeOption = applySigV4FamilyDefaults(", AuthSchemeOption.class) + .addCode(block.build()) + .addCode(", params)") + .addStatement(".build()"); + + } else { + spec.addCode("$1T 
sigv4aAuthSchemeOption = ", AuthSchemeOption.class) + .addCode(block.build()) + .addStatement(".build()"); + } + spec.addStatement("options.add(sigv4aAuthSchemeOption)"); spec.addStatement("break"); } @@ -212,19 +246,34 @@ private void addAuthSchemeSwitchS3ExpressCase(MethodSpec.Builder spec) { ClassName s3ExpressEndpointAuthScheme = ClassName.get( authSchemeSpecUtils.baseClientPackageName() + ".endpoints.authscheme", "S3ExpressEndpointAuthScheme"); + spec.addStatement("$T s3ExpressAuthScheme = $T.isInstanceOf($T.class, authScheme, $S, authScheme.getClass().getName())", s3ExpressEndpointAuthScheme, Validate.class, s3ExpressEndpointAuthScheme, "Expecting auth scheme of class S3ExpressAuthScheme, got instead object of class %s"); ClassName s3ExpressAuthScheme = ClassName.get(authSchemeSpecUtils.baseClientPackageName() + ".s3express", "S3ExpressAuthScheme"); - spec.addCode("options.add($T.builder().schemeId($T.SCHEME_ID)", AuthSchemeOption.class, s3ExpressAuthScheme) - .addCode(".putSignerProperty($T.SERVICE_SIGNING_NAME, s3ExpressAuthScheme.signingName())", AwsV4HttpSigner.class) - .addCode(".putSignerProperty($T.REGION_NAME, s3ExpressAuthScheme.signingRegion())", AwsV4HttpSigner.class) - .addCode(".putSignerProperty($T.DOUBLE_URL_ENCODE, !s3ExpressAuthScheme.disableDoubleEncoding())", - AwsV4HttpSigner.class) - .addCode(".build());"); + + CodeBlock.Builder block = CodeBlock.builder(); + block.add("$1T.builder().schemeId($2T.SCHEME_ID)", AuthSchemeOption.class, s3ExpressAuthScheme) + .add("\n.putSignerProperty($T.SERVICE_SIGNING_NAME, s3ExpressAuthScheme.signingName())", AwsV4HttpSigner.class) + .add("\n.putSignerProperty($T.REGION_NAME, s3ExpressAuthScheme.signingRegion())", AwsV4HttpSigner.class) + .add("\n.putSignerProperty($T.DOUBLE_URL_ENCODE, !s3ExpressAuthScheme.disableDoubleEncoding())", + AwsV4HttpSigner.class); + + if (sigV4AuthSchemeCodegenKnowledgeIndex.hasSigV4Overrides()) { + spec.addCode("$1T s3ExpressAuthSchemeOption = applySigV4FamilyDefaults(", 
AuthSchemeOption.class) + .addCode(block.build()) + .addCode(", params)") + .addStatement(".build()"); + } else { + spec.addCode("$1T s3ExpressAuthSchemeOption = ", AuthSchemeOption.class) + .addCode(block.build()) + .addStatement(".build()"); + } + spec.addStatement("options.add(s3ExpressAuthSchemeOption)"); spec.addStatement("break"); + } private void addAuthSchemeSwitchDefaultCase(MethodSpec.Builder spec) { @@ -232,6 +281,55 @@ private void addAuthSchemeSwitchDefaultCase(MethodSpec.Builder spec) { spec.addStatement("throw new $T($S + name)", IllegalArgumentException.class, "Unknown auth scheme: "); } + + private MethodSpec addV4Defaults() { + MethodSpec.Builder spec = MethodSpec.methodBuilder("applySigV4FamilyDefaults") + .addModifiers(Modifier.PRIVATE, Modifier.STATIC) + .returns(AuthSchemeOption.Builder.class) + .addParameter(AuthSchemeOption.Builder.class, "option") + .addParameter(authSchemeSpecUtils.parametersInterfaceName(), "params"); + + // All the operations share the same set of auth schemes, no need to create a switch statement. 
+ if (!sigV4AuthSchemeCodegenKnowledgeIndex.hasPerOperationSigV4Overrides()) { + AuthSchemeCodegenMetadata authType = sigV4AuthSchemeCodegenKnowledgeIndex.serviceSigV4Overrides(); + addAuthTypeProperties(spec, authType); + return spec.build(); + } + spec.beginControlFlow("switch(params.operation())"); + sigV4AuthSchemeCodegenKnowledgeIndex.forEachOperationsOverridesGroup((ops, scheme) -> { + if (!ops.isEmpty()) { + addCasesForOperations(spec, ops, scheme); + } + }); + AuthSchemeCodegenMetadata authType = sigV4AuthSchemeCodegenKnowledgeIndex.serviceSigV4Overrides(); + if (authType != null) { + addCasesForDefault(spec, authType); + } + spec.endControlFlow(); + return spec.build(); + } + + private void addCasesForOperations(MethodSpec.Builder spec, List operations, + AuthSchemeCodegenMetadata metadata) { + for (String name : operations) { + spec.addCode("case $S:\n", name); + } + addAuthTypeProperties(spec, metadata); + } + + private void addCasesForDefault(MethodSpec.Builder spec, + AuthSchemeCodegenMetadata metadata) { + spec.addCode("default:\n"); + addAuthTypeProperties(spec, metadata); + } + + private void addAuthTypeProperties(MethodSpec.Builder spec, AuthSchemeCodegenMetadata metadata) { + spec.addCode("option"); + spec.addCode(AuthSchemeCodegenMetadataExt.codegenSignerPropertiesIfAbsent(authSchemeSpecUtils, metadata.properties())); + spec.addStatement(""); + spec.addStatement("return option"); + } + private Map parameters() { return endpointRulesSpecUtils.parameters(); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/ModelAuthSchemeClassesKnowledgeIndex.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/ModelAuthSchemeClassesKnowledgeIndex.java new file mode 100644 index 000000000000..92c6047301c7 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/ModelAuthSchemeClassesKnowledgeIndex.java @@ -0,0 +1,74 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.auth.scheme; + +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; +import java.util.stream.Collectors; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.http.auth.aws.scheme.AwsV4aAuthScheme; +import software.amazon.awssdk.http.auth.scheme.NoAuthAuthScheme; + +/** + * Knowledge index of the configured auth schemes concrete classes. + */ +public final class ModelAuthSchemeClassesKnowledgeIndex { + private final Set> serviceConcreteAuthSchemeClasses; + + private ModelAuthSchemeClassesKnowledgeIndex(IntermediateModel intermediateModel) { + this.serviceConcreteAuthSchemeClasses = + getServiceConcreteAuthSchemeClasses(ModelAuthSchemeKnowledgeIndex.of(intermediateModel).operationsToMetadata(), + intermediateModel.getCustomizationConfig().isEnableEndpointAuthSchemeParams()); + } + + /** + * Creates a new {@link AuthSchemeCodegenKnowledgeIndex} using the given {@code intermediateModel}.. + */ + public static ModelAuthSchemeClassesKnowledgeIndex of(IntermediateModel intermediateModel) { + return new ModelAuthSchemeClassesKnowledgeIndex(intermediateModel); + } + + /** + * Returns the set of all the service supported concrete auth scheme classes. 
+ */ + public Set> serviceConcreteAuthSchemeClasses() { + return serviceConcreteAuthSchemeClasses; + } + + private static Set> getServiceConcreteAuthSchemeClasses( + Map, List> operationsToAuthSchemes, + boolean useEndpointBasedAuthProvider + ) { + Set> result = operationsToAuthSchemes + .values() + .stream() + .flatMap(Collection::stream) + .map(AuthSchemeCodegenMetadata::authSchemeClass) + .collect(Collectors.toCollection(() -> new TreeSet<>(Comparator.comparing(Class::getSimpleName)))); + if (useEndpointBasedAuthProvider) { + // sigv4a is not modeled but needed for the endpoints based auth-scheme cases. + result.add(AwsV4aAuthScheme.class); + } + // Make the no-auth scheme available. + result.add(NoAuthAuthScheme.class); + return Collections.unmodifiableSet(result); + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/ModelAuthSchemeKnowledgeIndex.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/ModelAuthSchemeKnowledgeIndex.java new file mode 100644 index 000000000000..2b0a39145e43 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/ModelAuthSchemeKnowledgeIndex.java @@ -0,0 +1,165 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.poet.auth.scheme; + +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.model.intermediate.OperationModel; +import software.amazon.awssdk.codegen.model.service.AuthType; + +/** + * Knowledge index to get access to the configured service auth schemes and operations overrides. This index is optimized for + * code generation of switch statements therefore the data is grouped by operations that share the same auth schemes. This + * index is a building block for {@link AuthSchemeCodegenKnowledgeIndex} and {@link SigV4AuthSchemeCodegenKnowledgeIndex} + * indexes that have a friendly interface for the codegen use cases. + */ +public final class ModelAuthSchemeKnowledgeIndex { + private final IntermediateModel intermediateModel; + + private ModelAuthSchemeKnowledgeIndex(IntermediateModel intermediateModel) { + this.intermediateModel = intermediateModel; + } + + /** + * Creates a new knowledge index using the given model. + */ + public static ModelAuthSchemeKnowledgeIndex of(IntermediateModel intermediateModel) { + return new ModelAuthSchemeKnowledgeIndex(intermediateModel); + } + + /** + * Returns a map from a list of operations to all the auth schemes that the operations accept. 
+ * + * @return a map from a list of operations to all the auth schemes that the operations accept + */ + public Map, List> operationsToMetadata() { + List serviceDefaults = serviceDefaultAuthTypes(); + if (serviceDefaults.size() == 1) { + String authTypeName = serviceDefaults.get(0).value(); + SigV4SignerDefaults defaults = AuthTypeToSigV4Default.authTypeToDefaults().get(authTypeName); + if (areServiceWide(defaults)) { + return operationsToModeledMetadataFormSigV4Defaults(defaults); + } + } + return operationsToModeledMetadata(); + } + + /** + * Computes a map from operations to codegen metadata objects. The intermediate model is used to compute mappings to + * {@link AuthType} values for the service and for each operation that has an override. Then we group all the operations that + * share the same set of auth types together and finally convert the auth types to their corresponding codegen metadata + * instances that then we can use to codegen switch statements. The service wide codegen metadata instances are keyed using + * {@link Collections#emptyList()}. + */ + private Map, List> operationsToModeledMetadata() { + Map, List> operationsToAuthType = operationsToAuthType(); + Map, List> operationsToMetadata = new LinkedHashMap<>(); + operationsToAuthType.forEach((k, v) -> operationsToMetadata.put(k, authTypeToCodegenMetadata(v))); + return operationsToMetadata; + } + + /** + * Returns a map from list of operations to the list of auth-types modeled for those operations. The values are taken directly + * from the model {@link OperationModel#getAuth()} method. 
+ */ + private Map, List> operationsToAuthType() { + Map, List> authSchemesToOperations = + intermediateModel.getOperations() + .entrySet() + .stream() + .filter(kvp -> !kvp.getValue().getAuth().isEmpty()) + .collect(Collectors.groupingBy(kvp -> kvp.getValue().getAuth(), + Collectors.mapping(Map.Entry::getKey, Collectors.toList()))); + + Map, List> operationsToAuthType = authSchemesToOperations + .entrySet() + .stream() + .sorted(Comparator.comparing(kvp -> kvp.getValue().get(0))) + .collect(Collectors.toMap(Map.Entry::getValue, + Map.Entry::getKey, (a, b) -> b, + LinkedHashMap::new)); + + List serviceDefaults = serviceDefaultAuthTypes(); + + // Get the list of operations that share the same auth schemes as the system defaults and remove it from the result. We + // will take care of all of these in the fallback `default` case. + List operationsWithDefaults = authSchemesToOperations.remove(serviceDefaults); + operationsToAuthType.remove(operationsWithDefaults); + operationsToAuthType.put(Collections.emptyList(), serviceDefaults); + return operationsToAuthType; + } + + + /** + * Similar to {@link #operationsToModeledMetadata()} computes a map from operations to codegen metadata objects. The service + * default list of codegen metadata is keyed with {@link Collections#emptyList()}. 
+ */ + private Map, List> operationsToModeledMetadataFormSigV4Defaults( + SigV4SignerDefaults defaults + ) { + Map> defaultsToOperations = + defaults.operations() + .entrySet() + .stream() + .collect(Collectors.groupingBy(Map.Entry::getValue, + Collectors.mapping(Map.Entry::getKey, + Collectors.toList()))); + + Map, SigV4SignerDefaults> operationsToDefaults = + defaultsToOperations.entrySet() + .stream() + .sorted(Comparator.comparing(left -> left.getValue().get(0))) + .collect(Collectors.toMap(Map.Entry::getValue, + Map.Entry::getKey, (a, b) -> b, + LinkedHashMap::new)); + + Map, List> result = new LinkedHashMap<>(); + for (Map.Entry, SigV4SignerDefaults> kvp : operationsToDefaults.entrySet()) { + result.put(kvp.getKey(), + Arrays.asList(AuthSchemeCodegenMetadataExt.fromConstants(kvp.getValue()))); + } + result.put(Collections.emptyList(), Arrays.asList(AuthSchemeCodegenMetadataExt.fromConstants(defaults))); + return result; + } + + /** + * Returns the list of modeled top-level auth-types. 
+ */ + private List serviceDefaultAuthTypes() { + List modeled = intermediateModel.getMetadata().getAuth(); + if (!modeled.isEmpty()) { + return modeled; + } + return Collections.singletonList(intermediateModel.getMetadata().getAuthType()); + } + + private List authTypeToCodegenMetadata(List authTypes) { + return authTypes.stream().map(AuthSchemeCodegenMetadataExt::fromAuthType).collect(Collectors.toList()); + } + + private boolean areServiceWide(SigV4SignerDefaults defaults) { + return defaults != null + && defaults.isServiceOverrideAuthScheme() + && Objects.equals(intermediateModel.getMetadata().getServiceName(), defaults.service()); + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/ModelBasedAuthSchemeProviderSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/ModelBasedAuthSchemeProviderSpec.java index 9f9f0abcf04b..79d5125e65c5 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/ModelBasedAuthSchemeProviderSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/ModelBasedAuthSchemeProviderSpec.java @@ -15,9 +15,6 @@ package software.amazon.awssdk.codegen.poet.auth.scheme; -import static software.amazon.awssdk.codegen.poet.auth.scheme.AuthSchemeCodegenMetadata.SignerPropertyValueProvider; -import static software.amazon.awssdk.codegen.poet.auth.scheme.AuthSchemeCodegenMetadata.fromAuthType; - import com.squareup.javapoet.ClassName; import com.squareup.javapoet.FieldSpec; import com.squareup.javapoet.MethodSpec; @@ -27,20 +24,20 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.Map; import javax.lang.model.element.Modifier; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; -import software.amazon.awssdk.codegen.model.service.AuthType; import software.amazon.awssdk.codegen.poet.ClassSpec; import 
software.amazon.awssdk.codegen.poet.PoetUtils; import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; public class ModelBasedAuthSchemeProviderSpec implements ClassSpec { private final AuthSchemeSpecUtils authSchemeSpecUtils; + private final AuthSchemeCodegenKnowledgeIndex knowledgeIndex; public ModelBasedAuthSchemeProviderSpec(IntermediateModel intermediateModel) { this.authSchemeSpecUtils = new AuthSchemeSpecUtils(intermediateModel); + this.knowledgeIndex = AuthSchemeCodegenKnowledgeIndex.of(intermediateModel); } @Override @@ -93,54 +90,43 @@ private MethodSpec resolveAuthSchemeMethod() { spec.addStatement("$T options = new $T<>()", ParameterizedTypeName.get(List.class, AuthSchemeOption.class), TypeName.get(ArrayList.class)); - Map, List> operationsToAuthType = authSchemeSpecUtils.operationsToAuthType(); - - // All the operations share the same set of auth schemes, no need to create a switch statement. - if (operationsToAuthType.size() == 1) { - List types = operationsToAuthType.get(Collections.emptyList()); - for (AuthType authType : types) { + if (knowledgeIndex.hasPerOperationAuthSchemesOverrides()) { + // We create a switch to return the auth schemes overrides per + // operation. + spec.beginControlFlow("switch(params.operation())"); + knowledgeIndex.forEachOperationsOverridesGroup((ops, schemes) -> addCasesForOperations(spec, ops, schemes)); + addCasesForOperations(spec, Collections.emptyList(), knowledgeIndex.serviceDefaultAuthSchemes()); + spec.endControlFlow(); + } else { + // All the operations share the same set of auth schemes, no need to create a switch statement. 
+ List types = knowledgeIndex.serviceDefaultAuthSchemes(); + for (AuthSchemeCodegenMetadata authType : types) { addAuthTypeProperties(spec, authType); } - return spec.addStatement("return $T.unmodifiableList(options)", Collections.class) - .build(); } - spec.beginControlFlow("switch(params.operation())"); - operationsToAuthType.forEach((ops, schemes) -> { - if (!ops.isEmpty()) { - addCasesForOperations(spec, ops, schemes); - } - }); - addCasesForOperations(spec, Collections.emptyList(), operationsToAuthType.get(Collections.emptyList())); - spec.endControlFlow(); - return spec.addStatement("return $T.unmodifiableList(options)", Collections.class) .build(); } - private void addCasesForOperations(MethodSpec.Builder spec, List operations, List schemes) { + private void addCasesForOperations(MethodSpec.Builder spec, List operations, + List schemes) { if (operations.isEmpty()) { - spec.addCode("default:"); + spec.addCode("default:\n"); } else { for (String name : operations) { - spec.addCode("case $S:", name); + spec.addCode("case $S\n:", name); } } - for (AuthType authType : schemes) { - addAuthTypeProperties(spec, authType); + for (AuthSchemeCodegenMetadata metadata : schemes) { + addAuthTypeProperties(spec, metadata); } spec.addStatement("break"); } - public void addAuthTypeProperties(MethodSpec.Builder spec, AuthType authType) { - AuthSchemeCodegenMetadata metadata = fromAuthType(authType); - spec.addCode("options.add($T.builder().schemeId($S)", - AuthSchemeOption.class, metadata.schemeId()); - for (SignerPropertyValueProvider property : metadata.properties()) { - spec.addCode(".putSignerProperty($T.$N, ", property.containingClass(), property.fieldName()); - property.emitValue(spec, authSchemeSpecUtils); - spec.addCode(")"); - - } - spec.addCode(".build());\n"); + private void addAuthTypeProperties(MethodSpec.Builder spec, AuthSchemeCodegenMetadata metadata) { + spec.addCode("options.add("); + spec.addCode(AuthSchemeCodegenMetadataExt.codegenNewAuthOption(metadata, 
authSchemeSpecUtils)); + spec.addCode(".build()"); + spec.addCode(");\n"); } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/SigV4AuthSchemeCodegenKnowledgeIndex.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/SigV4AuthSchemeCodegenKnowledgeIndex.java new file mode 100644 index 000000000000..832d349dc195 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/SigV4AuthSchemeCodegenKnowledgeIndex.java @@ -0,0 +1,168 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.auth.scheme; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.BiConsumer; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.poet.auth.scheme.AuthSchemeCodegenMetadata.SignerPropertyValueProvider; +import software.amazon.awssdk.http.auth.aws.scheme.AwsV4AuthScheme; + +/** + * Knowledge index to compute the sets of operations that share the same set of sigv4 overrides. 
+ */ +public final class SigV4AuthSchemeCodegenKnowledgeIndex { + private final Map, AuthSchemeCodegenMetadata> operationsToSigv4AuthScheme; + + private SigV4AuthSchemeCodegenKnowledgeIndex(IntermediateModel intermediateModel) { + this.operationsToSigv4AuthScheme = + operationsToSigv4AuthScheme(ModelAuthSchemeKnowledgeIndex.of(intermediateModel).operationsToMetadata()); + } + + /** + * Creates a new knowledge index from the given model. + */ + public static SigV4AuthSchemeCodegenKnowledgeIndex of(IntermediateModel intermediateModel) { + return new SigV4AuthSchemeCodegenKnowledgeIndex(intermediateModel); + } + + /** + * Returns the service overrides for sigv4. This method returns null if there are none configured. The service may or may not + * support sigv4 regardless. + */ + public AuthSchemeCodegenMetadata serviceSigV4Overrides() { + return operationsToSigv4AuthScheme.get(Collections.emptyList()); + } + + /** + * Returns true if there are any sigv4 overrides per operation. + * + * @return true if there are auth scheme overrides per operation + */ + public boolean hasPerOperationSigV4Overrides() { + if (operationsToSigv4AuthScheme.containsKey(Collections.emptyList())) { + return operationsToSigv4AuthScheme.size() > 1; + } + return !operationsToSigv4AuthScheme.isEmpty(); + } + + /** + * Returns true if there are any service wide sigv4 overrides. + */ + public boolean hasServiceSigV4Overrides() { + return serviceSigV4Overrides() != null; + } + + /** + * Returns true if there are sigv4 signer overrides in the model. + */ + public boolean hasSigV4Overrides() { + return hasServiceSigV4Overrides() || hasPerOperationSigV4Overrides(); + } + + + /** + * Traverses each group of operations with the same set of auth schemes. + * + * @param consumer The consumer to call for each group of operations with the same set of auth schemes. 
+ */ + public void forEachOperationsOverridesGroup(BiConsumer, AuthSchemeCodegenMetadata> consumer) { + for (Map.Entry, AuthSchemeCodegenMetadata> kvp : operationsToSigv4AuthScheme.entrySet()) { + if (kvp.getKey().isEmpty()) { + // Ignore service wide defaults. + continue; + } + consumer.accept(kvp.getKey(), kvp.getValue()); + } + } + + /** + * Returns a map that groups all operations that share the ame set of sigv4 signer properties with override values. The + * service wide default values are encoded using {@link Collections#emptyList()} as a key and the value may be null. + */ + private Map, AuthSchemeCodegenMetadata> operationsToSigv4AuthScheme( + Map, List> operationsToMetadata + ) { + Map, AuthSchemeCodegenMetadata> result = new HashMap<>(); + for (Map.Entry, List> kvp : operationsToMetadata.entrySet()) { + AuthSchemeCodegenMetadata sigv4 = sigV4AuthSchemeWithConstantOverrides(kvp.getValue()); + if (sigv4 != null) { + result.put(kvp.getKey(), sigv4); + } + } + return result; + } + + /** + * Finds the sigv4 auth scheme from the list and transforms it to remove any signer property that does not have a constant + * value. Returns null if there are no signer properties with constant values or if the sigv4 auth scheme is not found. + */ + private AuthSchemeCodegenMetadata sigV4AuthSchemeWithConstantOverrides(List authSchemes) { + AuthSchemeCodegenMetadata sigv4 = findSigV4AuthScheme(authSchemes); + if (sigv4 == null) { + return null; + } + List signerPropertiesWithConstantValues = + filterSignerPropertiesWithConstantValues(sigv4.properties()); + + // No signer properties with overrides, we return null: we are only + // interested when there are any properties with constant values for codegen. + if (signerPropertiesWithConstantValues.isEmpty()) { + return null; + } + // Return the auth scheme but only retain the properties with constant values. 
+ return sigv4.toBuilder() + .properties(signerPropertiesWithConstantValues) + .build(); + } + + /** + * Returns a new list of singer properties with only those properties that use a constant value. + */ + private List filterSignerPropertiesWithConstantValues( + List properties + ) { + List result = null; + for (SignerPropertyValueProvider property : properties) { + if (property.isConstant()) { + if (result == null) { + result = new ArrayList<>(); + } + result.add(property); + } + } + if (result != null) { + return result; + } + return Collections.emptyList(); + } + + /** + * Filters out the auth scheme with scheme id "aws.auth#sigv4". Returns {@code null} if not found. + */ + private AuthSchemeCodegenMetadata findSigV4AuthScheme(List authSchemes) { + for (AuthSchemeCodegenMetadata metadata : authSchemes) { + if (metadata.schemeId().equals(AwsV4AuthScheme.SCHEME_ID)) { + return metadata; + } + } + return null; + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/SigV4SignerDefaults.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/SigV4SignerDefaults.java new file mode 100644 index 000000000000..4d6a69786ab0 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/SigV4SignerDefaults.java @@ -0,0 +1,243 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.poet.auth.scheme; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import software.amazon.awssdk.utils.Validate; + +/** + * Tracks a set of explicitly enabled signer properties for the family of AWS SigV4 signers. The currently supported attributes + * are {@code doubleUrlEncode}, {@code normalizePath}, {@code payloadSigningEnabled}, {@code chunkEncodingEnabled}. If the + * value is null then is not overridden. An auth type can also represent a service-wide set of defaults. + */ +public final class SigV4SignerDefaults { + private final String service; + private final String authType; + private final String schemeId; + private final Boolean doubleUrlEncode; + private final Boolean normalizePath; + private final Boolean payloadSigningEnabled; + private final Boolean chunkEncodingEnabled; + private final Map operations; + + private SigV4SignerDefaults(Builder builder) { + this.service = builder.service; + this.authType = Validate.notNull(builder.authType, "authType"); + this.schemeId = Validate.notNull(builder.schemeId, "schemeId"); + this.doubleUrlEncode = builder.doubleUrlEncode; + this.normalizePath = builder.normalizePath; + this.payloadSigningEnabled = builder.payloadSigningEnabled; + this.chunkEncodingEnabled = builder.chunkEncodingEnabled; + this.operations = Collections.unmodifiableMap(new HashMap<>(builder.operations)); + } + + public boolean isServiceOverrideAuthScheme() { + return service != null; + } + + public String service() { + return service; + } + + public String authType() { + return authType; + } + + public String schemeId() { + return schemeId; + } + + public Boolean doubleUrlEncode() { + return doubleUrlEncode; + } + + public Boolean normalizePath() { + return normalizePath; + } + + public Boolean payloadSigningEnabled() { + return payloadSigningEnabled; + } + + public Boolean chunkEncodingEnabled() { + return chunkEncodingEnabled; + } + + 
public Map operations() { + return operations; + } + + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + SigV4SignerDefaults defaults = (SigV4SignerDefaults) o; + + if (!Objects.equals(service, defaults.service)) { + return false; + } + if (!authType.equals(defaults.authType)) { + return false; + } + if (!schemeId.equals(defaults.schemeId)) { + return false; + } + if (!Objects.equals(doubleUrlEncode, defaults.doubleUrlEncode)) { + return false; + } + if (!Objects.equals(normalizePath, defaults.normalizePath)) { + return false; + } + if (!Objects.equals(payloadSigningEnabled, defaults.payloadSigningEnabled)) { + return false; + } + if (!Objects.equals(chunkEncodingEnabled, defaults.chunkEncodingEnabled)) { + return false; + } + return operations.equals(defaults.operations); + } + + @Override + public int hashCode() { + int result = service != null ? service.hashCode() : 0; + result = 31 * result + authType.hashCode(); + result = 31 * result + schemeId.hashCode(); + result = 31 * result + (doubleUrlEncode != null ? doubleUrlEncode.hashCode() : 0); + result = 31 * result + (normalizePath != null ? normalizePath.hashCode() : 0); + result = 31 * result + (payloadSigningEnabled != null ? payloadSigningEnabled.hashCode() : 0); + result = 31 * result + (chunkEncodingEnabled != null ? 
chunkEncodingEnabled.hashCode() : 0); + result = 31 * result + operations.hashCode(); + return result; + } + + public static Builder builder() { + return new Builder(); + } + + public static class Builder { + private String authType; + private String service; + private String schemeId; + private Boolean doubleUrlEncode; + private Boolean normalizePath; + private Boolean payloadSigningEnabled; + private Boolean chunkEncodingEnabled; + + private Map operations = new HashMap<>(); + + public Builder() { + } + + public Builder(SigV4SignerDefaults other) { + this.service = other.service; + this.authType = Validate.notNull(other.authType, "name"); + this.schemeId = Validate.notNull(other.schemeId, "schemeId"); + this.doubleUrlEncode = other.doubleUrlEncode; + this.normalizePath = other.normalizePath; + this.payloadSigningEnabled = other.payloadSigningEnabled; + this.chunkEncodingEnabled = other.chunkEncodingEnabled; + this.operations.putAll(other.operations); + } + + public String service() { + return service; + } + + public Builder service(String service) { + this.service = service; + return this; + } + + public String authType() { + return authType; + } + + public Builder authType(String authType) { + this.authType = authType; + return this; + } + + public String schemeId() { + return schemeId; + } + + public Builder schemeId(String schemeId) { + this.schemeId = schemeId; + return this; + } + + public Boolean doubleUrlEncode() { + return doubleUrlEncode; + } + + public Builder doubleUrlEncode(Boolean doubleUrlEncode) { + this.doubleUrlEncode = doubleUrlEncode; + return this; + } + + public Boolean normalizePath() { + return normalizePath; + } + + public Builder normalizePath(Boolean normalizePath) { + this.normalizePath = normalizePath; + return this; + } + + public Boolean payloadSigningEnabled() { + return payloadSigningEnabled; + } + + public Builder payloadSigningEnabled(Boolean payloadSigningEnabled) { + this.payloadSigningEnabled = payloadSigningEnabled; + return 
this; + } + + public Boolean chunkEncodingEnabled() { + return chunkEncodingEnabled; + } + + public Builder chunkEncodingEnabled(Boolean chunkEncodingEnabled) { + this.chunkEncodingEnabled = chunkEncodingEnabled; + return this; + } + + public Map operations() { + return operations; + } + + public Builder putOperation(String name, SigV4SignerDefaults constants) { + this.operations.put(name, constants); + return this; + } + + public SigV4SignerDefaults build() { + return new SigV4SignerDefaults(this); + } + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java index b045552c5950..ed8418e1dc7c 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java @@ -51,6 +51,7 @@ import software.amazon.awssdk.codegen.poet.ClassSpec; import software.amazon.awssdk.codegen.poet.PoetUtils; import software.amazon.awssdk.codegen.poet.auth.scheme.AuthSchemeSpecUtils; +import software.amazon.awssdk.codegen.poet.auth.scheme.ModelAuthSchemeClassesKnowledgeIndex; import software.amazon.awssdk.codegen.poet.model.ServiceClientConfigurationUtils; import software.amazon.awssdk.codegen.poet.rules.EndpointRulesSpecUtils; import software.amazon.awssdk.codegen.utils.AuthUtils; @@ -724,7 +725,8 @@ private MethodSpec authSchemesMethod() { .addModifiers(PRIVATE) .returns(returns); - Set> concreteAuthSchemeClasses = authSchemeSpecUtils.allServiceConcreteAuthSchemeClasses(); + ModelAuthSchemeClassesKnowledgeIndex index = ModelAuthSchemeClassesKnowledgeIndex.of(model); + Set> concreteAuthSchemeClasses = index.serviceConcreteAuthSchemeClasses(); builder.addStatement("$T schemes = new $T<>($L + this.additionalAuthSchemes.size())", returns, HashMap.class, concreteAuthSchemeClasses.size()); for (Class 
concreteAuthScheme : concreteAuthSchemeClasses) { @@ -744,7 +746,7 @@ private MethodSpec invokePluginsMethod() { .addParameter(SdkClientConfiguration.class, "config") .returns(SdkClientConfiguration.class); - builder.addStatement("$T internalPlugins = internalPlugins()", + builder.addStatement("$T internalPlugins = internalPlugins(config)", ParameterizedTypeName.get(List.class, SdkPlugin.class)); builder.addStatement("$T externalPlugins = plugins()", @@ -773,6 +775,7 @@ private MethodSpec internalPluginsMethod() { MethodSpec.Builder builder = MethodSpec.methodBuilder("internalPlugins") .addModifiers(PRIVATE) + .addParameter(SdkClientConfiguration.class, "config") .returns(parameterizedTypeName); List internalPlugins = model.getCustomizationConfig().getInternalPlugins(); @@ -784,14 +787,32 @@ private MethodSpec internalPluginsMethod() { builder.addStatement("$T internalPlugins = new $T<>()", parameterizedTypeName, ArrayList.class); for (String internalPlugin : internalPlugins) { - ClassName pluginClass = ClassName.bestGuess(internalPlugin); - builder.addStatement("internalPlugins.add(new $T())", pluginClass); + String arguments = internalPluginNewArguments(internalPlugin); + String internalPluginClass = internalPluginClass(internalPlugin); + ClassName pluginClass = ClassName.bestGuess(internalPluginClass); + builder.addStatement("internalPlugins.add(new $T($L))", pluginClass, arguments); } builder.addStatement("return internalPlugins"); return builder.build(); } + private String internalPluginClass(String internalPlugin) { + int openParenthesisIndex = internalPlugin.indexOf('('); + if (openParenthesisIndex == -1) { + return internalPlugin; + } + return internalPlugin.substring(0, openParenthesisIndex); + } + + private String internalPluginNewArguments(String internalPlugin) { + int openParenthesisIndex = internalPlugin.indexOf('('); + if (openParenthesisIndex == -1) { + return ""; + } + return internalPlugin.substring(openParenthesisIndex); + } + @Override public 
ClassName className() { return builderClassName; diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointResolverInterceptorSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointResolverInterceptorSpec.java index 7a72cc7637e9..1d54bd7e7928 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointResolverInterceptorSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointResolverInterceptorSpec.java @@ -58,6 +58,7 @@ import software.amazon.awssdk.codegen.poet.PoetExtension; import software.amazon.awssdk.codegen.poet.PoetUtils; import software.amazon.awssdk.codegen.poet.auth.scheme.AuthSchemeSpecUtils; +import software.amazon.awssdk.codegen.poet.auth.scheme.ModelAuthSchemeClassesKnowledgeIndex; import software.amazon.awssdk.core.SdkRequest; import software.amazon.awssdk.core.SelectedAuthScheme; import software.amazon.awssdk.core.exception.SdkClientException; @@ -96,7 +97,8 @@ public EndpointResolverInterceptorSpec(IntermediateModel model) { // We need to know whether the service has a dependency on the http-auth-aws module. Because we can't check that // directly, assume that if they're using AwsV4AuthScheme or AwsV4aAuthScheme that it's available. 
- Set> supportedAuthSchemes = new AuthSchemeSpecUtils(model).allServiceConcreteAuthSchemeClasses(); + Set> supportedAuthSchemes = + ModelAuthSchemeClassesKnowledgeIndex.of(model).serviceConcreteAuthSchemeClasses(); this.dependsOnHttpAuthAws = supportedAuthSchemes.contains(AwsV4AuthScheme.class) || supportedAuthSchemes.contains(AwsV4aAuthScheme.class); diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClassTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClassTest.java index ae6ea5a4df3c..253eadc0f59f 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClassTest.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClassTest.java @@ -73,12 +73,61 @@ public void syncComposedDefaultClientBuilderClass() { validateBaseClientBuilderClassGeneration(composedClientJsonServiceModels(), "test-composed-sync-default-client-builder.java"); } + + @Test + void baseClientBuilderClass_sra() { + validateBaseClientBuilderClassGeneration(restJsonServiceModels(), "test-client-builder-class.java", true); + } + + @Test + void baseClientBuilderClassWithBearerAuth_sra() { + validateBaseClientBuilderClassGeneration(bearerAuthServiceModels(), "test-bearer-auth-client-builder-class.java", true); + } + + @Test + void baseClientBuilderClassWithNoAuthOperation_sra() { + validateBaseClientBuilderClassGeneration(operationWithNoAuth(), "test-no-auth-ops-client-builder-class.java", true); + } + + @Test + void baseClientBuilderClassWithNoAuthService_sra() { + validateBaseClientBuilderClassGeneration(serviceWithNoAuth(), "test-no-auth-service-client-builder-class.java", true); + } + + @Test + void baseClientBuilderClassWithInternalUserAgent_sra() { + validateBaseClientBuilderClassGeneration(internalConfigModels(), "test-client-builder-internal-defaults-class.java", + true); + } + + @Test + void baseQueryClientBuilderClass_sra() { 
+ validateBaseClientBuilderClassGeneration(queryServiceModels(), "test-query-client-builder-class.java", true); + } + + @Test + void baseClientBuilderClassWithEndpointsAuthParams_sra() { + validateBaseClientBuilderClassGeneration(queryServiceModelsEndpointAuthParamsWithAllowList(), + "test-client-builder-endpoints-auth-params.java", true); + } + + @Test + void syncComposedDefaultClientBuilderClass_sra() { + validateBaseClientBuilderClassGeneration(composedClientJsonServiceModels(), + "test-composed-sync-default-client-builder.java", true); + } private void validateBaseClientBuilderClassGeneration(IntermediateModel model, String expectedClassName) { - model.getCustomizationConfig().setUseSraAuth(false); - validateGeneration(BaseClientBuilderClass::new, model, expectedClassName); + validateBaseClientBuilderClassGeneration(model, expectedClassName, false); + } - model.getCustomizationConfig().setUseSraAuth(true); - validateGeneration(BaseClientBuilderClass::new, model, "sra/" + expectedClassName); + private void validateBaseClientBuilderClassGeneration(IntermediateModel model, String expectedClassName, boolean sra) { + if (sra) { + model.getCustomizationConfig().setUseSraAuth(true); + validateGeneration(BaseClientBuilderClass::new, model, "sra/" + expectedClassName); + } else { + model.getCustomizationConfig().setUseSraAuth(false); + validateGeneration(BaseClientBuilderClass::new, model, expectedClassName); + } } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/mini-s3-auth-scheme-default-provider.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/mini-s3-auth-scheme-default-provider.java index dd7224fd84c7..07447fafbad4 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/mini-s3-auth-scheme-default-provider.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/mini-s3-auth-scheme-default-provider.java @@ -40,11 +40,11 @@ public 
static DefaultMiniS3AuthSchemeProvider create() { public List resolveAuthScheme(MiniS3AuthSchemeParams params) { List options = new ArrayList<>(); options.add(AuthSchemeOption.builder().schemeId("aws.auth#sigv4") - .putSignerProperty(AwsV4HttpSigner.SERVICE_SIGNING_NAME, "mini-s3-service") - .putSignerProperty(AwsV4HttpSigner.REGION_NAME, params.region().id()) - .putSignerProperty(AwsV4HttpSigner.DOUBLE_URL_ENCODE, false) - .putSignerProperty(AwsV4HttpSigner.NORMALIZE_PATH, false) - .putSignerProperty(AwsV4HttpSigner.PAYLOAD_SIGNING_ENABLED, false).build()); + .putSignerProperty(AwsV4HttpSigner.SERVICE_SIGNING_NAME, "mini-s3-service") + .putSignerProperty(AwsV4HttpSigner.REGION_NAME, params.region().id()) + .putSignerProperty(AwsV4HttpSigner.PAYLOAD_SIGNING_ENABLED, false) + .putSignerProperty(AwsV4HttpSigner.DOUBLE_URL_ENCODE, false) + .putSignerProperty(AwsV4HttpSigner.NORMALIZE_PATH, false).build()); return Collections.unmodifiableList(options); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-auth-scheme-interceptor.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-auth-scheme-interceptor.java index 1db64c130b05..48edb00b1855 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-auth-scheme-interceptor.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-auth-scheme-interceptor.java @@ -22,6 +22,7 @@ import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme; import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; +import software.amazon.awssdk.http.auth.spi.signer.HttpSigner; import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; import software.amazon.awssdk.identity.spi.Identity; import software.amazon.awssdk.identity.spi.IdentityProvider; @@ -60,14 +61,12 @@ private SelectedAuthScheme selectAuthScheme(List> 
authSchemes = executionAttributes.getAttribute(SdkInternalExecutionAttribute.AUTH_SCHEMES); - IdentityProviders identityProviders = executionAttributes - .getAttribute(SdkInternalExecutionAttribute.IDENTITY_PROVIDERS); + IdentityProviders identityProviders = executionAttributes.getAttribute(SdkInternalExecutionAttribute.IDENTITY_PROVIDERS); List> discardedReasons = new ArrayList<>(); for (AuthSchemeOption authOption : authOptions) { AuthScheme authScheme = authSchemes.get(authOption.schemeId()); SelectedAuthScheme selectedAuthScheme = trySelectAuthScheme(authOption, authScheme, - identityProviders, discardedReasons, - metricCollector, executionAttributes); + identityProviders, discardedReasons, metricCollector, executionAttributes); if (selectedAuthScheme != null) { if (!discardedReasons.isEmpty()) { LOG.debug(() -> String.format("%s auth will be used, discarded: '%s'", authOption.schemeId(), @@ -90,8 +89,7 @@ private QueryAuthSchemeParams authSchemeParams(SdkRequest request, ExecutionAttr } private SelectedAuthScheme trySelectAuthScheme(AuthSchemeOption authOption, AuthScheme authScheme, - IdentityProviders identityProviders, List> discardedReasons, - MetricCollector metricCollector, + IdentityProviders identityProviders, List> discardedReasons, MetricCollector metricCollector, ExecutionAttributes executionAttributes) { if (authScheme == null) { discardedReasons.add(() -> String.format("'%s' is not enabled for this request.", authOption.schemeId())); @@ -103,6 +101,14 @@ private SelectedAuthScheme trySelectAuthScheme(AuthSchem .add(() -> String.format("'%s' does not have an identity provider configured.", authOption.schemeId())); return null; } + HttpSigner signer; + try { + signer = authScheme.signer(); + } catch (RuntimeException e) { + discardedReasons.add(() -> String.format("'%s' signer could not be retrieved: %s", authOption.schemeId(), + e.getMessage())); + return null; + } ResolveIdentityRequest.Builder identityRequestBuilder = 
ResolveIdentityRequest.builder(); authOption.forEachIdentityProperty(identityRequestBuilder::putProperty); CompletableFuture identity; @@ -113,7 +119,7 @@ private SelectedAuthScheme trySelectAuthScheme(AuthSchem identity = MetricUtils.reportDuration(() -> identityProvider.resolveIdentity(identityRequestBuilder.build()), metricCollector, metric); } - return new SelectedAuthScheme<>(identity, authScheme.signer(), authOption); + return new SelectedAuthScheme<>(identity, signer, authOption); } private SdkMetric getIdentityMetric(IdentityProvider identityProvider) { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-auth-scheme-endpoint-provider-without-allowlist.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-auth-scheme-endpoint-provider-without-allowlist.java index 8aa44f160393..97e874a1cf11 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-auth-scheme-endpoint-provider-without-allowlist.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-auth-scheme-endpoint-provider-without-allowlist.java @@ -10,6 +10,8 @@ import software.amazon.awssdk.awscore.endpoints.authscheme.SigV4AuthScheme; import software.amazon.awssdk.awscore.endpoints.authscheme.SigV4aAuthScheme; import software.amazon.awssdk.endpoints.Endpoint; +import software.amazon.awssdk.http.auth.aws.scheme.AwsV4AuthScheme; +import software.amazon.awssdk.http.auth.aws.scheme.AwsV4aAuthScheme; import software.amazon.awssdk.http.auth.aws.signer.AwsV4HttpSigner; import software.amazon.awssdk.http.auth.aws.signer.AwsV4aHttpSigner; import software.amazon.awssdk.http.auth.aws.signer.RegionSet; @@ -58,20 +60,22 @@ public List resolveAuthScheme(QueryAuthSchemeParams params) { SigV4AuthScheme sigv4AuthScheme = Validate.isInstanceOf(SigV4AuthScheme.class, authScheme, "Expecting 
auth scheme of class SigV4AuthScheme, got instead object of class %s", authScheme.getClass() .getName()); - options.add(AuthSchemeOption.builder().schemeId("aws.auth#sigv4") - .putSignerProperty(AwsV4HttpSigner.SERVICE_SIGNING_NAME, sigv4AuthScheme.signingName()) - .putSignerProperty(AwsV4HttpSigner.REGION_NAME, sigv4AuthScheme.signingRegion()) - .putSignerProperty(AwsV4HttpSigner.DOUBLE_URL_ENCODE, !sigv4AuthScheme.disableDoubleEncoding()).build()); + AuthSchemeOption sigv4AuthSchemeOption = AuthSchemeOption.builder().schemeId(AwsV4AuthScheme.SCHEME_ID) + .putSignerProperty(AwsV4HttpSigner.SERVICE_SIGNING_NAME, sigv4AuthScheme.signingName()) + .putSignerProperty(AwsV4HttpSigner.REGION_NAME, sigv4AuthScheme.signingRegion()) + .putSignerProperty(AwsV4HttpSigner.DOUBLE_URL_ENCODE, !sigv4AuthScheme.disableDoubleEncoding()).build(); + options.add(sigv4AuthSchemeOption); break; case "sigv4a": SigV4aAuthScheme sigv4aAuthScheme = Validate.isInstanceOf(SigV4aAuthScheme.class, authScheme, "Expecting auth scheme of class SigV4AuthScheme, got instead object of class %s", authScheme.getClass() .getName()); RegionSet regionSet = RegionSet.create(sigv4aAuthScheme.signingRegionSet()); - options.add(AuthSchemeOption.builder().schemeId("aws.auth#sigv4a") - .putSignerProperty(AwsV4aHttpSigner.SERVICE_SIGNING_NAME, sigv4aAuthScheme.signingName()) - .putSignerProperty(AwsV4aHttpSigner.REGION_SET, regionSet) - .putSignerProperty(AwsV4aHttpSigner.DOUBLE_URL_ENCODE, !sigv4aAuthScheme.disableDoubleEncoding()).build()); + AuthSchemeOption sigv4aAuthSchemeOption = AuthSchemeOption.builder().schemeId(AwsV4aAuthScheme.SCHEME_ID) + .putSignerProperty(AwsV4HttpSigner.SERVICE_SIGNING_NAME, sigv4aAuthScheme.signingName()) + .putSignerProperty(AwsV4aHttpSigner.REGION_SET, regionSet) + .putSignerProperty(AwsV4HttpSigner.DOUBLE_URL_ENCODE, !sigv4aAuthScheme.disableDoubleEncoding()).build(); + options.add(sigv4aAuthSchemeOption); break; default: throw new IllegalArgumentException("Unknown auth 
scheme: " + name); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-auth-scheme-endpoint-provider.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-auth-scheme-endpoint-provider.java index 589880dbef0d..ab4ed5f48035 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-auth-scheme-endpoint-provider.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-auth-scheme-endpoint-provider.java @@ -10,6 +10,8 @@ import software.amazon.awssdk.awscore.endpoints.authscheme.SigV4AuthScheme; import software.amazon.awssdk.awscore.endpoints.authscheme.SigV4aAuthScheme; import software.amazon.awssdk.endpoints.Endpoint; +import software.amazon.awssdk.http.auth.aws.scheme.AwsV4AuthScheme; +import software.amazon.awssdk.http.auth.aws.scheme.AwsV4aAuthScheme; import software.amazon.awssdk.http.auth.aws.signer.AwsV4HttpSigner; import software.amazon.awssdk.http.auth.aws.signer.AwsV4aHttpSigner; import software.amazon.awssdk.http.auth.aws.signer.RegionSet; @@ -56,20 +58,22 @@ public List resolveAuthScheme(QueryAuthSchemeParams params) { SigV4AuthScheme sigv4AuthScheme = Validate.isInstanceOf(SigV4AuthScheme.class, authScheme, "Expecting auth scheme of class SigV4AuthScheme, got instead object of class %s", authScheme.getClass() .getName()); - options.add(AuthSchemeOption.builder().schemeId("aws.auth#sigv4") - .putSignerProperty(AwsV4HttpSigner.SERVICE_SIGNING_NAME, sigv4AuthScheme.signingName()) - .putSignerProperty(AwsV4HttpSigner.REGION_NAME, sigv4AuthScheme.signingRegion()) - .putSignerProperty(AwsV4HttpSigner.DOUBLE_URL_ENCODE, !sigv4AuthScheme.disableDoubleEncoding()).build()); + AuthSchemeOption sigv4AuthSchemeOption = AuthSchemeOption.builder().schemeId(AwsV4AuthScheme.SCHEME_ID) + .putSignerProperty(AwsV4HttpSigner.SERVICE_SIGNING_NAME, 
sigv4AuthScheme.signingName()) + .putSignerProperty(AwsV4HttpSigner.REGION_NAME, sigv4AuthScheme.signingRegion()) + .putSignerProperty(AwsV4HttpSigner.DOUBLE_URL_ENCODE, !sigv4AuthScheme.disableDoubleEncoding()).build(); + options.add(sigv4AuthSchemeOption); break; case "sigv4a": SigV4aAuthScheme sigv4aAuthScheme = Validate.isInstanceOf(SigV4aAuthScheme.class, authScheme, "Expecting auth scheme of class SigV4AuthScheme, got instead object of class %s", authScheme.getClass() .getName()); RegionSet regionSet = RegionSet.create(sigv4aAuthScheme.signingRegionSet()); - options.add(AuthSchemeOption.builder().schemeId("aws.auth#sigv4a") - .putSignerProperty(AwsV4aHttpSigner.SERVICE_SIGNING_NAME, sigv4aAuthScheme.signingName()) - .putSignerProperty(AwsV4aHttpSigner.REGION_SET, regionSet) - .putSignerProperty(AwsV4aHttpSigner.DOUBLE_URL_ENCODE, !sigv4aAuthScheme.disableDoubleEncoding()).build()); + AuthSchemeOption sigv4aAuthSchemeOption = AuthSchemeOption.builder().schemeId(AwsV4aAuthScheme.SCHEME_ID) + .putSignerProperty(AwsV4HttpSigner.SERVICE_SIGNING_NAME, sigv4aAuthScheme.signingName()) + .putSignerProperty(AwsV4aHttpSigner.REGION_SET, regionSet) + .putSignerProperty(AwsV4HttpSigner.DOUBLE_URL_ENCODE, !sigv4aAuthScheme.disableDoubleEncoding()).build(); + options.add(sigv4aAuthSchemeOption); break; default: throw new IllegalArgumentException("Unknown auth scheme: " + name); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-with-allowlist-auth-scheme-interceptor.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-with-allowlist-auth-scheme-interceptor.java index aee41328cd1f..942aa43d9aee 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-with-allowlist-auth-scheme-interceptor.java +++ 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-with-allowlist-auth-scheme-interceptor.java @@ -22,6 +22,7 @@ import software.amazon.awssdk.endpoints.EndpointProvider; import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme; import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; +import software.amazon.awssdk.http.auth.spi.signer.HttpSigner; import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; import software.amazon.awssdk.identity.spi.Identity; import software.amazon.awssdk.identity.spi.IdentityProvider; @@ -117,6 +118,14 @@ private SelectedAuthScheme trySelectAuthScheme(AuthSchem .add(() -> String.format("'%s' does not have an identity provider configured.", authOption.schemeId())); return null; } + HttpSigner signer; + try { + signer = authScheme.signer(); + } catch (RuntimeException e) { + discardedReasons.add(() -> String.format("'%s' signer could not be retrieved: %s", authOption.schemeId(), + e.getMessage())); + return null; + } ResolveIdentityRequest.Builder identityRequestBuilder = ResolveIdentityRequest.builder(); authOption.forEachIdentityProperty(identityRequestBuilder::putProperty); CompletableFuture identity; @@ -127,7 +136,7 @@ private SelectedAuthScheme trySelectAuthScheme(AuthSchem identity = MetricUtils.reportDuration(() -> identityProvider.resolveIdentity(identityRequestBuilder.build()), metricCollector, metric); } - return new SelectedAuthScheme<>(identity, authScheme.signer(), authOption); + return new SelectedAuthScheme<>(identity, signer, authOption); } private SdkMetric getIdentityMetric(IdentityProvider identityProvider) { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-without-allowlist-auth-scheme-interceptor.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-without-allowlist-auth-scheme-interceptor.java index 
f95cc1366cd3..3c48c7bd907d 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-without-allowlist-auth-scheme-interceptor.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-without-allowlist-auth-scheme-interceptor.java @@ -22,6 +22,7 @@ import software.amazon.awssdk.endpoints.EndpointProvider; import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme; import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; +import software.amazon.awssdk.http.auth.spi.signer.HttpSigner; import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; import software.amazon.awssdk.identity.spi.Identity; import software.amazon.awssdk.identity.spi.IdentityProvider; @@ -120,6 +121,14 @@ private SelectedAuthScheme trySelectAuthScheme(AuthSchem .add(() -> String.format("'%s' does not have an identity provider configured.", authOption.schemeId())); return null; } + HttpSigner signer; + try { + signer = authScheme.signer(); + } catch (RuntimeException e) { + discardedReasons.add(() -> String.format("'%s' signer could not be retrieved: %s", authOption.schemeId(), + e.getMessage())); + return null; + } ResolveIdentityRequest.Builder identityRequestBuilder = ResolveIdentityRequest.builder(); authOption.forEachIdentityProperty(identityRequestBuilder::putProperty); CompletableFuture identity; @@ -130,7 +139,7 @@ private SelectedAuthScheme trySelectAuthScheme(AuthSchem identity = MetricUtils.reportDuration(() -> identityProvider.resolveIdentity(identityRequestBuilder.build()), metricCollector, metric); } - return new SelectedAuthScheme<>(identity, authScheme.signer(), authOption); + return new SelectedAuthScheme<>(identity, signer, authOption); } private SdkMetric getIdentityMetric(IdentityProvider identityProvider) { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-bearer-auth-client-builder-class.java 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-bearer-auth-client-builder-class.java index d69b3bf46ee0..65fa6c74f696 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-bearer-auth-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-bearer-auth-client-builder-class.java @@ -127,7 +127,7 @@ private IdentityProvider defaultTokenProvider() { @Override protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { - List internalPlugins = internalPlugins(); + List internalPlugins = internalPlugins(config); List externalPlugins = plugins(); if (internalPlugins.isEmpty() && externalPlugins.isEmpty()) { return config; @@ -141,7 +141,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { return configuration.build(); } - private List internalPlugins() { + private List internalPlugins(SdkClientConfiguration config) { return Collections.emptyList(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-class.java index 139f9cb63620..8fc7ef906c62 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-class.java @@ -214,7 +214,7 @@ protected final AttributeMap serviceHttpConfig() { @Override protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { - List internalPlugins = internalPlugins(); + List internalPlugins = internalPlugins(config); List externalPlugins = plugins(); if (internalPlugins.isEmpty() && externalPlugins.isEmpty()) { return config; @@ -228,7 +228,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { 
return configuration.build(); } - private List internalPlugins() { + private List internalPlugins(SdkClientConfiguration config) { List internalPlugins = new ArrayList<>(); internalPlugins.add(new InternalTestPlugin1()); internalPlugins.add(new InternalTestPlugin2()); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-endpoints-auth-params.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-endpoints-auth-params.java index 843a92ab430e..0529fabd1584 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-endpoints-auth-params.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-endpoints-auth-params.java @@ -149,7 +149,7 @@ private IdentityProvider defaultTokenProvider() { @Override protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { - List internalPlugins = internalPlugins(); + List internalPlugins = internalPlugins(config); List externalPlugins = plugins(); if (internalPlugins.isEmpty() && externalPlugins.isEmpty()) { return config; @@ -163,7 +163,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { return configuration.build(); } - private List internalPlugins() { + private List internalPlugins(SdkClientConfiguration config) { return Collections.emptyList(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-internal-defaults-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-internal-defaults-class.java index 2d02273d245b..b8bced3923a8 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-internal-defaults-class.java +++ 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-internal-defaults-class.java @@ -124,7 +124,7 @@ private Map> authSchemes() { @Override protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { - List internalPlugins = internalPlugins(); + List internalPlugins = internalPlugins(config); List externalPlugins = plugins(); if (internalPlugins.isEmpty() && externalPlugins.isEmpty()) { return config; @@ -138,7 +138,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { return configuration.build(); } - private List internalPlugins() { + private List internalPlugins(SdkClientConfiguration config) { return Collections.emptyList(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-composed-sync-default-client-builder.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-composed-sync-default-client-builder.java index 6d1252e85078..3ee6097742aa 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-composed-sync-default-client-builder.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-composed-sync-default-client-builder.java @@ -158,7 +158,7 @@ private IdentityProvider defaultTokenProvider() { @Override protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { - List internalPlugins = internalPlugins(); + List internalPlugins = internalPlugins(config); List externalPlugins = plugins(); if (internalPlugins.isEmpty() && externalPlugins.isEmpty()) { return config; @@ -172,7 +172,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { return configuration.build(); } - private List internalPlugins() { + private List internalPlugins(SdkClientConfiguration config) { return Collections.emptyList(); } diff --git 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-ops-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-ops-client-builder-class.java index 487f9d1c42a9..62ded62ffa67 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-ops-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-ops-client-builder-class.java @@ -119,7 +119,7 @@ private Map> authSchemes() { @Override protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { - List internalPlugins = internalPlugins(); + List internalPlugins = internalPlugins(config); List externalPlugins = plugins(); if (internalPlugins.isEmpty() && externalPlugins.isEmpty()) { return config; @@ -134,7 +134,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { return configuration.build(); } - private List internalPlugins() { + private List internalPlugins(SdkClientConfiguration config) { return Collections.emptyList(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-service-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-service-client-builder-class.java index 9876e54cd444..e9881e8fc484 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-service-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-service-client-builder-class.java @@ -107,7 +107,7 @@ private Map> authSchemes() { @Override protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { - List internalPlugins = internalPlugins(); + List internalPlugins = internalPlugins(config); List externalPlugins = plugins(); if (internalPlugins.isEmpty() && 
externalPlugins.isEmpty()) { return config; @@ -122,7 +122,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { return configuration.build(); } - private List internalPlugins() { + private List internalPlugins(SdkClientConfiguration config) { return Collections.emptyList(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-query-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-query-client-builder-class.java index 2b2853cf98df..12e6fe65e94f 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-query-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-query-client-builder-class.java @@ -146,7 +146,7 @@ private IdentityProvider defaultTokenProvider() { @Override protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { - List internalPlugins = internalPlugins(); + List internalPlugins = internalPlugins(config); List externalPlugins = plugins(); if (internalPlugins.isEmpty() && externalPlugins.isEmpty()) { return config; @@ -160,7 +160,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { return configuration.build(); } - private List internalPlugins() { + private List internalPlugins(SdkClientConfiguration config) { return Collections.emptyList(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-bearer-auth-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-bearer-auth-client-builder-class.java index b323e0e4b91b..e77d35434d4c 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-bearer-auth-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-bearer-auth-client-builder-class.java @@ -98,7 +98,7 @@ 
private Signer defaultTokenSigner() { @Override protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { - List internalPlugins = internalPlugins(); + List internalPlugins = internalPlugins(config); List externalPlugins = plugins(); if (internalPlugins.isEmpty() && externalPlugins.isEmpty()) { return config; @@ -112,7 +112,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { return configuration.build(); } - private List internalPlugins() { + private List internalPlugins(SdkClientConfiguration config) { return Collections.emptyList(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-class.java index 8b44ec2ce87f..ee013e70a9c4 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-class.java @@ -188,7 +188,7 @@ protected final AttributeMap serviceHttpConfig() { @Override protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { - List internalPlugins = internalPlugins(); + List internalPlugins = internalPlugins(config); List externalPlugins = plugins(); if (internalPlugins.isEmpty() && externalPlugins.isEmpty()) { return config; @@ -202,7 +202,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { return configuration.build(); } - private List internalPlugins() { + private List internalPlugins(SdkClientConfiguration config) { List internalPlugins = new ArrayList<>(); internalPlugins.add(new InternalTestPlugin1()); internalPlugins.add(new InternalTestPlugin2()); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-endpoints-auth-params.java 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-endpoints-auth-params.java index 945b796781ae..5e7c97377b7c 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-endpoints-auth-params.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-endpoints-auth-params.java @@ -120,7 +120,7 @@ private Signer defaultTokenSigner() { @Override protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { - List internalPlugins = internalPlugins(); + List internalPlugins = internalPlugins(config); List externalPlugins = plugins(); if (internalPlugins.isEmpty() && externalPlugins.isEmpty()) { return config; @@ -134,7 +134,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { return configuration.build(); } - private List internalPlugins() { + private List internalPlugins(SdkClientConfiguration config) { return Collections.emptyList(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-internal-defaults-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-internal-defaults-class.java index 4185bbc4e72c..82a6d7ad5549 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-internal-defaults-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-internal-defaults-class.java @@ -96,7 +96,7 @@ private JsonEndpointProvider defaultEndpointProvider() { @Override protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { - List internalPlugins = internalPlugins(); + List internalPlugins = internalPlugins(config); List externalPlugins = plugins(); if (internalPlugins.isEmpty() && externalPlugins.isEmpty()) { return config; @@ -110,7 +110,7 @@ protected SdkClientConfiguration 
invokePlugins(SdkClientConfiguration config) { return configuration.build(); } - private List internalPlugins() { + private List internalPlugins(SdkClientConfiguration config) { return Collections.emptyList(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-composed-sync-default-client-builder.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-composed-sync-default-client-builder.java index 01f52c3553c7..bbffc0a4d47c 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-composed-sync-default-client-builder.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-composed-sync-default-client-builder.java @@ -132,7 +132,7 @@ private Signer defaultTokenSigner() { @Override protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { - List internalPlugins = internalPlugins(); + List internalPlugins = internalPlugins(config); List externalPlugins = plugins(); if (internalPlugins.isEmpty() && externalPlugins.isEmpty()) { return config; @@ -146,7 +146,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { return configuration.build(); } - private List internalPlugins() { + private List internalPlugins(SdkClientConfiguration config) { return Collections.emptyList(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-ops-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-ops-client-builder-class.java index 450339ce03fd..8ff06f85e88d 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-ops-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-ops-client-builder-class.java @@ -88,7 +88,7 @@ private DatabaseEndpointProvider defaultEndpointProvider() { @Override protected 
SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { - List internalPlugins = internalPlugins(); + List internalPlugins = internalPlugins(config); List externalPlugins = plugins(); if (internalPlugins.isEmpty() && externalPlugins.isEmpty()) { return config; @@ -103,7 +103,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { return configuration.build(); } - private List internalPlugins() { + private List internalPlugins(SdkClientConfiguration config) { return Collections.emptyList(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-service-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-service-client-builder-class.java index f35b5f3c2b0d..dc6570ae80bc 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-service-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-service-client-builder-class.java @@ -73,7 +73,7 @@ private DatabaseEndpointProvider defaultEndpointProvider() { @Override protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { - List internalPlugins = internalPlugins(); + List internalPlugins = internalPlugins(config); List externalPlugins = plugins(); if (internalPlugins.isEmpty() && externalPlugins.isEmpty()) { return config; @@ -88,7 +88,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { return configuration.build(); } - private List internalPlugins() { + private List internalPlugins(SdkClientConfiguration config) { return Collections.emptyList(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java index 945b796781ae..5e7c97377b7c 100644 --- 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java @@ -120,7 +120,7 @@ private Signer defaultTokenSigner() { @Override protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { - List internalPlugins = internalPlugins(); + List internalPlugins = internalPlugins(config); List externalPlugins = plugins(); if (internalPlugins.isEmpty() && externalPlugins.isEmpty()) { return config; @@ -134,7 +134,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { return configuration.build(); } - private List internalPlugins() { + private List internalPlugins(SdkClientConfiguration config) { return Collections.emptyList(); } diff --git a/core/annotations/pom.xml b/core/annotations/pom.xml index ee7bed443a26..6a87275e5ef7 100644 --- a/core/annotations/pom.xml +++ b/core/annotations/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/core/arns/pom.xml b/core/arns/pom.xml index e3d7240f9e86..410aa42aff2f 100644 --- a/core/arns/pom.xml +++ b/core/arns/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/core/auth-crt/pom.xml b/core/auth-crt/pom.xml index 45b59ffbbb0a..cc5bef6ff28f 100644 --- a/core/auth-crt/pom.xml +++ b/core/auth-crt/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT auth-crt diff --git a/core/auth/pom.xml b/core/auth/pom.xml index 8e911a9a7433..8188891b9d0f 100644 --- a/core/auth/pom.xml +++ b/core/auth/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT auth diff --git a/core/aws-core/pom.xml b/core/aws-core/pom.xml index d3f11212a770..610de3de1510 100644 --- a/core/aws-core/pom.xml +++ b/core/aws-core/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk 
core - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT aws-core diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsClientHandlerUtils.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsClientHandlerUtils.java index d195e09774e8..f69b79469cce 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsClientHandlerUtils.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsClientHandlerUtils.java @@ -45,13 +45,15 @@ public static ByteBuffer encodeEventStreamRequestToByteBuffer(SdkHttpFullRequest Map headers = new LinkedHashMap<>(); request.forEachHeader((name, value) -> headers.put(name, HeaderValue.fromString(firstIfPresent(value)))); - byte[] payload = null; + byte[] payload; if (request.contentStreamProvider().isPresent()) { try { payload = IoUtils.toByteArray(request.contentStreamProvider().get().newStream()); } catch (IOException e) { throw new UncheckedIOException(e); } + } else { + payload = new byte[0]; } return new Message(headers, payload).toByteBuffer(); diff --git a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/handler/AwsClientHandlerUtilsTest.java b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/handler/AwsClientHandlerUtilsTest.java new file mode 100644 index 000000000000..4f3636c34cb9 --- /dev/null +++ b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/handler/AwsClientHandlerUtilsTest.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.awscore.client.handler; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.net.URI; +import java.nio.ByteBuffer; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.utils.StringInputStream; + +public class AwsClientHandlerUtilsTest { + + @Test + void nonNullPayload_shouldEncodeToEmptyMessage() { + SdkHttpFullRequest request = SdkHttpFullRequest.builder() + .method(SdkHttpMethod.GET) + .uri(URI.create("http://localhost")) + .contentStreamProvider(() -> new StringInputStream("test")) + .build(); + ByteBuffer buffer = AwsClientHandlerUtils.encodeEventStreamRequestToByteBuffer(request); + assertThat(buffer).isNotNull(); + } + + @Test + void nullPayload_shouldEncodeToEmptyMessage() { + SdkHttpFullRequest request = SdkHttpFullRequest.builder() + .method(SdkHttpMethod.GET) + .uri(URI.create("http://localhost")).build(); + ByteBuffer buffer = AwsClientHandlerUtils.encodeEventStreamRequestToByteBuffer(request); + assertThat(buffer).isNotNull(); + } +} diff --git a/core/checksums-spi/pom.xml b/core/checksums-spi/pom.xml index 78889bfe5e29..850604fe30dc 100644 --- a/core/checksums-spi/pom.xml +++ b/core/checksums-spi/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT checksums-spi diff --git a/core/checksums/pom.xml b/core/checksums/pom.xml index b20bb807ba9d..0143ad64c355 100644 --- a/core/checksums/pom.xml +++ b/core/checksums/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT checksums diff --git a/core/crt-core/pom.xml b/core/crt-core/pom.xml index 095836f22d96..5076bd7ad64d 100644 
--- a/core/crt-core/pom.xml +++ b/core/crt-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT crt-core diff --git a/core/endpoints-spi/pom.xml b/core/endpoints-spi/pom.xml index e4955ee94ac2..9252bf1f6704 100644 --- a/core/endpoints-spi/pom.xml +++ b/core/endpoints-spi/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/core/http-auth-aws-crt/pom.xml b/core/http-auth-aws-crt/pom.xml index 68e46b61dde6..bb1c34f5f121 100644 --- a/core/http-auth-aws-crt/pom.xml +++ b/core/http-auth-aws-crt/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT http-auth-aws-crt diff --git a/core/http-auth-aws-eventstream/pom.xml b/core/http-auth-aws-eventstream/pom.xml index d92c7e644d03..99044da0683a 100644 --- a/core/http-auth-aws-eventstream/pom.xml +++ b/core/http-auth-aws-eventstream/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT http-auth-aws-eventstream diff --git a/core/http-auth-aws/pom.xml b/core/http-auth-aws/pom.xml index a07cdd1c78a5..2de25a1fb119 100644 --- a/core/http-auth-aws/pom.xml +++ b/core/http-auth-aws/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT http-auth-aws diff --git a/core/http-auth-aws/src/main/java/software/amazon/awssdk/http/auth/aws/internal/scheme/DefaultAwsV4aAuthScheme.java b/core/http-auth-aws/src/main/java/software/amazon/awssdk/http/auth/aws/internal/scheme/DefaultAwsV4aAuthScheme.java index 8be2e8c43e80..b00574dac8ae 100644 --- a/core/http-auth-aws/src/main/java/software/amazon/awssdk/http/auth/aws/internal/scheme/DefaultAwsV4aAuthScheme.java +++ b/core/http-auth-aws/src/main/java/software/amazon/awssdk/http/auth/aws/internal/scheme/DefaultAwsV4aAuthScheme.java @@ -52,10 +52,27 @@ public IdentityProvider identityProvider(IdentityProvide */ @Override public AwsV4aHttpSigner signer() { + if (SignerSingletonHolder.ERROR 
!= null) { + throw SignerSingletonHolder.ERROR; + } return SignerSingletonHolder.INSTANCE; } private static class SignerSingletonHolder { - private static final AwsV4aHttpSigner INSTANCE = AwsV4aHttpSigner.create(); + private static final AwsV4aHttpSigner INSTANCE; + private static final RuntimeException ERROR; + + // Attempt to load the Sigv4a signer and cache the error if CRT is not available on the classpath. + static { + AwsV4aHttpSigner instance = null; + RuntimeException error = null; + try { + instance = AwsV4aHttpSigner.create(); + } catch (RuntimeException e) { + error = e; + } + INSTANCE = instance; + ERROR = error; + } } } diff --git a/core/http-auth-spi/pom.xml b/core/http-auth-spi/pom.xml index 28bf8a2f6838..b4cd2e8c3d44 100644 --- a/core/http-auth-spi/pom.xml +++ b/core/http-auth-spi/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT http-auth-spi diff --git a/core/http-auth/pom.xml b/core/http-auth/pom.xml index 37c5deddd1b5..077dbca670e3 100644 --- a/core/http-auth/pom.xml +++ b/core/http-auth/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT http-auth diff --git a/core/identity-spi/pom.xml b/core/identity-spi/pom.xml index 96b798464652..9198c828e8dd 100644 --- a/core/identity-spi/pom.xml +++ b/core/identity-spi/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT identity-spi diff --git a/core/imds/pom.xml b/core/imds/pom.xml index 8c3e2f407563..263290dd629c 100644 --- a/core/imds/pom.xml +++ b/core/imds/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 imds diff --git a/core/json-utils/pom.xml b/core/json-utils/pom.xml index df7c4b8234af..c1bd71dfda4c 100644 --- a/core/json-utils/pom.xml +++ b/core/json-utils/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/core/metrics-spi/pom.xml b/core/metrics-spi/pom.xml index 
a0a7a8e5359a..5203524c8a19 100644 --- a/core/metrics-spi/pom.xml +++ b/core/metrics-spi/pom.xml @@ -5,7 +5,7 @@ core software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/core/pom.xml b/core/pom.xml index a0de1196b8ec..eddce1ee92f5 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT core diff --git a/core/profiles/pom.xml b/core/profiles/pom.xml index 96f0e316accf..dca573c04799 100644 --- a/core/profiles/pom.xml +++ b/core/profiles/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT profiles diff --git a/core/protocols/aws-cbor-protocol/pom.xml b/core/protocols/aws-cbor-protocol/pom.xml index 26a69d63991e..c3529ee82230 100644 --- a/core/protocols/aws-cbor-protocol/pom.xml +++ b/core/protocols/aws-cbor-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-json-protocol/pom.xml b/core/protocols/aws-json-protocol/pom.xml index f21a4a786c63..58ab7b091fd5 100644 --- a/core/protocols/aws-json-protocol/pom.xml +++ b/core/protocols/aws-json-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonProtocolMarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonProtocolMarshaller.java index b81110bd1026..92f34920d2c8 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonProtocolMarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/marshall/JsonProtocolMarshaller.java @@ -181,12 +181,16 @@ void doMarshall(SdkPojo pojo) { Object val = field.getValueOrDefault(pojo); if 
(isExplicitBinaryPayload(field)) { if (val != null) { - request.contentStreamProvider(((SdkBytes) val)::asInputStream); + SdkBytes sdkBytes = (SdkBytes) val; + request.contentStreamProvider(sdkBytes::asInputStream); + updateContentLengthHeader(sdkBytes.asByteArrayUnsafe().length); } } else if (isExplicitStringPayload(field)) { if (val != null) { byte[] content = ((String) val).getBytes(StandardCharsets.UTF_8); request.contentStreamProvider(() -> new ByteArrayInputStream(content)); + updateContentLengthHeader(content.length); + } } else if (isExplicitPayloadMember(field)) { marshallExplicitJsonPayload(field, val); @@ -196,6 +200,10 @@ void doMarshall(SdkPojo pojo) { } } + private void updateContentLengthHeader(int contentLength) { + request.putHeader(CONTENT_LENGTH, Integer.toString(contentLength)); + } + private boolean isExplicitBinaryPayload(SdkField field) { return isExplicitPayloadMember(field) && MarshallingType.SDK_BYTES.equals(field.marshallingType()); } diff --git a/core/protocols/aws-query-protocol/pom.xml b/core/protocols/aws-query-protocol/pom.xml index b740cbab539f..911d44211eb9 100644 --- a/core/protocols/aws-query-protocol/pom.xml +++ b/core/protocols/aws-query-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-xml-protocol/pom.xml b/core/protocols/aws-xml-protocol/pom.xml index 3f071ead7b68..b4e50f915ff8 100644 --- a/core/protocols/aws-xml-protocol/pom.xml +++ b/core/protocols/aws-xml-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlProtocolMarshaller.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlProtocolMarshaller.java index c8f392251d65..2f6f6bb89eb7 100644 --- 
a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlProtocolMarshaller.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/marshall/XmlProtocolMarshaller.java @@ -91,8 +91,10 @@ void doMarshall(SdkPojo pojo) { Object val = field.getValueOrDefault(pojo); if (isBinary(field, val)) { - request.contentStreamProvider(((SdkBytes) val)::asInputStream); + SdkBytes sdkBytes = (SdkBytes) val; + request.contentStreamProvider(sdkBytes::asInputStream); setContentTypeHeaderIfNeeded("binary/octet-stream"); + request.putHeader(CONTENT_LENGTH, Integer.toString(sdkBytes.asByteArrayUnsafe().length)); } else if (isExplicitPayloadMember(field) && val instanceof String) { byte[] content = ((String) val).getBytes(StandardCharsets.UTF_8); diff --git a/core/protocols/pom.xml b/core/protocols/pom.xml index 1813e85da3ae..38dd0b16bc41 100644 --- a/core/protocols/pom.xml +++ b/core/protocols/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/core/protocols/protocol-core/pom.xml b/core/protocols/protocol-core/pom.xml index 816e77c6d478..c0ce3def5a39 100644 --- a/core/protocols/protocol-core/pom.xml +++ b/core/protocols/protocol-core/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/core/regions/pom.xml b/core/regions/pom.xml index 9db19a64f42a..54970928a4b1 100644 --- a/core/regions/pom.xml +++ b/core/regions/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT regions diff --git a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json index c7687dd4ec62..c931c4beaa86 100644 --- a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json +++ 
b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json @@ -349,6 +349,12 @@ "tags" : [ "fips" ] } ] }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "acm-pca-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -364,6 +370,13 @@ "deprecated" : true, "hostname" : "acm-pca-fips.ca-central-1.amazonaws.com" }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "acm-pca-fips.ca-west-1.amazonaws.com" + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -2246,10 +2259,58 @@ "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "us-east-1" : { + "variants" : [ { + "hostname" : "auditmanager-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "auditmanager-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "auditmanager-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "auditmanager-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "auditmanager-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "auditmanager-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "auditmanager-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "auditmanager-fips.us-west-2.amazonaws.com" + } 
} }, "autoscaling" : { @@ -3199,6 +3260,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -5802,6 +5864,12 @@ "tags" : [ "fips" ] } ] }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "ec2-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -5822,6 +5890,13 @@ "deprecated" : true, "hostname" : "ec2-fips.ca-central-1.amazonaws.com" }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "ec2-fips.ca-west-1.amazonaws.com" + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -6409,6 +6484,12 @@ "tags" : [ "fips" ] } ] }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { "variants" : [ { "hostname" : "elasticfilesystem-fips.eu-central-1.amazonaws.com", @@ -6541,6 +6622,13 @@ "deprecated" : true, "hostname" : "elasticfilesystem-fips.ca-central-1.amazonaws.com" }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.ca-west-1.amazonaws.com" + }, "fips-eu-central-1" : { "credentialScope" : { "region" : "eu-central-1" @@ -11569,9 +11657,12 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, @@ -12765,8 +12856,10 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -13288,6 +13381,13 @@ } } }, + "private-networks" : { + "endpoints" : { + "us-east-1" : { }, 
+ "us-east-2" : { }, + "us-west-2" : { } + } + }, "profile" : { "endpoints" : { "af-south-1" : { }, @@ -14141,16 +14241,76 @@ "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, - "ca-central-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "redshift-serverless-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "redshift-serverless-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "redshift-serverless-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "redshift-serverless-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "redshift-serverless-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "redshift-serverless-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "redshift-serverless-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "redshift-serverless-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "redshift-serverless-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "redshift-serverless-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "rekognition" : { @@ -14341,72 +14501,26 
@@ } }, "resource-explorer-2" : { - "defaults" : { - "dnsSuffix" : "api.aws", - "variants" : [ { - "dnsSuffix" : "api.aws", - "hostname" : "{service}-fips.{region}.{dnsSuffix}", - "tags" : [ "fips" ] - } ] - }, "endpoints" : { - "ap-northeast-1" : { - "hostname" : "resource-explorer-2.ap-northeast-1.api.aws" - }, - "ap-northeast-2" : { - "hostname" : "resource-explorer-2.ap-northeast-2.api.aws" - }, - "ap-northeast-3" : { - "hostname" : "resource-explorer-2.ap-northeast-3.api.aws" - }, - "ap-south-1" : { - "hostname" : "resource-explorer-2.ap-south-1.api.aws" - }, - "ap-southeast-1" : { - "hostname" : "resource-explorer-2.ap-southeast-1.api.aws" - }, - "ap-southeast-2" : { - "hostname" : "resource-explorer-2.ap-southeast-2.api.aws" - }, - "ap-southeast-3" : { - "hostname" : "resource-explorer-2.ap-southeast-3.api.aws" - }, - "ca-central-1" : { - "hostname" : "resource-explorer-2.ca-central-1.api.aws" - }, - "eu-central-1" : { - "hostname" : "resource-explorer-2.eu-central-1.api.aws" - }, - "eu-north-1" : { - "hostname" : "resource-explorer-2.eu-north-1.api.aws" - }, - "eu-west-1" : { - "hostname" : "resource-explorer-2.eu-west-1.api.aws" - }, - "eu-west-2" : { - "hostname" : "resource-explorer-2.eu-west-2.api.aws" - }, - "eu-west-3" : { - "hostname" : "resource-explorer-2.eu-west-3.api.aws" - }, - "me-south-1" : { - "hostname" : "resource-explorer-2.me-south-1.api.aws" - }, - "sa-east-1" : { - "hostname" : "resource-explorer-2.sa-east-1.api.aws" - }, - "us-east-1" : { - "hostname" : "resource-explorer-2.us-east-1.api.aws" - }, - "us-east-2" : { - "hostname" : "resource-explorer-2.us-east-2.api.aws" - }, - "us-west-1" : { - "hostname" : "resource-explorer-2.us-west-1.api.aws" - }, - "us-west-2" : { - "hostname" : "resource-explorer-2.us-west-2.api.aws" - } + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + 
"eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } } }, "resource-groups" : { @@ -21044,24 +21158,6 @@ "cn-north-1" : { } } }, - "resource-explorer-2" : { - "defaults" : { - "dnsSuffix" : "api.amazonwebservices.com.cn", - "variants" : [ { - "dnsSuffix" : "api.amazonwebservices.com.cn", - "hostname" : "{service}-fips.{region}.{dnsSuffix}", - "tags" : [ "fips" ] - } ] - }, - "endpoints" : { - "cn-north-1" : { - "hostname" : "resource-explorer-2.cn-north-1.api.amazonwebservices.com.cn" - }, - "cn-northwest-1" : { - "hostname" : "resource-explorer-2.cn-northwest-1.api.amazonwebservices.com.cn" - } - } - }, "resource-groups" : { "endpoints" : { "cn-north-1" : { }, @@ -23279,6 +23375,12 @@ "us-gov-west-1" : { } } }, + "emr-serverless" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "es" : { "endpoints" : { "fips" : { @@ -24962,24 +25064,6 @@ } } }, - "resource-explorer-2" : { - "defaults" : { - "dnsSuffix" : "api.aws", - "variants" : [ { - "dnsSuffix" : "api.aws", - "hostname" : "{service}-fips.{region}.{dnsSuffix}", - "tags" : [ "fips" ] - } ] - }, - "endpoints" : { - "us-gov-east-1" : { - "hostname" : "resource-explorer-2.us-gov-east-1.api.aws" - }, - "us-gov-west-1" : { - "hostname" : "resource-explorer-2.us-gov-west-1.api.aws" - } - } - }, "resource-groups" : { "defaults" : { "variants" : [ { @@ -26312,6 +26396,16 @@ } } }, + "api.pricing" : { + "defaults" : { + "credentialScope" : { + "service" : "pricing" + } + }, + "endpoints" : { + "us-iso-east-1" : { } + } + }, "api.sagemaker" : { "endpoints" : { "us-iso-east-1" : { } @@ -26343,6 +26437,11 @@ "us-iso-west-1" : { } } }, + "arc-zonal-shift" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, "athena" : { "endpoints" : { "us-iso-east-1" : { } @@ -27122,6 +27221,16 @@ } } }, + "api.pricing" : { + 
"defaults" : { + "credentialScope" : { + "service" : "pricing" + } + }, + "endpoints" : { + "us-isob-east-1" : { } + } + }, "api.sagemaker" : { "endpoints" : { "us-isob-east-1" : { } @@ -27145,6 +27254,11 @@ "us-isob-east-1" : { } } }, + "arc-zonal-shift" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "autoscaling" : { "defaults" : { "protocols" : [ "http", "https" ] diff --git a/core/sdk-core/pom.xml b/core/sdk-core/pom.xml index c752acf1f71f..497856f510ea 100644 --- a/core/sdk-core/pom.xml +++ b/core/sdk-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT sdk-core AWS Java SDK :: SDK Core diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkAdvancedAsyncClientOption.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkAdvancedAsyncClientOption.java index ab9d87026abc..fb2a1135eeba 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkAdvancedAsyncClientOption.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkAdvancedAsyncClientOption.java @@ -45,10 +45,12 @@ public final class SdkAdvancedAsyncClientOption extends ClientOption { *
  • You want more fine-grained control over the {@link ThreadPoolExecutor} used, such as configuring the pool size * or sharing a single pool between multiple clients. *
  • You want to add instrumentation (i.e., metrics) around how the {@link Executor} is used. - *
  • You know, for certain, that all of your {@link CompletableFuture} usage is strictly non-blocking, and you wish to - * remove the minor overhead incurred by using a separate thread. In this case, you can use - * {@code Runnable::run} to execute the future-completion directly from within the I/O thread. * + * WARNING + *

    + * We strongly discourage using {@code Runnable::run}, which executes the future-completion directly from + * within the I/O thread because it may block the I/O thread and cause deadlock, especially if you are sending + * another SDK request in the {@link CompletableFuture} chain since the SDK may perform blocking calls in some cases. */ public static final SdkAdvancedAsyncClientOption FUTURE_COMPLETION_EXECUTOR = new SdkAdvancedAsyncClientOption<>(Executor.class); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamResponseTransformer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamResponseTransformer.java index 084a293f6344..434894a44c8c 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamResponseTransformer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamResponseTransformer.java @@ -22,7 +22,7 @@ import software.amazon.awssdk.core.SdkResponse; import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.async.SdkPublisher; -import software.amazon.awssdk.utils.async.InputStreamSubscriber; +import software.amazon.awssdk.http.async.AbortableInputStreamSubscriber; /** * A {@link AsyncResponseTransformer} that allows performing blocking reads on the response data. 
@@ -50,7 +50,7 @@ public void onResponse(ResponseT response) { @Override public void onStream(SdkPublisher publisher) { - InputStreamSubscriber inputStreamSubscriber = new InputStreamSubscriber(); + AbortableInputStreamSubscriber inputStreamSubscriber = AbortableInputStreamSubscriber.builder().build(); publisher.subscribe(inputStreamSubscriber); future.complete(new ResponseInputStream<>(response, inputStreamSubscriber)); } diff --git a/docs/design/core/migration-tool/DecisionLog.md b/docs/design/core/migration-tool/DecisionLog.md new file mode 100644 index 000000000000..68aeba2281ab --- /dev/null +++ b/docs/design/core/migration-tool/DecisionLog.md @@ -0,0 +1,31 @@ +# Decision Log for AWS SDK fro Java v2 Migration Tool + +## Log Entry Template + +**Source**: (Meeting/aside/pair programming discussion/daily standup) to (discuss/implement) X + +**Attendees**: Anirudh, Anna-Karin, David, Dongie, Debora, Olivier, Matt, Jason, John, Zoe + +**Closed Decisions:** + +1. Question? Decision. Justification + +**Open Decisions:** + +1. (Old/Reopened/new) Question? + +## 01/26/2024 + +**Source:** Daily standup and offline discussion to discuss where we should host the source code of the v2 migration tool + +**Attendees:** Anna-Karin, David, Debora, Olivier, Matt, Jason, John, Zoe + +**Closed Decisions:** + +1. Should we host the source code in the same aws-sdk-java-v2 repo? Yes, because 1) no extra release infrastructure is needed since it can be released as part of the SDK release 2) it's easier to write scripts to generate recipes, for example, we need to write script to retrieve service IDs for all services and current version. 3) it has better discoverability. The only disadvantage is that it will increase the scope of the repo and increase the build and release time slightly. The alternatives are: 1) setting up a new GitHub repo, which require us to set up and maintain new CICD pipeline. 
2) hosting the code internally, which would be a bad customer experience since the code is not public and there is no place for users to raise questions/PRs. + +2. Should we publish the tool to Maven central instead of vending a JAR through S3? Yes, because most customers, if not all, prefer to consume the library from the package manager instead of a JAR. + +**Open Decisions:** + +None diff --git a/http-client-spi/pom.xml b/http-client-spi/pom.xml index 0e2e4e6e9891..2e92a3daa58b 100644 --- a/http-client-spi/pom.xml +++ b/http-client-spi/pom.xml @@ -22,7 +22,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT http-client-spi AWS Java SDK :: HTTP Client Interface @@ -90,6 +90,16 @@ byte-buddy test + + org.mockito + mockito-junit-jupiter + test + + + org.mockito + mockito-inline + test + diff --git a/http-client-spi/src/main/java/software/amazon/awssdk/http/async/AbortableInputStreamSubscriber.java b/http-client-spi/src/main/java/software/amazon/awssdk/http/async/AbortableInputStreamSubscriber.java new file mode 100644 index 000000000000..630869825700 --- /dev/null +++ b/http-client-spi/src/main/java/software/amazon/awssdk/http/async/AbortableInputStreamSubscriber.java @@ -0,0 +1,116 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.async; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.annotations.SdkTestInternalApi; +import software.amazon.awssdk.http.Abortable; +import software.amazon.awssdk.utils.FunctionalUtils; +import software.amazon.awssdk.utils.async.InputStreamSubscriber; + +/** + * Wrapper of {@link InputStreamSubscriber} that also implements {@link Abortable}. It will invoke {@link #close()} + * when {@link #abort()} is invoked. Upon closing, the underlying {@link InputStreamSubscriber} will be closed, and additional + * action can be added via {@link Builder#doAfterClose(Runnable)}. + * + */ +@SdkProtectedApi +public final class AbortableInputStreamSubscriber extends InputStream implements Subscriber, Abortable { + private final InputStreamSubscriber delegate; + + private final Runnable doAfterClose; + + private AbortableInputStreamSubscriber(Builder builder) { + this(builder, new InputStreamSubscriber()); + } + + @SdkTestInternalApi + AbortableInputStreamSubscriber(Builder builder, InputStreamSubscriber delegate) { + this.delegate = delegate; + this.doAfterClose = builder.doAfterClose == null ? 
FunctionalUtils.noOpRunnable() : builder.doAfterClose; + } + + public static Builder builder() { + return new Builder(); + } + + @Override + public void abort() { + close(); + } + + @Override + public int read() throws IOException { + return delegate.read(); + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + return delegate.read(b, off, len); + } + + @Override + public int read(byte[] b) throws IOException { + return delegate.read(b); + } + + @Override + public void onSubscribe(Subscription s) { + delegate.onSubscribe(s); + } + + @Override + public void onNext(ByteBuffer byteBuffer) { + delegate.onNext(byteBuffer); + } + + @Override + public void onError(Throwable t) { + delegate.onError(t); + } + + @Override + public void onComplete() { + delegate.onComplete(); + } + + @Override + public void close() { + delegate.close(); + FunctionalUtils.invokeSafely(() -> doAfterClose.run()); + } + + public static final class Builder { + private Runnable doAfterClose; + + /** + * Additional action to run when {@link #close()} is invoked + */ + public Builder doAfterClose(Runnable doAfterClose) { + this.doAfterClose = doAfterClose; + return this; + } + + public AbortableInputStreamSubscriber build() { + return new AbortableInputStreamSubscriber(this); + } + } +} diff --git a/http-client-spi/src/test/java/software/amazon/awssdk/http/async/AbortableInputStreamSubscriberTest.java b/http-client-spi/src/test/java/software/amazon/awssdk/http/async/AbortableInputStreamSubscriberTest.java new file mode 100644 index 000000000000..dc3e49ff7205 --- /dev/null +++ b/http-client-spi/src/test/java/software/amazon/awssdk/http/async/AbortableInputStreamSubscriberTest.java @@ -0,0 +1,70 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.async; + +import static org.mockito.Mockito.verify; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import software.amazon.awssdk.utils.async.InputStreamSubscriber; + +@ExtendWith(MockitoExtension.class) +public class AbortableInputStreamSubscriberTest { + + private AbortableInputStreamSubscriber abortableInputStreamSubscriber; + + @Mock + private Runnable onClose; + + @Mock + private InputStreamSubscriber inputStreamSubscriber; + + @BeforeEach + void setUp() { + abortableInputStreamSubscriber = new AbortableInputStreamSubscriber(AbortableInputStreamSubscriber.builder() + .doAfterClose(onClose), + inputStreamSubscriber); + + + } + + @Test + void close_closeConfigured_shouldInvokeOnClose() { + abortableInputStreamSubscriber.close(); + verify(inputStreamSubscriber).close(); + verify(onClose).run(); + } + + @Test + void abort_shouldInvokeOnClose() { + abortableInputStreamSubscriber = new AbortableInputStreamSubscriber(AbortableInputStreamSubscriber.builder() + .doAfterClose(onClose), + inputStreamSubscriber); + abortableInputStreamSubscriber.abort(); + verify(onClose).run(); + } + + @Test + void close_closeNotConfigured_shouldCloseDelegate() { + abortableInputStreamSubscriber = new AbortableInputStreamSubscriber(AbortableInputStreamSubscriber.builder(), + inputStreamSubscriber); + abortableInputStreamSubscriber.close(); + verify(inputStreamSubscriber).close(); + } +} diff --git 
a/http-clients/apache-client/pom.xml b/http-clients/apache-client/pom.xml index 4be1be53e231..ce7da3e007e4 100644 --- a/http-clients/apache-client/pom.xml +++ b/http-clients/apache-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT apache-client diff --git a/http-clients/aws-crt-client/pom.xml b/http-clients/aws-crt-client/pom.xml index 34e32979d9ae..f52424475406 100644 --- a/http-clients/aws-crt-client/pom.xml +++ b/http-clients/aws-crt-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/response/InputStreamAdaptingHttpStreamResponseHandler.java b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/response/InputStreamAdaptingHttpStreamResponseHandler.java index b6b95307722e..66568efc2b6f 100644 --- a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/response/InputStreamAdaptingHttpStreamResponseHandler.java +++ b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/response/InputStreamAdaptingHttpStreamResponseHandler.java @@ -31,9 +31,9 @@ import software.amazon.awssdk.http.AbortableInputStream; import software.amazon.awssdk.http.SdkHttpFullResponse; import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.http.async.AbortableInputStreamSubscriber; import software.amazon.awssdk.http.crt.AwsCrtHttpClient; import software.amazon.awssdk.utils.Logger; -import software.amazon.awssdk.utils.async.InputStreamSubscriber; import software.amazon.awssdk.utils.async.SimplePublisher; /** @@ -87,8 +87,10 @@ public void onResponseHeaders(HttpStream stream, int responseStatusCode, int blo @Override public int onResponseBody(HttpStream stream, byte[] bodyBytesIn) { if (inputStreamSubscriber == null) { - inputStreamSubscriber = new 
AbortableInputStreamSubscriber(() -> responseHandlerHelper.closeConnection(stream), - new InputStreamSubscriber()); + inputStreamSubscriber = + AbortableInputStreamSubscriber.builder() + .doAfterClose(() -> responseHandlerHelper.closeConnection(stream)) + .build(); simplePublisher.subscribe(inputStreamSubscriber); // For response with a payload, we need to complete the future here to allow downstream to retrieve the data from // the stream directly. diff --git a/http-clients/netty-nio-client/pom.xml b/http-clients/netty-nio-client/pom.xml index 63da6f308d20..5021ee9298e4 100644 --- a/http-clients/netty-nio-client/pom.xml +++ b/http-clients/netty-nio-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ResponseHandler.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ResponseHandler.java index 4d653a45f729..eb3ecd09eb3c 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ResponseHandler.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ResponseHandler.java @@ -314,7 +314,7 @@ private void onCancel() { try { SdkCancellationException e = new SdkCancellationException( "Subscriber cancelled before all events were published"); - log.warn(channelContext.channel(), () -> "Subscriber cancelled before all events were published"); + log.debug(channelContext.channel(), () -> "Subscriber cancelled before all events were published"); executeFuture.completeExceptionally(e); } finally { runAndLogError(channelContext.channel(), () -> "Could not release channel back to the pool", diff --git a/http-clients/pom.xml b/http-clients/pom.xml index efee0251e32b..996151459f5b 100644 --- a/http-clients/pom.xml +++ b/http-clients/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom 
software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/http-clients/url-connection-client/pom.xml b/http-clients/url-connection-client/pom.xml index 296f9b7995cd..92ff42131946 100644 --- a/http-clients/url-connection-client/pom.xml +++ b/http-clients/url-connection-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/metric-publishers/cloudwatch-metric-publisher/pom.xml b/metric-publishers/cloudwatch-metric-publisher/pom.xml index d1480cc0fdd3..bf56192cabf9 100644 --- a/metric-publishers/cloudwatch-metric-publisher/pom.xml +++ b/metric-publishers/cloudwatch-metric-publisher/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk metric-publishers - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT cloudwatch-metric-publisher diff --git a/metric-publishers/pom.xml b/metric-publishers/pom.xml index 8a6c6b90a3fe..7d21a5b37424 100644 --- a/metric-publishers/pom.xml +++ b/metric-publishers/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT metric-publishers diff --git a/pom.xml b/pom.xml index 552fc9f7f176..120c4566d4ec 100644 --- a/pom.xml +++ b/pom.xml @@ -20,7 +20,7 @@ 4.0.0 software.amazon.awssdk aws-sdk-java-pom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT pom AWS Java SDK :: Parent The Amazon Web Services SDK for Java provides Java APIs @@ -88,6 +88,7 @@ test/ruleset-testing-core test/old-client-version-compatibility-test test/bundle-logging-bridge-binding-test + test/crt-unavailable-tests ${scm.github.url} @@ -95,7 +96,7 @@ ${project.version} - 2.23.20 + 2.24.9 2.15.2 2.15.2 2.13.2 diff --git a/release-scripts/pom.xml b/release-scripts/pom.xml index 6fa5f28877d6..66387c3b37c3 100644 --- a/release-scripts/pom.xml +++ b/release-scripts/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../pom.xml release-scripts diff --git a/services-custom/dynamodb-enhanced/pom.xml 
b/services-custom/dynamodb-enhanced/pom.xml index 7ca674323746..d95e9845e8a3 100644 --- a/services-custom/dynamodb-enhanced/pom.xml +++ b/services-custom/dynamodb-enhanced/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services-custom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT dynamodb-enhanced AWS Java SDK :: DynamoDB :: Enhanced Client diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/Expression.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/Expression.java index 1721bc75f17d..fa0f69ad9ed3 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/Expression.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/Expression.java @@ -15,9 +15,13 @@ package software.amazon.awssdk.enhanced.dynamodb; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.LinkedList; import java.util.Map; +import java.util.stream.Collectors; import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ThreadSafe; @@ -46,6 +50,9 @@ @SdkPublicApi @ThreadSafe public final class Expression { + public static final String AND = "AND"; + public static final String OR = "OR"; + private final String expression; private final Map expressionValues; private final Map expressionNames; @@ -67,7 +74,7 @@ public static Builder builder() { } /** - * Coalesces two complete expressions into a single expression. The expression string will be joined using the + * Coalesces two complete expressions into a single expression. The expression string will be joined using the * supplied join token, and the ExpressionNames and ExpressionValues maps will be merged. 
* @param expression1 The first expression to coalesce * @param expression2 The second expression to coalesce @@ -93,6 +100,61 @@ public static Expression join(Expression expression1, Expression expression2, St .build(); } + /** + * @see #join(String, Collection) + */ + public static Expression and(Collection expressions) { + return join(AND, expressions); + } + + /** + * @see #join(String, Collection) + */ + public static Expression or(Collection expressions) { + return join(OR, expressions); + } + + /** + * @see #join(String, Collection) + */ + public static Expression join(String joinToken, Expression... expressions) { + return join(joinToken, Arrays.asList(expressions)); + } + + /** + * Coalesces multiple complete expressions into a single expression. The expression string will be joined using the + * supplied join token, and the ExpressionNames and ExpressionValues maps will be merged. + * @param joinToken The join token to be used to join the expression strings (e.g.: 'AND', 'OR') + * @param expressions The expressions to coalesce + * @return The coalesced expression + * @throws IllegalArgumentException if a conflict occurs when merging ExpressionNames or ExpressionValues + */ + public static Expression join(String joinToken, Collection expressions) { + joinToken = joinToken.trim(); + if (expressions.isEmpty()) { + return null; + } + + if (expressions.size() == 1) { + return expressions.toArray(new Expression[] {})[0]; + } + + joinToken = ") " + joinToken + " ("; + String expression = expressions.stream() + .map(Expression::expression) + .collect(Collectors.joining(joinToken, "(", ")")); + + Builder builder = Expression.builder() + .expression(expression); + + expressions.forEach(expr -> { + builder.mergeExpressionValues(expr.expressionValues()) + .mergeExpressionNames(expr.expressionNames()); + }); + + return builder.build(); + } + /** * Coalesces two expression strings into a single expression string. 
The expression string will be joined using the * supplied join token. @@ -198,6 +260,28 @@ public Expression and(Expression expression) { return join(this, expression, " AND "); } + /** + * Coalesces multiple complete expressions into a single expression joined by 'AND'. + * + * @see #join(String, Collection) + */ + public Expression and(Expression... expressions) { + LinkedList expressionList = new LinkedList<>(Arrays.asList(expressions)); + expressionList.addFirst(this); + return join(AND, expressionList); + } + + /** + * Coalesces multiple complete expressions into a single expression joined by 'OR'. + * + * @see #join(String, Collection) + */ + public Expression or(Expression... expressions) { + LinkedList expressionList = new LinkedList<>(Arrays.asList(expressions)); + expressionList.addFirst(this); + return join(OR, expressionList); + } + @Override public boolean equals(Object o) { if (this == o) { @@ -255,6 +339,33 @@ public Builder expressionValues(Map expressionValues) { return this; } + /** + * Merge the given ExpressionValues into the builders existing ExpressionValues + * @param expressionValues The values to merge into the ExpressionValues map + * @throws IllegalArgumentException if a conflict occurs when merging ExpressionValues + */ + public Builder mergeExpressionValues(Map expressionValues) { + if (this.expressionValues == null) { + return expressionValues(expressionValues); + } + + if (expressionValues == null) { + return this; + } + + expressionValues.forEach((key, value) -> { + AttributeValue oldValue = this.expressionValues.put(key, value); + + if (oldValue != null && !oldValue.equals(value)) { + throw new IllegalArgumentException( + String.format("Attempt to coalesce expressions with conflicting expression values. 
" + + "Expression value key = '%s'", key)); + } + }); + + return this; + } + /** * Adds a single element to the optional 'expression values' token map */ @@ -275,6 +386,33 @@ public Builder expressionNames(Map expressionNames) { return this; } + /** + * Merge the given ExpressionNames into the builders existing ExpressionNames + * @param expressionNames The values to merge into the ExpressionNames map + * @throws IllegalArgumentException if a conflict occurs when merging ExpressionNames + */ + public Builder mergeExpressionNames(Map expressionNames) { + if (this.expressionNames == null) { + return expressionNames(expressionNames); + } + + if (expressionNames == null) { + return this; + } + + expressionNames.forEach((key, value) -> { + String oldValue = this.expressionNames.put(key, value); + + if (oldValue != null && !oldValue.equals(value)) { + throw new IllegalArgumentException( + String.format("Attempt to coalesce expressions with conflicting expression names. " + + "Expression name key = '%s'", key)); + } + }); + + return this; + } + /** * Adds a single element to the optional 'expression names' token map */ diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/ExpressionTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/ExpressionTest.java index c5c394e89bbb..aa18fe7f553b 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/ExpressionTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/ExpressionTest.java @@ -144,4 +144,242 @@ public void joinValues_conflictingKey() { exception.expectMessage("two"); Expression.joinValues(values1, values2); } + + @Test + public void join_correctlyJoins() { + Map names1 = new HashMap<>(); + names1.put("one", "1"); + Map values1 = new HashMap<>(); + values1.put("one", EnhancedAttributeValue.fromString("1").toAttributeValue()); + 
Expression expression1 = Expression.builder() + .expression("one") + .expressionNames(names1) + .expressionValues(values1) + .build(); + + Map names2 = new HashMap<>(); + names2.put("two", "2"); + Map values2 = new HashMap<>(); + values2.put("two", EnhancedAttributeValue.fromString("2").toAttributeValue()); + Expression expression2 = Expression.builder() + .expression("two") + .expressionNames(names2) + .expressionValues(values2) + .build(); + + Map names3 = new HashMap<>(); + names3.put("three", "3"); + Map values3 = new HashMap<>(); + values3.put("three", EnhancedAttributeValue.fromString("3").toAttributeValue()); + Expression expression3 = Expression.builder() + .expression("three") + .expressionNames(names3) + .expressionValues(values3) + .build(); + + Expression joinedExpression = Expression.join(Expression.AND, expression1, expression2, expression3); + + String expectedExpression = "(one) AND (two) AND (three)"; + assertThat(joinedExpression.expression(), is(expectedExpression)); + + final Map names = joinedExpression.expressionNames(); + assertThat(names.size(), is(3)); + assertThat(names, hasEntry("one", "1")); + assertThat(names, hasEntry("two", "2")); + assertThat(names, hasEntry("three", "3")); + + final Map values = joinedExpression.expressionValues(); + assertThat(values.size(), is(3)); + assertThat(values, hasEntry("one", AttributeValue.fromS("1"))); + assertThat(values, hasEntry("two", AttributeValue.fromS("2"))); + assertThat(values, hasEntry("three", AttributeValue.fromS("3"))); + } + + @Test + public void join_conflictingKey() { + Map names1 = new HashMap<>(); + names1.put("one", "1"); + Map values1 = new HashMap<>(); + values1.put("one", EnhancedAttributeValue.fromString("1").toAttributeValue()); + Expression expression1 = Expression.builder() + .expression("one") + .expressionNames(names1) + .expressionValues(values1) + .build(); + + Map names2 = new HashMap<>(); + names2.put("one", "2"); + Map values2 = new HashMap<>(); + values2.put("one", 
EnhancedAttributeValue.fromString("2").toAttributeValue()); + Expression expression2 = Expression.builder() + .expression("two") + .expressionNames(names2) + .expressionValues(values2) + .build(); + + Map names3 = new HashMap<>(); + names3.put("one", "3"); + Map values3 = new HashMap<>(); + values3.put("one", EnhancedAttributeValue.fromString("3").toAttributeValue()); + Expression expression3 = Expression.builder() + .expression("three") + .expressionNames(names3) + .expressionValues(values3) + .build(); + + exception.expect(IllegalArgumentException.class); + exception.expectMessage("on"); + Expression.join(Expression.AND, expression1, expression2, expression3); + } + + @Test + public void and_correctlyJoins() { + Map names1 = new HashMap<>(); + names1.put("one", "1"); + Map values1 = new HashMap<>(); + values1.put("one", EnhancedAttributeValue.fromString("1").toAttributeValue()); + Expression expression1 = Expression.builder() + .expression("one") + .expressionNames(names1) + .expressionValues(values1) + .build(); + + Map names2 = new HashMap<>(); + names2.put("two", "2"); + Map values2 = new HashMap<>(); + values2.put("two", EnhancedAttributeValue.fromString("2").toAttributeValue()); + Expression expression2 = Expression.builder() + .expression("two") + .expressionNames(names2) + .expressionValues(values2) + .build(); + + Map names3 = new HashMap<>(); + names3.put("three", "3"); + Map values3 = new HashMap<>(); + values3.put("three", EnhancedAttributeValue.fromString("3").toAttributeValue()); + Expression expression3 = Expression.builder() + .expression("three") + .expressionNames(names3) + .expressionValues(values3) + .build(); + + Expression joinedExpression = expression1.and(expression2, expression3); + + String expectedExpression = "(one) AND (two) AND (three)"; + assertThat(joinedExpression.expression(), is(expectedExpression)); + + final Map names = joinedExpression.expressionNames(); + assertThat(names.size(), is(3)); + assertThat(names, hasEntry("one", 
"1")); + assertThat(names, hasEntry("two", "2")); + assertThat(names, hasEntry("three", "3")); + + final Map values = joinedExpression.expressionValues(); + assertThat(values.size(), is(3)); + assertThat(values, hasEntry("one", AttributeValue.fromS("1"))); + assertThat(values, hasEntry("two", AttributeValue.fromS("2"))); + assertThat(values, hasEntry("three", AttributeValue.fromS("3"))); + } + + @Test + public void or_correctlyJoins() { + Map names1 = new HashMap<>(); + names1.put("one", "1"); + Map values1 = new HashMap<>(); + values1.put("one", EnhancedAttributeValue.fromString("1").toAttributeValue()); + Expression expression1 = Expression.builder() + .expression("one") + .expressionNames(names1) + .expressionValues(values1) + .build(); + + Map names2 = new HashMap<>(); + names2.put("two", "2"); + Map values2 = new HashMap<>(); + values2.put("two", EnhancedAttributeValue.fromString("2").toAttributeValue()); + Expression expression2 = Expression.builder() + .expression("two") + .expressionNames(names2) + .expressionValues(values2) + .build(); + + Map names3 = new HashMap<>(); + names3.put("three", "3"); + Map values3 = new HashMap<>(); + values3.put("three", EnhancedAttributeValue.fromString("3").toAttributeValue()); + Expression expression3 = Expression.builder() + .expression("three") + .expressionNames(names3) + .expressionValues(values3) + .build(); + + Expression joinedExpression = expression1.or(expression2, expression3); + + String expectedExpression = "(one) OR (two) OR (three)"; + assertThat(joinedExpression.expression(), is(expectedExpression)); + + final Map names = joinedExpression.expressionNames(); + assertThat(names.size(), is(3)); + assertThat(names, hasEntry("one", "1")); + assertThat(names, hasEntry("two", "2")); + assertThat(names, hasEntry("three", "3")); + + final Map values = joinedExpression.expressionValues(); + assertThat(values.size(), is(3)); + assertThat(values, hasEntry("one", AttributeValue.fromS("1"))); + assertThat(values, 
hasEntry("two", AttributeValue.fromS("2"))); + assertThat(values, hasEntry("three", AttributeValue.fromS("3"))); + } + + @Test + public void compounded_expressions_correctlyJoins() { + Map names1 = new HashMap<>(); + names1.put("one", "1"); + Map values1 = new HashMap<>(); + values1.put("one", EnhancedAttributeValue.fromString("1").toAttributeValue()); + Expression expression1 = Expression.builder() + .expression("one") + .expressionNames(names1) + .expressionValues(values1) + .build(); + + Map names2 = new HashMap<>(); + names2.put("two", "2"); + Map values2 = new HashMap<>(); + values2.put("two", EnhancedAttributeValue.fromString("2").toAttributeValue()); + Expression expression2 = Expression.builder() + .expression("two") + .expressionNames(names2) + .expressionValues(values2) + .build(); + + Map names3 = new HashMap<>(); + names3.put("three", "3"); + Map values3 = new HashMap<>(); + values3.put("three", EnhancedAttributeValue.fromString("3").toAttributeValue()); + Expression expression3 = Expression.builder() + .expression("three") + .expressionNames(names3) + .expressionValues(values3) + .build(); + + Expression joinedExpression = expression1.and(expression2.or(expression3)); + + String expectedExpression = "(one) AND ((two) OR (three))"; + assertThat(joinedExpression.expression(), is(expectedExpression)); + + final Map names = joinedExpression.expressionNames(); + assertThat(names.size(), is(3)); + assertThat(names, hasEntry("one", "1")); + assertThat(names, hasEntry("two", "2")); + assertThat(names, hasEntry("three", "3")); + + final Map values = joinedExpression.expressionValues(); + assertThat(values.size(), is(3)); + assertThat(values, hasEntry("one", AttributeValue.fromS("1"))); + assertThat(values, hasEntry("two", AttributeValue.fromS("2"))); + assertThat(values, hasEntry("three", AttributeValue.fromS("3"))); + } + } diff --git a/services-custom/iam-policy-builder/pom.xml b/services-custom/iam-policy-builder/pom.xml index de8255582120..8c4070368112 
100644 --- a/services-custom/iam-policy-builder/pom.xml +++ b/services-custom/iam-policy-builder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../../pom.xml iam-policy-builder diff --git a/services-custom/pom.xml b/services-custom/pom.xml index 78aa612d93a2..1f9dba8e22a6 100644 --- a/services-custom/pom.xml +++ b/services-custom/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT services-custom AWS Java SDK :: Custom Services diff --git a/services-custom/s3-transfer-manager/pom.xml b/services-custom/s3-transfer-manager/pom.xml index 5c779323fafc..aae191666b8f 100644 --- a/services-custom/s3-transfer-manager/pom.xml +++ b/services-custom/s3-transfer-manager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../../pom.xml s3-transfer-manager diff --git a/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3IntegrationTestBase.java b/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3IntegrationTestBase.java index 98f81b731014..eb45d1f7370d 100644 --- a/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3IntegrationTestBase.java +++ b/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3IntegrationTestBase.java @@ -68,6 +68,7 @@ public static void setUpForAllIntegTests() throws Exception { Log.initLoggingToStdout(Log.LogLevel.Warn); System.setProperty("aws.crt.debugnative", "true"); s3 = s3ClientBuilder().build(); + // TODO - enable multipart once TransferListener fixed for MultipartClient s3Async = s3AsyncClientBuilder().build(); s3CrtAsync = S3CrtAsyncClient.builder() .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) diff --git a/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3TransferManagerUploadPauseResumeIntegrationTest.java 
b/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3TransferManagerUploadPauseResumeIntegrationTest.java index 23705c6bc5bf..e872080f2e32 100644 --- a/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3TransferManagerUploadPauseResumeIntegrationTest.java +++ b/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3TransferManagerUploadPauseResumeIntegrationTest.java @@ -16,7 +16,6 @@ package software.amazon.awssdk.transfer.s3; import static org.assertj.core.api.AssertionsForClassTypes.assertThat; -import static org.assertj.core.api.AssertionsForClassTypes.assertThatThrownBy; import static software.amazon.awssdk.testutils.service.S3BucketUtils.temporaryBucketName; import static software.amazon.awssdk.transfer.s3.SizeConstant.MB; @@ -26,13 +25,17 @@ import java.time.Duration; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; +import java.util.stream.Stream; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import software.amazon.awssdk.core.retry.backoff.FixedDelayBackoffStrategy; import software.amazon.awssdk.core.waiters.AsyncWaiter; import software.amazon.awssdk.core.waiters.Waiter; import software.amazon.awssdk.core.waiters.WaiterAcceptor; +import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.model.ListMultipartUploadsResponse; import software.amazon.awssdk.services.s3.model.ListPartsResponse; import software.amazon.awssdk.services.s3.model.NoSuchUploadException; @@ -48,17 +51,25 @@ public class S3TransferManagerUploadPauseResumeIntegrationTest extends S3Integra private static final String BUCKET = 
temporaryBucketName(S3TransferManagerUploadPauseResumeIntegrationTest.class); private static final String KEY = "key"; // 24 * MB is chosen to make sure we have data written in the file already upon pausing. - private static final long OBJ_SIZE = 24 * MB; + private static final long LARGE_OBJ_SIZE = 24 * MB; + private static final long SMALL_OBJ_SIZE = 2 * MB; private static File largeFile; private static File smallFile; private static ScheduledExecutorService executorService; + // TODO - switch to tmJava from TestBase once TransferListener fixed for MultipartClient + protected static S3TransferManager tmJavaMpu; + @BeforeAll public static void setup() throws Exception { createBucket(BUCKET); - largeFile = new RandomTempFile(OBJ_SIZE); - smallFile = new RandomTempFile(2 * MB); + largeFile = new RandomTempFile(LARGE_OBJ_SIZE); + smallFile = new RandomTempFile(SMALL_OBJ_SIZE); executorService = Executors.newScheduledThreadPool(3); + + // TODO - switch to tmJava from TestBase once TransferListener fixed for MultipartClient + S3AsyncClient s3AsyncMpu = s3AsyncClientBuilder().multipartEnabled(true).build(); + tmJavaMpu = S3TransferManager.builder().s3Client(s3AsyncMpu).build(); } @AfterAll @@ -69,30 +80,42 @@ public static void cleanup() { executorService.shutdown(); } - @Test - void pause_singlePart_shouldResume() { + private static Stream transferManagers() { + return Stream.of( + Arguments.of(tmJavaMpu, tmJavaMpu), + Arguments.of(tmCrt, tmCrt), + Arguments.of(tmCrt, tmJavaMpu), + Arguments.of(tmJavaMpu, tmCrt) + ); + } + + @ParameterizedTest + @MethodSource("transferManagers") + void pause_singlePart_shouldResume(S3TransferManager uploadTm, S3TransferManager resumeTm) { UploadFileRequest request = UploadFileRequest.builder() .putObjectRequest(b -> b.bucket(BUCKET).key(KEY)) .source(smallFile) .build(); - FileUpload fileUpload = tmCrt.uploadFile(request); + FileUpload fileUpload = uploadTm.uploadFile(request); ResumableFileUpload resumableFileUpload = 
fileUpload.pause(); log.debug(() -> "Paused: " + resumableFileUpload); validateEmptyResumeToken(resumableFileUpload); - FileUpload resumedUpload = tmCrt.resumeUploadFile(resumableFileUpload); + FileUpload resumedUpload = resumeTm.resumeUploadFile(resumableFileUpload); resumedUpload.completionFuture().join(); + assertThat(resumedUpload.progress().snapshot().totalBytes()).hasValue(SMALL_OBJ_SIZE); } - @Test - void pause_fileNotChanged_shouldResume() { + @ParameterizedTest + @MethodSource("transferManagers") + void pause_fileNotChanged_shouldResume(S3TransferManager uploadTm, S3TransferManager resumeTm) throws Exception { UploadFileRequest request = UploadFileRequest.builder() .putObjectRequest(b -> b.bucket(BUCKET).key(KEY)) .addTransferListener(LoggingTransferListener.create()) .source(largeFile) .build(); - FileUpload fileUpload = tmCrt.uploadFile(request); + FileUpload fileUpload = uploadTm.uploadFile(request); waitUntilMultipartUploadExists(); ResumableFileUpload resumableFileUpload = fileUpload.pause(); log.debug(() -> "Paused: " + resumableFileUpload); @@ -103,33 +126,37 @@ void pause_fileNotChanged_shouldResume() { verifyMultipartUploadIdExists(resumableFileUpload); - FileUpload resumedUpload = tmCrt.resumeUploadFile(resumableFileUpload); + FileUpload resumedUpload = resumeTm.resumeUploadFile(resumableFileUpload); resumedUpload.completionFuture().join(); + assertThat(resumedUpload.progress().snapshot().totalBytes()).hasValue(LARGE_OBJ_SIZE); } - @Test - void pauseImmediately_resume_shouldStartFromBeginning() { + @ParameterizedTest + @MethodSource("transferManagers") + void pauseImmediately_resume_shouldStartFromBeginning(S3TransferManager uploadTm, S3TransferManager resumeTm) { UploadFileRequest request = UploadFileRequest.builder() - .putObjectRequest(b -> b.bucket(BUCKET).key(KEY)) - .source(largeFile) - .build(); - FileUpload fileUpload = tmCrt.uploadFile(request); + .putObjectRequest(b -> b.bucket(BUCKET).key(KEY)) + .source(largeFile) + .build(); + 
FileUpload fileUpload = uploadTm.uploadFile(request); ResumableFileUpload resumableFileUpload = fileUpload.pause(); log.debug(() -> "Paused: " + resumableFileUpload); validateEmptyResumeToken(resumableFileUpload); - FileUpload resumedUpload = tmCrt.resumeUploadFile(resumableFileUpload); + FileUpload resumedUpload = resumeTm.resumeUploadFile(resumableFileUpload); resumedUpload.completionFuture().join(); + assertThat(resumedUpload.progress().snapshot().totalBytes()).hasValue(LARGE_OBJ_SIZE); } - @Test - void pause_fileChanged_resumeShouldStartFromBeginning() throws Exception { + @ParameterizedTest + @MethodSource("transferManagers") + void pause_fileChanged_resumeShouldStartFromBeginning(S3TransferManager uploadTm, S3TransferManager resumeTm) throws Exception { UploadFileRequest request = UploadFileRequest.builder() .putObjectRequest(b -> b.bucket(BUCKET).key(KEY)) .source(largeFile) .build(); - FileUpload fileUpload = tmCrt.uploadFile(request); + FileUpload fileUpload = uploadTm.uploadFile(request); waitUntilMultipartUploadExists(); ResumableFileUpload resumableFileUpload = fileUpload.pause(); log.debug(() -> "Paused: " + resumableFileUpload); @@ -139,13 +166,18 @@ void pause_fileChanged_resumeShouldStartFromBeginning() throws Exception { assertThat(resumableFileUpload.totalParts()).isNotEmpty(); verifyMultipartUploadIdExists(resumableFileUpload); - byte[] bytes = "helloworld".getBytes(StandardCharsets.UTF_8); - Files.write(largeFile.toPath(), bytes); - - FileUpload resumedUpload = tmCrt.resumeUploadFile(resumableFileUpload); - resumedUpload.completionFuture().join(); - verifyMultipartUploadIdNotExist(resumableFileUpload); - assertThat(resumedUpload.progress().snapshot().totalBytes()).hasValue(bytes.length); + byte[] originalBytes = Files.readAllBytes(largeFile.toPath()); + try { + byte[] bytes = "helloworld".getBytes(StandardCharsets.UTF_8); + Files.write(largeFile.toPath(), bytes); + + FileUpload resumedUpload = resumeTm.resumeUploadFile(resumableFileUpload); + 
resumedUpload.completionFuture().join(); + verifyMultipartUploadIdNotExist(resumableFileUpload); + assertThat(resumedUpload.progress().snapshot().totalBytes()).hasValue(bytes.length); + } finally { + Files.write(largeFile.toPath(), originalBytes); + } } private void verifyMultipartUploadIdExists(ResumableFileUpload resumableFileUpload) { diff --git a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/CrtS3TransferManager.java b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/CrtS3TransferManager.java index eef9205be1c7..71ebeef56e62 100644 --- a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/CrtS3TransferManager.java +++ b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/CrtS3TransferManager.java @@ -20,7 +20,6 @@ import static software.amazon.awssdk.services.s3.crt.S3CrtSdkHttpExecutionAttribute.METAREQUEST_PAUSE_OBSERVABLE; import static software.amazon.awssdk.services.s3.internal.crt.S3InternalSdkHttpExecutionAttribute.CRT_PAUSE_RESUME_TOKEN; import static software.amazon.awssdk.transfer.s3.internal.GenericS3TransferManager.assertNotUnsupportedArn; -import static software.amazon.awssdk.transfer.s3.internal.utils.FileUtils.fileNotModified; import java.util.concurrent.CompletableFuture; import java.util.function.Consumer; @@ -31,7 +30,6 @@ import software.amazon.awssdk.http.SdkHttpExecutionAttributes; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.internal.crt.S3MetaRequestPauseObservable; -import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.transfer.s3.S3TransferManager; @@ -51,6 +49,7 @@ @SdkInternalApi class CrtS3TransferManager extends 
DelegatingS3TransferManager { private static final Logger log = Logger.loggerFor(S3TransferManager.class); + private static final PauseResumeHelper PAUSE_RESUME_HELPER = new PauseResumeHelper(); private final S3AsyncClient s3AsyncClient; CrtS3TransferManager(TransferManagerConfiguration transferConfiguration, S3AsyncClient s3AsyncClient, @@ -99,67 +98,15 @@ public FileUpload uploadFile(UploadFileRequest uploadFileRequest) { return new CrtFileUpload(returnFuture, progressUpdater.progress(), observable, uploadFileRequest); } - private FileUpload uploadFromBeginning(ResumableFileUpload resumableFileUpload, boolean fileModified, - boolean noResumeToken) { - UploadFileRequest uploadFileRequest = resumableFileUpload.uploadFileRequest(); - PutObjectRequest putObjectRequest = uploadFileRequest.putObjectRequest(); - if (fileModified) { - log.debug(() -> String.format("The file (%s) has been modified since " - + "the last pause. " + - "The SDK will upload the requested object in bucket" - + " (%s) with key (%s) from " - + "the " - + "beginning.", - uploadFileRequest.source(), - putObjectRequest.bucket(), - putObjectRequest.key())); - resumableFileUpload.multipartUploadId() - .ifPresent(id -> { - log.debug(() -> "Aborting previous upload with multipartUploadId: " + id); - s3AsyncClient.abortMultipartUpload( - AbortMultipartUploadRequest.builder() - .bucket(putObjectRequest.bucket()) - .key(putObjectRequest.key()) - .uploadId(id) - .build()) - .exceptionally(t -> { - log.warn(() -> String.format("Failed to abort previous multipart upload " - + "(id: %s)" - + ". You may need to call " - + "S3AsyncClient#abortMultiPartUpload to " - + "free all storage consumed by" - + " all parts. ", - id), t); - return null; - }); - }); - } - - if (noResumeToken) { - log.debug(() -> String.format("No resume token is found. 
" + - "The SDK will upload the requested object in bucket" - + " (%s) with key (%s) from " - + "the beginning.", - putObjectRequest.bucket(), - putObjectRequest.key())); - } - - - return uploadFile(uploadFileRequest); - } - @Override public FileUpload resumeUploadFile(ResumableFileUpload resumableFileUpload) { Validate.paramNotNull(resumableFileUpload, "resumableFileUpload"); - boolean fileModified = !fileNotModified(resumableFileUpload.fileLength(), - resumableFileUpload.fileLastModified(), - resumableFileUpload.uploadFileRequest().source()); - - boolean noResumeToken = !hasResumeToken(resumableFileUpload); + boolean fileModified = PAUSE_RESUME_HELPER.fileModified(resumableFileUpload, s3AsyncClient); + boolean noResumeToken = !PAUSE_RESUME_HELPER.hasResumeToken(resumableFileUpload); if (fileModified || noResumeToken) { - return uploadFromBeginning(resumableFileUpload, fileModified, noResumeToken); + return uploadFile(resumableFileUpload.uploadFileRequest()); } return doResumeUpload(resumableFileUpload); @@ -188,10 +135,6 @@ private static ResumeToken crtResumeToken(ResumableFileUpload resumableFileUploa .withUploadId(resumableFileUpload.multipartUploadId().orElse(null))); } - private boolean hasResumeToken(ResumableFileUpload resumableFileUpload) { - return resumableFileUpload.totalParts().isPresent() && resumableFileUpload.partSizeInBytes().isPresent(); - } - private PutObjectRequest attachSdkAttribute(PutObjectRequest putObjectRequest, Consumer builderMutation) { SdkHttpExecutionAttributes modifiedAttributes = diff --git a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/GenericS3TransferManager.java b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/GenericS3TransferManager.java index b06d0824b709..ba10ac39e79e 100644 --- a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/GenericS3TransferManager.java +++ 
b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/GenericS3TransferManager.java @@ -15,21 +15,27 @@ package software.amazon.awssdk.transfer.s3.internal; +import static software.amazon.awssdk.services.s3.multipart.S3PauseResumeExecutionAttribute.PAUSE_OBSERVABLE; +import static software.amazon.awssdk.services.s3.multipart.S3PauseResumeExecutionAttribute.RESUME_TOKEN; import static software.amazon.awssdk.transfer.s3.SizeConstant.MB; import static software.amazon.awssdk.transfer.s3.internal.utils.ResumableRequestConverter.toDownloadFileRequestAndTransformer; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; +import java.util.function.Consumer; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.annotations.SdkTestInternalApi; import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.FileTransformerConfiguration; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.core.internal.async.FileAsyncRequestBody; +import software.amazon.awssdk.services.s3.DelegatingS3AsyncClient; import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.internal.multipart.MultipartS3AsyncClient; import software.amazon.awssdk.services.s3.internal.resource.S3AccessPointResource; import software.amazon.awssdk.services.s3.internal.resource.S3ArnConverter; import software.amazon.awssdk.services.s3.internal.resource.S3Resource; @@ -39,6 +45,8 @@ import software.amazon.awssdk.services.s3.model.HeadObjectResponse; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import 
software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.s3.multipart.PauseObservable; +import software.amazon.awssdk.services.s3.multipart.S3ResumeToken; import software.amazon.awssdk.transfer.s3.S3TransferManager; import software.amazon.awssdk.transfer.s3.internal.model.DefaultCopy; import software.amazon.awssdk.transfer.s3.internal.model.DefaultDirectoryDownload; @@ -65,6 +73,7 @@ import software.amazon.awssdk.transfer.s3.model.FileDownload; import software.amazon.awssdk.transfer.s3.model.FileUpload; import software.amazon.awssdk.transfer.s3.model.ResumableFileDownload; +import software.amazon.awssdk.transfer.s3.model.ResumableFileUpload; import software.amazon.awssdk.transfer.s3.model.Upload; import software.amazon.awssdk.transfer.s3.model.UploadDirectoryRequest; import software.amazon.awssdk.transfer.s3.model.UploadFileRequest; @@ -80,6 +89,7 @@ class GenericS3TransferManager implements S3TransferManager { protected static final int DEFAULT_FILE_UPLOAD_CHUNK_SIZE = (int) (16 * MB); private static final Logger log = Logger.loggerFor(S3TransferManager.class); + private static final PauseResumeHelper PAUSE_RESUME_HELPER = new PauseResumeHelper(); private final S3AsyncClient s3AsyncClient; private final UploadDirectoryHelper uploadDirectoryHelper; private final DownloadDirectoryHelper downloadDirectoryHelper; @@ -101,11 +111,11 @@ class GenericS3TransferManager implements S3TransferManager { } @SdkTestInternalApi - GenericS3TransferManager(S3AsyncClient s3CrtAsyncClient, + GenericS3TransferManager(S3AsyncClient s3AsyncClient, UploadDirectoryHelper uploadDirectoryHelper, TransferManagerConfiguration configuration, DownloadDirectoryHelper downloadDirectoryHelper) { - this.s3AsyncClient = s3CrtAsyncClient; + this.s3AsyncClient = s3AsyncClient; this.isDefaultS3AsyncClient = false; this.transferConfiguration = configuration; this.uploadDirectoryHelper = uploadDirectoryHelper; @@ -129,13 +139,13 @@ public Upload 
upload(UploadRequest uploadRequest) { try { assertNotUnsupportedArn(uploadRequest.putObjectRequest().bucket(), "upload"); - CompletableFuture crtFuture = + CompletableFuture future = s3AsyncClient.putObject(uploadRequest.putObjectRequest(), requestBody); - // Forward upload cancellation to CRT future - CompletableFutureUtils.forwardExceptionTo(returnFuture, crtFuture); + // Forward upload cancellation to future + CompletableFutureUtils.forwardExceptionTo(returnFuture, future); - CompletableFutureUtils.forwardTransformedResultTo(crtFuture, returnFuture, + CompletableFutureUtils.forwardTransformedResultTo(future, returnFuture, r -> CompletedUpload.builder() .response(r) .build()); @@ -157,6 +167,15 @@ public FileUpload uploadFile(UploadFileRequest uploadFileRequest) { .build(); PutObjectRequest putObjectRequest = uploadFileRequest.putObjectRequest(); + PauseObservable pauseObservable; + if (isS3ClientMultipartEnabled()) { + pauseObservable = new PauseObservable(); + Consumer attachPauseObservable = + b -> b.putExecutionAttribute(PAUSE_OBSERVABLE, pauseObservable); + putObjectRequest = attachSdkAttribute(uploadFileRequest.putObjectRequest(), attachPauseObservable); + } else { + pauseObservable = null; + } CompletableFuture returnFuture = new CompletableFuture<>(); @@ -182,8 +201,72 @@ public FileUpload uploadFile(UploadFileRequest uploadFileRequest) { } catch (Throwable throwable) { returnFuture.completeExceptionally(throwable); } + return new DefaultFileUpload(returnFuture, progressUpdater.progress(), pauseObservable, uploadFileRequest); + } + + @Override + public FileUpload resumeUploadFile(ResumableFileUpload resumableFileUpload) { + Validate.paramNotNull(resumableFileUpload, "resumableFileUpload"); + + boolean fileModified = PAUSE_RESUME_HELPER.fileModified(resumableFileUpload, s3AsyncClient); + boolean noResumeToken = !PAUSE_RESUME_HELPER.hasResumeToken(resumableFileUpload); + + if (fileModified || noResumeToken) { + return 
uploadFile(resumableFileUpload.uploadFileRequest()); + } + + return doResumeUpload(resumableFileUpload); + } + + private boolean isS3ClientMultipartEnabled() { + // TODO use configuration getter when available + return s3AsyncClient instanceof MultipartS3AsyncClient; + } + + private FileUpload doResumeUpload(ResumableFileUpload resumableFileUpload) { + UploadFileRequest uploadFileRequest = resumableFileUpload.uploadFileRequest(); + PutObjectRequest putObjectRequest = uploadFileRequest.putObjectRequest(); + S3ResumeToken s3ResumeToken = s3ResumeToken(resumableFileUpload); + + Consumer attachResumeToken = + b -> b.putExecutionAttribute(RESUME_TOKEN, s3ResumeToken); + + PutObjectRequest modifiedPutObjectRequest = attachSdkAttribute(putObjectRequest, attachResumeToken); + + return uploadFile(uploadFileRequest.toBuilder() + .putObjectRequest(modifiedPutObjectRequest) + .build()); + } + + private static S3ResumeToken s3ResumeToken(ResumableFileUpload resumableFileUpload) { + S3ResumeToken.Builder builder = S3ResumeToken.builder(); + + builder.uploadId(resumableFileUpload.multipartUploadId().orElse(null)); + if (resumableFileUpload.partSizeInBytes().isPresent()) { + builder.partSize(resumableFileUpload.partSizeInBytes().getAsLong()); + } + if (resumableFileUpload.totalParts().isPresent()) { + builder.totalNumParts(resumableFileUpload.totalParts().getAsLong()); + } + if (resumableFileUpload.transferredParts().isPresent()) { + builder.numPartsCompleted(resumableFileUpload.transferredParts().getAsLong()); + } + + return builder.build(); + } - return new DefaultFileUpload(returnFuture, progressUpdater.progress(), uploadFileRequest); + private PutObjectRequest attachSdkAttribute(PutObjectRequest putObjectRequest, + Consumer builderMutation) { + AwsRequestOverrideConfiguration modifiedRequestOverrideConfig = + putObjectRequest.overrideConfiguration() + .map(o -> o.toBuilder().applyMutation(builderMutation).build()) + .orElseGet(() -> AwsRequestOverrideConfiguration.builder() + 
.applyMutation(builderMutation) + .build()); + + return putObjectRequest.toBuilder() + .overrideConfiguration(modifiedRequestOverrideConfig) + .build(); } @Override @@ -216,13 +299,12 @@ public Download download(DownloadRequest downloadReq try { assertNotUnsupportedArn(downloadRequest.getObjectRequest().bucket(), "download"); - CompletableFuture crtFuture = - s3AsyncClient.getObject(downloadRequest.getObjectRequest(), responseTransformer); + CompletableFuture future = doGetObject(downloadRequest.getObjectRequest(), responseTransformer); - // Forward download cancellation to CRT future - CompletableFutureUtils.forwardExceptionTo(returnFuture, crtFuture); + // Forward download cancellation to future + CompletableFutureUtils.forwardExceptionTo(returnFuture, future); - CompletableFutureUtils.forwardTransformedResultTo(crtFuture, returnFuture, + CompletableFutureUtils.forwardTransformedResultTo(future, returnFuture, r -> CompletedDownload.builder() .result(r) .build()); @@ -259,14 +341,12 @@ private TransferProgressUpdater doDownloadFile( assertNotUnsupportedArn(downloadRequest.getObjectRequest().bucket(), "download"); - CompletableFuture crtFuture = - s3AsyncClient.getObject(downloadRequest.getObjectRequest(), - responseTransformer); + CompletableFuture future = doGetObject(downloadRequest.getObjectRequest(), responseTransformer); - // Forward download cancellation to CRT future - CompletableFutureUtils.forwardExceptionTo(returnFuture, crtFuture); + // Forward download cancellation to future + CompletableFutureUtils.forwardExceptionTo(returnFuture, future); - CompletableFutureUtils.forwardTransformedResultTo(crtFuture, returnFuture, + CompletableFutureUtils.forwardTransformedResultTo(future, returnFuture, res -> CompletedFileDownload.builder() .response(res) .build()); @@ -368,13 +448,13 @@ public Copy copy(CopyRequest copyRequest) { assertNotUnsupportedArn(copyRequest.copyObjectRequest().sourceBucket(), "copy sourceBucket"); 
assertNotUnsupportedArn(copyRequest.copyObjectRequest().destinationBucket(), "copy destinationBucket"); - CompletableFuture crtFuture = + CompletableFuture future = s3AsyncClient.copyObject(copyRequest.copyObjectRequest()); - // Forward transfer cancellation to CRT future - CompletableFutureUtils.forwardExceptionTo(returnFuture, crtFuture); + // Forward transfer cancellation to future + CompletableFutureUtils.forwardExceptionTo(returnFuture, future); - CompletableFutureUtils.forwardTransformedResultTo(crtFuture, returnFuture, + CompletableFutureUtils.forwardTransformedResultTo(future, returnFuture, r -> CompletedCopy.builder() .response(r) .build()); @@ -429,4 +509,14 @@ private static boolean isMrapArn(Arn arn) { return !s3EndpointResource.region().isPresent(); } + + // TODO remove once MultipartS3AsyncClient is complete + private CompletableFuture doGetObject( + GetObjectRequest getObjectRequest, AsyncResponseTransformer asyncResponseTransformer) { + S3AsyncClient clientToUse = s3AsyncClient; + if (s3AsyncClient instanceof MultipartS3AsyncClient) { + clientToUse = (S3AsyncClient) ((DelegatingS3AsyncClient) s3AsyncClient).delegate(); + } + return clientToUse.getObject(getObjectRequest, asyncResponseTransformer); + } } diff --git a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/PauseResumeHelper.java b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/PauseResumeHelper.java new file mode 100644 index 000000000000..9c5220f388e4 --- /dev/null +++ b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/PauseResumeHelper.java @@ -0,0 +1,91 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.transfer.s3.internal; + +import static software.amazon.awssdk.transfer.s3.internal.utils.FileUtils.fileNotModified; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.transfer.s3.model.ResumableFileUpload; +import software.amazon.awssdk.transfer.s3.model.UploadFileRequest; +import software.amazon.awssdk.utils.Logger; + +@SdkInternalApi +public class PauseResumeHelper { + private static final Logger log = Logger.loggerFor(PauseResumeHelper.class); + + protected boolean fileModified(ResumableFileUpload resumableFileUpload, S3AsyncClient s3AsyncClient) { + boolean fileModified = !fileNotModified(resumableFileUpload.fileLength(), + resumableFileUpload.fileLastModified(), + resumableFileUpload.uploadFileRequest().source()); + + if (fileModified) { + UploadFileRequest uploadFileRequest = resumableFileUpload.uploadFileRequest(); + PutObjectRequest putObjectRequest = uploadFileRequest.putObjectRequest(); + log.debug(() -> String.format("The file (%s) has been modified since " + + "the last pause. 
" + + "The SDK will upload the requested object in bucket" + + " (%s) with key (%s) from " + + "the " + + "beginning.", + uploadFileRequest.source(), + putObjectRequest.bucket(), + putObjectRequest.key())); + resumableFileUpload.multipartUploadId() + .ifPresent(id -> { + log.debug(() -> "Aborting previous upload with multipartUploadId: " + id); + s3AsyncClient.abortMultipartUpload( + AbortMultipartUploadRequest.builder() + .bucket(putObjectRequest.bucket()) + .key(putObjectRequest.key()) + .uploadId(id) + .build()) + .exceptionally(t -> { + log.warn(() -> String.format("Failed to abort previous multipart upload " + + "(id: %s)" + + ". You may need to call " + + "S3AsyncClient#abortMultiPartUpload to " + + "free all storage consumed by" + + " all parts. ", + id), t); + return null; + }); + }); + } + + return fileModified; + } + + protected boolean hasResumeToken(ResumableFileUpload resumableFileUpload) { + boolean hasResumeToken = + resumableFileUpload.totalParts().isPresent() && resumableFileUpload.partSizeInBytes().isPresent(); + + if (!hasResumeToken) { + UploadFileRequest uploadFileRequest = resumableFileUpload.uploadFileRequest(); + PutObjectRequest putObjectRequest = uploadFileRequest.putObjectRequest(); + log.debug(() -> String.format("No resume token is found. 
" + + "The SDK will upload the requested object in bucket" + + " (%s) with key (%s) from " + + "the beginning.", + putObjectRequest.bucket(), + putObjectRequest.key())); + } + + return hasResumeToken; + } +} diff --git a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/model/CrtFileUpload.java b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/model/CrtFileUpload.java index 4f7a4a757c2c..790fb0d2ba60 100644 --- a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/model/CrtFileUpload.java +++ b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/model/CrtFileUpload.java @@ -148,7 +148,7 @@ public int hashCode() { @Override public String toString() { - return ToString.builder("DefaultFileUpload") + return ToString.builder("CrtFileUpload") .add("completionFuture", completionFuture) .add("progress", progress) .add("request", request) diff --git a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/model/DefaultFileUpload.java b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/model/DefaultFileUpload.java index 1579c64dbdf1..66647d27cd61 100644 --- a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/model/DefaultFileUpload.java +++ b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/model/DefaultFileUpload.java @@ -15,35 +15,77 @@ package software.amazon.awssdk.transfer.s3.internal.model; +import java.io.File; +import java.time.Instant; import java.util.concurrent.CompletableFuture; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.services.s3.multipart.PauseObservable; +import software.amazon.awssdk.services.s3.multipart.S3ResumeToken; import 
software.amazon.awssdk.transfer.s3.model.CompletedFileUpload; import software.amazon.awssdk.transfer.s3.model.FileUpload; import software.amazon.awssdk.transfer.s3.model.ResumableFileUpload; import software.amazon.awssdk.transfer.s3.model.UploadFileRequest; import software.amazon.awssdk.transfer.s3.progress.TransferProgress; +import software.amazon.awssdk.utils.Lazy; import software.amazon.awssdk.utils.ToString; import software.amazon.awssdk.utils.Validate; @SdkInternalApi public final class DefaultFileUpload implements FileUpload { + private final Lazy resumableFileUpload; private final CompletableFuture completionFuture; private final TransferProgress progress; private final UploadFileRequest request; + private final PauseObservable pauseObservable; public DefaultFileUpload(CompletableFuture completionFuture, TransferProgress progress, + PauseObservable pauseObservable, UploadFileRequest request) { this.completionFuture = Validate.paramNotNull(completionFuture, "completionFuture"); this.progress = Validate.paramNotNull(progress, "progress"); this.request = Validate.paramNotNull(request, "request"); + this.pauseObservable = pauseObservable; + this.resumableFileUpload = new Lazy<>(this::doPause); } @Override public ResumableFileUpload pause() { - throw new UnsupportedOperationException("Pausing an upload is not supported in a non CRT-based S3 Client. For " - + "upload pause support, pass an AWS CRT-based S3 client to S3TransferManager" - + "instead: S3AsyncClient.crtBuilder().build();"); + if (pauseObservable == null) { + throw new UnsupportedOperationException("Pausing an upload is not supported in a non CRT-based S3Client that does " + + "not have multipart configuration enabled. 
For upload pause support, pass " + + "a CRT-based S3Client or an S3Client with multipart enabled to " + + "S3TransferManager."); + } + + return resumableFileUpload.getValue(); + } + + private ResumableFileUpload doPause() { + File sourceFile = request.source().toFile(); + Instant fileLastModified = Instant.ofEpochMilli(sourceFile.lastModified()); + + ResumableFileUpload.Builder resumableFileBuilder = ResumableFileUpload.builder() + .fileLastModified(fileLastModified) + .fileLength(sourceFile.length()) + .uploadFileRequest(request); + + if (completionFuture.isDone()) { + return resumableFileBuilder.build(); + } + + S3ResumeToken token = pauseObservable.pause(); + + // Upload hasn't started yet, or it's a single object upload + if (token == null) { + return resumableFileBuilder.build(); + } + + return resumableFileBuilder.multipartUploadId(token.uploadId()) + .totalParts(token.totalNumParts()) + .transferredParts(token.numPartsCompleted()) + .partSizeInBytes(token.partSize()) + .build(); } @Override @@ -67,20 +109,28 @@ public boolean equals(Object o) { DefaultFileUpload that = (DefaultFileUpload) o; + if (!resumableFileUpload.equals(that.resumableFileUpload)) { + return false; + } if (!completionFuture.equals(that.completionFuture)) { return false; } if (!progress.equals(that.progress)) { return false; } - return request.equals(that.request); + if (!request.equals(that.request)) { + return false; + } + return pauseObservable == that.pauseObservable; } @Override public int hashCode() { - int result = completionFuture.hashCode(); + int result = resumableFileUpload.hashCode(); + result = 31 * result + completionFuture.hashCode(); result = 31 * result + progress.hashCode(); result = 31 * result + request.hashCode(); + result = 31 * result + pauseObservable.hashCode(); return result; } diff --git a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/model/FileUpload.java 
b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/model/FileUpload.java index 28486d76bd0b..90e99f2829f0 100644 --- a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/model/FileUpload.java +++ b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/model/FileUpload.java @@ -18,6 +18,8 @@ import java.util.concurrent.CompletableFuture; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.S3AsyncClientBuilder; +import software.amazon.awssdk.services.s3.multipart.MultipartConfiguration; import software.amazon.awssdk.transfer.s3.S3TransferManager; /** @@ -32,12 +34,15 @@ public interface FileUpload extends ObjectTransfer { *

    * The information object is serializable for persistent storage until it should be resumed. * See {@link ResumableFileUpload} for supported formats. - * + * *

    * Currently, it's only supported if the underlying {@link S3AsyncClient} is CRT-based (created via - * {@link S3AsyncClient#crtBuilder()} or {@link S3AsyncClient#crtCreate()}). + * {@link S3AsyncClient#crtBuilder()} or {@link S3AsyncClient#crtCreate()}), OR the underlying + * {@link S3AsyncClient} has multipart enabled ({@link S3AsyncClientBuilder#multipartConfiguration(MultipartConfiguration)} + * or {@link S3AsyncClientBuilder#multipartEnabled(Boolean)}). * It will throw {@link UnsupportedOperationException} if the {@link S3TransferManager} is created - * with a non CRT-based S3 client (created via {@link S3AsyncClient#builder()}). + * with a non CRT-based S3 client (created via {@link S3AsyncClient#builder()}) and does not have + * multipart configuration enabled. * * @return A {@link ResumableFileUpload} that can be used to resume the upload. */ diff --git a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/CrtFileUploadTest.java b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/CrtFileUploadTest.java index 9aecc1c05f29..2235da4b5660 100644 --- a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/CrtFileUploadTest.java +++ b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/CrtFileUploadTest.java @@ -40,6 +40,7 @@ import software.amazon.awssdk.crt.s3.ResumeToken; import software.amazon.awssdk.crt.s3.S3MetaRequest; import software.amazon.awssdk.services.s3.internal.crt.S3MetaRequestPauseObservable; +import software.amazon.awssdk.services.s3.internal.crt.S3MetaRequestWrapper; import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.transfer.s3.internal.model.CrtFileUpload; import software.amazon.awssdk.transfer.s3.internal.progress.DefaultTransferProgressSnapshot; @@ -53,7 +54,7 @@ class CrtFileUploadTest { private static final int 
NUM_OF_PARTS_COMPLETED = 5; private static final long PART_SIZE_IN_BYTES = 8 * MB; private static final String MULTIPART_UPLOAD_ID = "someId"; - private S3MetaRequest metaRequest; + private S3MetaRequestPauseObservable observable; private static FileSystem fileSystem; private static File file; private static ResumeToken token; @@ -77,7 +78,7 @@ public static void tearDown() throws IOException { @BeforeEach void setUpBeforeEachTest() { - metaRequest = Mockito.mock(S3MetaRequest.class); + observable = Mockito.mock(S3MetaRequestPauseObservable.class); } @Test @@ -102,17 +103,13 @@ void pause_futureCompleted_shouldReturnNormally() { .sdkResponse(putObjectResponse) .transferredBytes(0L) .build()); - S3MetaRequestPauseObservable observable = new S3MetaRequestPauseObservable(); - UploadFileRequest request = uploadFileRequest(); CrtFileUpload fileUpload = new CrtFileUpload(future, transferProgress, observable, request); - observable.subscribe(metaRequest); - ResumableFileUpload resumableFileUpload = fileUpload.pause(); - Mockito.verify(metaRequest, Mockito.never()).pause(); + Mockito.verify(observable, Mockito.never()).pause(); assertThat(resumableFileUpload.totalParts()).isEmpty(); assertThat(resumableFileUpload.partSizeInBytes()).isEmpty(); assertThat(resumableFileUpload.multipartUploadId()).isEmpty(); @@ -130,10 +127,7 @@ void pauseTwice_shouldReturnTheSame() { .transferredBytes(1000L) .build()); UploadFileRequest request = uploadFileRequest(); - - S3MetaRequestPauseObservable observable = new S3MetaRequestPauseObservable(); - when(metaRequest.pause()).thenReturn(token); - observable.subscribe(metaRequest); + when(observable.pause()).thenReturn(token); CrtFileUpload fileUpload = new CrtFileUpload(future, transferProgress, observable, request); @@ -154,10 +148,8 @@ void pause_crtThrowException_shouldPropogate() { .build()); UploadFileRequest request = uploadFileRequest(); - S3MetaRequestPauseObservable observable = new S3MetaRequestPauseObservable(); CrtRuntimeException 
exception = new CrtRuntimeException("exception"); - when(metaRequest.pause()).thenThrow(exception); - observable.subscribe(metaRequest); + when(observable.pause()).thenThrow(exception); CrtFileUpload fileUpload = new CrtFileUpload(future, transferProgress, observable, request); @@ -173,17 +165,14 @@ void pause_futureNotComplete_shouldPause() { when(transferProgress.snapshot()).thenReturn(DefaultTransferProgressSnapshot.builder() .transferredBytes(0L) .build()); - S3MetaRequestPauseObservable observable = new S3MetaRequestPauseObservable(); - when(metaRequest.pause()).thenReturn(token); + when(observable.pause()).thenReturn(token); UploadFileRequest request = uploadFileRequest(); CrtFileUpload fileUpload = new CrtFileUpload(future, transferProgress, observable, request); - observable.subscribe(metaRequest); - ResumableFileUpload resumableFileUpload = fileUpload.pause(); - Mockito.verify(metaRequest).pause(); + Mockito.verify(observable).pause(); assertThat(resumableFileUpload.totalParts()).hasValue(TOTAL_PARTS); assertThat(resumableFileUpload.partSizeInBytes()).hasValue(PART_SIZE_IN_BYTES); assertThat(resumableFileUpload.multipartUploadId()).hasValue(MULTIPART_UPLOAD_ID); @@ -204,17 +193,14 @@ void pause_singlePart_shouldPause() { .sdkResponse(putObjectResponse) .transferredBytes(0L) .build()); - S3MetaRequestPauseObservable observable = new S3MetaRequestPauseObservable(); - when(metaRequest.pause()).thenThrow(new CrtRuntimeException(6)); + when(observable.pause()).thenThrow(new CrtRuntimeException(6)); UploadFileRequest request = uploadFileRequest(); CrtFileUpload fileUpload = new CrtFileUpload(future, transferProgress, observable, request); - observable.subscribe(metaRequest); - ResumableFileUpload resumableFileUpload = fileUpload.pause(); - Mockito.verify(metaRequest).pause(); + Mockito.verify(observable).pause(); assertThat(resumableFileUpload.totalParts()).isEmpty(); assertThat(resumableFileUpload.partSizeInBytes()).isEmpty(); 
assertThat(resumableFileUpload.multipartUploadId()).isEmpty(); diff --git a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/DefaultFileUploadTest.java b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/DefaultFileUploadTest.java index 539433734920..f7e523d9355f 100644 --- a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/DefaultFileUploadTest.java +++ b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/DefaultFileUploadTest.java @@ -15,38 +15,179 @@ package software.amazon.awssdk.transfer.s3.internal; -import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.transfer.s3.SizeConstant.MB; -import java.nio.file.Paths; +import com.google.common.jimfs.Jimfs; +import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileSystem; +import java.nio.file.Files; +import java.time.Instant; +import java.util.UUID; import java.util.concurrent.CompletableFuture; import nl.jqno.equalsverifier.EqualsVerifier; +import org.apache.commons.lang3.RandomStringUtils; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.mockito.Mockito; -import software.amazon.awssdk.transfer.s3.S3TransferManager; +import software.amazon.awssdk.services.s3.internal.multipart.PausableUpload; +import software.amazon.awssdk.services.s3.multipart.PauseObservable; +import software.amazon.awssdk.services.s3.multipart.S3ResumeToken; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.transfer.s3.internal.model.DefaultFileUpload; 
-import software.amazon.awssdk.transfer.s3.model.FileUpload; +import software.amazon.awssdk.transfer.s3.internal.progress.DefaultTransferProgressSnapshot; +import software.amazon.awssdk.transfer.s3.model.CompletedFileUpload; +import software.amazon.awssdk.transfer.s3.model.ResumableFileUpload; import software.amazon.awssdk.transfer.s3.model.UploadFileRequest; import software.amazon.awssdk.transfer.s3.progress.TransferProgress; class DefaultFileUploadTest { + private static final long TOTAL_PARTS = 10; + private static final long NUM_OF_PARTS_COMPLETED = 5; + private static final long PART_SIZE_IN_BYTES = 8 * MB; + private static final String MULTIPART_UPLOAD_ID = "someId"; + private static FileSystem fileSystem; + private static File file; + private static S3ResumeToken token; + + @BeforeAll + public static void setUp() throws IOException { + fileSystem = Jimfs.newFileSystem(); + file = File.createTempFile("test", UUID.randomUUID().toString()); + Files.write(file.toPath(), RandomStringUtils.random(2000).getBytes(StandardCharsets.UTF_8)); + token = S3ResumeToken.builder() + .uploadId(MULTIPART_UPLOAD_ID) + .totalNumParts(TOTAL_PARTS) + .numPartsCompleted(NUM_OF_PARTS_COMPLETED) + .partSize(PART_SIZE_IN_BYTES) + .build(); + } + + @AfterAll + public static void tearDown() throws IOException { + file.delete(); + } + @Test void equals_hashcode() { EqualsVerifier.forClass(DefaultFileUpload.class) - .withNonnullFields("completionFuture", "progress", "request") + .withNonnullFields("completionFuture", "progress", "request", "resumableFileUpload", "pauseObservable") + .withPrefabValues(PauseObservable.class, new PauseObservable(), new PauseObservable()) .verify(); } + private S3ResumeToken s3ResumeToken(CompletableFuture future) { + if (future.isDone()) { + return null; + } + return token; + } + @Test - void pause_shouldThrowUnsupportedOperation() { - TransferProgress transferProgress = Mockito.mock(TransferProgress.class); - UploadFileRequest request = 
UploadFileRequest.builder() - .source(Paths.get("test")) - .putObjectRequest(p -> p.key("test").bucket("bucket")) - .build(); - FileUpload fileUpload = new DefaultFileUpload(new CompletableFuture<>(), - transferProgress, - request); - - assertThatThrownBy(() -> fileUpload.pause()).isInstanceOf(UnsupportedOperationException.class); + void pause_futureCompleted_shouldReturnNormally() { + CompletableFuture future = + CompletableFuture.completedFuture(CompletedFileUpload.builder() + .response(PutObjectResponse.builder().build()) + .build()); + TransferProgress transferProgress = mock(TransferProgress.class); + PauseObservable observable = new PauseObservable(); + PausableUpload pausableUpload = mock(PausableUpload.class); + observable.setPausableUpload(pausableUpload); + when(pausableUpload.pause()).thenReturn(s3ResumeToken(future)); + + UploadFileRequest request = uploadFileRequest(); + DefaultFileUpload fileUpload = new DefaultFileUpload(future, transferProgress, observable, request); + + ResumableFileUpload resumableFileUpload = fileUpload.pause(); + + verify(pausableUpload, Mockito.never()).pause(); + assertThat(resumableFileUpload.totalParts()).isEmpty(); + assertThat(resumableFileUpload.partSizeInBytes()).isEmpty(); + assertThat(resumableFileUpload.multipartUploadId()).isEmpty(); + assertThat(resumableFileUpload.fileLength()).isEqualTo(file.length()); + assertThat(resumableFileUpload.uploadFileRequest()).isEqualTo(request); + assertThat(resumableFileUpload.fileLastModified()).isEqualTo(Instant.ofEpochMilli(file.lastModified())); + } + + + @Test + void pauseTwice_shouldReturnTheSame() { + CompletableFuture future = new CompletableFuture<>(); + TransferProgress transferProgress = mock(TransferProgress.class); + PauseObservable observable = new PauseObservable(); + PausableUpload pausableUpload = mock(PausableUpload.class); + observable.setPausableUpload(pausableUpload); + when(pausableUpload.pause()).thenReturn(s3ResumeToken(future)); + + UploadFileRequest request 
= uploadFileRequest(); + DefaultFileUpload fileUpload = new DefaultFileUpload(future, transferProgress, observable, request); + + ResumableFileUpload resumableFileUpload = fileUpload.pause(); + ResumableFileUpload resumableFileUpload2 = fileUpload.pause(); + + verify(pausableUpload).pause(); + assertThat(resumableFileUpload).isEqualTo(resumableFileUpload2); + } + + @Test + void pause_futureNotComplete_shouldPause() { + CompletableFuture future = new CompletableFuture<>(); + TransferProgress transferProgress = mock(TransferProgress.class); + when(transferProgress.snapshot()).thenReturn(DefaultTransferProgressSnapshot.builder() + .transferredBytes(0L) + .build()); + + + PauseObservable observable = new PauseObservable(); + PausableUpload pausableUpload = mock(PausableUpload.class); + observable.setPausableUpload(pausableUpload); + when(pausableUpload.pause()).thenReturn(s3ResumeToken(future)); + + UploadFileRequest request = uploadFileRequest(); + DefaultFileUpload fileUpload = new DefaultFileUpload(future, transferProgress, observable, request); + + ResumableFileUpload resumableFileUpload = fileUpload.pause(); + + verify(pausableUpload).pause(); + assertThat(resumableFileUpload.totalParts()).hasValue(TOTAL_PARTS); + assertThat(resumableFileUpload.partSizeInBytes()).hasValue(PART_SIZE_IN_BYTES); + assertThat(resumableFileUpload.multipartUploadId()).hasValue(MULTIPART_UPLOAD_ID); + assertThat(resumableFileUpload.transferredParts()).hasValue(NUM_OF_PARTS_COMPLETED); + assertThat(resumableFileUpload.fileLength()).isEqualTo(file.length()); + assertThat(resumableFileUpload.uploadFileRequest()).isEqualTo(request); + assertThat(resumableFileUpload.fileLastModified()).isEqualTo(Instant.ofEpochMilli(file.lastModified())); + } + + @Test + void pause_singlePart_shouldReturnNullResumeToken() { + CompletableFuture future = new CompletableFuture<>(); + TransferProgress transferProgress = mock(TransferProgress.class); + + PauseObservable observable = new PauseObservable(); + 
observable.setPausableUpload(null); + + UploadFileRequest request = uploadFileRequest(); + DefaultFileUpload fileUpload = new DefaultFileUpload(future, transferProgress, observable, request); + + ResumableFileUpload resumableFileUpload = fileUpload.pause(); + assertThat(resumableFileUpload.totalParts()).isEmpty(); + assertThat(resumableFileUpload.partSizeInBytes()).isEmpty(); + assertThat(resumableFileUpload.multipartUploadId()).isEmpty(); + assertThat(resumableFileUpload.fileLength()).isEqualTo(file.length()); + assertThat(resumableFileUpload.uploadFileRequest()).isEqualTo(request); + assertThat(resumableFileUpload.fileLastModified()).isEqualTo(Instant.ofEpochMilli(file.lastModified())); + } + + private UploadFileRequest uploadFileRequest() { + return UploadFileRequest.builder() + .source(file) + .putObjectRequest(p -> p.key("test").bucket("bucket")) + .build(); } } \ No newline at end of file diff --git a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/MultipartDownloadJavaBasedTest.java b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/MultipartDownloadJavaBasedTest.java new file mode 100644 index 000000000000..1b5c1063239f --- /dev/null +++ b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/MultipartDownloadJavaBasedTest.java @@ -0,0 +1,67 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.transfer.s3.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.nio.file.Paths; +import java.util.concurrent.CompletableFuture; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.internal.multipart.MultipartS3AsyncClient; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.multipart.MultipartConfiguration; +import software.amazon.awssdk.transfer.s3.S3TransferManager; +import software.amazon.awssdk.transfer.s3.model.CompletedFileDownload; + +class MultipartDownloadJavaBasedTest { + private S3AsyncClient mockDelegate; + private MultipartS3AsyncClient s3Multi; + private S3TransferManager tm; + private UploadDirectoryHelper uploadDirectoryHelper; + private DownloadDirectoryHelper downloadDirectoryHelper; + private TransferManagerConfiguration configuration; + + @BeforeEach + public void methodSetup() { + mockDelegate = mock(S3AsyncClient.class); + s3Multi = MultipartS3AsyncClient.create(mockDelegate, MultipartConfiguration.builder().build()); + uploadDirectoryHelper = mock(UploadDirectoryHelper.class); + configuration = mock(TransferManagerConfiguration.class); + downloadDirectoryHelper = mock(DownloadDirectoryHelper.class); + tm = new GenericS3TransferManager(s3Multi, uploadDirectoryHelper, configuration, downloadDirectoryHelper); + } + + @Test + void usingMultipartDownload_shouldNotThrowException() { + GetObjectResponse response = GetObjectResponse.builder().build(); + when(mockDelegate.getObject(any(GetObjectRequest.class), 
any(AsyncResponseTransformer.class))) + .thenReturn(CompletableFuture.completedFuture(response)); + + CompletedFileDownload completedFileDownload = tm.downloadFile(d -> d.getObjectRequest(g -> g.bucket("bucket") + .key("key")) + .destination(Paths.get("."))) + .completionFuture() + .join(); + assertThat(completedFileDownload.response()).isEqualTo(response); + } +} diff --git a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3CrtTransferProgressListenerTest.java b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3CrtTransferProgressListenerTest.java index 580cced1808a..10aa2fd68e2e 100644 --- a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3CrtTransferProgressListenerTest.java +++ b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3CrtTransferProgressListenerTest.java @@ -160,7 +160,9 @@ void listeners_reports_ErrorsWhenCancelled(WireMockRuntimeInfo wm) throws Interr assertThat(transferListener.getExceptionCaught()).isInstanceOf(CancellationException.class); assertThat(transferListener.isTransferComplete()).isFalse(); assertThat(transferListener.isTransferInitiated()).isTrue(); - assertMockOnFailure(transferListenerMock); + Mockito.verify(transferListenerMock, times(1)).transferFailed(ArgumentMatchers.any()); + Mockito.verify(transferListenerMock, times(1)).transferInitiated(ArgumentMatchers.any()); + Mockito.verify(transferListenerMock, times(0)).transferComplete(ArgumentMatchers.any()); } diff --git a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3TransferManagerUploadPauseAndResumeTest.java b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3TransferManagerUploadPauseAndResumeTest.java index 351fd03f7495..a79a8b4a5083 100644 --- 
a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3TransferManagerUploadPauseAndResumeTest.java +++ b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3TransferManagerUploadPauseAndResumeTest.java @@ -22,6 +22,7 @@ import static org.mockito.Mockito.when; import static software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute.SDK_HTTP_EXECUTION_ATTRIBUTES; import static software.amazon.awssdk.services.s3.internal.crt.S3InternalSdkHttpExecutionAttribute.CRT_PAUSE_RESUME_TOKEN; +import static software.amazon.awssdk.services.s3.multipart.S3PauseResumeExecutionAttribute.RESUME_TOKEN; import static software.amazon.awssdk.transfer.s3.SizeConstant.MB; import java.io.File; @@ -32,15 +33,20 @@ import java.time.Instant; import java.util.UUID; import java.util.concurrent.CompletableFuture; +import java.util.stream.Stream; import org.apache.commons.lang3.RandomStringUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import org.mockito.ArgumentCaptor; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.http.SdkHttpExecutionAttributes; +import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.internal.crt.S3CrtAsyncClient; +import software.amazon.awssdk.services.s3.multipart.S3ResumeToken; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.PutObjectRequest; @@ -52,7 +58,9 @@ class S3TransferManagerUploadPauseAndResumeTest { private S3CrtAsyncClient 
mockS3Crt; - private S3TransferManager tm; + private S3AsyncClient mockS3; + private S3TransferManager tmCrt; + private S3TransferManager tmJava; private UploadDirectoryHelper uploadDirectoryHelper; private DownloadDirectoryHelper downloadDirectoryHelper; private TransferManagerConfiguration configuration; @@ -62,21 +70,52 @@ class S3TransferManagerUploadPauseAndResumeTest { public void methodSetup() throws IOException { file = RandomTempFile.createTempFile("test", UUID.randomUUID().toString()); Files.write(file.toPath(), RandomStringUtils.randomAlphanumeric(1000).getBytes(StandardCharsets.UTF_8)); - mockS3Crt = mock(S3CrtAsyncClient.class); uploadDirectoryHelper = mock(UploadDirectoryHelper.class); configuration = mock(TransferManagerConfiguration.class); downloadDirectoryHelper = mock(DownloadDirectoryHelper.class); - tm = new CrtS3TransferManager(configuration, mockS3Crt, false); + mockS3Crt = mock(S3CrtAsyncClient.class); + mockS3 = mock(S3AsyncClient.class); + tmCrt = new CrtS3TransferManager(configuration, mockS3Crt, false); + tmJava = new GenericS3TransferManager(mockS3, uploadDirectoryHelper, configuration, downloadDirectoryHelper); } @AfterEach public void methodTeardown() { file.delete(); - tm.close(); + tmCrt.close(); + tmJava.close(); + } + + enum TmType{ + JAVA, CRT + } + + private static Stream transferManagers() { + return Stream.of( + Arguments.of(TmType.JAVA), + Arguments.of(TmType.CRT) + ); } - @Test - void resumeUploadFile_noResumeToken_shouldUploadFromBeginning() { + private S3TransferManager configureTestBehavior(TmType tmType, PutObjectResponse response) { + if (tmType == TmType.JAVA) { + when(mockS3.putObject(any(PutObjectRequest.class), any(AsyncRequestBody.class))) + .thenReturn(CompletableFuture.completedFuture(response)); + when(mockS3.abortMultipartUpload(any(AbortMultipartUploadRequest.class))) + .thenReturn(CompletableFuture.completedFuture(AbortMultipartUploadResponse.builder().build())); + return tmJava; + } else { + 
when(mockS3Crt.putObject(any(PutObjectRequest.class), any(Path.class))) + .thenReturn(CompletableFuture.completedFuture(response)); + when(mockS3Crt.abortMultipartUpload(any(AbortMultipartUploadRequest.class))) + .thenReturn(CompletableFuture.completedFuture(AbortMultipartUploadResponse.builder().build())); + return tmCrt; + } + } + + @ParameterizedTest + @MethodSource("transferManagers") + void resumeUploadFile_noResumeToken_shouldUploadFromBeginning(TmType tmType) { PutObjectRequest putObjectRequest = putObjectRequest(); PutObjectResponse response = PutObjectResponse.builder().build(); Instant fileLastModified = Instant.ofEpochMilli(file.lastModified()); @@ -87,9 +126,7 @@ void resumeUploadFile_noResumeToken_shouldUploadFromBeginning() { .source(file) .build(); - - when(mockS3Crt.putObject(any(PutObjectRequest.class), any(Path.class))) - .thenReturn(CompletableFuture.completedFuture(response)); + S3TransferManager tm = configureTestBehavior(tmType, response); CompletedFileUpload completedFileUpload = tm.resumeUploadFile(r -> r.fileLength(fileLength) .uploadFileRequest(uploadFileRequest) @@ -97,11 +134,17 @@ void resumeUploadFile_noResumeToken_shouldUploadFromBeginning() { .completionFuture() .join(); assertThat(completedFileUpload.response()).isEqualTo(response); - verifyActualPutObjectRequestNotResumed(); + + if (tmType == TmType.JAVA) { + verifyActualPutObjectRequestNotResumed_tmJava(); + } else { + verifyActualPutObjectRequestNotResumed_tmCrt(); + } } - @Test - void resumeUploadFile_fileModified_shouldAbortExistingAndUploadFromBeginning() { + @ParameterizedTest + @MethodSource("transferManagers") + void resumeUploadFile_fileModified_shouldAbortExistingAndUploadFromBeginning(TmType tmType) { PutObjectRequest putObjectRequest = putObjectRequest(); PutObjectResponse response = PutObjectResponse.builder().build(); Instant fileLastModified = Instant.ofEpochMilli(file.lastModified()); @@ -112,12 +155,7 @@ void 
resumeUploadFile_fileModified_shouldAbortExistingAndUploadFromBeginning() { .source(file) .build(); - - when(mockS3Crt.putObject(any(PutObjectRequest.class), any(Path.class))) - .thenReturn(CompletableFuture.completedFuture(response)); - - when(mockS3Crt.abortMultipartUpload(any(AbortMultipartUploadRequest.class))) - .thenReturn(CompletableFuture.completedFuture(AbortMultipartUploadResponse.builder().build())); + S3TransferManager tm = configureTestBehavior(tmType, response); String multipartId = "someId"; CompletedFileUpload completedFileUpload = tm.resumeUploadFile(r -> r.fileLength(fileLength + 10L) @@ -129,18 +167,29 @@ void resumeUploadFile_fileModified_shouldAbortExistingAndUploadFromBeginning() { .completionFuture() .join(); assertThat(completedFileUpload.response()).isEqualTo(response); - verifyActualPutObjectRequestNotResumed(); + + if (tmType == TmType.JAVA) { + verifyActualPutObjectRequestNotResumed_tmJava(); + } else { + verifyActualPutObjectRequestNotResumed_tmCrt(); + } ArgumentCaptor abortMultipartUploadRequestArgumentCaptor = ArgumentCaptor.forClass(AbortMultipartUploadRequest.class); - verify(mockS3Crt).abortMultipartUpload(abortMultipartUploadRequestArgumentCaptor.capture()); + + if (tmType == TmType.JAVA) { + verify(mockS3).abortMultipartUpload(abortMultipartUploadRequestArgumentCaptor.capture()); + } else { + verify(mockS3Crt).abortMultipartUpload(abortMultipartUploadRequestArgumentCaptor.capture()); + } AbortMultipartUploadRequest actualRequest = abortMultipartUploadRequestArgumentCaptor.getValue(); assertThat(actualRequest.uploadId()).isEqualTo(multipartId); } - @Test - void resumeUploadFile_hasValidResumeToken_shouldResumeUpload() { + @ParameterizedTest + @MethodSource("transferManagers") + void resumeUploadFile_hasValidResumeToken_shouldResumeUpload(TmType tmType) { PutObjectRequest putObjectRequest = putObjectRequest(); PutObjectResponse response = PutObjectResponse.builder().build(); Instant fileLastModified = 
Instant.ofEpochMilli(file.lastModified()); @@ -151,10 +200,7 @@ void resumeUploadFile_hasValidResumeToken_shouldResumeUpload() { .source(file) .build(); - - when(mockS3Crt.putObject(any(PutObjectRequest.class), any(Path.class))) - .thenReturn(CompletableFuture.completedFuture(response)); - + S3TransferManager tm = configureTestBehavior(tmType, response); String multipartId = "someId"; long totalParts = 10L; @@ -169,31 +215,65 @@ void resumeUploadFile_hasValidResumeToken_shouldResumeUpload() { .join(); assertThat(completedFileUpload.response()).isEqualTo(response); - ArgumentCaptor putObjectRequestArgumentCaptor = - ArgumentCaptor.forClass(PutObjectRequest.class); - verify(mockS3Crt).putObject(putObjectRequestArgumentCaptor.capture(), any(Path.class)); + if (tmType == TmType.JAVA) { + verifyActualPutObjectRequestResumedAndCorrectTokenReturned_tmJava(multipartId, partSizeInBytes, totalParts); + } else { + verifyActualPutObjectRequestResumedAndCorrectTokenReturned_tmCrt(multipartId, partSizeInBytes, totalParts); + } + } + + private void verifyActualPutObjectRequestNotResumed_tmCrt() { + ArgumentCaptor putObjectRequestArgumentCaptor = ArgumentCaptor.forClass(PutObjectRequest.class); + + verify(mockS3Crt).putObject(putObjectRequestArgumentCaptor.capture(), any(Path.class)); PutObjectRequest actualRequest = putObjectRequestArgumentCaptor.getValue(); AwsRequestOverrideConfiguration awsRequestOverrideConfiguration = actualRequest.overrideConfiguration().get(); SdkHttpExecutionAttributes attribute = awsRequestOverrideConfiguration.executionAttributes().getAttribute(SDK_HTTP_EXECUTION_ATTRIBUTES); - assertThat(attribute.getAttribute(CRT_PAUSE_RESUME_TOKEN)).satisfies(token -> { - assertThat(token.getUploadId()).isEqualTo(multipartId); - assertThat(token.getPartSize()).isEqualTo(partSizeInBytes); - assertThat(token.getTotalNumParts()).isEqualTo(totalParts); - }); + assertThat(attribute.getAttribute(CRT_PAUSE_RESUME_TOKEN)).isNull(); + } + + private void 
verifyActualPutObjectRequestNotResumed_tmJava() { + ArgumentCaptor putObjectRequestArgumentCaptor = ArgumentCaptor.forClass(PutObjectRequest.class); + + verify(mockS3).putObject(putObjectRequestArgumentCaptor.capture(), any(AsyncRequestBody.class)); + PutObjectRequest actualRequest = putObjectRequestArgumentCaptor.getValue(); + + assertThat(actualRequest.overrideConfiguration()).isEmpty(); + } + + private void verifyActualPutObjectRequestResumedAndCorrectTokenReturned_tmJava(String multipartId, long partSizeInBytes, + long totalParts) { + ArgumentCaptor putObjectRequestArgumentCaptor = ArgumentCaptor.forClass(PutObjectRequest.class); + + verify(mockS3).putObject(putObjectRequestArgumentCaptor.capture(), any(AsyncRequestBody.class)); + PutObjectRequest actualRequest = putObjectRequestArgumentCaptor.getValue(); + AwsRequestOverrideConfiguration awsRequestOverrideConfiguration = actualRequest.overrideConfiguration().get(); + + assertThat(awsRequestOverrideConfiguration.executionAttributes().getAttribute(RESUME_TOKEN)).isNotNull(); + S3ResumeToken s3ResumeToken = awsRequestOverrideConfiguration.executionAttributes().getAttribute(RESUME_TOKEN); + + assertThat(s3ResumeToken.uploadId()).isEqualTo(multipartId); + assertThat(s3ResumeToken.partSize()).isEqualTo(partSizeInBytes); + assertThat(s3ResumeToken.totalNumParts()).isEqualTo(totalParts); } - private void verifyActualPutObjectRequestNotResumed() { - ArgumentCaptor putObjectRequestArgumentCaptor = - ArgumentCaptor.forClass(PutObjectRequest.class); + private void verifyActualPutObjectRequestResumedAndCorrectTokenReturned_tmCrt(String multipartId, long partSizeInBytes, + long totalParts) { + ArgumentCaptor putObjectRequestArgumentCaptor = ArgumentCaptor.forClass(PutObjectRequest.class); + verify(mockS3Crt).putObject(putObjectRequestArgumentCaptor.capture(), any(Path.class)); PutObjectRequest actualRequest = putObjectRequestArgumentCaptor.getValue(); AwsRequestOverrideConfiguration awsRequestOverrideConfiguration = 
actualRequest.overrideConfiguration().get(); SdkHttpExecutionAttributes attribute = awsRequestOverrideConfiguration.executionAttributes().getAttribute(SDK_HTTP_EXECUTION_ATTRIBUTES); - assertThat(attribute.getAttribute(CRT_PAUSE_RESUME_TOKEN)).isNull(); + assertThat(attribute.getAttribute(CRT_PAUSE_RESUME_TOKEN)).satisfies(token -> { + assertThat(token.getUploadId()).isEqualTo(multipartId); + assertThat(token.getPartSize()).isEqualTo(partSizeInBytes); + assertThat(token.getTotalNumParts()).isEqualTo(totalParts); + }); } private static PutObjectRequest putObjectRequest() { diff --git a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/UploadDirectoryHelperTest.java b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/UploadDirectoryHelperTest.java index aba7cc86ae0d..9e975f09a357 100644 --- a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/UploadDirectoryHelperTest.java +++ b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/UploadDirectoryHelperTest.java @@ -48,7 +48,7 @@ import org.junit.jupiter.params.provider.MethodSource; import org.mockito.ArgumentCaptor; import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.services.s3.internal.crt.S3MetaRequestPauseObservable; +import software.amazon.awssdk.services.s3.multipart.PauseObservable; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.testutils.FileUtils; @@ -443,6 +443,7 @@ private DefaultFileUpload completedUpload() { new DefaultTransferProgress(DefaultTransferProgressSnapshot.builder() .transferredBytes(0L) .build()), + new PauseObservable(), UploadFileRequest.builder() .source(Paths.get(".")).putObjectRequest(b -> b.bucket("bucket").key("key")) .build()); @@ -453,6 +454,7 @@ private 
FileUpload newUpload(CompletableFuture future) { new DefaultTransferProgress(DefaultTransferProgressSnapshot.builder() .transferredBytes(0L) .build()), + new PauseObservable(), UploadFileRequest.builder() .putObjectRequest(p -> p.key("key").bucket("bucket")).source(Paths.get( "test.txt")) diff --git a/services/accessanalyzer/pom.xml b/services/accessanalyzer/pom.xml index 8ab3341a9414..6d0a86b3fb5f 100644 --- a/services/accessanalyzer/pom.xml +++ b/services/accessanalyzer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT accessanalyzer AWS Java SDK :: Services :: AccessAnalyzer diff --git a/services/accessanalyzer/src/main/resources/codegen-resources/customization.config b/services/accessanalyzer/src/main/resources/codegen-resources/customization.config index fb50d0a7a6eb..49a9cfc3d510 100644 --- a/services/accessanalyzer/src/main/resources/codegen-resources/customization.config +++ b/services/accessanalyzer/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/account/pom.xml b/services/account/pom.xml index 68ad086e8962..be43c2df517a 100644 --- a/services/account/pom.xml +++ b/services/account/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT account AWS Java SDK :: Services :: Account diff --git a/services/account/src/main/resources/codegen-resources/customization.config b/services/account/src/main/resources/codegen-resources/customization.config index fb50d0a7a6eb..49a9cfc3d510 100644 --- a/services/account/src/main/resources/codegen-resources/customization.config +++ b/services/account/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/acm/pom.xml b/services/acm/pom.xml index 5fa69230a472..7913078e89a7 
100644 --- a/services/acm/pom.xml +++ b/services/acm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT acm AWS Java SDK :: Services :: AWS Certificate Manager diff --git a/services/acm/src/main/resources/codegen-resources/customization.config b/services/acm/src/main/resources/codegen-resources/customization.config index 6146c9256ea3..2e642dc4284c 100644 --- a/services/acm/src/main/resources/codegen-resources/customization.config +++ b/services/acm/src/main/resources/codegen-resources/customization.config @@ -2,5 +2,6 @@ "verifiedSimpleMethods": [ "listCertificates" ], - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/acmpca/pom.xml b/services/acmpca/pom.xml index a845ba829adc..78e9f469a493 100644 --- a/services/acmpca/pom.xml +++ b/services/acmpca/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT acmpca AWS Java SDK :: Services :: ACM PCA diff --git a/services/acmpca/src/main/resources/codegen-resources/customization.config b/services/acmpca/src/main/resources/codegen-resources/customization.config index a001cc0951e3..fa05c9e79dcb 100644 --- a/services/acmpca/src/main/resources/codegen-resources/customization.config +++ b/services/acmpca/src/main/resources/codegen-resources/customization.config @@ -2,5 +2,6 @@ "verifiedSimpleMethods": [ "listCertificateAuthorities" ], - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/alexaforbusiness/pom.xml b/services/alexaforbusiness/pom.xml index 06012f4a6ce6..d1b4e2b248ca 100644 --- a/services/alexaforbusiness/pom.xml +++ b/services/alexaforbusiness/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 alexaforbusiness diff --git a/services/alexaforbusiness/src/main/resources/codegen-resources/customization.config 
b/services/alexaforbusiness/src/main/resources/codegen-resources/customization.config index 0d85d066f3c5..bf752c5c8eb1 100644 --- a/services/alexaforbusiness/src/main/resources/codegen-resources/customization.config +++ b/services/alexaforbusiness/src/main/resources/codegen-resources/customization.config @@ -2,5 +2,6 @@ "excludedSimpleMethods": [ "*" ], - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/amp/pom.xml b/services/amp/pom.xml index 0e9c73182e5a..9855a17d022c 100644 --- a/services/amp/pom.xml +++ b/services/amp/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT amp AWS Java SDK :: Services :: Amp diff --git a/services/amp/src/main/resources/codegen-resources/customization.config b/services/amp/src/main/resources/codegen-resources/customization.config index fb50d0a7a6eb..49a9cfc3d510 100644 --- a/services/amp/src/main/resources/codegen-resources/customization.config +++ b/services/amp/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/amp/src/main/resources/codegen-resources/service-2.json b/services/amp/src/main/resources/codegen-resources/service-2.json index 8020640a984f..fb8386642173 100644 --- a/services/amp/src/main/resources/codegen-resources/service-2.json +++ b/services/amp/src/main/resources/codegen-resources/service-2.json @@ -30,7 +30,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

    Create an alert manager definition.

    ", + "documentation":"

    The CreateAlertManagerDefinition operation creates the alert manager definition in a workspace. If a workspace already has an alert manager definition, don't use this operation to update it. Instead, use PutAlertManagerDefinition.

    ", "idempotent":true }, "CreateLoggingConfiguration":{ @@ -48,7 +48,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Create logging configuration.

    ", + "documentation":"

    The CreateLoggingConfiguration operation creates a logging configuration for the workspace. Use this operation to set the CloudWatch log group to which the logs will be published to.

    ", "idempotent":true }, "CreateRuleGroupsNamespace":{ @@ -69,7 +69,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

    Create a rule group namespace.

    ", + "documentation":"

    The CreateRuleGroupsNamespace operation creates a rule groups namespace within a workspace. A rule groups namespace is associated with exactly one rules file. A workspace can have multiple rule groups namespaces.

    Use this operation only to create new rule groups namespaces. To update an existing rule groups namespace, use PutRuleGroupsNamespace.

    ", "idempotent":true }, "CreateScraper":{ @@ -90,7 +90,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

    Create a scraper.

    ", + "documentation":"

    The CreateScraper operation creates a scraper to collect metrics. A scraper pulls metrics from Prometheus-compatible sources within an Amazon EKS cluster, and sends them to your Amazon Managed Service for Prometheus workspace. You can configure the scraper to control what metrics are collected, and what transformations are applied prior to sending them to your workspace.

    If needed, an IAM role will be created for you that gives Amazon Managed Service for Prometheus access to the metrics in your cluster. For more information, see Using roles for scraping metrics from EKS in the Amazon Managed Service for Prometheus User Guide.

    You cannot update a scraper. If you want to change the configuration of the scraper, create a new scraper and delete the old one.

    The scrapeConfiguration parameter contains the base64-encoded version of the YAML configuration file.

    For more information about collectors, including what metrics are collected, and how to configure the scraper, see Amazon Web Services managed collectors in the Amazon Managed Service for Prometheus User Guide.

    ", "idempotent":true }, "CreateWorkspace":{ @@ -110,7 +110,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

    Creates a new AMP workspace.

    ", + "documentation":"

    Creates a Prometheus workspace. A workspace is a logical space dedicated to the storage and querying of Prometheus metrics. You can have one or more workspaces in each Region in your account.

    ", "idempotent":true }, "DeleteAlertManagerDefinition":{ @@ -129,7 +129,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Deletes an alert manager definition.

    ", + "documentation":"

    Deletes the alert manager definition from a workspace.

    ", "idempotent":true }, "DeleteLoggingConfiguration":{ @@ -147,7 +147,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Delete logging configuration.

    ", + "documentation":"

    Deletes the logging configuration for a workspace.

    ", "idempotent":true }, "DeleteRuleGroupsNamespace":{ @@ -166,7 +166,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Delete a rule groups namespace.

    ", + "documentation":"

    Deletes one rule groups namespace and its associated rule groups definition.

    ", "idempotent":true }, "DeleteScraper":{ @@ -186,7 +186,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Deletes a scraper.

    ", + "documentation":"

    The DeleteScraper operation deletes one scraper, and stops any metrics collection that the scraper performs.

    ", "idempotent":true }, "DeleteWorkspace":{ @@ -205,7 +205,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Deletes an AMP workspace.

    ", + "documentation":"

    Deletes an existing workspace.

    When you delete a workspace, the data that has been ingested into it is not immediately deleted. It will be permanently deleted within one month.

    ", "idempotent":true }, "DescribeAlertManagerDefinition":{ @@ -224,7 +224,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Describes an alert manager definition.

    " + "documentation":"

    Retrieves the full information about the alert manager definition for a workspace.

    " }, "DescribeLoggingConfiguration":{ "name":"DescribeLoggingConfiguration", @@ -241,7 +241,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Describes logging configuration.

    " + "documentation":"

    Returns complete information about the current logging configuration of the workspace.

    " }, "DescribeRuleGroupsNamespace":{ "name":"DescribeRuleGroupsNamespace", @@ -259,7 +259,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Describe a rule groups namespace.

    " + "documentation":"

    Returns complete information about one rule groups namespace. To retrieve a list of rule groups namespaces, use ListRuleGroupsNamespaces.

    " }, "DescribeScraper":{ "name":"DescribeScraper", @@ -277,7 +277,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Describe an existing scraper.

    " + "documentation":"

    The DescribeScraper operation displays information about an existing scraper.

    " }, "DescribeWorkspace":{ "name":"DescribeWorkspace", @@ -295,7 +295,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Describes an existing AMP workspace.

    " + "documentation":"

    Returns information about an existing workspace.

    " }, "GetDefaultScraperConfiguration":{ "name":"GetDefaultScraperConfiguration", @@ -311,7 +311,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Gets a default configuration.

    " + "documentation":"

    The GetDefaultScraperConfiguration operation returns the default scraper configuration used when Amazon EKS creates a scraper for you.

    " }, "ListRuleGroupsNamespaces":{ "name":"ListRuleGroupsNamespaces", @@ -329,7 +329,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Lists rule groups namespaces.

    " + "documentation":"

    Returns a list of rule groups namespaces in a workspace.

    " }, "ListScrapers":{ "name":"ListScrapers", @@ -346,7 +346,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Lists all scrapers in a customer account, including scrapers being created or deleted. You may provide filters to return a more specific list of results.

    " + "documentation":"

    The ListScrapers operation lists all of the scrapers in your account. This includes scrapers being created or deleted. You can optionally filter the returned list.

    " }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -364,7 +364,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Lists the tags you have assigned to the resource.

    " + "documentation":"

    The ListTagsForResource operation returns the tags that are associated with an Amazon Managed Service for Prometheus resource. Currently, the only resources that can be tagged are workspaces and rule groups namespaces.

    " }, "ListWorkspaces":{ "name":"ListWorkspaces", @@ -381,7 +381,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Lists all AMP workspaces, including workspaces being created or deleted.

    " + "documentation":"

    Lists all of the Amazon Managed Service for Prometheus workspaces in your account. This includes workspaces being created or deleted.

    " }, "PutAlertManagerDefinition":{ "name":"PutAlertManagerDefinition", @@ -401,7 +401,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

    Update an alert manager definition.

    ", + "documentation":"

    Updates an existing alert manager definition in a workspace. If the workspace does not already have an alert manager definition, don't use this operation to create it. Instead, use CreateAlertManagerDefinition.

    ", "idempotent":true }, "PutRuleGroupsNamespace":{ @@ -422,7 +422,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

    Update a rule groups namespace.

    ", + "documentation":"

    Updates an existing rule groups namespace within a workspace. A rule groups namespace is associated with exactly one rules file. A workspace can have multiple rule groups namespaces.

    Use this operation only to update existing rule groups namespaces. To create a new rule groups namespace, use CreateRuleGroupsNamespace.

    You can't use this operation to add tags to an existing rule groups namespace. Instead, use TagResource.

    ", "idempotent":true }, "TagResource":{ @@ -441,7 +441,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Creates tags for the specified resource.

    " + "documentation":"

    The TagResource operation associates tags with an Amazon Managed Service for Prometheus resource. The only resources that can be tagged are workspaces and rule groups namespaces.

    If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.

    " }, "UntagResource":{ "name":"UntagResource", @@ -459,7 +459,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Deletes tags from the specified resource.

    ", + "documentation":"

    Removes the specified tags from an Amazon Managed Service for Prometheus resource. The only resources that can be tagged are workspaces and rule groups namespaces.

    ", "idempotent":true }, "UpdateLoggingConfiguration":{ @@ -478,7 +478,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Update logging configuration.

    ", + "documentation":"

    Updates the log group ARN or the workspace ID of the current logging configuration.

    ", "idempotent":true }, "UpdateWorkspaceAlias":{ @@ -498,7 +498,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

    Updates an AMP workspace alias.

    ", + "documentation":"

    Updates the alias of an existing workspace.

    ", "idempotent":true } }, @@ -512,7 +512,7 @@ "documentation":"

    Description of the error.

    " } }, - "documentation":"

    User does not have sufficient access to perform this action.

    ", + "documentation":"

    You do not have sufficient access to perform this action.

    ", "error":{ "httpStatusCode":403, "senderFault":true @@ -521,35 +521,35 @@ }, "AlertManagerDefinitionData":{ "type":"blob", - "documentation":"

    The alert manager definition data.

    " + "documentation":"

    The base-64 encoded blob that is alert manager definition.

    For details about the alert manager definition, see AlertManagedDefinitionData.

    " }, "AlertManagerDefinitionDescription":{ "type":"structure", "required":[ - "status", - "data", "createdAt", - "modifiedAt" + "data", + "modifiedAt", + "status" ], "members":{ - "status":{ - "shape":"AlertManagerDefinitionStatus", - "documentation":"

    The status of alert manager definition.

    " + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the alert manager definition was created.

    " }, "data":{ "shape":"AlertManagerDefinitionData", - "documentation":"

    The alert manager definition.

    " - }, - "createdAt":{ - "shape":"Timestamp", - "documentation":"

    The time when the alert manager definition was created.

    " + "documentation":"

    The actual alert manager definition.

    For details about the alert manager definition, see AlertManagedDefinitionData.

    " }, "modifiedAt":{ "shape":"Timestamp", - "documentation":"

    The time when the alert manager definition was modified.

    " + "documentation":"

    The date and time that the alert manager definition was most recently changed.

    " + }, + "status":{ + "shape":"AlertManagerDefinitionStatus", + "documentation":"

    A structure that displays the current status of the alert manager definition..

    " } }, - "documentation":"

    Represents the properties of an alert manager definition.

    " + "documentation":"

    The details of an alert manager definition.

    " }, "AlertManagerDefinitionStatus":{ "type":"structure", @@ -557,18 +557,18 @@ "members":{ "statusCode":{ "shape":"AlertManagerDefinitionStatusCode", - "documentation":"

    Status code of this definition.

    " + "documentation":"

    The current status of the alert manager.

    " }, "statusReason":{ "shape":"String", - "documentation":"

    The reason for failure if any.

    " + "documentation":"

    If there is a failure, the reason for the failure.

    " } }, - "documentation":"

    Represents the status of a definition.

    " + "documentation":"

    The status of the alert manager.

    " }, "AlertManagerDefinitionStatusCode":{ "type":"string", - "documentation":"

    State of an alert manager definition.

    ", + "documentation":"

    State of an AlertManagerDefinition.

    ", "enum":[ "CREATING", "ACTIVE", @@ -584,16 +584,16 @@ "members":{ "workspaceArn":{ "shape":"WorkspaceArn", - "documentation":"

    The ARN of an AMP workspace.

    " + "documentation":"

    ARN of the Amazon Managed Service for Prometheus workspace.

    " } }, - "documentation":"

    A representation of an AMP destination.

    " + "documentation":"

    The AmpConfiguration structure defines the Amazon Managed Service for Prometheus instance a scraper should send metrics to.

    " }, "Blob":{"type":"blob"}, "ClusterArn":{ "type":"string", "documentation":"

    The ARN of an EKS cluster.

    ", - "pattern":"arn:aws[-a-z]*:eks:[-a-z0-9]+:[0-9]{12}:cluster/.+" + "pattern":"^arn:aws[-a-z]*:eks:[-a-z0-9]+:[0-9]{12}:cluster/.+$" }, "ConflictException":{ "type":"structure", @@ -616,7 +616,7 @@ "documentation":"

    Type of the resource affected.

    " } }, - "documentation":"

    Updating or deleting a resource can cause an inconsistent state.

    ", + "documentation":"

    The request would cause an inconsistent state.

    ", "error":{ "httpStatusCode":409, "senderFault":true @@ -626,27 +626,27 @@ "CreateAlertManagerDefinitionRequest":{ "type":"structure", "required":[ - "workspaceId", - "data" + "data", + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

    The ID of the workspace in which to create the alert manager definition.

    ", - "location":"uri", - "locationName":"workspaceId" + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

    ", + "idempotencyToken":true }, "data":{ "shape":"AlertManagerDefinitionData", - "documentation":"

    The alert manager definition data.

    " + "documentation":"

    The alert manager definition to add. A base64-encoded version of the YAML alert manager definition file.

    For details about the alert manager definition, see AlertManagedDefinitionData.

    " }, - "clientToken":{ - "shape":"IdempotencyToken", - "documentation":"

    Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

    ", - "idempotencyToken":true + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace to add the alert manager definition to.

    ", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

    Represents the input of a CreateAlertManagerDefinition operation.

    " + "documentation":"

    Represents the input of a CreateAlertManagerDefinition operation.

    " }, "CreateAlertManagerDefinitionResponse":{ "type":"structure", @@ -654,35 +654,35 @@ "members":{ "status":{ "shape":"AlertManagerDefinitionStatus", - "documentation":"

    The status of alert manager definition.

    " + "documentation":"

    A structure that displays the current status of the alert manager definition.

    " } }, - "documentation":"

    Represents the output of a CreateAlertManagerDefinition operation.

    " + "documentation":"

    Represents the output of a CreateAlertManagerDefinition operation.

    " }, "CreateLoggingConfigurationRequest":{ "type":"structure", "required":[ - "workspaceId", - "logGroupArn" + "logGroupArn", + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

    The ID of the workspace to vend logs to.

    ", - "location":"uri", - "locationName":"workspaceId" + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

    ", + "idempotencyToken":true }, "logGroupArn":{ "shape":"LogGroupArn", - "documentation":"

    The ARN of the CW log group to which the vended log data will be published.

    " + "documentation":"

    The ARN of the CloudWatch log group to which the vended log data will be published. This log group must exist prior to calling this API.

    " }, - "clientToken":{ - "shape":"IdempotencyToken", - "documentation":"

    Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

    ", - "idempotencyToken":true + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace to create the logging configuration for.

    ", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

    Represents the input of a CreateLoggingConfiguration operation.

    " + "documentation":"

    Represents the input of a CreateLoggingConfiguration operation.

    " }, "CreateLoggingConfigurationResponse":{ "type":"structure", @@ -690,277 +690,277 @@ "members":{ "status":{ "shape":"LoggingConfigurationStatus", - "documentation":"

    The status of the logging configuration.

    " + "documentation":"

    A structure that displays the current status of the logging configuration.

    " } }, - "documentation":"

    Represents the output of a CreateLoggingConfiguration operation.

    " + "documentation":"

    Represents the output of a CreateLoggingConfiguration operation.

    " }, "CreateRuleGroupsNamespaceRequest":{ "type":"structure", "required":[ - "workspaceId", + "data", "name", - "data" + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

    The ID of the workspace in which to create the rule group namespace.

    ", - "location":"uri", - "locationName":"workspaceId" - }, - "name":{ - "shape":"RuleGroupsNamespaceName", - "documentation":"

    The rule groups namespace name.

    " + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

    ", + "idempotencyToken":true }, "data":{ "shape":"RuleGroupsNamespaceData", - "documentation":"

    The namespace data that define the rule groups.

    " + "documentation":"

    The rules file to use in the new namespace.

    Contains the base64-encoded version of the YAML rules file.

    For details about the rule groups namespace structure, see RuleGroupsNamespaceData.

    " }, - "clientToken":{ - "shape":"IdempotencyToken", - "documentation":"

    Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

    ", - "idempotencyToken":true + "name":{ + "shape":"RuleGroupsNamespaceName", + "documentation":"

    The name for the new rule groups namespace.

    " }, "tags":{ "shape":"TagMap", - "documentation":"

    Optional, user-provided tags for this rule groups namespace.

    " + "documentation":"

    The list of tag keys and values to associate with the rule groups namespace.

    " + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace to add the rule groups namespace.

    ", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

    Represents the input of a CreateRuleGroupsNamespace operation.

    " + "documentation":"

    Represents the input of a CreateRuleGroupsNamespace operation.

    " }, "CreateRuleGroupsNamespaceResponse":{ "type":"structure", "required":[ - "name", "arn", + "name", "status" ], "members":{ - "name":{ - "shape":"RuleGroupsNamespaceName", - "documentation":"

    The rule groups namespace name.

    " - }, "arn":{ "shape":"RuleGroupsNamespaceArn", - "documentation":"

    The Amazon Resource Name (ARN) of this rule groups namespace.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the new rule groups namespace.

    " + }, + "name":{ + "shape":"RuleGroupsNamespaceName", + "documentation":"

    The name of the new rule groups namespace.

    " }, "status":{ "shape":"RuleGroupsNamespaceStatus", - "documentation":"

    The status of rule groups namespace.

    " + "documentation":"

    A structure that returns the current status of the rule groups namespace.

    " }, "tags":{ "shape":"TagMap", - "documentation":"

    The tags of this rule groups namespace.

    " + "documentation":"

    The list of tag keys and values that are associated with the namespace.

    " } }, - "documentation":"

    Represents the output of a CreateRuleGroupsNamespace operation.

    " + "documentation":"

    Represents the output of a CreateRuleGroupsNamespace operation.

    " }, "CreateScraperRequest":{ "type":"structure", "required":[ + "destination", "scrapeConfiguration", - "source", - "destination" + "source" ], "members":{ "alias":{ "shape":"ScraperAlias", - "documentation":"

    An optional user-assigned alias for this scraper. This alias is for user reference and does not need to be unique.

    " + "documentation":"

    (optional) a name to associate with the scraper. This is for your use, and does not need to be unique.

    " + }, + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    (Optional) A unique, case-sensitive identifier that you can provide to ensure the idempotency of the request.

    ", + "idempotencyToken":true + }, + "destination":{ + "shape":"Destination", + "documentation":"

    The Amazon Managed Service for Prometheus workspace to send metrics to.

    " }, "scrapeConfiguration":{ "shape":"ScrapeConfiguration", - "documentation":"

    The configuration used to create the scraper.

    " + "documentation":"

    The configuration file to use in the new scraper. For more information, see Scraper configuration in the Amazon Managed Service for Prometheus User Guide.

    " }, "source":{ "shape":"Source", - "documentation":"

    The source that the scraper will be discovering and collecting metrics from.

    " - }, - "destination":{ - "shape":"Destination", - "documentation":"

    The destination that the scraper will be producing metrics to.

    " - }, - "clientToken":{ - "shape":"IdempotencyToken", - "documentation":"

    Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

    ", - "idempotencyToken":true + "documentation":"

    The Amazon EKS cluster from which the scraper will collect metrics.

    " }, "tags":{ "shape":"TagMap", - "documentation":"

    Optional, user-provided tags for this scraper.

    " + "documentation":"

    (Optional) The list of tag keys and values to associate with the scraper.

    " } }, - "documentation":"

    Represents the input of a CreateScraper operation.

    " + "documentation":"

    Represents the input of a CreateScraper operation.

    " }, "CreateScraperResponse":{ "type":"structure", "required":[ - "scraperId", "arn", + "scraperId", "status" ], "members":{ - "scraperId":{ - "shape":"ScraperId", - "documentation":"

    The generated ID of the scraper that was just created.

    " - }, "arn":{ "shape":"ScraperArn", - "documentation":"

    The ARN of the scraper that was just created.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the new scraper.

    " + }, + "scraperId":{ + "shape":"ScraperId", + "documentation":"

    The ID of the new scraper.

    " }, "status":{ "shape":"ScraperStatus", - "documentation":"

    The status of the scraper that was just created (usually CREATING).

    " + "documentation":"

    A structure that displays the current status of the scraper.

    " }, "tags":{ "shape":"TagMap", - "documentation":"

    The tags of this scraper.

    " + "documentation":"

    The list of tag keys and values that are associated with the scraper.

    " } }, - "documentation":"

    Represents the output of a CreateScraper operation.

    " + "documentation":"

    Represents the output of a CreateScraper operation.

    " }, "CreateWorkspaceRequest":{ "type":"structure", "members":{ "alias":{ "shape":"WorkspaceAlias", - "documentation":"

    An optional user-assigned alias for this workspace. This alias is for user reference and does not need to be unique.

    " + "documentation":"

    An alias that you assign to this workspace to help you identify it. It does not need to be unique.

    Blank spaces at the beginning or end of the alias that you specify will be trimmed from the value used.

    " }, "clientToken":{ "shape":"IdempotencyToken", - "documentation":"

    Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

    ", + "documentation":"

    A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

    ", "idempotencyToken":true }, - "tags":{ - "shape":"TagMap", - "documentation":"

    Optional, user-provided tags for this workspace.

    " - }, "kmsKeyArn":{ "shape":"KmsKeyArn", - "documentation":"

    Optional, customer managed KMS key used to encrypt data for this workspace

    " + "documentation":"

    (optional) The ARN for a customer managed KMS key to use for encrypting data within your workspace. For more information about using your own key in your workspace, see Encryption at rest in the Amazon Managed Service for Prometheus User Guide.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    The list of tag keys and values to associate with the workspace.

    " } }, - "documentation":"

    Represents the input of a CreateWorkspace operation.

    " + "documentation":"

    Represents the input of a CreateWorkspace operation.

    " }, "CreateWorkspaceResponse":{ "type":"structure", "required":[ - "workspaceId", "arn", - "status" + "status", + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

    The generated ID of the workspace that was just created.

    " - }, "arn":{ "shape":"WorkspaceArn", - "documentation":"

    The ARN of the workspace that was just created.

    " + "documentation":"

    The ARN for the new workspace.

    " + }, + "kmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

    (optional) If the workspace was created with a customer managed KMS key, the ARN for the key used.

    " }, "status":{ "shape":"WorkspaceStatus", - "documentation":"

    The status of the workspace that was just created (usually CREATING).

    " + "documentation":"

    The current status of the new workspace. Immediately after you create the workspace, the status is usually CREATING.

    " }, "tags":{ "shape":"TagMap", - "documentation":"

    The tags of this workspace.

    " + "documentation":"

    The list of tag keys and values that are associated with the workspace.

    " }, - "kmsKeyArn":{ - "shape":"KmsKeyArn", - "documentation":"

    Customer managed KMS key ARN for this workspace

    " + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The unique ID for the new workspace.

    " } }, - "documentation":"

    Represents the output of a CreateWorkspace operation.

    " + "documentation":"

    Represents the output of a CreateWorkspace operation.

    " }, "DeleteAlertManagerDefinitionRequest":{ "type":"structure", "required":["workspaceId"], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

    The ID of the workspace in which to delete the alert manager definition.

    ", - "location":"uri", - "locationName":"workspaceId" - }, "clientToken":{ "shape":"IdempotencyToken", - "documentation":"

    Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

    ", + "documentation":"

    A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

    ", "idempotencyToken":true, "location":"querystring", "locationName":"clientToken" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace to delete the alert manager definition from.

    ", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

    Represents the input of a DeleteAlertManagerDefinition operation.

    " + "documentation":"

    Represents the input of a DeleteAlertManagerDefinition operation.

    " }, "DeleteLoggingConfigurationRequest":{ "type":"structure", "required":["workspaceId"], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

    The ID of the workspace to vend logs to.

    ", - "location":"uri", - "locationName":"workspaceId" - }, "clientToken":{ "shape":"IdempotencyToken", - "documentation":"

    Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

    ", + "documentation":"

    A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

    ", "idempotencyToken":true, "location":"querystring", "locationName":"clientToken" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace containing the logging configuration to delete.

    ", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

    Represents the input of a DeleteLoggingConfiguration operation.

    " + "documentation":"

    Represents the input of a DeleteLoggingConfiguration operation.

    " }, "DeleteRuleGroupsNamespaceRequest":{ "type":"structure", "required":[ - "workspaceId", - "name" + "name", + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

    The ID of the workspace to delete rule group definition.

    ", - "location":"uri", - "locationName":"workspaceId" + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

    ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" }, "name":{ "shape":"RuleGroupsNamespaceName", - "documentation":"

    The rule groups namespace name.

    ", + "documentation":"

    The name of the rule groups namespace to delete.

    ", "location":"uri", "locationName":"name" }, - "clientToken":{ - "shape":"IdempotencyToken", - "documentation":"

    Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

    ", - "idempotencyToken":true, - "location":"querystring", - "locationName":"clientToken" + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace containing the rule groups namespace and definition to delete.

    ", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

    Represents the input of a DeleteRuleGroupsNamespace operation.

    " + "documentation":"

    Represents the input of a DeleteRuleGroupsNamespace operation.

    " }, "DeleteScraperRequest":{ "type":"structure", "required":["scraperId"], "members":{ - "scraperId":{ - "shape":"ScraperId", - "documentation":"

    The ID of the scraper to delete.

    ", - "location":"uri", - "locationName":"scraperId" - }, "clientToken":{ "shape":"IdempotencyToken", - "documentation":"

    Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

    ", + "documentation":"

    (Optional) A unique, case-sensitive identifier that you can provide to ensure the idempotency of the request.

    ", "idempotencyToken":true, "location":"querystring", "locationName":"clientToken" + }, + "scraperId":{ + "shape":"ScraperId", + "documentation":"

    The ID of the scraper to delete.

    ", + "location":"uri", + "locationName":"scraperId" } }, - "documentation":"

    Represents the input of a DeleteScraper operation.

    " + "documentation":"

    Represents the input of a DeleteScraper operation.

    " }, "DeleteScraperResponse":{ "type":"structure", @@ -971,34 +971,34 @@ "members":{ "scraperId":{ "shape":"ScraperId", - "documentation":"

    The ID of the scraper that was deleted.

    " + "documentation":"

    The ID of the scraper to delete.

    " }, "status":{ "shape":"ScraperStatus", - "documentation":"

    The status of the scraper that is being deleted.

    " + "documentation":"

    The current status of the scraper.

    " } }, - "documentation":"

    Represents the output of a DeleteScraper operation.

    " + "documentation":"

    Represents the output of a DeleteScraper operation.

    " }, "DeleteWorkspaceRequest":{ "type":"structure", "required":["workspaceId"], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

    The ID of the workspace to delete.

    ", - "location":"uri", - "locationName":"workspaceId" - }, "clientToken":{ "shape":"IdempotencyToken", - "documentation":"

    Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

    ", + "documentation":"

    A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

    ", "idempotencyToken":true, "location":"querystring", "locationName":"clientToken" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace to delete.

    ", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

    Represents the input of a DeleteWorkspace operation.

    " + "documentation":"

    Represents the input of a DeleteWorkspace operation.

    " }, "DescribeAlertManagerDefinitionRequest":{ "type":"structure", @@ -1006,12 +1006,12 @@ "members":{ "workspaceId":{ "shape":"WorkspaceId", - "documentation":"

    The ID of the workspace to describe.

    ", + "documentation":"

    The ID of the workspace to retrieve the alert manager definition from.

    ", "location":"uri", "locationName":"workspaceId" } }, - "documentation":"

    Represents the input of a DescribeAlertManagerDefinition operation.

    " + "documentation":"

    Represents the input of a DescribeAlertManagerDefinition operation.

    " }, "DescribeAlertManagerDefinitionResponse":{ "type":"structure", @@ -1019,10 +1019,10 @@ "members":{ "alertManagerDefinition":{ "shape":"AlertManagerDefinitionDescription", - "documentation":"

    The properties of the selected workspace's alert manager definition.

    " + "documentation":"

    The alert manager definition.

    " } }, - "documentation":"

    Represents the output of a DescribeAlertManagerDefinition operation.

    " + "documentation":"

    Represents the output of a DescribeAlertManagerDefinition operation.

    " }, "DescribeLoggingConfigurationRequest":{ "type":"structure", @@ -1030,12 +1030,12 @@ "members":{ "workspaceId":{ "shape":"WorkspaceId", - "documentation":"

    The ID of the workspace to vend logs to.

    ", + "documentation":"

    The ID of the workspace to describe the logging configuration for.

    ", "location":"uri", "locationName":"workspaceId" } }, - "documentation":"

    Represents the input of a DescribeLoggingConfiguration operation.

    " + "documentation":"

    Represents the input of a DescribeLoggingConfiguration operation.

    " }, "DescribeLoggingConfigurationResponse":{ "type":"structure", @@ -1043,32 +1043,32 @@ "members":{ "loggingConfiguration":{ "shape":"LoggingConfigurationMetadata", - "documentation":"

    Metadata object containing information about the logging configuration of a workspace.

    " + "documentation":"

    A structure that displays the information about the logging configuration.

    " } }, - "documentation":"

    Represents the output of a DescribeLoggingConfiguration operation.

    " + "documentation":"

    Represents the output of a DescribeLoggingConfiguration operation.

    " }, "DescribeRuleGroupsNamespaceRequest":{ "type":"structure", "required":[ - "workspaceId", - "name" + "name", + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

    The ID of the workspace to describe.

    ", - "location":"uri", - "locationName":"workspaceId" - }, "name":{ "shape":"RuleGroupsNamespaceName", - "documentation":"

    The rule groups namespace.

    ", + "documentation":"

    The name of the rule groups namespace that you want information for.

    ", "location":"uri", "locationName":"name" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace containing the rule groups namespace.

    ", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

    Represents the input of a DescribeRuleGroupsNamespace operation.

    " + "documentation":"

    Represents the input of a DescribeRuleGroupsNamespace operation.

    " }, "DescribeRuleGroupsNamespaceResponse":{ "type":"structure", @@ -1076,10 +1076,10 @@ "members":{ "ruleGroupsNamespace":{ "shape":"RuleGroupsNamespaceDescription", - "documentation":"

    The selected rule groups namespace.

    " + "documentation":"

    The information about the rule groups namespace.

    " } }, - "documentation":"

    Represents the output of a DescribeRuleGroupsNamespace operation.

    " + "documentation":"

    Represents the output of a DescribeRuleGroupsNamespace operation.

    " }, "DescribeScraperRequest":{ "type":"structure", @@ -1087,12 +1087,12 @@ "members":{ "scraperId":{ "shape":"ScraperId", - "documentation":"

    The IDs of the scraper to describe.

    ", + "documentation":"

    The ID of the scraper to describe.

    ", "location":"uri", "locationName":"scraperId" } }, - "documentation":"

    Represents the input of a DescribeScraper operation.

    " + "documentation":"

    Represents the input of a DescribeScraper operation.

    " }, "DescribeScraperResponse":{ "type":"structure", @@ -1100,10 +1100,10 @@ "members":{ "scraper":{ "shape":"ScraperDescription", - "documentation":"

    The properties of the selected scrapers.

    " + "documentation":"

    Contains details about the scraper.

    " } }, - "documentation":"

    Represents the output of a DescribeScraper operation.

    " + "documentation":"

    Represents the output of a DescribeScraper operation.

    " }, "DescribeWorkspaceRequest":{ "type":"structure", @@ -1116,7 +1116,7 @@ "locationName":"workspaceId" } }, - "documentation":"

    Represents the input of a DescribeWorkspace operation.

    " + "documentation":"

    Represents the input of a DescribeWorkspace operation.

    " }, "DescribeWorkspaceResponse":{ "type":"structure", @@ -1124,20 +1124,20 @@ "members":{ "workspace":{ "shape":"WorkspaceDescription", - "documentation":"

    The properties of the selected workspace.

    " + "documentation":"

    A structure that contains details about the workspace.

    " } }, - "documentation":"

    Represents the output of a DescribeWorkspace operation.

    " + "documentation":"

    Represents the output of a DescribeWorkspace operation.

    " }, "Destination":{ "type":"structure", "members":{ "ampConfiguration":{ "shape":"AmpConfiguration", - "documentation":"

    A representation of an AMP destination.

    " + "documentation":"

    The Amazon Managed Service for Prometheusworkspace to send metrics to.

    " } }, - "documentation":"

    A representation of a destination that a scraper can produce metrics to.

    ", + "documentation":"

    Where to send the metrics from a scraper.

    ", "union":true }, "EksConfiguration":{ @@ -1149,28 +1149,28 @@ "members":{ "clusterArn":{ "shape":"ClusterArn", - "documentation":"

    The ARN of an EKS cluster.

    " + "documentation":"

    ARN of the Amazon EKS cluster.

    " }, "securityGroupIds":{ "shape":"SecurityGroupIds", - "documentation":"

    A list of security group IDs specified for VPC configuration.

    " + "documentation":"

    A list of the security group IDs for the Amazon EKS cluster VPC configuration.

    " }, "subnetIds":{ "shape":"SubnetIds", - "documentation":"

    A list of subnet IDs specified for VPC configuration.

    " + "documentation":"

    A list of subnet IDs for the Amazon EKS cluster VPC configuration.

    " } }, - "documentation":"

    A representation of an EKS source.

    " + "documentation":"

    The EksConfiguration structure describes the connection to the Amazon EKS cluster from which a scraper collects metrics.

    " }, "FilterKey":{ "type":"string", - "documentation":"

    The name of the key to filter by.

    ", + "documentation":"

    The name of the key by which to filter.

    ", "max":256, "min":1 }, "FilterValue":{ "type":"string", - "documentation":"

    The value of a given key to filter by.

    ", + "documentation":"

    The value for a given key by which to filter.

    ", "max":256, "min":1 }, @@ -1185,7 +1185,7 @@ "type":"structure", "members":{ }, - "documentation":"

    Represents the input of a GetDefaultScraperConfiguration operation.

    " + "documentation":"

    Represents the input of a GetDefaultScraperConfiguration operation.

    " }, "GetDefaultScraperConfigurationResponse":{ "type":"structure", @@ -1193,10 +1193,10 @@ "members":{ "configuration":{ "shape":"Blob", - "documentation":"

    The default configuration.

    " + "documentation":"

    The configuration file. Base 64 encoded. For more information, see Scraper configurationin the Amazon Managed Service for Prometheus User Guide.

    " } }, - "documentation":"

    Represents the output of a GetDefaultScraperConfiguration operation.

    " + "documentation":"

    Represents the output of a GetDefaultScraperConfiguration operation.

    " }, "IamRoleArn":{ "type":"string", @@ -1207,7 +1207,7 @@ "documentation":"

    An identifier used to ensure the idempotency of a write request.

    ", "max":64, "min":1, - "pattern":"[!-~]+" + "pattern":"^[!-~]+$" }, "Integer":{ "type":"integer", @@ -1228,7 +1228,7 @@ "locationName":"Retry-After" } }, - "documentation":"

    Unexpected error during processing of request.

    ", + "documentation":"

    An unexpected error occurred during the processing of the request.

    ", "error":{"httpStatusCode":500}, "exception":true, "fault":true, @@ -1239,38 +1239,38 @@ "documentation":"

    A KMS Key ARN.

    ", "max":2048, "min":20, - "pattern":"arn:aws:kms:[a-z0-9\\-]+:\\d+:key/[a-f0-9\\-]+" + "pattern":"^arn:aws:kms:[a-z0-9\\-]+:\\d+:key/[a-f0-9\\-]+$" }, "ListRuleGroupsNamespacesRequest":{ "type":"structure", "required":["workspaceId"], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

    The ID of the workspace.

    ", - "location":"uri", - "locationName":"workspaceId" + "maxResults":{ + "shape":"ListRuleGroupsNamespacesRequestMaxResultsInteger", + "documentation":"

    The maximum number of results to return. The default is 100.

    ", + "location":"querystring", + "locationName":"maxResults" }, "name":{ "shape":"RuleGroupsNamespaceName", - "documentation":"

    Optional filter for rule groups namespace name. Only the rule groups namespace that begin with this value will be returned.

    ", + "documentation":"

    Use this parameter to filter the rule groups namespaces that are returned. Only the namespaces with names that begin with the value that you specify are returned.

    ", "location":"querystring", "locationName":"name" }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

    Pagination token to request the next page in a paginated list. This token is obtained from the output of the previous ListRuleGroupsNamespaces request.

    ", + "documentation":"

    The token for the next set of items to return. You receive this token from a previous call, and use it to get the next page of results. The other parameters must be the same as the initial call.

    For example, if your initial request has maxResults of 10, and there are 12 rule groups namespaces to return, then your initial request will return 10 and a nextToken. Using the next token in a subsequent call will return the remaining 2 namespaces.

    ", "location":"querystring", "locationName":"nextToken" }, - "maxResults":{ - "shape":"ListRuleGroupsNamespacesRequestMaxResultsInteger", - "documentation":"

    Maximum results to return in response (default=100, maximum=1000).

    ", - "location":"querystring", - "locationName":"maxResults" + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace containing the rule groups namespaces.

    ", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

    Represents the input of a ListRuleGroupsNamespaces operation.

    " + "documentation":"

    Represents the input of a ListRuleGroupsNamespaces operation.

    " }, "ListRuleGroupsNamespacesRequestMaxResultsInteger":{ "type":"integer", @@ -1282,39 +1282,39 @@ "type":"structure", "required":["ruleGroupsNamespaces"], "members":{ - "ruleGroupsNamespaces":{ - "shape":"RuleGroupsNamespaceSummaryList", - "documentation":"

    The list of the selected rule groups namespaces.

    " - }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

    Pagination token to use when requesting the next page in this list.

    " + "documentation":"

    A token indicating that there are more results to retrieve. You can use this token as part of your next ListRuleGroupsNamespaces request to retrieve those results.

    " + }, + "ruleGroupsNamespaces":{ + "shape":"RuleGroupsNamespaceSummaryList", + "documentation":"

    The returned list of rule groups namespaces.

    " } }, - "documentation":"

    Represents the output of a ListRuleGroupsNamespaces operation.

    " + "documentation":"

    Represents the output of a ListRuleGroupsNamespaces operation.

    " }, "ListScrapersRequest":{ "type":"structure", "members":{ "filters":{ "shape":"ScraperFilters", - "documentation":"

    A list of scraper filters.

    ", + "documentation":"

    (Optional) A list of key-value pairs to filter the list of scrapers returned. Keys include status, sourceArn, destinationArn, and alias.

    Filters on the same key are OR'd together, and filters on different keys are AND'd together. For example, status=ACTIVE&status=CREATING&alias=Test, will return all scrapers that have the alias Test, and are either in status ACTIVE or CREATING.

    To find all active scrapers that are sending metrics to a specific Amazon Managed Service for Prometheus workspace, you would use the ARN of the workspace in a query:

    status=ACTIVE&destinationArn=arn:aws:aps:us-east-1:123456789012:workspace/ws-example1-1234-abcd-56ef-123456789012

    If this is included, it filters the results to only the scrapers that match the filter.

    ", "location":"querystring" }, - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

    Pagination token to request the next page in a paginated list. This token is obtained from the output of the previous ListScrapers request.

    ", - "location":"querystring", - "locationName":"nextToken" - }, "maxResults":{ "shape":"ListScrapersRequestMaxResultsInteger", - "documentation":"

    Maximum results to return in response (default=100, maximum=1000).

    ", + "documentation":"

    Optional) The maximum number of scrapers to return in one ListScrapers operation. The range is 1-1000.

    If you omit this parameter, the default of 100 is used.

    ", "location":"querystring", "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    (Optional) The token for the next set of items to return. (You received this token from a previous call.)

    ", + "location":"querystring", + "locationName":"nextToken" } }, - "documentation":"

    Represents the input of a ListScrapers operation.

    " + "documentation":"

    Represents the input of a ListScrapers operation.

    " }, "ListScrapersRequestMaxResultsInteger":{ "type":"integer", @@ -1326,16 +1326,16 @@ "type":"structure", "required":["scrapers"], "members":{ - "scrapers":{ - "shape":"ScraperSummaryList", - "documentation":"

    The list of scrapers, filtered down if a set of filters was provided in the request.

    " - }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

    Pagination token to use when requesting the next page in this list.

    " + "documentation":"

    A token indicating that there are more results to retrieve. You can use this token as part of your next ListScrapers operation to retrieve those results.

    " + }, + "scrapers":{ + "shape":"ScraperSummaryList", + "documentation":"

    A list of ScraperSummary structures giving information about scrapers in the account that match the filters provided.

    " } }, - "documentation":"

    Represents the output of a ListScrapers operation.

    " + "documentation":"

    Represents the output of a ListScrapers operation.

    " }, "ListTagsForResourceRequest":{ "type":"structure", @@ -1343,7 +1343,7 @@ "members":{ "resourceArn":{ "shape":"String", - "documentation":"

    The ARN of the resource.

    ", + "documentation":"

    The ARN of the resource to list tages for. Must be a workspace or rule groups namespace resource.

    ", "location":"uri", "locationName":"resourceArn" } @@ -1352,32 +1352,35 @@ "ListTagsForResourceResponse":{ "type":"structure", "members":{ - "tags":{"shape":"TagMap"} + "tags":{ + "shape":"TagMap", + "documentation":"

    The list of tag keys and values associated with the resource.

    " + } } }, "ListWorkspacesRequest":{ "type":"structure", "members":{ - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

    Pagination token to request the next page in a paginated list. This token is obtained from the output of the previous ListWorkspaces request.

    ", - "location":"querystring", - "locationName":"nextToken" - }, "alias":{ "shape":"WorkspaceAlias", - "documentation":"

    Optional filter for workspace alias. Only the workspaces with aliases that begin with this value will be returned.

    ", + "documentation":"

    If this is included, it filters the results to only the workspaces with names that start with the value that you specify here.

    Amazon Managed Service for Prometheus will automatically strip any blank spaces from the beginning and end of the alias that you specify.

    ", "location":"querystring", "locationName":"alias" }, "maxResults":{ "shape":"ListWorkspacesRequestMaxResultsInteger", - "documentation":"

    Maximum results to return in response (default=100, maximum=1000).

    ", + "documentation":"

    The maximum number of workspaces to return per request. The default is 100.

    ", "location":"querystring", "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token for the next set of items to return. You receive this token from a previous call, and use it to get the next page of results. The other parameters must be the same as the initial call.

    For example, if your initial request has maxResults of 10, and there are 12 workspaces to return, then your initial request will return 10 and a nextToken. Using the next token in a subsequent call will return the remaining 2 workspaces.

    ", + "location":"querystring", + "locationName":"nextToken" } }, - "documentation":"

    Represents the input of a ListWorkspaces operation.

    " + "documentation":"

    Represents the input of a ListWorkspaces operation.

    " }, "ListWorkspacesRequestMaxResultsInteger":{ "type":"integer", @@ -1389,53 +1392,53 @@ "type":"structure", "required":["workspaces"], "members":{ - "workspaces":{ - "shape":"WorkspaceSummaryList", - "documentation":"

    The list of existing workspaces, including those undergoing creation or deletion.

    " - }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

    Pagination token to use when requesting the next page in this list.

    " + "documentation":"

    A token indicating that there are more results to retrieve. You can use this token as part of your next ListWorkspaces request to retrieve those results.

    " + }, + "workspaces":{ + "shape":"WorkspaceSummaryList", + "documentation":"

    An array of WorkspaceSummary structures containing information about the workspaces requested.

    " } }, - "documentation":"

    Represents the output of a ListWorkspaces operation.

    " + "documentation":"

    Represents the output of a ListWorkspaces operation.

    " }, "LogGroupArn":{ "type":"string", - "pattern":"arn:aws[a-z0-9-]*:logs:[a-z0-9-]+:\\d{12}:log-group:[A-Za-z0-9\\.\\-\\_\\#/]{1,512}\\:\\*" + "pattern":"^arn:aws[a-z0-9-]*:logs:[a-z0-9-]+:\\d{12}:log-group:[A-Za-z0-9\\.\\-\\_\\#/]{1,512}\\:\\*$" }, "LoggingConfigurationMetadata":{ "type":"structure", "required":[ - "status", - "workspace", - "logGroupArn", "createdAt", - "modifiedAt" + "logGroupArn", + "modifiedAt", + "status", + "workspace" ], "members":{ - "status":{ - "shape":"LoggingConfigurationStatus", - "documentation":"

    The status of the logging configuration.

    " - }, - "workspace":{ - "shape":"WorkspaceId", - "documentation":"

    The workspace where the logging configuration exists.

    " - }, - "logGroupArn":{ - "shape":"LogGroupArn", - "documentation":"

    The ARN of the CW log group to which the vended log data will be published.

    " - }, "createdAt":{ "shape":"Timestamp", - "documentation":"

    The time when the logging configuration was created.

    " + "documentation":"

    The date and time that the logging configuration was created.

    " + }, + "logGroupArn":{ + "shape":"LogGroupArn", + "documentation":"

    The ARN of the CloudWatch log group to which the vended log data will be published.

    " }, "modifiedAt":{ "shape":"Timestamp", - "documentation":"

    The time when the logging configuration was modified.

    " + "documentation":"

    The date and time that the logging configuration was most recently changed.

    " + }, + "status":{ + "shape":"LoggingConfigurationStatus", + "documentation":"

    The current status of the logging configuration.

    " + }, + "workspace":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace the logging configuration is for.

    " } }, - "documentation":"

    Represents the properties of a logging configuration metadata.

    " + "documentation":"

    Contains information about the logging configuration.

    " }, "LoggingConfigurationStatus":{ "type":"structure", @@ -1443,14 +1446,14 @@ "members":{ "statusCode":{ "shape":"LoggingConfigurationStatusCode", - "documentation":"

    Status code of the logging configuration.

    " + "documentation":"

    The current status of the logging configuration.

    " }, "statusReason":{ "shape":"String", - "documentation":"

    The reason for failure if any.

    " + "documentation":"

    If failed, the reason for the failure.

    " } }, - "documentation":"

    Represents the status of a logging configuration.

    " + "documentation":"

    The status of the logging configuration.

    " }, "LoggingConfigurationStatusCode":{ "type":"string", @@ -1473,27 +1476,27 @@ "PutAlertManagerDefinitionRequest":{ "type":"structure", "required":[ - "workspaceId", - "data" + "data", + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

    The ID of the workspace in which to update the alert manager definition.

    ", - "location":"uri", - "locationName":"workspaceId" + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

    ", + "idempotencyToken":true }, "data":{ "shape":"AlertManagerDefinitionData", - "documentation":"

    The alert manager definition data.

    " + "documentation":"

    The alert manager definition to use. A base64-encoded version of the YAML alert manager definition file.

    For details about the alert manager definition, see AlertManagerDefinitionData.

    " }, - "clientToken":{ - "shape":"IdempotencyToken", - "documentation":"

    Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

    ", - "idempotencyToken":true + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace to update the alert manager definition in.

    ", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

    Represents the input of a PutAlertManagerDefinition operation.

    " + "documentation":"

    Represents the input of a PutAlertManagerDefinition operation.

    " }, "PutAlertManagerDefinitionResponse":{ "type":"structure", @@ -1501,69 +1504,69 @@ "members":{ "status":{ "shape":"AlertManagerDefinitionStatus", - "documentation":"

    The status of alert manager definition.

    " + "documentation":"

    A structure that returns the current status of the alert manager definition.

    " } }, - "documentation":"

    Represents the output of a PutAlertManagerDefinition operation.

    " + "documentation":"

    Represents the output of a PutAlertManagerDefinition operation.

    " }, "PutRuleGroupsNamespaceRequest":{ "type":"structure", "required":[ - "workspaceId", + "data", "name", - "data" + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

    The ID of the workspace in which to update the rule group namespace.

    ", - "location":"uri", - "locationName":"workspaceId" + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

    ", + "idempotencyToken":true + }, + "data":{ + "shape":"RuleGroupsNamespaceData", + "documentation":"

    The new rules file to use in the namespace. A base64-encoded version of the YAML rule groups file.

    For details about the rule groups namespace structure, see RuleGroupsNamespaceData.

    " }, "name":{ "shape":"RuleGroupsNamespaceName", - "documentation":"

    The rule groups namespace name.

    ", + "documentation":"

    The name of the rule groups namespace that you are updating.

    ", "location":"uri", "locationName":"name" }, - "data":{ - "shape":"RuleGroupsNamespaceData", - "documentation":"

    The namespace data that define the rule groups.

    " - }, - "clientToken":{ - "shape":"IdempotencyToken", - "documentation":"

    Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

    ", - "idempotencyToken":true + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace where you are updating the rule groups namespace.

    ", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

    Represents the input of a PutRuleGroupsNamespace operation.

    " + "documentation":"

    Represents the input of a PutRuleGroupsNamespace operation.

    " }, "PutRuleGroupsNamespaceResponse":{ "type":"structure", "required":[ - "name", "arn", + "name", "status" ], "members":{ - "name":{ - "shape":"RuleGroupsNamespaceName", - "documentation":"

    The rule groups namespace name.

    " - }, "arn":{ "shape":"RuleGroupsNamespaceArn", - "documentation":"

    The Amazon Resource Name (ARN) of this rule groups namespace.

    " + "documentation":"

    The ARN of the rule groups namespace.

    " + }, + "name":{ + "shape":"RuleGroupsNamespaceName", + "documentation":"

    The name of the rule groups namespace that was updated.

    " }, "status":{ "shape":"RuleGroupsNamespaceStatus", - "documentation":"

    The status of rule groups namespace.

    " + "documentation":"

    A structure that includes the current status of the rule groups namespace.

    " }, "tags":{ "shape":"TagMap", - "documentation":"

    The tags of this rule groups namespace.

    " + "documentation":"

    The list of tag keys and values that are associated with the namespace.

    " } }, - "documentation":"

    Represents the output of a PutRuleGroupsNamespace operation.

    " + "documentation":"

    Represents the output of a PutRuleGroupsNamespace operation.

    " }, "ResourceNotFoundException":{ "type":"structure", @@ -1586,7 +1589,7 @@ "documentation":"

    Type of the resource affected.

    " } }, - "documentation":"

    Request references a resource which does not exist.

    ", + "documentation":"

    The request references a resource that doesn't exist.

    ", "error":{ "httpStatusCode":404, "senderFault":true @@ -1605,50 +1608,50 @@ "type":"structure", "required":[ "arn", - "name", - "status", - "data", "createdAt", - "modifiedAt" + "data", + "modifiedAt", + "name", + "status" ], "members":{ "arn":{ "shape":"RuleGroupsNamespaceArn", - "documentation":"

    The Amazon Resource Name (ARN) of this rule groups namespace.

    " - }, - "name":{ - "shape":"RuleGroupsNamespaceName", - "documentation":"

    The rule groups namespace name.

    " + "documentation":"

    The ARN of the rule groups namespace.

    " }, - "status":{ - "shape":"RuleGroupsNamespaceStatus", - "documentation":"

    The status of rule groups namespace.

    " + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the rule groups namespace was created.

    " }, "data":{ "shape":"RuleGroupsNamespaceData", - "documentation":"

    The rule groups namespace data.

    " - }, - "createdAt":{ - "shape":"Timestamp", - "documentation":"

    The time when the rule groups namespace was created.

    " + "documentation":"

    The rule groups file used in the namespace.

    For details about the rule groups namespace structure, see RuleGroupsNamespaceData.

    " }, "modifiedAt":{ "shape":"Timestamp", - "documentation":"

    The time when the rule groups namespace was modified.

    " + "documentation":"

    The date and time that the rule groups namespace was most recently changed.

    " + }, + "name":{ + "shape":"RuleGroupsNamespaceName", + "documentation":"

    The name of the rule groups namespace.

    " + }, + "status":{ + "shape":"RuleGroupsNamespaceStatus", + "documentation":"

    The current status of the rule groups namespace.

    " }, "tags":{ "shape":"TagMap", - "documentation":"

    The tags of this rule groups namespace.

    " + "documentation":"

    The list of tag keys and values that are associated with the rule groups namespace.

    " } }, - "documentation":"

    Represents a description of the rule groups namespace.

    " + "documentation":"

    The details about one rule groups namespace.

    " }, "RuleGroupsNamespaceName":{ "type":"string", - "documentation":"

    The namespace name that the rule group belong to.

    ", + "documentation":"

    The name of the namespace that the rule group belongs to.

    ", "max":64, "min":1, - "pattern":".*[0-9A-Za-z][-.0-9A-Z_a-z]*.*" + "pattern":"[0-9A-Za-z][-.0-9A-Z_a-z]*" }, "RuleGroupsNamespaceStatus":{ "type":"structure", @@ -1656,14 +1659,14 @@ "members":{ "statusCode":{ "shape":"RuleGroupsNamespaceStatusCode", - "documentation":"

    Status code of this namespace.

    " + "documentation":"

    The current status of the namespace.

    " }, "statusReason":{ "shape":"String", - "documentation":"

    The reason for failure if any.

    " + "documentation":"

    The reason for the failure, if any.

    " } }, - "documentation":"

    Represents the status of a namespace.

    " + "documentation":"

    The status information about a rule groups namespace.

    " }, "RuleGroupsNamespaceStatusCode":{ "type":"string", @@ -1681,38 +1684,38 @@ "type":"structure", "required":[ "arn", - "name", - "status", "createdAt", - "modifiedAt" + "modifiedAt", + "name", + "status" ], "members":{ "arn":{ "shape":"RuleGroupsNamespaceArn", - "documentation":"

    The Amazon Resource Name (ARN) of this rule groups namespace.

    " - }, - "name":{ - "shape":"RuleGroupsNamespaceName", - "documentation":"

    The rule groups namespace name.

    " - }, - "status":{ - "shape":"RuleGroupsNamespaceStatus", - "documentation":"

    The status of rule groups namespace.

    " + "documentation":"

    The ARN of the rule groups namespace.

    " }, "createdAt":{ "shape":"Timestamp", - "documentation":"

    The time when the rule groups namespace was created.

    " + "documentation":"

    The date and time that the rule groups namespace was created.

    " }, "modifiedAt":{ "shape":"Timestamp", - "documentation":"

    The time when the rule groups namespace was modified.

    " + "documentation":"

    The date and time that the rule groups namespace was most recently changed.

    " + }, + "name":{ + "shape":"RuleGroupsNamespaceName", + "documentation":"

    The name of the rule groups namespace.

    " + }, + "status":{ + "shape":"RuleGroupsNamespaceStatus", + "documentation":"

    A structure that displays the current status of the rule groups namespace.

    " }, "tags":{ "shape":"TagMap", - "documentation":"

    The tags of this rule groups namespace.

    " + "documentation":"

    The list of tag keys and values that are associated with the rule groups namespace.

    " } }, - "documentation":"

    Represents a summary of the rule groups namespace.

    " + "documentation":"

    The high-level information about a rule groups namespace. To retrieve more information, use DescribeRuleGroupsNamespace.

    " }, "RuleGroupsNamespaceSummaryList":{ "type":"list", @@ -1724,10 +1727,10 @@ "members":{ "configurationBlob":{ "shape":"Blob", - "documentation":"

    Binary data representing a Prometheus configuration file.

    " + "documentation":"

    The base 64 encoded scrape configuration file.

    " } }, - "documentation":"

    A representation of a Prometheus configuration file.

    ", + "documentation":"

    A scrape configuration for a scraper, base 64 encoded. For more information, see Scraper configuration in the Amazon Managed Service for Prometheus User Guide.

    ", "union":true }, "ScraperAlias":{ @@ -1735,7 +1738,7 @@ "documentation":"

    A user-assigned scraper alias.

    ", "max":100, "min":1, - "pattern":"[0-9A-Za-z][-.0-9A-Z_a-z]*" + "pattern":"^[0-9A-Za-z][-.0-9A-Z_a-z]*$" }, "ScraperArn":{ "type":"string", @@ -1744,77 +1747,77 @@ "ScraperDescription":{ "type":"structure", "required":[ - "scraperId", "arn", - "roleArn", - "status", "createdAt", + "destination", "lastModifiedAt", + "roleArn", "scrapeConfiguration", + "scraperId", "source", - "destination" + "status" ], "members":{ "alias":{ "shape":"ScraperAlias", - "documentation":"

    Alias of this scraper.

    " - }, - "scraperId":{ - "shape":"ScraperId", - "documentation":"

    Unique string identifying this scraper.

    " + "documentation":"

    (Optional) A name associated with the scraper.

    " }, "arn":{ "shape":"ScraperArn", - "documentation":"

    The Amazon Resource Name (ARN) of this scraper.

    " - }, - "roleArn":{ - "shape":"IamRoleArn", - "documentation":"

    The Amazon Resource Name (ARN) of the IAM role that provides permissions for the scraper to dsicover, collect, and produce metrics on your behalf.

    " - }, - "status":{ - "shape":"ScraperStatus", - "documentation":"

    The status of this scraper.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the scraper.

    " }, "createdAt":{ "shape":"Timestamp", - "documentation":"

    The time when the scraper was created.

    " + "documentation":"

    The date and time that the scraper was created.

    " + }, + "destination":{ + "shape":"Destination", + "documentation":"

    The Amazon Managed Service for Prometheus workspace the scraper sends metrics to.

    " }, "lastModifiedAt":{ "shape":"Timestamp", - "documentation":"

    The time when the scraper was last modified.

    " - }, - "tags":{ - "shape":"TagMap", - "documentation":"

    The tags of this scraper.

    " + "documentation":"

    The date and time that the scraper was last modified.

    " }, - "statusReason":{ - "shape":"StatusReason", - "documentation":"

    The reason for failure if any.

    " + "roleArn":{ + "shape":"IamRoleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role that provides permissions for the scraper to discover and collect metrics on your behalf.

    " }, "scrapeConfiguration":{ "shape":"ScrapeConfiguration", - "documentation":"

    The configuration used to create the scraper.

    " + "documentation":"

    The configuration file in use by the scraper.

    " + }, + "scraperId":{ + "shape":"ScraperId", + "documentation":"

    The ID of the scraper.

    " }, "source":{ "shape":"Source", - "documentation":"

    The source that the scraper is discovering and collecting metrics from.

    " + "documentation":"

    The Amazon EKS cluster from which the scraper collects metrics.

    " }, - "destination":{ - "shape":"Destination", - "documentation":"

    The destination that the scraper is producing metrics to.

    " + "status":{ + "shape":"ScraperStatus", + "documentation":"

    A structure that contains the current status of the scraper.

    " + }, + "statusReason":{ + "shape":"StatusReason", + "documentation":"

    If there is a failure, the reason for the failure.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    (Optional) The list of tag keys and values associated with the scraper.

    " } }, - "documentation":"

    Represents the properties of a scraper.

    " + "documentation":"

    The ScraperDescription structure contains the full details about one scraper in your account.

    " }, "ScraperFilters":{ "type":"map", "key":{ "shape":"FilterKey", - "documentation":"

    The name of the key to filter by. Currently supported filter keys are 'status', 'sourceArn', 'destinationArn', and 'alias'.

    " + "documentation":"

    The name of the key to filter by. Currently supported filter keys are status, sourceArn, destinationArn, and alias.

    " }, "value":{ "shape":"FilterValues", - "documentation":"

    The values of the given key to filter by.

    " + "documentation":"

    The values of the given key by which to filter.

    " }, "documentation":"

    A list of scraper filters.

    ", "max":4, @@ -1825,7 +1828,7 @@ "documentation":"

    A scraper ID.

    ", "max":64, "min":1, - "pattern":"[0-9A-Za-z][-.0-9A-Z_a-z]*" + "pattern":"^[0-9A-Za-z][-.0-9A-Z_a-z]*$" }, "ScraperStatus":{ "type":"structure", @@ -1833,10 +1836,10 @@ "members":{ "statusCode":{ "shape":"ScraperStatusCode", - "documentation":"

    Status code of this scraper.

    " + "documentation":"

    The current status of the scraper.

    " } }, - "documentation":"

    Represents the status of a scraper.

    " + "documentation":"

    The ScraperStatus structure contains status information about the scraper.

    " }, "ScraperStatusCode":{ "type":"string", @@ -1852,62 +1855,62 @@ "ScraperSummary":{ "type":"structure", "required":[ - "scraperId", "arn", - "roleArn", - "status", "createdAt", + "destination", "lastModifiedAt", + "roleArn", + "scraperId", "source", - "destination" + "status" ], "members":{ "alias":{ "shape":"ScraperAlias", - "documentation":"

    Alias of this scraper.

    " - }, - "scraperId":{ - "shape":"ScraperId", - "documentation":"

    Unique string identifying this scraper.

    " + "documentation":"

    (Optional) A name associated with the scraper.

    " }, "arn":{ "shape":"ScraperArn", - "documentation":"

    The Amazon Resource Name (ARN) of this scraper.

    " - }, - "roleArn":{ - "shape":"IamRoleArn", - "documentation":"

    The Amazon Resource Name (ARN) of the IAM role that provides permissions for the scraper to dsicover, collect, and produce metrics on your behalf.

    " - }, - "status":{ - "shape":"ScraperStatus", - "documentation":"

    The status of this scraper.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the scraper.

    " }, "createdAt":{ "shape":"Timestamp", - "documentation":"

    The time when the scraper was created.

    " + "documentation":"

    The date and time that the scraper was created.

    " + }, + "destination":{ + "shape":"Destination", + "documentation":"

    The Amazon Managed Service for Prometheus workspace the scraper sends metrics to.

    " }, "lastModifiedAt":{ "shape":"Timestamp", - "documentation":"

    The time when the scraper was last modified.

    " + "documentation":"

    The date and time that the scraper was last modified.

    " }, - "tags":{ - "shape":"TagMap", - "documentation":"

    The tags of this scraper.

    " + "roleArn":{ + "shape":"IamRoleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role that provides permissions for the scraper to discover and collect metrics on your behalf.

    " }, - "statusReason":{ - "shape":"StatusReason", - "documentation":"

    The reason for failure if any.

    " + "scraperId":{ + "shape":"ScraperId", + "documentation":"

    The ID of the scraper.

    " }, "source":{ "shape":"Source", - "documentation":"

    The source that the scraper is discovering and collecting metrics from.

    " + "documentation":"

    The Amazon EKS cluster from which the scraper collects metrics.

    " }, - "destination":{ - "shape":"Destination", - "documentation":"

    The destination that the scraper is producing metrics to.

    " + "status":{ + "shape":"ScraperStatus", + "documentation":"

    A structure that contains the current status of the scraper.

    " + }, + "statusReason":{ + "shape":"StatusReason", + "documentation":"

    If there is a failure, the reason for the failure.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    (Optional) The list of tag keys and values associated with the scraper.

    " } }, - "documentation":"

    Represents a summary of the properties of a scraper.

    " + "documentation":"

    The ScraperSummary structure contains a summary of the details about one scraper in your account.

    " }, "ScraperSummaryList":{ "type":"list", @@ -1919,7 +1922,7 @@ "documentation":"

    ID of a VPC security group.

    ", "max":255, "min":0, - "pattern":"sg-[0-9a-z]+" + "pattern":"^sg-[0-9a-z]+$" }, "SecurityGroupIds":{ "type":"list", @@ -1932,16 +1935,20 @@ "type":"structure", "required":[ "message", + "quotaCode", "resourceId", "resourceType", - "serviceCode", - "quotaCode" + "serviceCode" ], "members":{ "message":{ "shape":"String", "documentation":"

    Description of the error.

    " }, + "quotaCode":{ + "shape":"String", + "documentation":"

    Service quotas code of the originating quota.

    " + }, "resourceId":{ "shape":"String", "documentation":"

    Identifier of the resource affected.

    " @@ -1952,14 +1959,10 @@ }, "serviceCode":{ "shape":"String", - "documentation":"

    Service Quotas requirement to identify originating service.

    " - }, - "quotaCode":{ - "shape":"String", - "documentation":"

    Service Quotas requirement to identify originating quota.

    " + "documentation":"

    Service quotas code for the originating service.

    " } }, - "documentation":"

    Request would cause a service quota to be exceeded.

    ", + "documentation":"

    Completing the request would cause a service quota to be exceeded.

    ", "error":{ "httpStatusCode":402, "senderFault":true @@ -1971,15 +1974,15 @@ "members":{ "eksConfiguration":{ "shape":"EksConfiguration", - "documentation":"

    A representation of an EKS source.

    " + "documentation":"

    The Amazon EKS cluster from which a scraper collects metrics.

    " } }, - "documentation":"

    A representation of a source that a scraper can discover and collect metrics from.

    ", + "documentation":"

    The source of collected metrics for a scraper.

    ", "union":true }, "StatusReason":{ "type":"string", - "documentation":"

    The reason for failure if any.

    ", + "documentation":"

    The reason for the failure, if any.

    ", "max":256, "min":1 }, @@ -1989,7 +1992,7 @@ "documentation":"

    ID of a VPC subnet.

    ", "max":255, "min":0, - "pattern":"subnet-[0-9a-z]+" + "pattern":"^subnet-[0-9a-z]+$" }, "SubnetIds":{ "type":"list", @@ -2002,7 +2005,7 @@ "type":"string", "max":128, "min":1, - "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)" + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, "TagKeys":{ "type":"list", @@ -2012,11 +2015,11 @@ "type":"map", "key":{ "shape":"TagKey", - "documentation":"

    The key of the tag.

    Constraints: Tag keys are case-sensitive and accept a maximum of 128 Unicode characters. Valid characters are Unicode letters, digits, white space, and any of the following symbols: _ . : / = + - @ May not begin with aws:.

    " + "documentation":"

    The key of the tag. May not begin with aws:.

    " }, "value":{ "shape":"TagValue", - "documentation":"

    The value of the tag.

    Constraints: Tag values are case-sensitive and accept a maximum of 256 Unicode characters. Valid characters are Unicode letters, digits, white space, and any of the following symbols: _ . : / = + - @

    " + "documentation":"

    The value of the tag.

    " }, "documentation":"

    The list of tags assigned to the resource.

    ", "max":50, @@ -2031,11 +2034,14 @@ "members":{ "resourceArn":{ "shape":"String", - "documentation":"

    The ARN of the resource.

    ", + "documentation":"

    The ARN of the workspace or rule groups namespace to apply tags to.

    ", "location":"uri", "locationName":"resourceArn" }, - "tags":{"shape":"TagMap"} + "tags":{ + "shape":"TagMap", + "documentation":"

    The list of tag keys and values to associate with the resource.

    Keys may not begin with aws:.

    " + } } }, "TagResourceResponse":{ @@ -2047,7 +2053,7 @@ "type":"string", "max":256, "min":0, - "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)" + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, "ThrottlingException":{ "type":"structure", @@ -2057,22 +2063,22 @@ "shape":"String", "documentation":"

    Description of the error.

    " }, - "serviceCode":{ - "shape":"String", - "documentation":"

    Service Quotas requirement to identify originating service.

    " - }, "quotaCode":{ "shape":"String", - "documentation":"

    Service Quotas requirement to identify originating quota.

    " + "documentation":"

    Service quotas code for the originating quota.

    " }, "retryAfterSeconds":{ "shape":"Integer", "documentation":"

    Advice to clients on when the call can be safely retried.

    ", "location":"header", "locationName":"Retry-After" + }, + "serviceCode":{ + "shape":"String", + "documentation":"

    Service quotas code for the originating service.

    " } }, - "documentation":"

    Request was denied due to request throttling.

    ", + "documentation":"

    The request was denied due to request throttling.

    ", "error":{ "httpStatusCode":429, "senderFault":true @@ -2090,13 +2096,13 @@ "members":{ "resourceArn":{ "shape":"String", - "documentation":"

    The ARN of the resource.

    ", + "documentation":"

    The ARN of the workspace or rule groups namespace.

    ", "location":"uri", "locationName":"resourceArn" }, "tagKeys":{ "shape":"TagKeys", - "documentation":"

    One or more tag keys

    ", + "documentation":"

    The keys of the tags to remove.

    ", "location":"querystring", "locationName":"tagKeys" } @@ -2110,27 +2116,27 @@ "UpdateLoggingConfigurationRequest":{ "type":"structure", "required":[ - "workspaceId", - "logGroupArn" + "logGroupArn", + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

    The ID of the workspace to vend logs to.

    ", - "location":"uri", - "locationName":"workspaceId" + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

    ", + "idempotencyToken":true }, "logGroupArn":{ "shape":"LogGroupArn", - "documentation":"

    The ARN of the CW log group to which the vended log data will be published.

    " + "documentation":"

    The ARN of the CloudWatch log group to which the vended log data will be published.

    " }, - "clientToken":{ - "shape":"IdempotencyToken", - "documentation":"

    Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

    ", - "idempotencyToken":true + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace to update the logging configuration for.

    ", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

    Represents the input of an UpdateLoggingConfiguration operation.

    " + "documentation":"

    Represents the input of an UpdateLoggingConfiguration operation.

    " }, "UpdateLoggingConfigurationResponse":{ "type":"structure", @@ -2138,32 +2144,32 @@ "members":{ "status":{ "shape":"LoggingConfigurationStatus", - "documentation":"

    The status of the logging configuration.

    " + "documentation":"

    A structure that contains the current status of the logging configuration.

    " } }, - "documentation":"

    Represents the output of an UpdateLoggingConfiguration operation.

    " + "documentation":"

    Represents the output of an UpdateLoggingConfiguration operation.

    " }, "UpdateWorkspaceAliasRequest":{ "type":"structure", "required":["workspaceId"], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

    The ID of the workspace being updated.

    ", - "location":"uri", - "locationName":"workspaceId" - }, "alias":{ "shape":"WorkspaceAlias", - "documentation":"

    The new alias of the workspace.

    " + "documentation":"

    The new alias for the workspace. It does not need to be unique.

    Amazon Managed Service for Prometheus will automatically strip any blank spaces from the beginning and end of the alias that you specify.

    " }, "clientToken":{ "shape":"IdempotencyToken", - "documentation":"

    Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

    ", + "documentation":"

    A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

    ", "idempotencyToken":true + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace to update.

    ", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

    Represents the input of an UpdateWorkspaceAlias operation.

    " + "documentation":"

    Represents the input of an UpdateWorkspaceAlias operation.

    " }, "Uri":{ "type":"string", @@ -2177,6 +2183,10 @@ "reason" ], "members":{ + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

    The field that caused the error, if applicable.

    " + }, "message":{ "shape":"String", "documentation":"

    Description of the error.

    " @@ -2184,13 +2194,9 @@ "reason":{ "shape":"ValidationExceptionReason", "documentation":"

    Reason the request failed validation.

    " - }, - "fieldList":{ - "shape":"ValidationExceptionFieldList", - "documentation":"

    The field that caused the error, if applicable. If more than one field caused the error, pick one and elaborate in the message.

    " } }, - "documentation":"

    The input fails to satisfy the constraints specified by an AWS service.

    ", + "documentation":"

    The input fails to satisfy the constraints specified by an Amazon Web Services service.

    ", "error":{ "httpStatusCode":400, "senderFault":true @@ -2200,20 +2206,20 @@ "ValidationExceptionField":{ "type":"structure", "required":[ - "name", - "message" + "message", + "name" ], "members":{ - "name":{ + "message":{ "shape":"String", - "documentation":"

    The field name.

    " + "documentation":"

    A message describing why the field caused an exception.

    " }, - "message":{ + "name":{ "shape":"String", - "documentation":"

    Message describing why the field failed validation.

    " + "documentation":"

    The name of the field that caused an exception.

    " } }, - "documentation":"

    Stores information about a field passed inside a request that resulted in an exception.

    " + "documentation":"

    Information about a field passed into a request that resulted in an exception.

    " }, "ValidationExceptionFieldList":{ "type":"list", @@ -2239,58 +2245,58 @@ "WorkspaceArn":{ "type":"string", "documentation":"

    An ARN identifying a Workspace.

    ", - "pattern":"arn:aws[-a-z]*:aps:[-a-z0-9]+:[0-9]{12}:workspace/.+" + "pattern":"^arn:aws[-a-z]*:aps:[-a-z0-9]+:[0-9]{12}:workspace/.+$" }, "WorkspaceDescription":{ "type":"structure", "required":[ - "workspaceId", "arn", + "createdAt", "status", - "createdAt" + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

    Unique string identifying this workspace.

    " - }, "alias":{ "shape":"WorkspaceAlias", - "documentation":"

    Alias of this workspace.

    " + "documentation":"

    The alias that is assigned to this workspace to help identify it. It may not be unique.

    " }, "arn":{ "shape":"WorkspaceArn", - "documentation":"

    The Amazon Resource Name (ARN) of this workspace.

    " + "documentation":"

    The ARN of the workspace.

    " }, - "status":{ - "shape":"WorkspaceStatus", - "documentation":"

    The status of this workspace.

    " + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the workspace was created.

    " + }, + "kmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

    (optional) If the workspace was created with a customer managed KMS key, the ARN for the key used.

    " }, "prometheusEndpoint":{ "shape":"Uri", - "documentation":"

    Prometheus endpoint URI.

    " + "documentation":"

    The Prometheus endpoint available for this workspace.

    " }, - "createdAt":{ - "shape":"Timestamp", - "documentation":"

    The time when the workspace was created.

    " + "status":{ + "shape":"WorkspaceStatus", + "documentation":"

    The current status of the workspace.

    " }, "tags":{ "shape":"TagMap", - "documentation":"

    The tags of this workspace.

    " + "documentation":"

    The list of tag keys and values that are associated with the workspace.

    " }, - "kmsKeyArn":{ - "shape":"KmsKeyArn", - "documentation":"

    The customer managed KMS key of this workspace.

    " + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The unique ID for the workspace.

    " } }, - "documentation":"

    Represents the properties of a workspace.

    " + "documentation":"

    The full details about one Amazon Managed Service for Prometheus workspace in your account.

    " }, "WorkspaceId":{ "type":"string", "documentation":"

    A workspace ID.

    ", "max":64, "min":1, - "pattern":".*[0-9A-Za-z][-.0-9A-Z_a-z]*.*" + "pattern":"[0-9A-Za-z][-.0-9A-Z_a-z]*" }, "WorkspaceStatus":{ "type":"structure", @@ -2298,10 +2304,10 @@ "members":{ "statusCode":{ "shape":"WorkspaceStatusCode", - "documentation":"

    Status code of this workspace.

    " + "documentation":"

    The current status of the workspace.

    " } }, - "documentation":"

    Represents the status of a workspace.

    " + "documentation":"

    The status of the workspace.

    " }, "WorkspaceStatusCode":{ "type":"string", @@ -2317,42 +2323,42 @@ "WorkspaceSummary":{ "type":"structure", "required":[ - "workspaceId", "arn", + "createdAt", "status", - "createdAt" + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

    Unique string identifying this workspace.

    " - }, "alias":{ "shape":"WorkspaceAlias", - "documentation":"

    Alias of this workspace.

    " + "documentation":"

    The alias that is assigned to this workspace to help identify it. It may not be unique.

    " }, "arn":{ "shape":"WorkspaceArn", - "documentation":"

    The AmazonResourceName of this workspace.

    " - }, - "status":{ - "shape":"WorkspaceStatus", - "documentation":"

    The status of this workspace.

    " + "documentation":"

    The ARN of the workspace.

    " }, "createdAt":{ "shape":"Timestamp", - "documentation":"

    The time when the workspace was created.

    " + "documentation":"

    The date and time that the workspace was created.

    " + }, + "kmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

    (optional) If the workspace was created with a customer managed KMS key, the ARN for the key used.

    " + }, + "status":{ + "shape":"WorkspaceStatus", + "documentation":"

    The current status of the workspace.

    " }, "tags":{ "shape":"TagMap", - "documentation":"

    The tags of this workspace.

    " + "documentation":"

    The list of tag keys and values that are associated with the workspace.

    " }, - "kmsKeyArn":{ - "shape":"KmsKeyArn", - "documentation":"

    Customer managed KMS key ARN for this workspace

    " + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The unique ID for the workspace.

    " } }, - "documentation":"

    Represents a summary of the properties of a workspace.

    " + "documentation":"

    The information about one Amazon Managed Service for Prometheus workspace in your account.

    " }, "WorkspaceSummaryList":{ "type":"list", @@ -2360,5 +2366,5 @@ "documentation":"

    A list of workspace summaries.

    " } }, - "documentation":"

    Amazon Managed Service for Prometheus

    " + "documentation":"

    Amazon Managed Service for Prometheus is a serverless, Prometheus-compatible monitoring service for container metrics that makes it easier to securely monitor container environments at scale. With Amazon Managed Service for Prometheus, you can use the same open-source Prometheus data model and query language that you use today to monitor the performance of your containerized workloads, and also enjoy improved scalability, availability, and security without having to manage the underlying infrastructure.

    For more information about Amazon Managed Service for Prometheus, see the Amazon Managed Service for Prometheus User Guide.

    Amazon Managed Service for Prometheus includes two APIs.

    • Use the Amazon Web Services API described in this guide to manage Amazon Managed Service for Prometheus resources, such as workspaces, rule groups, and alert managers.

    • Use the Prometheus-compatible API to work within your Prometheus workspace.

    " } diff --git a/services/amplify/pom.xml b/services/amplify/pom.xml index 185d6b1be49f..9de8099b0ecd 100644 --- a/services/amplify/pom.xml +++ b/services/amplify/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT amplify AWS Java SDK :: Services :: Amplify diff --git a/services/amplify/src/main/resources/codegen-resources/customization.config b/services/amplify/src/main/resources/codegen-resources/customization.config index 9f9ccd14e9ed..bc376ac406f4 100644 --- a/services/amplify/src/main/resources/codegen-resources/customization.config +++ b/services/amplify/src/main/resources/codegen-resources/customization.config @@ -2,5 +2,6 @@ "verifiedSimpleMethods": [ "listApps" ], - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/amplify/src/main/resources/codegen-resources/service-2.json b/services/amplify/src/main/resources/codegen-resources/service-2.json index 60c5a3ad949a..a1cbbbb3b86d 100644 --- a/services/amplify/src/main/resources/codegen-resources/service-2.json +++ b/services/amplify/src/main/resources/codegen-resources/service-2.json @@ -97,7 +97,7 @@ {"shape":"LimitExceededException"}, {"shape":"DependentServiceFailureException"} ], - "documentation":"

    Creates a new domain association for an Amplify app. This action associates a custom domain with the Amplify app

    " + "documentation":"

    Creates a new domain association for an Amplify app. This action associates a custom domain with the Amplify app

    " }, "CreateWebhook":{ "name":"CreateWebhook", @@ -183,7 +183,7 @@ {"shape":"InternalFailureException"}, {"shape":"DependentServiceFailureException"} ], - "documentation":"

    Deletes a domain association for an Amplify app.

    " + "documentation":"

    Deletes a domain association for an Amplify app.

    " }, "DeleteJob":{ "name":"DeleteJob", @@ -314,7 +314,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalFailureException"} ], - "documentation":"

    Returns the domain information for an Amplify app.

    " + "documentation":"

    Returns the domain information for an Amplify app.

    " }, "GetJob":{ "name":"GetJob", @@ -424,7 +424,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalFailureException"} ], - "documentation":"

    Returns the domain associations for an Amplify app.

    " + "documentation":"

    Returns the domain associations for an Amplify app.

    " }, "ListJobs":{ "name":"ListJobs", @@ -602,7 +602,7 @@ {"shape":"InternalFailureException"}, {"shape":"DependentServiceFailureException"} ], - "documentation":"

    Creates a new domain association for an Amplify app.

    " + "documentation":"

    Creates a new domain association for an Amplify app.

    " }, "UpdateWebhook":{ "name":"UpdateWebhook", @@ -1112,6 +1112,53 @@ "pattern":"(?s).+", "sensitive":true }, + "Certificate":{ + "type":"structure", + "required":["type"], + "members":{ + "type":{ + "shape":"CertificateType", + "documentation":"

    The type of SSL/TLS certificate that you want to use.

    Specify AMPLIFY_MANAGED to use the default certificate that Amplify provisions for you.

    Specify CUSTOM to use your own certificate that you have already added to Certificate Manager in your Amazon Web Services account. Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). For more information about using ACM, see Importing certificates into Certificate Manager in the ACM User guide .

    " + }, + "customCertificateArn":{ + "shape":"CertificateArn", + "documentation":"

    The Amazon resource name (ARN) for a custom certificate that you have already added to Certificate Manager in your Amazon Web Services account.

    This field is required only when the certificate type is CUSTOM.

    " + }, + "certificateVerificationDNSRecord":{ + "shape":"CertificateVerificationDNSRecord", + "documentation":"

    The DNS record for certificate verification.

    " + } + }, + "documentation":"

    Describes the current SSL/TLS certificate that is in use for the domain. If you are using CreateDomainAssociation to create a new domain association, Certificate describes the new certificate that you are creating.

    " + }, + "CertificateArn":{ + "type":"string", + "max":1000, + "min":0, + "pattern":"^arn:aws:acm:[a-z0-9-]+:\\d{12}:certificate\\/.+$" + }, + "CertificateSettings":{ + "type":"structure", + "required":["type"], + "members":{ + "type":{ + "shape":"CertificateType", + "documentation":"

    The certificate type.

    Specify AMPLIFY_MANAGED to use the default certificate that Amplify provisions for you.

    Specify CUSTOM to use your own certificate that you have already added to Certificate Manager in your Amazon Web Services account. Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). For more information about using ACM, see Importing certificates into Certificate Manager in the ACM User guide.

    " + }, + "customCertificateArn":{ + "shape":"CertificateArn", + "documentation":"

    The Amazon resource name (ARN) for the custom certificate that you have already added to Certificate Manager in your Amazon Web Services account.

    This field is required only when the certificate type is CUSTOM.

    " + } + }, + "documentation":"

    The type of SSL/TLS certificate to use for your custom domain. If a certificate type isn't specified, Amplify uses the default AMPLIFY_MANAGED certificate.

    " + }, + "CertificateType":{ + "type":"string", + "enum":[ + "AMPLIFY_MANAGED", + "CUSTOM" + ] + }, "CertificateVerificationDNSRecord":{ "type":"string", "max":1000 @@ -1444,6 +1491,10 @@ "autoSubDomainIAMRole":{ "shape":"AutoSubDomainIAMRole", "documentation":"

    The required AWS Identity and Access Management (IAM) service role for the Amazon Resource Name (ARN) for automatically creating subdomains.

    " + }, + "certificateSettings":{ + "shape":"CertificateSettings", + "documentation":"

    The type of SSL/TLS certificate to use for your custom domain. If you don't specify a certificate type, Amplify uses the default certificate that it provisions and manages for you.

    " } }, "documentation":"

    The request structure for the create domain association request.

    " @@ -1527,7 +1578,7 @@ }, "status":{ "shape":"Status", - "documentation":"

    The status code for a URL rewrite or redirect rule.

    200

    Represents a 200 rewrite rule.

    301

    Represents a 301 (moved pemanently) redirect rule. This and all future requests should be directed to the target URL.

    302

    Represents a 302 temporary redirect rule.

    404

    Represents a 404 redirect rule.

    404-200

    Represents a 404 rewrite rule.

    " + "documentation":"

    The status code for a URL rewrite or redirect rule.

    200

    Represents a 200 rewrite rule.

    301

    Represents a 301 (moved permanently) redirect rule. This and all future requests should be directed to the target URL.

    302

    Represents a 302 temporary redirect rule.

    404

    Represents a 404 redirect rule.

    404-200

    Represents a 404 rewrite rule.

    " }, "condition":{ "shape":"Condition", @@ -1786,9 +1837,13 @@ "shape":"DomainStatus", "documentation":"

    The current status of the domain association.

    " }, + "updateStatus":{ + "shape":"UpdateStatus", + "documentation":"

    The status of the domain update operation that is currently in progress. The following list describes the valid update states.

    REQUESTING_CERTIFICATE

    The certificate is in the process of being updated.

    PENDING_VERIFICATION

    Indicates that an Amplify managed certificate is in the process of being verified. This occurs during the creation of a custom domain or when a custom domain is updated to use a managed certificate.

    IMPORTING_CUSTOM_CERTIFICATE

    Indicates that an Amplify custom certificate is in the process of being imported. This occurs during the creation of a custom domain or when a custom domain is updated to use a custom certificate.

    PENDING_DEPLOYMENT

    Indicates that the subdomain or certificate changes are being propagated.

    AWAITING_APP_CNAME

    Amplify is waiting for CNAME records corresponding to subdomains to be propagated. If your custom domain is on Route 53, Amplify handles this for you automatically. For more information about custom domains, see Setting up custom domains in the Amplify Hosting User Guide.

    UPDATE_COMPLETE

    The certificate has been associated with a domain.

    UPDATE_FAILED

    The certificate has failed to be provisioned or associated, and there is no existing active certificate to roll back to.

    " + }, "statusReason":{ "shape":"StatusReason", - "documentation":"

    The reason for the current status of the domain association.

    " + "documentation":"

    Additional information that describes why the domain association is in the current state.

    " }, "certificateVerificationDNSRecord":{ "shape":"CertificateVerificationDNSRecord", @@ -1797,9 +1852,13 @@ "subDomains":{ "shape":"SubDomains", "documentation":"

    The subdomains for the domain association.

    " + }, + "certificate":{ + "shape":"Certificate", + "documentation":"

    Describes the SSL/TLS certificate for the domain association. This can be your own custom certificate or the default certificate that Amplify provisions for you.

    If you are updating your domain to use a different certificate, certificate points to the new certificate that is being created instead of the current active certificate. Otherwise, certificate points to the current active certificate.

    " } }, - "documentation":"

    Describes a domain association that associates a custom domain with an Amplify app.

    " + "documentation":"

    Describes the association between a custom domain and an Amplify app.

    " }, "DomainAssociationArn":{ "type":"string", @@ -1826,7 +1885,9 @@ "PENDING_VERIFICATION", "IN_PROGRESS", "AVAILABLE", + "IMPORTING_CUSTOM_CERTIFICATE", "PENDING_DEPLOYMENT", + "AWAITING_APP_CNAME", "FAILED", "CREATING", "REQUESTING_CERTIFICATE", @@ -3354,6 +3415,10 @@ "autoSubDomainIAMRole":{ "shape":"AutoSubDomainIAMRole", "documentation":"

    The required AWS Identity and Access Management (IAM) service role for the Amazon Resource Name (ARN) for automatically creating subdomains.

    " + }, + "certificateSettings":{ + "shape":"CertificateSettings", + "documentation":"

    The type of SSL/TLS certificate to use for your custom domain.

    " } }, "documentation":"

    The request structure for the update domain association request.

    " @@ -3369,6 +3434,18 @@ }, "documentation":"

    The result structure for the update domain association request.

    " }, + "UpdateStatus":{ + "type":"string", + "enum":[ + "REQUESTING_CERTIFICATE", + "PENDING_VERIFICATION", + "IMPORTING_CUSTOM_CERTIFICATE", + "PENDING_DEPLOYMENT", + "AWAITING_APP_CNAME", + "UPDATE_COMPLETE", + "UPDATE_FAILED" + ] + }, "UpdateTime":{"type":"timestamp"}, "UpdateWebhookRequest":{ "type":"structure", diff --git a/services/amplifybackend/pom.xml b/services/amplifybackend/pom.xml index 9400558b52c8..bf51eea8211b 100644 --- a/services/amplifybackend/pom.xml +++ b/services/amplifybackend/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT amplifybackend AWS Java SDK :: Services :: Amplify Backend diff --git a/services/amplifybackend/src/main/resources/codegen-resources/customization.config b/services/amplifybackend/src/main/resources/codegen-resources/customization.config index fb50d0a7a6eb..49a9cfc3d510 100644 --- a/services/amplifybackend/src/main/resources/codegen-resources/customization.config +++ b/services/amplifybackend/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/amplifyuibuilder/pom.xml b/services/amplifyuibuilder/pom.xml index f4ea24d118d8..d613c00a380c 100644 --- a/services/amplifyuibuilder/pom.xml +++ b/services/amplifyuibuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT amplifyuibuilder AWS Java SDK :: Services :: Amplify UI Builder diff --git a/services/amplifyuibuilder/src/main/resources/codegen-resources/customization.config b/services/amplifyuibuilder/src/main/resources/codegen-resources/customization.config index fb50d0a7a6eb..49a9cfc3d510 100644 --- a/services/amplifyuibuilder/src/main/resources/codegen-resources/customization.config +++ b/services/amplifyuibuilder/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "useSraAuth": true + "useSraAuth": true, + 
"enableGenerateCompiledEndpointRules": true } diff --git a/services/apigateway/pom.xml b/services/apigateway/pom.xml index 9d5dfb8d4fbf..16ba614715ac 100644 --- a/services/apigateway/pom.xml +++ b/services/apigateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT apigateway AWS Java SDK :: Services :: Amazon API Gateway diff --git a/services/apigatewaymanagementapi/pom.xml b/services/apigatewaymanagementapi/pom.xml index e20615a1aa28..5065cafe5651 100644 --- a/services/apigatewaymanagementapi/pom.xml +++ b/services/apigatewaymanagementapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT apigatewaymanagementapi AWS Java SDK :: Services :: ApiGatewayManagementApi diff --git a/services/apigatewaymanagementapi/src/main/resources/codegen-resources/customization.config b/services/apigatewaymanagementapi/src/main/resources/codegen-resources/customization.config index fb50d0a7a6eb..49a9cfc3d510 100644 --- a/services/apigatewaymanagementapi/src/main/resources/codegen-resources/customization.config +++ b/services/apigatewaymanagementapi/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/apigatewayv2/pom.xml b/services/apigatewayv2/pom.xml index b53f4973c81f..da2828414f69 100644 --- a/services/apigatewayv2/pom.xml +++ b/services/apigatewayv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT apigatewayv2 AWS Java SDK :: Services :: ApiGatewayV2 diff --git a/services/appconfig/pom.xml b/services/appconfig/pom.xml index 387c60f7528a..7a3fb310717e 100644 --- a/services/appconfig/pom.xml +++ b/services/appconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT appconfig AWS Java SDK :: Services :: AppConfig diff --git 
a/services/appconfig/src/main/resources/codegen-resources/customization.config b/services/appconfig/src/main/resources/codegen-resources/customization.config index fb50d0a7a6eb..49a9cfc3d510 100644 --- a/services/appconfig/src/main/resources/codegen-resources/customization.config +++ b/services/appconfig/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/appconfigdata/pom.xml b/services/appconfigdata/pom.xml index 754e37e51cee..c1aa78a560db 100644 --- a/services/appconfigdata/pom.xml +++ b/services/appconfigdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT appconfigdata AWS Java SDK :: Services :: App Config Data diff --git a/services/appfabric/pom.xml b/services/appfabric/pom.xml index 9fe9fb442f57..be4d2ec62829 100644 --- a/services/appfabric/pom.xml +++ b/services/appfabric/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT appfabric AWS Java SDK :: Services :: App Fabric diff --git a/services/appfabric/src/main/resources/codegen-resources/customization.config b/services/appfabric/src/main/resources/codegen-resources/customization.config index fb50d0a7a6eb..49a9cfc3d510 100644 --- a/services/appfabric/src/main/resources/codegen-resources/customization.config +++ b/services/appfabric/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/appflow/pom.xml b/services/appflow/pom.xml index ae946fa6a1e6..db202be668cf 100644 --- a/services/appflow/pom.xml +++ b/services/appflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT appflow AWS Java SDK :: Services :: Appflow diff --git a/services/appflow/src/main/resources/codegen-resources/customization.config 
b/services/appflow/src/main/resources/codegen-resources/customization.config index fb50d0a7a6eb..49a9cfc3d510 100644 --- a/services/appflow/src/main/resources/codegen-resources/customization.config +++ b/services/appflow/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/appintegrations/pom.xml b/services/appintegrations/pom.xml index b7ae3798f413..7540749b8dbe 100644 --- a/services/appintegrations/pom.xml +++ b/services/appintegrations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT appintegrations AWS Java SDK :: Services :: App Integrations diff --git a/services/appintegrations/src/main/resources/codegen-resources/customization.config b/services/appintegrations/src/main/resources/codegen-resources/customization.config index fb50d0a7a6eb..49a9cfc3d510 100644 --- a/services/appintegrations/src/main/resources/codegen-resources/customization.config +++ b/services/appintegrations/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/applicationautoscaling/pom.xml b/services/applicationautoscaling/pom.xml index 0f020e3ce40b..aa01d93ec737 100644 --- a/services/applicationautoscaling/pom.xml +++ b/services/applicationautoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT applicationautoscaling AWS Java SDK :: Services :: AWS Application Auto Scaling diff --git a/services/applicationautoscaling/src/main/resources/codegen-resources/customization.config b/services/applicationautoscaling/src/main/resources/codegen-resources/customization.config index fb50d0a7a6eb..49a9cfc3d510 100644 --- a/services/applicationautoscaling/src/main/resources/codegen-resources/customization.config +++ 
b/services/applicationautoscaling/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/applicationcostprofiler/pom.xml b/services/applicationcostprofiler/pom.xml index e3fea198abcd..64906aeb4265 100644 --- a/services/applicationcostprofiler/pom.xml +++ b/services/applicationcostprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT applicationcostprofiler AWS Java SDK :: Services :: Application Cost Profiler diff --git a/services/applicationcostprofiler/src/main/resources/codegen-resources/customization.config b/services/applicationcostprofiler/src/main/resources/codegen-resources/customization.config index fb50d0a7a6eb..49a9cfc3d510 100644 --- a/services/applicationcostprofiler/src/main/resources/codegen-resources/customization.config +++ b/services/applicationcostprofiler/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/applicationdiscovery/pom.xml b/services/applicationdiscovery/pom.xml index 89dd4504fa23..702ec666a5d3 100644 --- a/services/applicationdiscovery/pom.xml +++ b/services/applicationdiscovery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT applicationdiscovery AWS Java SDK :: Services :: AWS Application Discovery Service diff --git a/services/applicationdiscovery/src/main/resources/codegen-resources/customization.config b/services/applicationdiscovery/src/main/resources/codegen-resources/customization.config index b3b1890cad0c..739c1aacf1e3 100644 --- a/services/applicationdiscovery/src/main/resources/codegen-resources/customization.config +++ b/services/applicationdiscovery/src/main/resources/codegen-resources/customization.config @@ -17,5 +17,6 @@ "DescribeExportConfigurations", 
"ExportConfigurations" ], - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/applicationinsights/pom.xml b/services/applicationinsights/pom.xml index ba17454d1162..c7230e7ab004 100644 --- a/services/applicationinsights/pom.xml +++ b/services/applicationinsights/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT applicationinsights AWS Java SDK :: Services :: Application Insights diff --git a/services/applicationinsights/src/main/resources/codegen-resources/customization.config b/services/applicationinsights/src/main/resources/codegen-resources/customization.config index fb50d0a7a6eb..49a9cfc3d510 100644 --- a/services/applicationinsights/src/main/resources/codegen-resources/customization.config +++ b/services/applicationinsights/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/appmesh/pom.xml b/services/appmesh/pom.xml index 608ffb98a2b2..4df8d055b0d0 100644 --- a/services/appmesh/pom.xml +++ b/services/appmesh/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT appmesh AWS Java SDK :: Services :: App Mesh diff --git a/services/apprunner/pom.xml b/services/apprunner/pom.xml index 5a64aeeb2ef3..0e38ed7194b7 100644 --- a/services/apprunner/pom.xml +++ b/services/apprunner/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT apprunner AWS Java SDK :: Services :: App Runner diff --git a/services/appstream/pom.xml b/services/appstream/pom.xml index 952e5e615cb0..4a5c4370180f 100644 --- a/services/appstream/pom.xml +++ b/services/appstream/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT appstream AWS Java SDK :: Services :: Amazon AppStream diff --git a/services/appsync/pom.xml 
b/services/appsync/pom.xml index d59460d86e31..574ea593965e 100644 --- a/services/appsync/pom.xml +++ b/services/appsync/pom.xml @@ -21,7 +21,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT appsync diff --git a/services/appsync/src/main/resources/codegen-resources/service-2.json b/services/appsync/src/main/resources/codegen-resources/service-2.json index 9ab4223b3025..2b24207403fe 100644 --- a/services/appsync/src/main/resources/codegen-resources/service-2.json +++ b/services/appsync/src/main/resources/codegen-resources/service-2.json @@ -146,7 +146,8 @@ {"shape":"ConcurrentModificationException"}, {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"}, - {"shape":"InternalFailureException"} + {"shape":"InternalFailureException"}, + {"shape":"BadRequestException"} ], "documentation":"

    Creates a Function object.

    A function is a reusable entity. You can use multiple functions to compose the resolver logic.

    " }, @@ -281,7 +282,8 @@ {"shape":"ConcurrentModificationException"}, {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"}, - {"shape":"InternalFailureException"} + {"shape":"InternalFailureException"}, + {"shape":"BadRequestException"} ], "documentation":"

    Deletes a Function.

    " }, @@ -1008,7 +1010,8 @@ {"shape":"ConcurrentModificationException"}, {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"}, - {"shape":"InternalFailureException"} + {"shape":"InternalFailureException"}, + {"shape":"BadRequestException"} ], "documentation":"

    Updates a Function object.

    " }, @@ -1166,6 +1169,10 @@ "status":{ "shape":"ApiCacheStatus", "documentation":"

    The cache instance status.

    • AVAILABLE: The instance is available for use.

    • CREATING: The instance is currently creating.

    • DELETING: The instance is currently deleting.

    • MODIFYING: The instance is currently modifying.

    • FAILED: The instance has failed creation.

    " + }, + "healthMetricsConfig":{ + "shape":"CacheHealthMetricsConfig", + "documentation":"

    Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:

    • NetworkBandwidthOutAllowanceExceeded: The network packets dropped because the throughput exceeded the aggregated bandwidth limit. This is useful for diagnosing bottlenecks in a cache configuration.

    • EngineCPUUtilization: The CPU utilization (percentage) allocated to the Redis process. This is useful for diagnosing bottlenecks in a cache configuration.

    Metrics will be recorded by API ID. You can set the value to ENABLED or DISABLED.

    " } }, "documentation":"

    The ApiCache object.

    " @@ -1458,6 +1465,13 @@ "Blob":{"type":"blob"}, "Boolean":{"type":"boolean"}, "BooleanValue":{"type":"boolean"}, + "CacheHealthMetricsConfig":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "CachingConfig":{ "type":"structure", "required":["ttl"], @@ -1618,6 +1632,10 @@ "type":{ "shape":"ApiCacheType", "documentation":"

    The cache instance type. Valid values are

    • SMALL

    • MEDIUM

    • LARGE

    • XLARGE

    • LARGE_2X

    • LARGE_4X

    • LARGE_8X (not available in all regions)

    • LARGE_12X

    Historically, instance types were identified by an EC2-style value. As of July 2020, this is deprecated, and the generic identifiers above should be used.

    The following legacy instance types are available, but their use is discouraged:

    • T2_SMALL: A t2.small instance type.

    • T2_MEDIUM: A t2.medium instance type.

    • R4_LARGE: A r4.large instance type.

    • R4_XLARGE: A r4.xlarge instance type.

    • R4_2XLARGE: A r4.2xlarge instance type.

    • R4_4XLARGE: A r4.4xlarge instance type.

    • R4_8XLARGE: A r4.8xlarge instance type.

    " + }, + "healthMetricsConfig":{ + "shape":"CacheHealthMetricsConfig", + "documentation":"

    Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:

    • NetworkBandwidthOutAllowanceExceeded: The number of times a specified GraphQL operation was called.

    • EngineCPUUtilization: The CPU utilization (percentage) allocated to the Redis process. This is useful for diagnosing bottlenecks in a cache configuration.

    Metrics will be recorded by API ID. You can set the value to ENABLED or DISABLED.

    " } }, "documentation":"

    Represents the input of a CreateApiCache operation.

    " @@ -1718,6 +1736,10 @@ "eventBridgeConfig":{ "shape":"EventBridgeDataSourceConfig", "documentation":"

    Amazon EventBridge settings.

    " + }, + "metricsConfig":{ + "shape":"DataSourceLevelMetricsConfig", + "documentation":"

    Enables or disables enhanced data source metrics for specified data sources. Note that metricsConfig won't be used unless the dataSourceLevelMetricsBehavior value is set to PER_DATA_SOURCE_METRICS. If the dataSourceLevelMetricsBehavior is set to FULL_REQUEST_DATA_SOURCE_METRICS instead, metricsConfig will be ignored. However, you can still set its value.

    metricsConfig can be ENABLED or DISABLED.

    " } } }, @@ -1889,6 +1911,10 @@ "resolverCountLimit":{ "shape":"ResolverCountLimit", "documentation":"

    The maximum number of resolvers that can be invoked in a single request. The default value is 0 (or unspecified), which will set the limit to 10000. When specified, the limit value can be between 1 and 10000. This field will produce a limit error if the operation falls out of bounds.

    " + }, + "enhancedMetricsConfig":{ + "shape":"EnhancedMetricsConfig", + "documentation":"

    The enhancedMetricsConfig object.

    " } } }, @@ -1961,6 +1987,10 @@ "code":{ "shape":"Code", "documentation":"

    The resolver code that contains the request and response functions. When code is used, the runtime is required. The runtime value must be APPSYNC_JS.

    " + }, + "metricsConfig":{ + "shape":"ResolverLevelMetricsConfig", + "documentation":"

    Enables or disables enhanced resolver metrics for specified resolvers. Note that metricsConfig won't be used unless the resolverLevelMetricsBehavior value is set to PER_RESOLVER_METRICS. If the resolverLevelMetricsBehavior is set to FULL_REQUEST_RESOLVER_METRICS instead, metricsConfig will be ignored. However, you can still set its value.

    metricsConfig can be ENABLED or DISABLED.

    " } } }, @@ -2056,6 +2086,10 @@ "eventBridgeConfig":{ "shape":"EventBridgeDataSourceConfig", "documentation":"

    Amazon EventBridge settings.

    " + }, + "metricsConfig":{ + "shape":"DataSourceLevelMetricsConfig", + "documentation":"

    Enables or disables enhanced data source metrics for specified data sources. Note that metricsConfig won't be used unless the dataSourceLevelMetricsBehavior value is set to PER_DATA_SOURCE_METRICS. If the dataSourceLevelMetricsBehavior is set to FULL_REQUEST_DATA_SOURCE_METRICS instead, metricsConfig will be ignored. However, you can still set its value.

    metricsConfig can be ENABLED or DISABLED.

    " } }, "documentation":"

    Describes a data source.

    " @@ -2182,6 +2216,20 @@ "SUCCESS" ] }, + "DataSourceLevelMetricsBehavior":{ + "type":"string", + "enum":[ + "FULL_REQUEST_DATA_SOURCE_METRICS", + "PER_DATA_SOURCE_METRICS" + ] + }, + "DataSourceLevelMetricsConfig":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "DataSourceType":{ "type":"string", "enum":[ @@ -2582,6 +2630,29 @@ }, "documentation":"

    Describes an OpenSearch data source configuration.

    As of September 2021, Amazon Elasticsearch service is Amazon OpenSearch Service. This configuration is deprecated. For new data sources, use OpenSearchServiceDataSourceConfig to specify an OpenSearch data source.

    " }, + "EnhancedMetricsConfig":{ + "type":"structure", + "required":[ + "resolverLevelMetricsBehavior", + "dataSourceLevelMetricsBehavior", + "operationLevelMetricsConfig" + ], + "members":{ + "resolverLevelMetricsBehavior":{ + "shape":"ResolverLevelMetricsBehavior", + "documentation":"

    Controls how resolver metrics will be emitted to CloudWatch. Resolver metrics include:

    • GraphQL errors: The number of GraphQL errors that occurred.

    • Requests: The number of invocations that occurred during a request.

    • Latency: The time to complete a resolver invocation.

    • Cache hits: The number of cache hits during a request.

    • Cache misses: The number of cache misses during a request.

    These metrics can be emitted to CloudWatch per resolver or for all resolvers in the request. Metrics will be recorded by API ID and resolver name. resolverLevelMetricsBehavior accepts one of these values at a time:

    • FULL_REQUEST_RESOLVER_METRICS: Records and emits metric data for all resolvers in the request.

    • PER_RESOLVER_METRICS: Records and emits metric data for resolvers that have the metricConfig value set to ENABLED.

    " + }, + "dataSourceLevelMetricsBehavior":{ + "shape":"DataSourceLevelMetricsBehavior", + "documentation":"

    Controls how data source metrics will be emitted to CloudWatch. Data source metrics include:

    • Requests: The number of invocations that occurred during a request.

    • Latency: The time to complete a data source invocation.

    • Errors: The number of errors that occurred during a data source invocation.

    These metrics can be emitted to CloudWatch per data source or for all data sources in the request. Metrics will be recorded by API ID and data source name. dataSourceLevelMetricsBehavior accepts one of these values at a time:

    • FULL_REQUEST_DATA_SOURCE_METRICS: Records and emits metric data for all data sources in the request.

    • PER_DATA_SOURCE_METRICS: Records and emits metric data for data sources that have the metricConfig value set to ENABLED.

    " + }, + "operationLevelMetricsConfig":{ + "shape":"OperationLevelMetricsConfig", + "documentation":"

    Controls how operation metrics will be emitted to CloudWatch. Operation metrics include:

    • Requests: The number of times a specified GraphQL operation was called.

    • GraphQL errors: The number of GraphQL errors that occurred during a specified GraphQL operation.

    Metrics will be recorded by API ID and operation name. You can set the value to ENABLED or DISABLED.

    " + } + }, + "documentation":"

    Enables and controls the enhanced metrics feature. Enhanced metrics emit granular data on API usage and performance such as AppSync request and error counts, latency, and cache hits/misses. All enhanced metric data is sent to your CloudWatch account, and you can configure the types of data that will be sent.

    Enhanced metrics can be configured at the resolver, data source, and operation levels. EnhancedMetricsConfig contains three required parameters, each controlling one of these categories:

    1. resolverLevelMetricsBehavior: Controls how resolver metrics will be emitted to CloudWatch. Resolver metrics include:

      • GraphQL errors: The number of GraphQL errors that occurred.

      • Requests: The number of invocations that occurred during a request.

      • Latency: The time to complete a resolver invocation.

      • Cache hits: The number of cache hits during a request.

      • Cache misses: The number of cache misses during a request.

      These metrics can be emitted to CloudWatch per resolver or for all resolvers in the request. Metrics will be recorded by API ID and resolver name. resolverLevelMetricsBehavior accepts one of these values at a time:

      • FULL_REQUEST_RESOLVER_METRICS: Records and emits metric data for all resolvers in the request.

      • PER_RESOLVER_METRICS: Records and emits metric data for resolvers that have the metricConfig value set to ENABLED.

    2. dataSourceLevelMetricsBehavior: Controls how data source metrics will be emitted to CloudWatch. Data source metrics include:

      • Requests: The number of invocations that occurred during a request.

      • Latency: The time to complete a data source invocation.

      • Errors: The number of errors that occurred during a data source invocation.

      These metrics can be emitted to CloudWatch per data source or for all data sources in the request. Metrics will be recorded by API ID and data source name. dataSourceLevelMetricsBehavior accepts one of these values at a time:

      • FULL_REQUEST_DATA_SOURCE_METRICS: Records and emits metric data for all data sources in the request.

      • PER_DATA_SOURCE_METRICS: Records and emits metric data for data sources that have the metricConfig value set to ENABLED.

    3. operationLevelMetricsConfig: Controls how operation metrics will be emitted to CloudWatch. Operation metrics include:

      • Requests: The number of times a specified GraphQL operation was called.

      • GraphQL errors: The number of GraphQL errors that occurred during a specified GraphQL operation.

      Metrics will be recorded by API ID and operation name. You can set the value to ENABLED or DISABLED.

    " + }, "EnvironmentVariableKey":{ "type":"string", "max":64, @@ -3306,6 +3377,10 @@ "resolverCountLimit":{ "shape":"ResolverCountLimit", "documentation":"

    The maximum number of resolvers that can be invoked in a single request. The default value is 0 (or unspecified), which will set the limit to 10000. When specified, the limit value can be between 1 and 10000. This field will produce a limit error if the operation falls out of bounds.

    " + }, + "enhancedMetricsConfig":{ + "shape":"EnhancedMetricsConfig", + "documentation":"

    The enhancedMetricsConfig object.

    " } }, "documentation":"

    Describes a GraphQL API.

    " @@ -3924,6 +3999,13 @@ }, "documentation":"

    Describes an OpenSearch data source configuration.

    " }, + "OperationLevelMetricsConfig":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "OutputType":{ "type":"string", "enum":[ @@ -4122,6 +4204,10 @@ "code":{ "shape":"Code", "documentation":"

    The resolver code that contains the request and response functions. When code is used, the runtime is required. The runtime value must be APPSYNC_JS.

    " + }, + "metricsConfig":{ + "shape":"ResolverLevelMetricsConfig", + "documentation":"

    Enables or disables enhanced resolver metrics for specified resolvers. Note that metricsConfig won't be used unless the resolverLevelMetricsBehavior value is set to PER_RESOLVER_METRICS. If the resolverLevelMetricsBehavior is set to FULL_REQUEST_RESOLVER_METRICS instead, metricsConfig will be ignored. However, you can still set its value.

    metricsConfig can be ENABLED or DISABLED.

    " } }, "documentation":"

    Describes a resolver.

    " @@ -4138,6 +4224,20 @@ "PIPELINE" ] }, + "ResolverLevelMetricsBehavior":{ + "type":"string", + "enum":[ + "FULL_REQUEST_RESOLVER_METRICS", + "PER_RESOLVER_METRICS" + ] + }, + "ResolverLevelMetricsConfig":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "Resolvers":{ "type":"list", "member":{"shape":"Resolver"} @@ -4543,6 +4643,10 @@ "type":{ "shape":"ApiCacheType", "documentation":"

    The cache instance type. Valid values are

    • SMALL

    • MEDIUM

    • LARGE

    • XLARGE

    • LARGE_2X

    • LARGE_4X

    • LARGE_8X (not available in all regions)

    • LARGE_12X

    Historically, instance types were identified by an EC2-style value. As of July 2020, this is deprecated, and the generic identifiers above should be used.

    The following legacy instance types are available, but their use is discouraged:

    • T2_SMALL: A t2.small instance type.

    • T2_MEDIUM: A t2.medium instance type.

    • R4_LARGE: A r4.large instance type.

    • R4_XLARGE: A r4.xlarge instance type.

    • R4_2XLARGE: A r4.2xlarge instance type.

    • R4_4XLARGE: A r4.4xlarge instance type.

    • R4_8XLARGE: A r4.8xlarge instance type.

    " + }, + "healthMetricsConfig":{ + "shape":"CacheHealthMetricsConfig", + "documentation":"

    Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:

    • NetworkBandwidthOutAllowanceExceeded: The network packets dropped because the throughput exceeded the aggregated bandwidth limit. This is useful for diagnosing bottlenecks in a cache configuration.

    • EngineCPUUtilization: The CPU utilization (percentage) allocated to the Redis process. This is useful for diagnosing bottlenecks in a cache configuration.

    Metrics will be recorded by API ID. You can set the value to ENABLED or DISABLED.

    " } }, "documentation":"

    Represents the input of a UpdateApiCache operation.

    " @@ -4654,6 +4758,10 @@ "eventBridgeConfig":{ "shape":"EventBridgeDataSourceConfig", "documentation":"

    The new Amazon EventBridge settings.

    " + }, + "metricsConfig":{ + "shape":"DataSourceLevelMetricsConfig", + "documentation":"

    Enables or disables enhanced data source metrics for specified data sources. Note that metricsConfig won't be used unless the dataSourceLevelMetricsBehavior value is set to PER_DATA_SOURCE_METRICS. If the dataSourceLevelMetricsBehavior is set to FULL_REQUEST_DATA_SOURCE_METRICS instead, metricsConfig will be ignored. However, you can still set its value.

    metricsConfig can be ENABLED or DISABLED.

    " } } }, @@ -4821,6 +4929,10 @@ "resolverCountLimit":{ "shape":"ResolverCountLimit", "documentation":"

    The maximum number of resolvers that can be invoked in a single request. The default value is 0 (or unspecified), which will set the limit to 10000. When specified, the limit value can be between 1 and 10000. This field will produce a limit error if the operation falls out of bounds.

    " + }, + "enhancedMetricsConfig":{ + "shape":"EnhancedMetricsConfig", + "documentation":"

    The enhancedMetricsConfig object.

    " } } }, @@ -4895,6 +5007,10 @@ "code":{ "shape":"Code", "documentation":"

    The resolver code that contains the request and response functions. When code is used, the runtime is required. The runtime value must be APPSYNC_JS.

    " + }, + "metricsConfig":{ + "shape":"ResolverLevelMetricsConfig", + "documentation":"

    Enables or disables enhanced resolver metrics for specified resolvers. Note that metricsConfig won't be used unless the resolverLevelMetricsBehavior value is set to PER_RESOLVER_METRICS. If the resolverLevelMetricsBehavior is set to FULL_REQUEST_RESOLVER_METRICS instead, metricsConfig will be ignored. However, you can still set its value.

    metricsConfig can be ENABLED or DISABLED.

    " } } }, diff --git a/services/arczonalshift/pom.xml b/services/arczonalshift/pom.xml index f049a5fa659a..0953713b0909 100644 --- a/services/arczonalshift/pom.xml +++ b/services/arczonalshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT arczonalshift AWS Java SDK :: Services :: ARC Zonal Shift diff --git a/services/arczonalshift/src/main/resources/codegen-resources/customization.config b/services/arczonalshift/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/arczonalshift/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/artifact/pom.xml b/services/artifact/pom.xml new file mode 100644 index 000000000000..f548af978a93 --- /dev/null +++ b/services/artifact/pom.xml @@ -0,0 +1,60 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.24.10-SNAPSHOT + + artifact + AWS Java SDK :: Services :: Artifact + The AWS Java SDK for Artifact module holds the client classes that are used for + communicating with Artifact. 
+ + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.artifact + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + software.amazon.awssdk + http-auth-aws + ${awsjavasdk.version} + + + diff --git a/services/artifact/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/artifact/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 000000000000..49efffd75776 --- /dev/null +++ b/services/artifact/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + 
"conditions": [], + "endpoint": { + "url": "https://artifact-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://artifact-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://artifact.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://artifact.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/services/artifact/src/main/resources/codegen-resources/endpoint-tests.json b/services/artifact/src/main/resources/codegen-resources/endpoint-tests.json new file mode 100644 index 000000000000..d78df5725d42 --- /dev/null +++ b/services/artifact/src/main/resources/codegen-resources/endpoint-tests.json @@ -0,0 +1,314 @@ +{ + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://artifact-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://artifact.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://artifact-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + 
"params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://artifact.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://artifact-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://artifact.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": 
"us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, 
+ "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/services/artifact/src/main/resources/codegen-resources/paginators-1.json b/services/artifact/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..f8c851442588 --- /dev/null +++ 
b/services/artifact/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,10 @@ +{ + "pagination": { + "ListReports": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "reports" + } + } +} diff --git a/services/artifact/src/main/resources/codegen-resources/service-2.json b/services/artifact/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..690eb4ed9eb7 --- /dev/null +++ b/services/artifact/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,737 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-05-10", + "endpointPrefix":"artifact", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"AWS Artifact", + "serviceId":"Artifact", + "signatureVersion":"v4", + "signingName":"artifact", + "uid":"artifact-2018-05-10" + }, + "operations":{ + "GetAccountSettings":{ + "name":"GetAccountSettings", + "http":{ + "method":"GET", + "requestUri":"/v1/account-settings/get", + "responseCode":200 + }, + "input":{"shape":"GetAccountSettingsRequest"}, + "output":{"shape":"GetAccountSettingsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Get the account settings for Artifact.

    " + }, + "GetReport":{ + "name":"GetReport", + "http":{ + "method":"GET", + "requestUri":"/v1/report/get", + "responseCode":200 + }, + "input":{"shape":"GetReportRequest"}, + "output":{"shape":"GetReportResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Get the content for a single report.

    " + }, + "GetReportMetadata":{ + "name":"GetReportMetadata", + "http":{ + "method":"GET", + "requestUri":"/v1/report/getMetadata", + "responseCode":200 + }, + "input":{"shape":"GetReportMetadataRequest"}, + "output":{"shape":"GetReportMetadataResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Get the metadata for a single report.

    " + }, + "GetTermForReport":{ + "name":"GetTermForReport", + "http":{ + "method":"GET", + "requestUri":"/v1/report/getTermForReport", + "responseCode":200 + }, + "input":{"shape":"GetTermForReportRequest"}, + "output":{"shape":"GetTermForReportResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Get the Term content associated with a single report.

    " + }, + "ListReports":{ + "name":"ListReports", + "http":{ + "method":"GET", + "requestUri":"/v1/report/list", + "responseCode":200 + }, + "input":{"shape":"ListReportsRequest"}, + "output":{"shape":"ListReportsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    List available reports.

    " + }, + "PutAccountSettings":{ + "name":"PutAccountSettings", + "http":{ + "method":"PUT", + "requestUri":"/v1/account-settings/put", + "responseCode":200 + }, + "input":{"shape":"PutAccountSettingsRequest"}, + "output":{"shape":"PutAccountSettingsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Put the account settings for Artifact.

    ", + "idempotent":true + } + }, + "shapes":{ + "AcceptanceType":{ + "type":"string", + "enum":[ + "PASSTHROUGH", + "EXPLICIT" + ] + }, + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    User does not have sufficient access to perform this action.

    ", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AccountSettings":{ + "type":"structure", + "members":{ + "notificationSubscriptionStatus":{ + "shape":"NotificationSubscriptionStatus", + "documentation":"

    Notification subscription status of the customer.

    " + } + }, + "documentation":"

    Account settings for the customer.

    " + }, + "ConflictException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

    Identifier of the affected resource.

    " + }, + "resourceType":{ + "shape":"String", + "documentation":"

    Type of the affected resource.

    " + } + }, + "documentation":"

    Request to create/modify content would result in a conflict.

    ", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "GetAccountSettingsRequest":{ + "type":"structure", + "members":{ + } + }, + "GetAccountSettingsResponse":{ + "type":"structure", + "members":{ + "accountSettings":{"shape":"AccountSettings"} + } + }, + "GetReportMetadataRequest":{ + "type":"structure", + "required":["reportId"], + "members":{ + "reportId":{ + "shape":"ReportId", + "documentation":"

    Unique resource ID for the report resource.

    ", + "location":"querystring", + "locationName":"reportId" + }, + "reportVersion":{ + "shape":"VersionAttribute", + "documentation":"

    Version for the report resource.

    ", + "location":"querystring", + "locationName":"reportVersion" + } + } + }, + "GetReportMetadataResponse":{ + "type":"structure", + "members":{ + "reportDetails":{ + "shape":"ReportDetail", + "documentation":"

    Report resource detail.

    " + } + } + }, + "GetReportRequest":{ + "type":"structure", + "required":[ + "reportId", + "termToken" + ], + "members":{ + "reportId":{ + "shape":"ReportId", + "documentation":"

    Unique resource ID for the report resource.

    ", + "location":"querystring", + "locationName":"reportId" + }, + "reportVersion":{ + "shape":"VersionAttribute", + "documentation":"

    Version for the report resource.

    ", + "location":"querystring", + "locationName":"reportVersion" + }, + "termToken":{ + "shape":"ShortStringAttribute", + "documentation":"

    Unique download token provided by GetTermForReport API.

    ", + "location":"querystring", + "locationName":"termToken" + } + } + }, + "GetReportResponse":{ + "type":"structure", + "members":{ + "documentPresignedUrl":{ + "shape":"GetReportResponseDocumentPresignedUrlString", + "documentation":"

    Presigned S3 url to access the report content.

    " + } + } + }, + "GetReportResponseDocumentPresignedUrlString":{ + "type":"string", + "max":10240, + "min":1 + }, + "GetTermForReportRequest":{ + "type":"structure", + "required":["reportId"], + "members":{ + "reportId":{ + "shape":"ReportId", + "documentation":"

    Unique resource ID for the report resource.

    ", + "location":"querystring", + "locationName":"reportId" + }, + "reportVersion":{ + "shape":"VersionAttribute", + "documentation":"

    Version for the report resource.

    ", + "location":"querystring", + "locationName":"reportVersion" + } + } + }, + "GetTermForReportResponse":{ + "type":"structure", + "members":{ + "documentPresignedUrl":{ + "shape":"GetTermForReportResponseDocumentPresignedUrlString", + "documentation":"

    Presigned S3 url to access the term content.

    " + }, + "termToken":{ + "shape":"String", + "documentation":"

    Unique token representing this request event.

    " + } + } + }, + "GetTermForReportResponseDocumentPresignedUrlString":{ + "type":"string", + "max":10240, + "min":1 + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

    Number of seconds in which the caller can retry the request.

    ", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

    An unknown server exception has occurred.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "ListReportsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxResultsAttribute", + "documentation":"

    Maximum number of resources to return in the paginated response.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextTokenAttribute", + "documentation":"

    Pagination token to request the next page of resources.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListReportsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextTokenAttribute", + "documentation":"

    Pagination token to request the next page of resources.

    " + }, + "reports":{ + "shape":"ReportsList", + "documentation":"

    List of report resources.

    " + } + } + }, + "LongStringAttribute":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^[^<>]*$" + }, + "MaxResultsAttribute":{ + "type":"integer", + "box":true, + "max":300, + "min":1 + }, + "NextTokenAttribute":{ + "type":"string", + "max":2048, + "min":1 + }, + "NotificationSubscriptionStatus":{ + "type":"string", + "enum":[ + "SUBSCRIBED", + "NOT_SUBSCRIBED" + ] + }, + "PublishedState":{ + "type":"string", + "enum":[ + "PUBLISHED", + "UNPUBLISHED" + ] + }, + "PutAccountSettingsRequest":{ + "type":"structure", + "members":{ + "notificationSubscriptionStatus":{ + "shape":"NotificationSubscriptionStatus", + "documentation":"

    Desired notification subscription status.

    " + } + } + }, + "PutAccountSettingsResponse":{ + "type":"structure", + "members":{ + "accountSettings":{"shape":"AccountSettings"} + } + }, + "ReportDetail":{ + "type":"structure", + "members":{ + "acceptanceType":{ + "shape":"AcceptanceType", + "documentation":"

    Acceptance type for report.

    " + }, + "arn":{ + "shape":"LongStringAttribute", + "documentation":"

    ARN for the report resource.

    " + }, + "category":{ + "shape":"ShortStringAttribute", + "documentation":"

    Category for the report resource.

    " + }, + "companyName":{ + "shape":"ShortStringAttribute", + "documentation":"

    Associated company name for the report resource.

    " + }, + "createdAt":{ + "shape":"TimestampAttribute", + "documentation":"

    Timestamp indicating when the report resource was created.

    " + }, + "deletedAt":{ + "shape":"TimestampAttribute", + "documentation":"

    Timestamp indicating when the report resource was deleted.

    " + }, + "description":{ + "shape":"LongStringAttribute", + "documentation":"

    Description for the report resource.

    " + }, + "id":{ + "shape":"ReportId", + "documentation":"

    Unique resource ID for the report resource.

    " + }, + "lastModifiedAt":{ + "shape":"TimestampAttribute", + "documentation":"

    Timestamp indicating when the report resource was last modified.

    " + }, + "name":{ + "shape":"ShortStringAttribute", + "documentation":"

    Name for the report resource.

    " + }, + "periodEnd":{ + "shape":"TimestampAttribute", + "documentation":"

    Timestamp indicating the report resource effective end.

    " + }, + "periodStart":{ + "shape":"TimestampAttribute", + "documentation":"

    Timestamp indicating the report resource effective start.

    " + }, + "productName":{ + "shape":"ShortStringAttribute", + "documentation":"

    Associated product name for the report resource.

    " + }, + "sequenceNumber":{ + "shape":"SequenceNumberAttribute", + "documentation":"

    Sequence number to enforce optimistic locking.

    " + }, + "series":{ + "shape":"ShortStringAttribute", + "documentation":"

    Series for the report resource.

    " + }, + "state":{ + "shape":"PublishedState", + "documentation":"

    Current state of the report resource.

    " + }, + "statusMessage":{ + "shape":"StatusMessage", + "documentation":"

    The message associated with the current upload state.

    " + }, + "termArn":{ + "shape":"LongStringAttribute", + "documentation":"

    Unique resource ARN for term resource.

    " + }, + "uploadState":{ + "shape":"UploadState", + "documentation":"

    The current state of the document upload.

    " + }, + "version":{ + "shape":"VersionAttribute", + "documentation":"

    Version for the report resource.

    " + } + }, + "documentation":"

    Full detail for report resource metadata.

    " + }, + "ReportId":{ + "type":"string", + "pattern":"^report-[a-zA-Z0-9]{16}$" + }, + "ReportSummary":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"LongStringAttribute", + "documentation":"

    ARN for the report resource.

    " + }, + "category":{ + "shape":"ShortStringAttribute", + "documentation":"

    Category for the report resource.

    " + }, + "companyName":{ + "shape":"ShortStringAttribute", + "documentation":"

    Associated company name for the report resource.

    " + }, + "description":{ + "shape":"LongStringAttribute", + "documentation":"

    Description for the report resource.

    " + }, + "id":{ + "shape":"ReportId", + "documentation":"

    Unique resource ID for the report resource.

    " + }, + "name":{ + "shape":"ShortStringAttribute", + "documentation":"

    Name for the report resource.

    " + }, + "periodEnd":{ + "shape":"TimestampAttribute", + "documentation":"

    Timestamp indicating the report resource effective end.

    " + }, + "periodStart":{ + "shape":"TimestampAttribute", + "documentation":"

    Timestamp indicating the report resource effective start.

    " + }, + "productName":{ + "shape":"ShortStringAttribute", + "documentation":"

    Associated product name for the report resource.

    " + }, + "series":{ + "shape":"ShortStringAttribute", + "documentation":"

    Series for the report resource.

    " + }, + "state":{ + "shape":"PublishedState", + "documentation":"

    Current state of the report resource.

    " + }, + "statusMessage":{ + "shape":"StatusMessage", + "documentation":"

    The message associated with the current upload state.

    " + }, + "uploadState":{ + "shape":"UploadState", + "documentation":"

    The current state of the document upload.

    " + }, + "version":{ + "shape":"VersionAttribute", + "documentation":"

    Version for the report resource.

    " + } + }, + "documentation":"

    Summary for report resource.

    " + }, + "ReportsList":{ + "type":"list", + "member":{"shape":"ReportSummary"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

    Identifier of the affected resource.

    " + }, + "resourceType":{ + "shape":"String", + "documentation":"

    Type of the affected resource.

    " + } + }, + "documentation":"

    Request references a resource which does not exist.

    ", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SequenceNumberAttribute":{ + "type":"long", + "box":true, + "min":1 + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "message", + "quotaCode", + "resourceId", + "resourceType", + "serviceCode" + ], + "members":{ + "message":{"shape":"String"}, + "quotaCode":{ + "shape":"String", + "documentation":"

    Code for the affected quota.

    " + }, + "resourceId":{ + "shape":"String", + "documentation":"

    Identifier of the affected resource.

    " + }, + "resourceType":{ + "shape":"String", + "documentation":"

    Type of the affected resource.

    " + }, + "serviceCode":{ + "shape":"String", + "documentation":"

    Code for the affected service.

    " + } + }, + "documentation":"

    Request would cause a service quota to be exceeded.

    ", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "ShortStringAttribute":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[a-zA-Z0-9_\\-\\s]*$" + }, + "StatusMessage":{"type":"string"}, + "String":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "quotaCode":{ + "shape":"String", + "documentation":"

    Code for the affected quota.

    " + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

    Number of seconds in which the caller can retry the request.

    ", + "location":"header", + "locationName":"Retry-After" + }, + "serviceCode":{ + "shape":"String", + "documentation":"

    Code for the affected service.

    " + } + }, + "documentation":"

    Request was denied due to request throttling.

    ", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "TimestampAttribute":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "UploadState":{ + "type":"string", + "enum":[ + "PROCESSING", + "COMPLETE", + "FAILED", + "FAULT" + ] + }, + "ValidationException":{ + "type":"structure", + "required":[ + "message", + "reason" + ], + "members":{ + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

    The field that caused the error, if applicable.

    " + }, + "message":{"shape":"String"}, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

    Reason the request failed validation.

    " + } + }, + "documentation":"

    Request fails to satisfy the constraints specified by an AWS service.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "message", + "name" + ], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    Message describing why the field failed validation.

    " + }, + "name":{ + "shape":"String", + "documentation":"

    Name of validation exception.

    " + } + }, + "documentation":"

    Validation exception message and name.

    " + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "unknownOperation", + "cannotParse", + "fieldValidationFailed", + "invalidToken", + "other" + ] + }, + "VersionAttribute":{ + "type":"long", + "box":true, + "min":1 + } + }, + "documentation":"

    This reference provides descriptions of the low-level AWS Artifact Service API.

    " +} diff --git a/services/athena/pom.xml b/services/athena/pom.xml index e12bf16d0144..64ca76565985 100644 --- a/services/athena/pom.xml +++ b/services/athena/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT athena AWS Java SDK :: Services :: Amazon Athena diff --git a/services/auditmanager/pom.xml b/services/auditmanager/pom.xml index 43346f3d6a78..60fb3f6033cd 100644 --- a/services/auditmanager/pom.xml +++ b/services/auditmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT auditmanager AWS Java SDK :: Services :: Audit Manager diff --git a/services/autoscaling/pom.xml b/services/autoscaling/pom.xml index e6925767ac31..6070443e59f0 100644 --- a/services/autoscaling/pom.xml +++ b/services/autoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT autoscaling AWS Java SDK :: Services :: Auto Scaling diff --git a/services/autoscalingplans/pom.xml b/services/autoscalingplans/pom.xml index be186a4d0369..4a2791b6c7a6 100644 --- a/services/autoscalingplans/pom.xml +++ b/services/autoscalingplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT autoscalingplans AWS Java SDK :: Services :: Auto Scaling Plans diff --git a/services/b2bi/pom.xml b/services/b2bi/pom.xml index 73b66a90a202..ace6629b7737 100644 --- a/services/b2bi/pom.xml +++ b/services/b2bi/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT b2bi AWS Java SDK :: Services :: B2 Bi diff --git a/services/b2bi/src/main/resources/codegen-resources/customization.config b/services/b2bi/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/b2bi/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/backup/pom.xml 
b/services/backup/pom.xml index 7e9f50377e4e..2a3af25198b9 100644 --- a/services/backup/pom.xml +++ b/services/backup/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT backup AWS Java SDK :: Services :: Backup diff --git a/services/backupgateway/pom.xml b/services/backupgateway/pom.xml index 80793c74637d..d3bc108b2b1c 100644 --- a/services/backupgateway/pom.xml +++ b/services/backupgateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT backupgateway AWS Java SDK :: Services :: Backup Gateway diff --git a/services/backupstorage/pom.xml b/services/backupstorage/pom.xml index b46c849fcf87..dc764415bd62 100644 --- a/services/backupstorage/pom.xml +++ b/services/backupstorage/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT backupstorage AWS Java SDK :: Services :: Backup Storage diff --git a/services/batch/pom.xml b/services/batch/pom.xml index ad40e7d8eb77..2f27fd882e56 100644 --- a/services/batch/pom.xml +++ b/services/batch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT batch AWS Java SDK :: Services :: AWS Batch diff --git a/services/batch/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/batch/src/main/resources/codegen-resources/endpoint-rule-set.json index d04df9721c1d..54954abbb7a9 100644 --- a/services/batch/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/batch/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ 
-168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,18 +212,17 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ - "aws", { "fn": "getAttr", "argv": [ @@ -236,7 +231,8 @@ }, "name" ] - } + }, + "aws" ] } ], @@ -252,7 +248,6 @@ { "fn": "stringEquals", "argv": [ - "aws-us-gov", { "fn": "getAttr", "argv": [ @@ -261,7 +256,8 @@ }, "name" ] - } + }, + "aws-us-gov" ] } ], @@ -281,14 +277,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -302,7 +300,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -322,7 +319,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -333,14 +329,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -351,9 +349,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/services/batch/src/main/resources/codegen-resources/service-2.json b/services/batch/src/main/resources/codegen-resources/service-2.json index af52be4a1d74..37005ba16bf6 100644 --- a/services/batch/src/main/resources/codegen-resources/service-2.json +++ b/services/batch/src/main/resources/codegen-resources/service-2.json @@ -661,7 +661,7 @@ }, "allocationStrategy":{ 
"shape":"CRAllocationStrategy", - "documentation":"

    The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide.

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    BEST_FIT (default)

    Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost instance type. If additional instances of the selected instance type aren't available, Batch waits for the additional instances to be available. If there aren't enough instances available or the user is reaching Amazon EC2 service limits, additional jobs aren't run until the currently running jobs are completed. This allocation strategy keeps costs lower but can limit scaling. If you're using Spot Fleets with BEST_FIT, the Spot Fleet IAM Role must be specified. Compute resources that use a BEST_FIT allocation strategy don't support infrastructure updates and can't update some parameters. For more information, see Updating compute environments in the Batch User Guide.

    BEST_FIT_PROGRESSIVE

    Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If additional instances of the previously selected instance types aren't available, Batch selects new instance types.

    SPOT_CAPACITY_OPTIMIZED

    Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

    SPOT_PRICE_CAPACITY_OPTIMIZED

    The price and capacity optimized allocation strategy looks at both price and capacity to select the Spot Instance pools that are the least likely to be interrupted and have the lowest possible price. This allocation strategy is only available for Spot Instance compute resources.

    With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.

    " + "documentation":"

    The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide.

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    BEST_FIT (default)

    Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost instance type. If additional instances of the selected instance type aren't available, Batch waits for the additional instances to be available. If there aren't enough instances available or the user is reaching Amazon EC2 service limits, additional jobs aren't run until the currently running jobs are completed. This allocation strategy keeps costs lower but can limit scaling. If you're using Spot Fleets with BEST_FIT, the Spot Fleet IAM Role must be specified. Compute resources that use a BEST_FIT allocation strategy don't support infrastructure updates and can't update some parameters. For more information, see Updating compute environments in the Batch User Guide.

    BEST_FIT_PROGRESSIVE

    Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If additional instances of the previously selected instance types aren't available, Batch selects new instance types.

    SPOT_CAPACITY_OPTIMIZED

    Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

    SPOT_PRICE_CAPACITY_OPTIMIZED

    The price and capacity optimized allocation strategy looks at both price and capacity to select the Spot Instance pools that are the least likely to be interrupted and have the lowest possible price. This allocation strategy is only available for Spot Instance compute resources.

    With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.

    " }, "minvCpus":{ "shape":"Integer", @@ -669,7 +669,7 @@ }, "maxvCpus":{ "shape":"Integer", - "documentation":"

    The maximum number of vCPUs that a compute environment can support.

    With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED allocation strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. For example, no more than a single instance from among those specified in your compute environment is allocated.

    " + "documentation":"

    The maximum number of vCPUs that a compute environment can support.

    With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.

    " }, "desiredvCpus":{ "shape":"Integer", @@ -699,7 +699,7 @@ }, "instanceRole":{ "shape":"String", - "documentation":"

    The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, ecsInstanceRole or arn:aws:iam::<aws_account_id>:instance-profile/ecsInstanceRole . For more information, see Amazon ECS instance role in the Batch User Guide.

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    " + "documentation":"

    The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. This parameter is required for Amazon EC2 instances types. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, ecsInstanceRole or arn:aws:iam::<aws_account_id>:instance-profile/ecsInstanceRole . For more information, see Amazon ECS instance role in the Batch User Guide.

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    " }, "tags":{ "shape":"TagsMap", @@ -737,7 +737,7 @@ }, "maxvCpus":{ "shape":"Integer", - "documentation":"

    The maximum number of Amazon EC2 vCPUs that an environment can reach.

    With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and SPOT_PRICE_CAPACITY_OPTIMIZED allocation strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. That is, no more than a single instance from among those specified in your compute environment.

    " + "documentation":"

    The maximum number of Amazon EC2 vCPUs that an environment can reach.

    With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.

    " }, "desiredvCpus":{ "shape":"Integer", @@ -753,7 +753,7 @@ }, "allocationStrategy":{ "shape":"CRUpdateAllocationStrategy", - "documentation":"

    The allocation strategy to use for the compute resource if there's not enough instances of the best fitting instance type that can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide.

    When updating a compute environment, changing the allocation strategy requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide. BEST_FIT isn't supported when updating a compute environment.

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    BEST_FIT_PROGRESSIVE

    Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If additional instances of the previously selected instance types aren't available, Batch selects new instance types.

    SPOT_CAPACITY_OPTIMIZED

    Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

    SPOT_PRICE_CAPACITY_OPTIMIZED

    The price and capacity optimized allocation strategy looks at both price and capacity to select the Spot Instance pools that are the least likely to be interrupted and have the lowest possible price. This allocation strategy is only available for Spot Instance compute resources.

    With both BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and SPOT_PRICE_CAPACITY_OPTIMIZED strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.

    " + "documentation":"

    The allocation strategy to use for the compute resource if there's not enough instances of the best fitting instance type that can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide.

    When updating a compute environment, changing the allocation strategy requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide. BEST_FIT isn't supported when updating a compute environment.

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    BEST_FIT_PROGRESSIVE

    Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If additional instances of the previously selected instance types aren't available, Batch selects new instance types.

    SPOT_CAPACITY_OPTIMIZED

    Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

    SPOT_PRICE_CAPACITY_OPTIMIZED

    The price and capacity optimized allocation strategy looks at both price and capacity to select the Spot Instance pools that are the least likely to be interrupted and have the lowest possible price. This allocation strategy is only available for Spot Instance compute resources.

    With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.

    " }, "instanceTypes":{ "shape":"StringList", @@ -765,7 +765,7 @@ }, "instanceRole":{ "shape":"String", - "documentation":"

    The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, ecsInstanceRole or arn:aws:iam::<aws_account_id>:instance-profile/ecsInstanceRole . For more information, see Amazon ECS instance role in the Batch User Guide.

    When updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide.

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    " + "documentation":"

    The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. Required for Amazon EC2 instances. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, ecsInstanceRole or arn:aws:iam::<aws_account_id>:instance-profile/ecsInstanceRole . For more information, see Amazon ECS instance role in the Batch User Guide.

    When updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide.

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    " }, "tags":{ "shape":"TagsMap", @@ -913,7 +913,14 @@ "shape":"EphemeralStorage", "documentation":"

    The amount of ephemeral storage allocated for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate.

    " }, - "runtimePlatform":{"shape":"RuntimePlatform"} + "runtimePlatform":{ + "shape":"RuntimePlatform", + "documentation":"

    An object that represents the compute environment architecture for Batch jobs on Fargate.

    " + }, + "repositoryCredentials":{ + "shape":"RepositoryCredentials", + "documentation":"

    The private repository authentication credentials to use.

    " + } }, "documentation":"

    An object that represents the details of a container that's part of a job.

    " }, @@ -956,7 +963,7 @@ "members":{ "image":{ "shape":"String", - "documentation":"

    The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with repository-url/image:tag . It can be 255 characters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), underscores (_), colons (:), periods (.), forward slashes (/), and number signs (#). This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

    Docker image architecture must match the processor architecture of the compute resources that they're scheduled on. For example, ARM-based Docker images can only run on ARM-based compute resources.

    • Images in Amazon ECR Public repositories use the full registry/repository[:tag] or registry/repository[@digest] naming conventions. For example, public.ecr.aws/registry_alias/my-web-app:latest .

    • Images in Amazon ECR repositories use the full registry and repository URI (for example, 123456789012.dkr.ecr.<region-name>.amazonaws.com/<repository-name>).

    • Images in official repositories on Docker Hub use a single name (for example, ubuntu or mongo).

    • Images in other repositories on Docker Hub are qualified with an organization name (for example, amazon/amazon-ecs-agent).

    • Images in other online repositories are qualified further by a domain name (for example, quay.io/assemblyline/ubuntu).

    " + "documentation":"

    Required. The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with repository-url/image:tag . It can be 255 characters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), underscores (_), colons (:), periods (.), forward slashes (/), and number signs (#). This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

    Docker image architecture must match the processor architecture of the compute resources that they're scheduled on. For example, ARM-based Docker images can only run on ARM-based compute resources.

    • Images in Amazon ECR Public repositories use the full registry/repository[:tag] or registry/repository[@digest] naming conventions. For example, public.ecr.aws/registry_alias/my-web-app:latest .

    • Images in Amazon ECR repositories use the full registry and repository URI (for example, 123456789012.dkr.ecr.<region-name>.amazonaws.com/<repository-name>).

    • Images in official repositories on Docker Hub use a single name (for example, ubuntu or mongo).

    • Images in other repositories on Docker Hub are qualified with an organization name (for example, amazon/amazon-ecs-agent).

    • Images in other online repositories are qualified further by a domain name (for example, quay.io/assemblyline/ubuntu).

    " }, "vcpus":{ "shape":"Integer", @@ -1042,7 +1049,14 @@ "shape":"EphemeralStorage", "documentation":"

    The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate.

    " }, - "runtimePlatform":{"shape":"RuntimePlatform"} + "runtimePlatform":{ + "shape":"RuntimePlatform", + "documentation":"

    An object that represents the compute environment architecture for Batch jobs on Fargate.

    " + }, + "repositoryCredentials":{ + "shape":"RepositoryCredentials", + "documentation":"

    The private repository authentication credentials to use.

    " + } }, "documentation":"

    Container properties are used for Amazon ECS based job definitions. These properties to describe the container that's launched as part of a job.

    " }, @@ -1302,7 +1316,7 @@ "members":{ "jobDefinitions":{ "shape":"StringList", - "documentation":"

    A list of up to 100 job definitions. Each entry in the list can either be an ARN in the format arn:aws:batch:${Region}:${Account}:job-definition/${JobDefinitionName}:${Revision} or a short version using the form ${JobDefinitionName}:${Revision}.

    " + "documentation":"

    A list of up to 100 job definitions. Each entry in the list can either be an ARN in the format arn:aws:batch:${Region}:${Account}:job-definition/${JobDefinitionName}:${Revision} or a short version using the form ${JobDefinitionName}:${Revision}. This parameter can't be used with other parameters.

    " }, "maxResults":{ "shape":"Integer", @@ -1503,7 +1517,7 @@ "members":{ "imageType":{ "shape":"ImageType", - "documentation":"

    The image type to match with the instance type to select an AMI. The supported values are different for ECS and EKS resources.

    ECS

    If the imageIdOverride parameter isn't specified, then a recent Amazon ECS-optimized Amazon Linux 2 AMI (ECS_AL2) is used. If a new image type is specified in an update, but neither an imageId nor a imageIdOverride parameter is specified, then the latest Amazon ECS optimized AMI for that image type that's supported by Batch is used.

    ECS_AL2

    Amazon Linux 2: Default for all non-GPU instance families.

    ECS_AL2_NVIDIA

    Amazon Linux 2 (GPU): Default for all GPU instance families (for example P4 and G4) and can be used for all non Amazon Web Services Graviton-based instance types.

    ECS_AL1

    Amazon Linux. Amazon Linux has reached the end-of-life of standard support. For more information, see Amazon Linux AMI.

    EKS

    If the imageIdOverride parameter isn't specified, then a recent Amazon EKS-optimized Amazon Linux AMI (EKS_AL2) is used. If a new image type is specified in an update, but neither an imageId nor a imageIdOverride parameter is specified, then the latest Amazon EKS optimized AMI for that image type that Batch supports is used.

    EKS_AL2

    Amazon Linux 2: Default for all non-GPU instance families.

    EKS_AL2_NVIDIA

    Amazon Linux 2 (accelerated): Default for all GPU instance families (for example, P4 and G4) and can be used for all non Amazon Web Services Graviton-based instance types.

    " + "documentation":"

    The image type to match with the instance type to select an AMI. The supported values are different for ECS and EKS resources.

    ECS

    If the imageIdOverride parameter isn't specified, then a recent Amazon ECS-optimized Amazon Linux 2 AMI (ECS_AL2) is used. If a new image type is specified in an update, but neither an imageId nor a imageIdOverride parameter is specified, then the latest Amazon ECS optimized AMI for that image type that's supported by Batch is used.

    ECS_AL2

    Amazon Linux 2: Default for all non-GPU instance families.

    ECS_AL2_NVIDIA

    Amazon Linux 2 (GPU): Default for all GPU instance families (for example P4 and G4) and can be used for all non Amazon Web Services Graviton-based instance types.

    ECS_AL2023

    Amazon Linux 2023: Batch supports Amazon Linux 2023.

    Amazon Linux 2023 does not support A1 instances.

    ECS_AL1

    Amazon Linux. Amazon Linux has reached the end-of-life of standard support. For more information, see Amazon Linux AMI.

    EKS

    If the imageIdOverride parameter isn't specified, then a recent Amazon EKS-optimized Amazon Linux AMI (EKS_AL2) is used. If a new image type is specified in an update, but neither an imageId nor a imageIdOverride parameter is specified, then the latest Amazon EKS optimized AMI for that image type that Batch supports is used.

    EKS_AL2

    Amazon Linux 2: Default for all non-GPU instance families.

    EKS_AL2_NVIDIA

    Amazon Linux 2 (accelerated): Default for all GPU instance families (for example, P4 and G4) and can be used for all non Amazon Web Services Graviton-based instance types.

    " }, "imageIdOverride":{ "shape":"ImageIdOverride", @@ -1907,7 +1921,10 @@ "shape":"String", "documentation":"

    The name of the node for this job.

    " }, - "metadata":{"shape":"EksMetadata"} + "metadata":{ + "shape":"EksMetadata", + "documentation":"

    Describes and uniquely identifies Kubernetes resources. For example, the compute environment that a pod runs in or the jobID for a job running in the pod. For more information, see Understanding Kubernetes Objects in the Kubernetes documentation.

    " + } }, "documentation":"

    The details for the pod.

    " }, @@ -2053,7 +2070,7 @@ }, "computeReservation":{ "shape":"Integer", - "documentation":"

    A value used to reserve some of the available maximum vCPU for fair share identifiers that aren't already used.

    The reserved ratio is (computeReservation/100)^ActiveFairShares where ActiveFairShares is the number of active fair share identifiers.

    For example, a computeReservation value of 50 indicates that Batchreserves 50% of the maximum available vCPU if there's only one fair share identifier. It reserves 25% if there are two fair share identifiers. It reserves 12.5% if there are three fair share identifiers. A computeReservation value of 25 indicates that Batch should reserve 25% of the maximum available vCPU if there's only one fair share identifier, 6.25% if there are two fair share identifiers, and 1.56% if there are three fair share identifiers.

    The minimum value is 0 and the maximum value is 99.

    " + "documentation":"

    A value used to reserve some of the available maximum vCPU for fair share identifiers that aren't already used.

    The reserved ratio is (computeReservation/100)^ActiveFairShares where ActiveFairShares is the number of active fair share identifiers.

    For example, a computeReservation value of 50 indicates that Batch reserves 50% of the maximum available vCPU if there's only one fair share identifier. It reserves 25% if there are two fair share identifiers. It reserves 12.5% if there are three fair share identifiers. A computeReservation value of 25 indicates that Batch should reserve 25% of the maximum available vCPU if there's only one fair share identifier, 6.25% if there are two fair share identifiers, and 1.56% if there are three fair share identifiers.

    The minimum value is 0 and the maximum value is 99.

    " }, "shareDistribution":{ "shape":"ShareAttributesList", @@ -2274,7 +2291,7 @@ }, "startedAt":{ "shape":"Long", - "documentation":"

    The Unix timestamp (in milliseconds) for when the job was started. More specifically, it's when the job transitioned from the STARTING state to the RUNNING state. This parameter isn't provided for child jobs of array jobs or multi-node parallel jobs.

    " + "documentation":"

    The Unix timestamp (in milliseconds) for when the job was started. More specifically, it's when the job transitioned from the STARTING state to the RUNNING state.

    " }, "stoppedAt":{ "shape":"Long", @@ -2724,7 +2741,7 @@ "documentation":"

    The name of the volume to mount.

    " } }, - "documentation":"

    Details for a Docker volume mount point that's used in a job's container properties. This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

    " + "documentation":"

    Details for a Docker volume mount point that's used in a job's container properties. This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

    " }, "MountPoints":{ "type":"list", @@ -2977,6 +2994,17 @@ } } }, + "RepositoryCredentials":{ + "type":"structure", + "required":["credentialsParameter"], + "members":{ + "credentialsParameter":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the secret containing the private repository credentials.

    " + } + }, + "documentation":"

    The repository credentials for private registry authentication.

    " + }, "ResourceRequirement":{ "type":"structure", "required":[ @@ -3033,14 +3061,14 @@ "members":{ "operatingSystemFamily":{ "shape":"String", - "documentation":"

    The operating system for the compute environment. Valid values are: LINUX (default), WINDOWS_SERVER_2019_CORE, WINDOWS_SERVER_2019_FULL, WINDOWS_SERVER_2022_CORE, and WINDOWS_SERVER_2022_FULL.

    The following parameters can’t be set for Windows containers: linuxParameters, privileged, user, ulimits, readonlyRootFilesystem, and efsVolumeConfiguration.

    The Batch Scheduler checks before registering a task definition with Fargate. If the job requires a Windows container and the first compute environment is LINUX, the compute environment is skipped and the next is checked until a Windows-based compute environment is found.

    Fargate Spot is not supported for Windows-based containers on Fargate. A job queue will be blocked if a Fargate Windows job is submitted to a job queue with only Fargate Spot compute environments. However, you can attach both FARGATE and FARGATE_SPOT compute environments to the same job queue.

    " + "documentation":"

    The operating system for the compute environment. Valid values are: LINUX (default), WINDOWS_SERVER_2019_CORE, WINDOWS_SERVER_2019_FULL, WINDOWS_SERVER_2022_CORE, and WINDOWS_SERVER_2022_FULL.

    The following parameters can’t be set for Windows containers: linuxParameters, privileged, user, ulimits, readonlyRootFilesystem, and efsVolumeConfiguration.

    The Batch Scheduler checks the compute environments that are attached to the job queue before registering a task definition with Fargate. In this scenario, the job queue is where the job is submitted. If the job requires a Windows container and the first compute environment is LINUX, the compute environment is skipped and the next compute environment is checked until a Windows-based compute environment is found.

    Fargate Spot is not supported for ARM64 and Windows-based containers on Fargate. A job queue will be blocked if a Fargate ARM64 or Windows job is submitted to a job queue with only Fargate Spot compute environments. However, you can attach both FARGATE and FARGATE_SPOT compute environments to the same job queue.

    " }, "cpuArchitecture":{ "shape":"String", - "documentation":"

    The vCPU architecture. The default value is X86_64. Valid values are X86_64 and ARM64.

    This parameter must be set to X86_64 for Windows containers.

    " + "documentation":"

    The vCPU architecture. The default value is X86_64. Valid values are X86_64 and ARM64.

    This parameter must be set to X86_64 for Windows containers.

    Fargate Spot is not supported for ARM64 and Windows-based containers on Fargate. A job queue will be blocked if a Fargate ARM64 or Windows job is submitted to a job queue with only Fargate Spot compute environments. However, you can attach both FARGATE and FARGATE_SPOT compute environments to the same job queue.

    " } }, - "documentation":"

    An object that represents the compute environment architecture for Batch jobs on Fargate.

    " + "documentation":"

    An object that represents the compute environment architecture for Batch jobs on Fargate.

    " }, "SchedulingPolicyDetail":{ "type":"structure", @@ -3165,7 +3193,7 @@ }, "schedulingPriorityOverride":{ "shape":"Integer", - "documentation":"

    The scheduling priority for the job. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. This overrides any scheduling priority in the job definition.

    The minimum supported value is 0 and the maximum supported value is 9999.

    " + "documentation":"

    The scheduling priority for the job. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. This overrides any scheduling priority in the job definition and works only within a single share identifier.

    The minimum supported value is 0 and the maximum supported value is 9999.

    " }, "arrayProperties":{ "shape":"ArrayProperties", @@ -3346,18 +3374,18 @@ "members":{ "hardLimit":{ "shape":"Integer", - "documentation":"

    The hard limit for the ulimit type.

    " + "documentation":"

    The hard limit for the ulimit type.

    " }, "name":{ "shape":"String", - "documentation":"

    The type of the ulimit.

    " + "documentation":"

    The type of the ulimit. Valid values are: core | cpu | data | fsize | locks | memlock | msgqueue | nice | nofile | nproc | rss | rtprio | rttime | sigpending | stack.

    " }, "softLimit":{ "shape":"Integer", "documentation":"

    The soft limit for the ulimit type.

    " } }, - "documentation":"

    The ulimit settings to pass to the container.

    This object isn't applicable to jobs that are running on Fargate resources.

    " + "documentation":"

    The ulimit settings to pass to the container. For more information, see Ulimit.

    This object isn't applicable to jobs that are running on Fargate resources.

    " }, "Ulimits":{ "type":"list", diff --git a/services/bcmdataexports/pom.xml b/services/bcmdataexports/pom.xml index d3d956868bc0..028be7f1b9c1 100644 --- a/services/bcmdataexports/pom.xml +++ b/services/bcmdataexports/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT bcmdataexports AWS Java SDK :: Services :: BCM Data Exports diff --git a/services/bcmdataexports/src/main/resources/codegen-resources/customization.config b/services/bcmdataexports/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/bcmdataexports/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/bedrock/pom.xml b/services/bedrock/pom.xml index 1cecce743e0a..9ab12ba289fe 100644 --- a/services/bedrock/pom.xml +++ b/services/bedrock/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT bedrock AWS Java SDK :: Services :: Bedrock diff --git a/services/bedrockagent/pom.xml b/services/bedrockagent/pom.xml index cb54ca09a107..1b03930cd82a 100644 --- a/services/bedrockagent/pom.xml +++ b/services/bedrockagent/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT bedrockagent AWS Java SDK :: Services :: Bedrock Agent diff --git a/services/bedrockagent/src/main/resources/codegen-resources/customization.config b/services/bedrockagent/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/bedrockagent/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/bedrockagentruntime/pom.xml b/services/bedrockagentruntime/pom.xml index 98292a150440..c0baf5389dc7 100644 --- a/services/bedrockagentruntime/pom.xml +++ b/services/bedrockagentruntime/pom.xml @@ -17,7 +17,7 @@ 
software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT bedrockagentruntime AWS Java SDK :: Services :: Bedrock Agent Runtime diff --git a/services/bedrockruntime/pom.xml b/services/bedrockruntime/pom.xml index 2cd826bd67db..d539e398f52d 100644 --- a/services/bedrockruntime/pom.xml +++ b/services/bedrockruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT bedrockruntime AWS Java SDK :: Services :: Bedrock Runtime diff --git a/services/billingconductor/pom.xml b/services/billingconductor/pom.xml index 313f6609c172..03a239d67db6 100644 --- a/services/billingconductor/pom.xml +++ b/services/billingconductor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT billingconductor AWS Java SDK :: Services :: Billingconductor diff --git a/services/braket/pom.xml b/services/braket/pom.xml index 532f7266252f..dc5dcea902f3 100644 --- a/services/braket/pom.xml +++ b/services/braket/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT braket AWS Java SDK :: Services :: Braket diff --git a/services/braket/src/main/resources/codegen-resources/customization.config b/services/braket/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/braket/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/braket/src/main/resources/codegen-resources/service-2.json b/services/braket/src/main/resources/codegen-resources/service-2.json index 442c5d9d6d9f..c630d9f618a0 100644 --- a/services/braket/src/main/resources/codegen-resources/service-2.json +++ b/services/braket/src/main/resources/codegen-resources/service-2.json @@ -65,6 +65,7 @@ {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, + {"shape":"DeviceOfflineException"}, 
{"shape":"DeviceRetiredException"}, {"shape":"InternalServiceException"}, {"shape":"ServiceQuotaExceededException"}, @@ -1157,7 +1158,7 @@ }, "JobArn":{ "type":"string", - "pattern":"^arn:aws[a-z\\-]*:braket:[a-z0-9\\-]*:[0-9]{12}:job/.*$" + "pattern":"^arn:aws[a-z\\-]*:braket:[a-z0-9\\-]+:[0-9]{12}:job/.*$" }, "JobCheckpointConfig":{ "type":"structure", diff --git a/services/budgets/pom.xml b/services/budgets/pom.xml index 8758f201afbf..362b6791b853 100644 --- a/services/budgets/pom.xml +++ b/services/budgets/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT budgets AWS Java SDK :: Services :: AWS Budgets diff --git a/services/chatbot/pom.xml b/services/chatbot/pom.xml new file mode 100644 index 000000000000..173dde211977 --- /dev/null +++ b/services/chatbot/pom.xml @@ -0,0 +1,60 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.24.10-SNAPSHOT + + chatbot + AWS Java SDK :: Services :: Chatbot + The AWS Java SDK for Chatbot module holds the client classes that are used for + communicating with Chatbot. 
+ + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.chatbot + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + software.amazon.awssdk + http-auth-aws + ${awsjavasdk.version} + + + diff --git a/services/chatbot/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/chatbot/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 000000000000..f07ab7b86d66 --- /dev/null +++ b/services/chatbot/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + 
"conditions": [], + "endpoint": { + "url": "https://chatbot-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://chatbot-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://chatbot.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://chatbot.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/services/chatbot/src/main/resources/codegen-resources/endpoint-tests.json b/services/chatbot/src/main/resources/codegen-resources/endpoint-tests.json new file mode 100644 index 000000000000..3a4c6b7af045 --- /dev/null +++ b/services/chatbot/src/main/resources/codegen-resources/endpoint-tests.json @@ -0,0 +1,314 @@ +{ + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://chatbot-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://chatbot-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://chatbot.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://chatbot.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://chatbot-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + 
"Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://chatbot-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://chatbot.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://chatbot.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://chatbot-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://chatbot-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://chatbot.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://chatbot.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + 
"UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://chatbot-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://chatbot.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://chatbot-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + 
"Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://chatbot.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/services/chatbot/src/main/resources/codegen-resources/paginators-1.json b/services/chatbot/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..6615c513c112 --- /dev/null +++ 
b/services/chatbot/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,39 @@ +{ + "pagination": { + "DescribeChimeWebhookConfigurations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "DescribeSlackChannelConfigurations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "DescribeSlackUserIdentities": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "DescribeSlackWorkspaces": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListMicrosoftTeamsChannelConfigurations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListMicrosoftTeamsConfiguredTeams": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListMicrosoftTeamsUserIdentities": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/chatbot/src/main/resources/codegen-resources/service-2.json b/services/chatbot/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..f40c7271e8f6 --- /dev/null +++ b/services/chatbot/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1770 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2017-10-11", + "endpointPrefix":"chatbot", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"chatbot", + "serviceId":"chatbot", + "signatureVersion":"v4", + "uid":"chatbot-2017-10-11" + }, + "operations":{ + "CreateChimeWebhookConfiguration":{ + "name":"CreateChimeWebhookConfiguration", + "http":{ + "method":"POST", + "requestUri":"/create-chime-webhook-configuration", + "responseCode":201 + }, + "input":{"shape":"CreateChimeWebhookConfigurationRequest"}, + "output":{"shape":"CreateChimeWebhookConfigurationResult"}, + "errors":[ + 
{"shape":"InvalidParameterException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidRequestException"}, + {"shape":"CreateChimeWebhookConfigurationException"}, + {"shape":"ConflictException"} + ], + "documentation":"Creates Chime Webhook Configuration" + }, + "CreateMicrosoftTeamsChannelConfiguration":{ + "name":"CreateMicrosoftTeamsChannelConfiguration", + "http":{ + "method":"POST", + "requestUri":"/create-ms-teams-channel-configuration", + "responseCode":201 + }, + "input":{"shape":"CreateTeamsChannelConfigurationRequest"}, + "output":{"shape":"CreateTeamsChannelConfigurationResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidRequestException"}, + {"shape":"CreateTeamsChannelConfigurationException"}, + {"shape":"ConflictException"} + ], + "documentation":"Creates MS Teams Channel Configuration" + }, + "CreateSlackChannelConfiguration":{ + "name":"CreateSlackChannelConfiguration", + "http":{ + "method":"POST", + "requestUri":"/create-slack-channel-configuration", + "responseCode":201 + }, + "input":{"shape":"CreateSlackChannelConfigurationRequest"}, + "output":{"shape":"CreateSlackChannelConfigurationResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidRequestException"}, + {"shape":"CreateSlackChannelConfigurationException"}, + {"shape":"ConflictException"} + ], + "documentation":"Creates Slack Channel Configuration" + }, + "DeleteChimeWebhookConfiguration":{ + "name":"DeleteChimeWebhookConfiguration", + "http":{ + "method":"POST", + "requestUri":"/delete-chime-webhook-configuration", + "responseCode":204 + }, + "input":{"shape":"DeleteChimeWebhookConfigurationRequest"}, + "output":{"shape":"DeleteChimeWebhookConfigurationResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + 
{"shape":"DeleteChimeWebhookConfigurationException"} + ], + "documentation":"Deletes a Chime Webhook Configuration" + }, + "DeleteMicrosoftTeamsChannelConfiguration":{ + "name":"DeleteMicrosoftTeamsChannelConfiguration", + "http":{ + "method":"POST", + "requestUri":"/delete-ms-teams-channel-configuration", + "responseCode":204 + }, + "input":{"shape":"DeleteTeamsChannelConfigurationRequest"}, + "output":{"shape":"DeleteTeamsChannelConfigurationResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"DeleteTeamsChannelConfigurationException"} + ], + "documentation":"Deletes MS Teams Channel Configuration" + }, + "DeleteMicrosoftTeamsConfiguredTeam":{ + "name":"DeleteMicrosoftTeamsConfiguredTeam", + "http":{ + "method":"POST", + "requestUri":"/delete-ms-teams-configured-teams", + "responseCode":204 + }, + "input":{"shape":"DeleteTeamsConfiguredTeamRequest"}, + "output":{"shape":"DeleteTeamsConfiguredTeamResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DeleteTeamsConfiguredTeamException"} + ], + "documentation":"Deletes the Microsoft Teams team authorization allowing for channels to be configured in that Microsoft Teams team. Note that the Microsoft Teams team must have no channels configured to remove it." 
+ }, + "DeleteMicrosoftTeamsUserIdentity":{ + "name":"DeleteMicrosoftTeamsUserIdentity", + "http":{ + "method":"POST", + "requestUri":"/delete-ms-teams-user-identity", + "responseCode":204 + }, + "input":{"shape":"DeleteMicrosoftTeamsUserIdentityRequest"}, + "output":{"shape":"DeleteMicrosoftTeamsUserIdentityResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"DeleteMicrosoftTeamsUserIdentityException"} + ], + "documentation":"Deletes a Teams user identity" + }, + "DeleteSlackChannelConfiguration":{ + "name":"DeleteSlackChannelConfiguration", + "http":{ + "method":"POST", + "requestUri":"/delete-slack-channel-configuration", + "responseCode":204 + }, + "input":{"shape":"DeleteSlackChannelConfigurationRequest"}, + "output":{"shape":"DeleteSlackChannelConfigurationResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"DeleteSlackChannelConfigurationException"} + ], + "documentation":"Deletes Slack Channel Configuration" + }, + "DeleteSlackUserIdentity":{ + "name":"DeleteSlackUserIdentity", + "http":{ + "method":"POST", + "requestUri":"/delete-slack-user-identity", + "responseCode":204 + }, + "input":{"shape":"DeleteSlackUserIdentityRequest"}, + "output":{"shape":"DeleteSlackUserIdentityResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"DeleteSlackUserIdentityException"} + ], + "documentation":"Deletes a Slack user identity" + }, + "DeleteSlackWorkspaceAuthorization":{ + "name":"DeleteSlackWorkspaceAuthorization", + "http":{ + "method":"POST", + "requestUri":"/delete-slack-workspace-authorization", + "responseCode":204 + }, + "input":{"shape":"DeleteSlackWorkspaceAuthorizationRequest"}, + "output":{"shape":"DeleteSlackWorkspaceAuthorizationResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + 
{"shape":"DeleteSlackWorkspaceAuthorizationFault"} + ], + "documentation":"Deletes the Slack workspace authorization that allows channels to be configured in that workspace. This requires all configured channels in the workspace to be deleted." + }, + "DescribeChimeWebhookConfigurations":{ + "name":"DescribeChimeWebhookConfigurations", + "http":{ + "method":"POST", + "requestUri":"/describe-chime-webhook-configurations", + "responseCode":200 + }, + "input":{"shape":"DescribeChimeWebhookConfigurationsRequest"}, + "output":{"shape":"DescribeChimeWebhookConfigurationsResult"}, + "errors":[ + {"shape":"DescribeChimeWebhookConfigurationsException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"Lists Chime Webhook Configurations optionally filtered by ChatConfigurationArn" + }, + "DescribeSlackChannelConfigurations":{ + "name":"DescribeSlackChannelConfigurations", + "http":{ + "method":"POST", + "requestUri":"/describe-slack-channel-configurations", + "responseCode":200 + }, + "input":{"shape":"DescribeSlackChannelConfigurationsRequest"}, + "output":{"shape":"DescribeSlackChannelConfigurationsResult"}, + "errors":[ + {"shape":"DescribeSlackChannelConfigurationsException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"Lists Slack Channel Configurations optionally filtered by ChatConfigurationArn" + }, + "DescribeSlackUserIdentities":{ + "name":"DescribeSlackUserIdentities", + "http":{ + "method":"POST", + "requestUri":"/describe-slack-user-identities", + "responseCode":200 + }, + "input":{"shape":"DescribeSlackUserIdentitiesRequest"}, + "output":{"shape":"DescribeSlackUserIdentitiesResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"DescribeSlackUserIdentitiesException"} + ], + "documentation":"Lists all Slack user identities with a mapped role." 
+ }, + "DescribeSlackWorkspaces":{ + "name":"DescribeSlackWorkspaces", + "http":{ + "method":"POST", + "requestUri":"/describe-slack-workspaces", + "responseCode":200 + }, + "input":{"shape":"DescribeSlackWorkspacesRequest"}, + "output":{"shape":"DescribeSlackWorkspacesResult"}, + "errors":[ + {"shape":"DescribeSlackWorkspacesException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"} + ], + "documentation":"Lists all authorized Slack Workspaces for AWS Account" + }, + "GetAccountPreferences":{ + "name":"GetAccountPreferences", + "http":{ + "method":"POST", + "requestUri":"/get-account-preferences", + "responseCode":200 + }, + "input":{"shape":"GetAccountPreferencesRequest"}, + "output":{"shape":"GetAccountPreferencesResult"}, + "errors":[ + {"shape":"GetAccountPreferencesException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"Get Chatbot account level preferences" + }, + "GetMicrosoftTeamsChannelConfiguration":{ + "name":"GetMicrosoftTeamsChannelConfiguration", + "http":{ + "method":"POST", + "requestUri":"/get-ms-teams-channel-configuration", + "responseCode":200 + }, + "input":{"shape":"GetTeamsChannelConfigurationRequest"}, + "output":{"shape":"GetTeamsChannelConfigurationResult"}, + "errors":[ + {"shape":"GetTeamsChannelConfigurationException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"Get a single MS Teams Channel Configurations" + }, + "ListMicrosoftTeamsChannelConfigurations":{ + "name":"ListMicrosoftTeamsChannelConfigurations", + "http":{ + "method":"POST", + "requestUri":"/list-ms-teams-channel-configurations", + "responseCode":200 + }, + "input":{"shape":"ListTeamsChannelConfigurationsRequest"}, + "output":{"shape":"ListTeamsChannelConfigurationsResult"}, + "errors":[ + {"shape":"ListTeamsChannelConfigurationsException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"Lists MS Teams 
Channel Configurations optionally filtered by TeamId" + }, + "ListMicrosoftTeamsConfiguredTeams":{ + "name":"ListMicrosoftTeamsConfiguredTeams", + "http":{ + "method":"POST", + "requestUri":"/list-ms-teams-configured-teams", + "responseCode":200 + }, + "input":{"shape":"ListMicrosoftTeamsConfiguredTeamsRequest"}, + "output":{"shape":"ListMicrosoftTeamsConfiguredTeamsResult"}, + "errors":[ + {"shape":"ListMicrosoftTeamsConfiguredTeamsException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"} + ], + "documentation":"Lists all authorized MS teams for AWS Account" + }, + "ListMicrosoftTeamsUserIdentities":{ + "name":"ListMicrosoftTeamsUserIdentities", + "http":{ + "method":"POST", + "requestUri":"/list-ms-teams-user-identities", + "responseCode":200 + }, + "input":{"shape":"ListMicrosoftTeamsUserIdentitiesRequest"}, + "output":{"shape":"ListMicrosoftTeamsUserIdentitiesResult"}, + "errors":[ + {"shape":"ListMicrosoftTeamsUserIdentitiesException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"} + ], + "documentation":"Lists all Microsoft Teams user identities with a mapped role." 
+ }, + "UpdateAccountPreferences":{ + "name":"UpdateAccountPreferences", + "http":{ + "method":"POST", + "requestUri":"/update-account-preferences", + "responseCode":200 + }, + "input":{"shape":"UpdateAccountPreferencesRequest"}, + "output":{"shape":"UpdateAccountPreferencesResult"}, + "errors":[ + {"shape":"UpdateAccountPreferencesException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"Update Chatbot account level preferences" + }, + "UpdateChimeWebhookConfiguration":{ + "name":"UpdateChimeWebhookConfiguration", + "http":{ + "method":"POST", + "requestUri":"/update-chime-webhook-configuration", + "responseCode":200 + }, + "input":{"shape":"UpdateChimeWebhookConfigurationRequest"}, + "output":{"shape":"UpdateChimeWebhookConfigurationResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"UpdateChimeWebhookConfigurationException"} + ], + "documentation":"Updates a Chime Webhook Configuration" + }, + "UpdateMicrosoftTeamsChannelConfiguration":{ + "name":"UpdateMicrosoftTeamsChannelConfiguration", + "http":{ + "method":"POST", + "requestUri":"/update-ms-teams-channel-configuration", + "responseCode":200 + }, + "input":{"shape":"UpdateTeamsChannelConfigurationRequest"}, + "output":{"shape":"UpdateTeamsChannelConfigurationResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"UpdateTeamsChannelConfigurationException"} + ], + "documentation":"Updates MS Teams Channel Configuration" + }, + "UpdateSlackChannelConfiguration":{ + "name":"UpdateSlackChannelConfiguration", + "http":{ + "method":"POST", + "requestUri":"/update-slack-channel-configuration", + "responseCode":200 + }, + "input":{"shape":"UpdateSlackChannelConfigurationRequest"}, + "output":{"shape":"UpdateSlackChannelConfigurationResult"}, + "errors":[ + 
{"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"UpdateSlackChannelConfigurationException"} + ], + "documentation":"Updates Slack Channel Configuration" + } + }, + "shapes":{ + "AccountPreferences":{ + "type":"structure", + "members":{ + "UserAuthorizationRequired":{ + "shape":"BooleanAccountPreference", + "documentation":"Enables use of a user role requirement in your chat configuration." + }, + "TrainingDataCollectionEnabled":{ + "shape":"BooleanAccountPreference", + "documentation":"Turns on training data collection. This helps improve the AWS Chatbot experience by allowing AWS Chatbot to store and use your customer information, such as AWS Chatbot configurations, notifications, user inputs, AWS Chatbot generated responses, and interaction data. This data helps us to continuously improve and develop Artificial Intelligence (AI) technologies. Your data is not shared with any third parties and is protected using sophisticated controls to prevent unauthorized access and misuse. AWS Chatbot does not store or use interactions in chat channels with Amazon Q for training AWS Chatbot’s AI technologies." + } + }, + "documentation":"Preferences which apply for AWS Chatbot usage in the calling AWS account." 
+ }, + "Arn":{ + "type":"string", + "max":1224, + "min":12, + "pattern":"^arn:aws:[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$" + }, + "AwsUserIdentity":{ + "type":"string", + "max":1101, + "min":15, + "pattern":"^arn:aws:(iam|sts)::[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$" + }, + "BooleanAccountPreference":{"type":"boolean"}, + "ChatConfigurationArn":{ + "type":"string", + "max":1169, + "min":19, + "pattern":"^arn:aws:(wheatley|chatbot):[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$" + }, + "ChimeWebhookConfiguration":{ + "type":"structure", + "required":[ + "WebhookDescription", + "ChatConfigurationArn", + "IamRoleArn", + "SnsTopicArns" + ], + "members":{ + "WebhookDescription":{ + "shape":"ChimeWebhookDescription", + "documentation":"Description of the webhook. Recommend using the convention `RoomName/WebhookName`. See Chime setup tutorial for more details: https://docs.aws.amazon.com/chatbot/latest/adminguide/chime-setup.html." + }, + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the ChimeWebhookConfiguration." + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." + }, + "SnsTopicArns":{ + "shape":"SnsTopicArnList", + "documentation":"The ARNs of the SNS topics that deliver notifications to AWS Chatbot." + }, + "ConfigurationName":{ + "shape":"ConfigurationName", + "documentation":"The name of the configuration." + }, + "LoggingLevel":{ + "shape":"CustomerCwLogLevel", + "documentation":"Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.Logging levels include ERROR, INFO, or NONE." 
+ } + }, + "documentation":"An AWS Chatbot configuration for Amazon Chime." + }, + "ChimeWebhookConfigurationList":{ + "type":"list", + "member":{"shape":"ChimeWebhookConfiguration"} + }, + "ChimeWebhookDescription":{ + "type":"string", + "max":255, + "min":1 + }, + "ChimeWebhookUrl":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^https://hooks\\.chime\\.aws/incomingwebhooks/[A-Za-z0-9\\-]+?\\?token=[A-Za-z0-9\\-]+$" + }, + "ConfigurationName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[A-Za-z0-9-_]+$" + }, + "ConfiguredTeam":{ + "type":"structure", + "required":[ + "TenantId", + "TeamId" + ], + "members":{ + "TenantId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Teams tenant." + }, + "TeamId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide." + }, + "TeamName":{ + "shape":"UUID", + "documentation":"The name of the Microsoft Teams Team.", + "box":true + } + }, + "documentation":"A Microsoft Teams team that has been authorized with AWS Chatbot." + }, + "ConfiguredTeamsList":{ + "type":"list", + "member":{"shape":"ConfiguredTeam"} + }, + "ConflictException":{ + "type":"structure", + "members":{ + }, + "documentation":"There was an issue processing your request.", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateChimeWebhookConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. 
Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "CreateChimeWebhookConfigurationRequest":{ + "type":"structure", + "required":[ + "WebhookDescription", + "WebhookUrl", + "SnsTopicArns", + "IamRoleArn", + "ConfigurationName" + ], + "members":{ + "WebhookDescription":{ + "shape":"ChimeWebhookDescription", + "documentation":"Description of the webhook. Recommend using the convention `RoomName/WebhookName`. See Chime setup tutorial for more details: https://docs.aws.amazon.com/chatbot/latest/adminguide/chime-setup.html." + }, + "WebhookUrl":{ + "shape":"ChimeWebhookUrl", + "documentation":"URL for the Chime webhook." + }, + "SnsTopicArns":{ + "shape":"SnsTopicArnList", + "documentation":"The ARNs of the SNS topics that deliver notifications to AWS Chatbot." + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." + }, + "ConfigurationName":{ + "shape":"ConfigurationName", + "documentation":"The name of the configuration." + }, + "LoggingLevel":{ + "shape":"CustomerCwLogLevel", + "documentation":"Logging levels include ERROR, INFO, or NONE." + } + } + }, + "CreateChimeWebhookConfigurationResult":{ + "type":"structure", + "members":{ + "WebhookConfiguration":{ + "shape":"ChimeWebhookConfiguration", + "documentation":"Chime webhook configuration." + } + } + }, + "CreateSlackChannelConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. 
Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "CreateSlackChannelConfigurationRequest":{ + "type":"structure", + "required":[ + "SlackTeamId", + "SlackChannelId", + "IamRoleArn", + "ConfigurationName" + ], + "members":{ + "SlackTeamId":{ + "shape":"SlackTeamId", + "documentation":"The ID of the Slack workspace authorized with AWS Chatbot." + }, + "SlackChannelId":{ + "shape":"SlackChannelId", + "documentation":"The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ." + }, + "SlackChannelName":{ + "shape":"SlackChannelDisplayName", + "documentation":"The name of the Slack Channel." + }, + "SnsTopicArns":{ + "shape":"SnsTopicArnList", + "documentation":"The ARNs of the SNS topics that deliver notifications to AWS Chatbot." + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." + }, + "ConfigurationName":{ + "shape":"ConfigurationName", + "documentation":"The name of the configuration." + }, + "LoggingLevel":{ + "shape":"CustomerCwLogLevel", + "documentation":"Logging levels include ERROR, INFO, or NONE." + }, + "GuardrailPolicyArns":{ + "shape":"GuardrailPolicyArnList", + "documentation":"The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is not set." + }, + "UserAuthorizationRequired":{ + "shape":"BooleanAccountPreference", + "documentation":"Enables use of a user role requirement in your chat configuration." 
+ } + } + }, + "CreateSlackChannelConfigurationResult":{ + "type":"structure", + "members":{ + "ChannelConfiguration":{ + "shape":"SlackChannelConfiguration", + "documentation":"The configuration for a Slack channel configured with AWS Chatbot." + } + } + }, + "CreateTeamsChannelConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "CreateTeamsChannelConfigurationRequest":{ + "type":"structure", + "required":[ + "ChannelId", + "TeamId", + "TenantId", + "IamRoleArn", + "ConfigurationName" + ], + "members":{ + "ChannelId":{ + "shape":"TeamsChannelId", + "documentation":"The ID of the Microsoft Teams channel." + }, + "ChannelName":{ + "shape":"TeamsChannelName", + "documentation":"The name of the Microsoft Teams channel." + }, + "TeamId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide." + }, + "TeamName":{ + "shape":"TeamName", + "documentation":"The name of the Microsoft Teams Team." + }, + "TenantId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Teams tenant." + }, + "SnsTopicArns":{ + "shape":"SnsTopicArnList", + "documentation":"The ARNs of the SNS topics that deliver notifications to AWS Chatbot." + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." 
+ }, + "ConfigurationName":{ + "shape":"ConfigurationName", + "documentation":"The name of the configuration." + }, + "LoggingLevel":{ + "shape":"CustomerCwLogLevel", + "documentation":"Logging levels include ERROR, INFO, or NONE." + }, + "GuardrailPolicyArns":{ + "shape":"GuardrailPolicyArnList", + "documentation":"The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is not set." + }, + "UserAuthorizationRequired":{ + "shape":"BooleanAccountPreference", + "documentation":"Enables use of a user role requirement in your chat configuration." + } + } + }, + "CreateTeamsChannelConfigurationResult":{ + "type":"structure", + "members":{ + "ChannelConfiguration":{ + "shape":"TeamsChannelConfiguration", + "documentation":"The configuration for a Microsoft Teams channel configured with AWS Chatbot." + } + } + }, + "CustomerCwLogLevel":{ + "type":"string", + "max":5, + "min":4, + "pattern":"^(ERROR|INFO|NONE)$" + }, + "DeleteChimeWebhookConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DeleteChimeWebhookConfigurationRequest":{ + "type":"structure", + "required":["ChatConfigurationArn"], + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the ChimeWebhookConfiguration to delete." + } + } + }, + "DeleteChimeWebhookConfigurationResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteMicrosoftTeamsUserIdentityException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. 
Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DeleteMicrosoftTeamsUserIdentityRequest":{ + "type":"structure", + "required":[ + "ChatConfigurationArn", + "UserId" + ], + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the MicrosoftTeamsChannelConfiguration associated with the user identity to delete." + }, + "UserId":{ + "shape":"UUID", + "documentation":"Id from Microsoft Teams for user." + } + } + }, + "DeleteMicrosoftTeamsUserIdentityResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteSlackChannelConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DeleteSlackChannelConfigurationRequest":{ + "type":"structure", + "required":["ChatConfigurationArn"], + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the SlackChannelConfiguration to delete." + } + } + }, + "DeleteSlackChannelConfigurationResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteSlackUserIdentityException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DeleteSlackUserIdentityRequest":{ + "type":"structure", + "required":[ + "ChatConfigurationArn", + "SlackTeamId", + "SlackUserId" + ], + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the SlackChannelConfiguration associated with the user identity to delete." + }, + "SlackTeamId":{ + "shape":"SlackTeamId", + "documentation":"The ID of the Slack workspace authorized with AWS Chatbot." 
+ }, + "SlackUserId":{ + "shape":"SlackUserId", + "documentation":"The ID of the user in Slack." + } + } + }, + "DeleteSlackUserIdentityResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteSlackWorkspaceAuthorizationFault":{ + "type":"structure", + "members":{ + }, + "documentation":"There was an issue deleting your Slack workspace.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DeleteSlackWorkspaceAuthorizationRequest":{ + "type":"structure", + "required":["SlackTeamId"], + "members":{ + "SlackTeamId":{ + "shape":"SlackTeamId", + "documentation":"The ID of the Slack workspace authorized with AWS Chatbot." + } + } + }, + "DeleteSlackWorkspaceAuthorizationResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteTeamsChannelConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DeleteTeamsChannelConfigurationRequest":{ + "type":"structure", + "required":["ChatConfigurationArn"], + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the MicrosoftTeamsChannelConfiguration to delete." + } + } + }, + "DeleteTeamsChannelConfigurationResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteTeamsConfiguredTeamException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DeleteTeamsConfiguredTeamRequest":{ + "type":"structure", + "required":["TeamId"], + "members":{ + "TeamId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. 
Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide." + } + } + }, + "DeleteTeamsConfiguredTeamResult":{ + "type":"structure", + "members":{ + } + }, + "DescribeChimeWebhookConfigurationsException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DescribeChimeWebhookConfigurationsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.", + "box":true + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults.", + "box":true + }, + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"An optional ARN of a ChimeWebhookConfiguration to describe.", + "box":true + } + } + }, + "DescribeChimeWebhookConfigurationsResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + }, + "WebhookConfigurations":{ + "shape":"ChimeWebhookConfigurationList", + "documentation":"A list of Chime webhooks associated with the account." 
+ } + } + }, + "DescribeSlackChannelConfigurationsException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DescribeSlackChannelConfigurationsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.", + "box":true + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults.", + "box":true + }, + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"An optional ARN of a SlackChannelConfiguration to describe.", + "box":true + } + } + }, + "DescribeSlackChannelConfigurationsResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + }, + "SlackChannelConfigurations":{ + "shape":"SlackChannelConfigurationList", + "documentation":"A list of Slack channel configurations." + } + } + }, + "DescribeSlackUserIdentitiesException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. 
Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DescribeSlackUserIdentitiesRequest":{ + "type":"structure", + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the SlackChannelConfiguration associated with the user identities to describe." + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved." + } + } + }, + "DescribeSlackUserIdentitiesResult":{ + "type":"structure", + "members":{ + "SlackUserIdentities":{ + "shape":"SlackUserIdentitiesList", + "documentation":"A list of Slack User Identities." + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + } + } + }, + "DescribeSlackWorkspacesException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DescribeSlackWorkspacesRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"The maximum number of results to include in the response. 
If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved." + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + } + } + }, + "DescribeSlackWorkspacesResult":{ + "type":"structure", + "members":{ + "SlackWorkspaces":{ + "shape":"SlackWorkspacesList", + "documentation":"A list of Slack Workspaces registered with AWS Chatbot." + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + } + } + }, + "GetAccountPreferencesException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "GetAccountPreferencesRequest":{ + "type":"structure", + "members":{ + } + }, + "GetAccountPreferencesResult":{ + "type":"structure", + "members":{ + "AccountPreferences":{ + "shape":"AccountPreferences", + "documentation":"Preferences which apply for AWS Chatbot usage in the calling AWS account." + } + } + }, + "GetTeamsChannelConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. 
Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "GetTeamsChannelConfigurationRequest":{ + "type":"structure", + "required":["ChatConfigurationArn"], + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the MicrosoftTeamsChannelConfiguration to retrieve." + } + } + }, + "GetTeamsChannelConfigurationResult":{ + "type":"structure", + "members":{ + "ChannelConfiguration":{ + "shape":"TeamsChannelConfiguration", + "documentation":"The configuration for a Microsoft Teams channel configured with AWS Chatbot." + } + } + }, + "GuardrailPolicyArn":{ + "type":"string", + "max":1163, + "min":11, + "pattern":"^(^$|(?!.*\\/aws-service-role\\/.*)arn:aws:iam:[A-Za-z0-9_\\/.-]{0,63}:[A-Za-z0-9_\\/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_\\/+=,@.-]{0,1023})$" + }, + "GuardrailPolicyArnList":{ + "type":"list", + "member":{"shape":"GuardrailPolicyArn"} + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + }, + "documentation":"Your request input doesn't meet the constraints that AWS Chatbot requires.", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + }, + "documentation":"Your request input doesn't meet the constraints that AWS Chatbot requires.", + "error":{"httpStatusCode":400}, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "documentation":"You have exceeded a service limit for AWS Chatbot.", + "error":{"httpStatusCode":403}, + "exception":true + }, + "ListMicrosoftTeamsConfiguredTeamsException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. 
Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ListMicrosoftTeamsConfiguredTeamsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved." + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + } + } + }, + "ListMicrosoftTeamsConfiguredTeamsResult":{ + "type":"structure", + "members":{ + "ConfiguredTeams":{ + "shape":"ConfiguredTeamsList", + "documentation":"A list of teams in Microsoft Teams that have been configured with AWS Chatbot." + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + } + } + }, + "ListMicrosoftTeamsUserIdentitiesException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ListMicrosoftTeamsUserIdentitiesRequest":{ + "type":"structure", + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the MicrosoftTeamsChannelConfiguration associated with the user identities to list." + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. 
Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved." + } + } + }, + "ListMicrosoftTeamsUserIdentitiesResult":{ + "type":"structure", + "members":{ + "TeamsUserIdentities":{ + "shape":"TeamsUserIdentitiesList", + "documentation":"User level permissions associated to a channel configuration." + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + } + } + }, + "ListTeamsChannelConfigurationsException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ListTeamsChannelConfigurationsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.", + "box":true + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. 
If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults.", + "box":true + }, + "TeamId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide.", + "box":true + } + } + }, + "ListTeamsChannelConfigurationsResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + }, + "TeamChannelConfigurations":{ + "shape":"TeamChannelConfigurationsList", + "documentation":"A list of AWS Chatbot channel configurations for Microsoft Teams." + } + } + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "PaginationToken":{ + "type":"string", + "max":1276, + "min":1, + "pattern":"^[a-zA-Z0-9=\\/+_.\\-,#:\\\\\"{}]{4,1276}$" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "documentation":"We were not able to find the resource for your request.", + "error":{"httpStatusCode":404}, + "exception":true + }, + "SlackChannelConfiguration":{ + "type":"structure", + "required":[ + "SlackTeamName", + "SlackTeamId", + "SlackChannelId", + "SlackChannelName", + "ChatConfigurationArn", + "IamRoleArn", + "SnsTopicArns" + ], + "members":{ + "SlackTeamName":{ + "shape":"SlackTeamName", + "documentation":"Name of the Slack Workspace." + }, + "SlackTeamId":{ + "shape":"SlackTeamId", + "documentation":"The ID of the Slack workspace authorized with AWS Chatbot." 
+ }, + "SlackChannelId":{ + "shape":"SlackChannelId", + "documentation":"The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ." + }, + "SlackChannelName":{ + "shape":"SlackChannelDisplayName", + "documentation":"The name of the Slack Channel." + }, + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the SlackChannelConfiguration." + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." + }, + "SnsTopicArns":{ + "shape":"SnsTopicArnList", + "documentation":"The ARNs of the SNS topics that deliver notifications to AWS Chatbot.", + "box":true + }, + "ConfigurationName":{ + "shape":"ConfigurationName", + "documentation":"The name of the configuration." + }, + "LoggingLevel":{ + "shape":"CustomerCwLogLevel", + "documentation":"Logging levels include ERROR, INFO, or NONE." + }, + "GuardrailPolicyArns":{ + "shape":"GuardrailPolicyArnList", + "documentation":"The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is not set." + }, + "UserAuthorizationRequired":{ + "shape":"BooleanAccountPreference", + "documentation":"Enables use of a user role requirement in your chat configuration." + } + }, + "documentation":"An AWS Chatbot configuration for Slack." 
+ }, + "SlackChannelConfigurationList":{ + "type":"list", + "member":{"shape":"SlackChannelConfiguration"} + }, + "SlackChannelDisplayName":{ + "type":"string", + "max":255, + "min":1 + }, + "SlackChannelId":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[A-Za-z0-9]+$" + }, + "SlackTeamId":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[0-9A-Z]{1,255}$" + }, + "SlackTeamName":{ + "type":"string", + "max":255, + "min":1 + }, + "SlackUserId":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^(.*)$" + }, + "SlackUserIdentitiesList":{ + "type":"list", + "member":{"shape":"SlackUserIdentity"} + }, + "SlackUserIdentity":{ + "type":"structure", + "required":[ + "IamRoleArn", + "ChatConfigurationArn", + "SlackTeamId", + "SlackUserId" + ], + "members":{ + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." + }, + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the SlackChannelConfiguration associated with the user identity." + }, + "SlackTeamId":{ + "shape":"SlackTeamId", + "documentation":"The ID of the Slack workspace authorized with AWS Chatbot." + }, + "SlackUserId":{ + "shape":"SlackUserId", + "documentation":"The ID of the user in Slack." + }, + "AwsUserIdentity":{ + "shape":"AwsUserIdentity", + "documentation":"The AWS user identity ARN used to associate a Slack User Identity with an IAM Role." + } + }, + "documentation":"Identifes a User level permission for a channel configuration." + }, + "SlackWorkspace":{ + "type":"structure", + "required":[ + "SlackTeamId", + "SlackTeamName" + ], + "members":{ + "SlackTeamId":{ + "shape":"SlackTeamId", + "documentation":"The ID of the Slack workspace authorized with AWS Chatbot." 
+ }, + "SlackTeamName":{ + "shape":"SlackTeamName", + "documentation":"Name of the Slack Workspace." + } + }, + "documentation":"A Slack Workspace." + }, + "SlackWorkspacesList":{ + "type":"list", + "member":{"shape":"SlackWorkspace"} + }, + "SnsTopicArnList":{ + "type":"list", + "member":{"shape":"Arn"} + }, + "String":{"type":"string"}, + "TeamChannelConfigurationsList":{ + "type":"list", + "member":{"shape":"TeamsChannelConfiguration"} + }, + "TeamName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^(.*)$" + }, + "TeamsChannelConfiguration":{ + "type":"structure", + "required":[ + "ChannelId", + "TeamId", + "TenantId", + "ChatConfigurationArn", + "IamRoleArn", + "SnsTopicArns" + ], + "members":{ + "ChannelId":{ + "shape":"TeamsChannelId", + "documentation":"The ID of the Microsoft Teams channel." + }, + "ChannelName":{ + "shape":"TeamsChannelName", + "documentation":"The name of the Microsoft Teams channel." + }, + "TeamId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide." + }, + "TeamName":{ + "shape":"String", + "documentation":"The name of the Microsoft Teams Team." + }, + "TenantId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Teams tenant." + }, + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the MicrosoftTeamsChannelConfiguration." + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." 
+ }, + "SnsTopicArns":{ + "shape":"SnsTopicArnList", + "documentation":"The ARNs of the SNS topics that deliver notifications to AWS Chatbot.", + "box":true + }, + "ConfigurationName":{ + "shape":"ConfigurationName", + "documentation":"The name of the configuration." + }, + "LoggingLevel":{ + "shape":"CustomerCwLogLevel", + "documentation":"Logging levels include ERROR, INFO, or NONE." + }, + "GuardrailPolicyArns":{ + "shape":"GuardrailPolicyArnList", + "documentation":"The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is not set." + }, + "UserAuthorizationRequired":{ + "shape":"BooleanAccountPreference", + "documentation":"Enables use of a user role requirement in your chat configuration." + } + }, + "documentation":"An AWS Chatbot configuration for Microsoft Teams." + }, + "TeamsChannelId":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^([a-zA-Z0-9-_=+\\/.,])*%3[aA]([a-zA-Z0-9-_=+\\/.,])*%40([a-zA-Z0-9-_=+\\/.,])*$" + }, + "TeamsChannelName":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"^(.*)$" + }, + "TeamsUserIdentitiesList":{ + "type":"list", + "member":{"shape":"TeamsUserIdentity"} + }, + "TeamsUserIdentity":{ + "type":"structure", + "required":[ + "IamRoleArn", + "ChatConfigurationArn", + "TeamId" + ], + "members":{ + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." + }, + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the MicrosoftTeamsChannelConfiguration associated with the user identity." + }, + "TeamId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Team authorized with AWS Chatbot. 
+ To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide." + }, + "UserId":{ + "shape":"UUID", + "documentation":"Id from Microsoft Teams for user." + }, + "AwsUserIdentity":{ + "shape":"AwsUserIdentity", + "documentation":"The AWS user identity ARN used to associate a Microsoft Teams User Identity with an IAM Role." + }, + "TeamsChannelId":{ + "shape":"TeamsChannelId", + "documentation":"The ID of the Microsoft Teams channel." + }, + "TeamsTenantId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Teams tenant." + } + }, + "documentation":"Identifies a user level permission for a channel configuration." + }, + "UUID":{ + "type":"string", + "max":36, + "min":36, + "pattern":"^[0-9A-Fa-f]{8}(?:-[0-9A-Fa-f]{4}){3}-[0-9A-Fa-f]{12}$" + }, + "UpdateAccountPreferencesException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "UpdateAccountPreferencesRequest":{ + "type":"structure", + "members":{ + "UserAuthorizationRequired":{ + "shape":"BooleanAccountPreference", + "documentation":"Enables use of a user role requirement in your chat configuration." + }, + "TrainingDataCollectionEnabled":{ + "shape":"BooleanAccountPreference", + "documentation":"Turns on training data collection. This helps improve the AWS Chatbot experience by allowing AWS Chatbot to store and use your customer information, such as AWS Chatbot configurations, notifications, user inputs, AWS Chatbot generated responses, and interaction data. This data helps us to continuously improve and develop Artificial Intelligence (AI) technologies. 
Your data is not shared with any third parties and is protected using sophisticated controls to prevent unauthorized access and misuse. AWS Chatbot does not store or use interactions in chat channels with Amazon Q for training AWS Chatbot’s AI technologies." + } + } + }, + "UpdateAccountPreferencesResult":{ + "type":"structure", + "members":{ + "AccountPreferences":{ + "shape":"AccountPreferences", + "documentation":"Preferences which apply for AWS Chatbot usage in the calling AWS account." + } + } + }, + "UpdateChimeWebhookConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "UpdateChimeWebhookConfigurationRequest":{ + "type":"structure", + "required":["ChatConfigurationArn"], + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the ChimeWebhookConfiguration to update." + }, + "WebhookDescription":{ + "shape":"ChimeWebhookDescription", + "documentation":"Description of the webhook. Recommend using the convention `RoomName/WebhookName`. See Chime setup tutorial for more details: https://docs.aws.amazon.com/chatbot/latest/adminguide/chime-setup.html." + }, + "WebhookUrl":{ + "shape":"ChimeWebhookUrl", + "documentation":"URL for the Chime webhook." + }, + "SnsTopicArns":{ + "shape":"SnsTopicArnList", + "documentation":"The ARNs of the SNS topics that deliver notifications to AWS Chatbot." + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." + }, + "LoggingLevel":{ + "shape":"CustomerCwLogLevel", + "documentation":"Logging levels include ERROR, INFO, or NONE." 
+ } + } + }, + "UpdateChimeWebhookConfigurationResult":{ + "type":"structure", + "members":{ + "WebhookConfiguration":{ + "shape":"ChimeWebhookConfiguration", + "documentation":"Chime webhook configuration." + } + } + }, + "UpdateSlackChannelConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "UpdateSlackChannelConfigurationRequest":{ + "type":"structure", + "required":[ + "ChatConfigurationArn", + "SlackChannelId" + ], + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the SlackChannelConfiguration to update." + }, + "SlackChannelId":{ + "shape":"SlackChannelId", + "documentation":"The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ." + }, + "SlackChannelName":{ + "shape":"SlackChannelDisplayName", + "documentation":"The name of the Slack Channel." + }, + "SnsTopicArns":{ + "shape":"SnsTopicArnList", + "documentation":"The ARNs of the SNS topics that deliver notifications to AWS Chatbot." + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." + }, + "LoggingLevel":{ + "shape":"CustomerCwLogLevel", + "documentation":"Logging levels include ERROR, INFO, or NONE." + }, + "GuardrailPolicyArns":{ + "shape":"GuardrailPolicyArnList", + "documentation":"The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is not set." 
+ }, + "UserAuthorizationRequired":{ + "shape":"BooleanAccountPreference", + "documentation":"Enables use of a user role requirement in your chat configuration." + } + } + }, + "UpdateSlackChannelConfigurationResult":{ + "type":"structure", + "members":{ + "ChannelConfiguration":{ + "shape":"SlackChannelConfiguration", + "documentation":"The configuration for a Slack channel configured with AWS Chatbot." + } + } + }, + "UpdateTeamsChannelConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "UpdateTeamsChannelConfigurationRequest":{ + "type":"structure", + "required":[ + "ChatConfigurationArn", + "ChannelId" + ], + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the MicrosoftTeamsChannelConfiguration to update." + }, + "ChannelId":{ + "shape":"TeamsChannelId", + "documentation":"The ID of the Microsoft Teams channel." + }, + "ChannelName":{ + "shape":"TeamsChannelName", + "documentation":"The name of the Microsoft Teams channel." + }, + "SnsTopicArns":{ + "shape":"SnsTopicArnList", + "documentation":"The ARNs of the SNS topics that deliver notifications to AWS Chatbot." + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." + }, + "LoggingLevel":{ + "shape":"CustomerCwLogLevel", + "documentation":"Logging levels include ERROR, INFO, or NONE." + }, + "GuardrailPolicyArns":{ + "shape":"GuardrailPolicyArnList", + "documentation":"The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is not set." 
+ }, + "UserAuthorizationRequired":{ + "shape":"BooleanAccountPreference", + "documentation":"Enables use of a user role requirement in your chat configuration." + } + } + }, + "UpdateTeamsChannelConfigurationResult":{ + "type":"structure", + "members":{ + "ChannelConfiguration":{ + "shape":"TeamsChannelConfiguration", + "documentation":"The configuration for a Microsoft Teams channel configured with AWS Chatbot." + } + } + } + }, + "documentation":"AWS Chatbot API" +} diff --git a/services/chime/pom.xml b/services/chime/pom.xml index c9699e2ad254..d6c46ddcf705 100644 --- a/services/chime/pom.xml +++ b/services/chime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT chime AWS Java SDK :: Services :: Chime diff --git a/services/chime/src/main/resources/codegen-resources/customization.config b/services/chime/src/main/resources/codegen-resources/customization.config index 254455474301..8e6a2a99dcf4 100644 --- a/services/chime/src/main/resources/codegen-resources/customization.config +++ b/services/chime/src/main/resources/codegen-resources/customization.config @@ -1,6 +1,7 @@ { - "verifiedSimpleMethods" : [ + "verifiedSimpleMethods": [ "listAccounts" ], - "generateEndpointClientTests": true + "generateEndpointClientTests": true, + "useSraAuth": true } diff --git a/services/chimesdkidentity/pom.xml b/services/chimesdkidentity/pom.xml index 24ff4045911f..d52beff8d2d7 100644 --- a/services/chimesdkidentity/pom.xml +++ b/services/chimesdkidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT chimesdkidentity AWS Java SDK :: Services :: Chime SDK Identity diff --git a/services/chimesdkmediapipelines/pom.xml b/services/chimesdkmediapipelines/pom.xml index 89b9d93cc8e8..86501d580b71 100644 --- a/services/chimesdkmediapipelines/pom.xml +++ b/services/chimesdkmediapipelines/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 
chimesdkmediapipelines AWS Java SDK :: Services :: Chime SDK Media Pipelines diff --git a/services/chimesdkmediapipelines/src/main/resources/codegen-resources/customization.config b/services/chimesdkmediapipelines/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/chimesdkmediapipelines/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/chimesdkmeetings/pom.xml b/services/chimesdkmeetings/pom.xml index 1a49891d2a8a..c6cb6d6e6c50 100644 --- a/services/chimesdkmeetings/pom.xml +++ b/services/chimesdkmeetings/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT chimesdkmeetings AWS Java SDK :: Services :: Chime SDK Meetings diff --git a/services/chimesdkmeetings/src/main/resources/codegen-resources/customization.config b/services/chimesdkmeetings/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/chimesdkmeetings/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/chimesdkmessaging/pom.xml b/services/chimesdkmessaging/pom.xml index c9e2d0238d3d..5f175e152d87 100644 --- a/services/chimesdkmessaging/pom.xml +++ b/services/chimesdkmessaging/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT chimesdkmessaging AWS Java SDK :: Services :: Chime SDK Messaging diff --git a/services/chimesdkvoice/pom.xml b/services/chimesdkvoice/pom.xml index 0df0c2fadca1..c2e14773ef35 100644 --- a/services/chimesdkvoice/pom.xml +++ b/services/chimesdkvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT chimesdkvoice AWS Java SDK :: Services :: Chime SDK Voice diff --git a/services/cleanrooms/pom.xml b/services/cleanrooms/pom.xml index 
9225d69c1201..7bff18cee76c 100644 --- a/services/cleanrooms/pom.xml +++ b/services/cleanrooms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT cleanrooms AWS Java SDK :: Services :: Clean Rooms diff --git a/services/cleanroomsml/pom.xml b/services/cleanroomsml/pom.xml index 17bcde015861..8ed4e1d9a94d 100644 --- a/services/cleanroomsml/pom.xml +++ b/services/cleanroomsml/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT cleanroomsml AWS Java SDK :: Services :: Clean Rooms ML diff --git a/services/cleanroomsml/src/main/resources/codegen-resources/customization.config b/services/cleanroomsml/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/cleanroomsml/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/cloud9/pom.xml b/services/cloud9/pom.xml index 730aed9554ab..c4bc1978f6d2 100644 --- a/services/cloud9/pom.xml +++ b/services/cloud9/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 cloud9 diff --git a/services/cloudcontrol/pom.xml b/services/cloudcontrol/pom.xml index 8f55f6628a1c..1907104ca0ef 100644 --- a/services/cloudcontrol/pom.xml +++ b/services/cloudcontrol/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT cloudcontrol AWS Java SDK :: Services :: Cloud Control diff --git a/services/clouddirectory/pom.xml b/services/clouddirectory/pom.xml index b3db50ffc517..60711fd43f9a 100644 --- a/services/clouddirectory/pom.xml +++ b/services/clouddirectory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT clouddirectory AWS Java SDK :: Services :: Amazon CloudDirectory diff --git a/services/cloudformation/pom.xml b/services/cloudformation/pom.xml index 0cccf686a484..d24c30bda5a9 
100644 --- a/services/cloudformation/pom.xml +++ b/services/cloudformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT cloudformation AWS Java SDK :: Services :: AWS CloudFormation diff --git a/services/cloudfront/pom.xml b/services/cloudfront/pom.xml index 61c5baa80ad0..bd8091b1649f 100644 --- a/services/cloudfront/pom.xml +++ b/services/cloudfront/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT cloudfront AWS Java SDK :: Services :: Amazon CloudFront diff --git a/services/cloudfront/src/main/resources/codegen-resources/customization.config b/services/cloudfront/src/main/resources/codegen-resources/customization.config index 7c53627831d8..106027fc8125 100644 --- a/services/cloudfront/src/main/resources/codegen-resources/customization.config +++ b/services/cloudfront/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "verifiedSimpleMethods" : [ + "verifiedSimpleMethods": [ "listCloudFrontOriginAccessIdentities", "listDistributions", "listFieldLevelEncryptionConfigs", @@ -9,7 +9,7 @@ ], "utilitiesMethod": { "returnType": "software.amazon.awssdk.services.cloudfront.CloudFrontUtilities", - "createMethodParams": [ - ] - } + "createMethodParams": [] + }, + "useSraAuth": true } diff --git a/services/cloudfront/src/test/java/software/amazon/awssdk/services/cloudfront/CloudFrontUtilitiesIntegrationTest.java b/services/cloudfront/src/test/java/software/amazon/awssdk/services/cloudfront/CloudFrontUtilitiesIntegrationTest.java index d2d538fe14f7..97570628251e 100644 --- a/services/cloudfront/src/test/java/software/amazon/awssdk/services/cloudfront/CloudFrontUtilitiesIntegrationTest.java +++ b/services/cloudfront/src/test/java/software/amazon/awssdk/services/cloudfront/CloudFrontUtilitiesIntegrationTest.java @@ -74,7 +74,7 @@ public class CloudFrontUtilitiesIntegrationTest extends IntegrationTestBase { private static final Base64.Encoder ENCODER = 
Base64.getEncoder(); - private static final String RESOURCE_PREFIX = "cf-test-"; + private static final String RESOURCE_PREFIX = "do-not-delete-cf-test-"; private static final String CALLER_REFERENCE = UUID.randomUUID().toString(); private static final String S3_OBJECT_KEY = "s3ObjectKey"; diff --git a/services/cloudfrontkeyvaluestore/pom.xml b/services/cloudfrontkeyvaluestore/pom.xml index deed1deee681..323dc8cedd23 100644 --- a/services/cloudfrontkeyvaluestore/pom.xml +++ b/services/cloudfrontkeyvaluestore/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT cloudfrontkeyvaluestore AWS Java SDK :: Services :: Cloud Front Key Value Store diff --git a/services/cloudfrontkeyvaluestore/src/main/resources/codegen-resources/customization.config b/services/cloudfrontkeyvaluestore/src/main/resources/codegen-resources/customization.config index 37184c583f71..70f0de837ceb 100644 --- a/services/cloudfrontkeyvaluestore/src/main/resources/codegen-resources/customization.config +++ b/services/cloudfrontkeyvaluestore/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,5 @@ { "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true, "enableEndpointAuthSchemeParams": true } diff --git a/services/cloudhsm/pom.xml b/services/cloudhsm/pom.xml index accf6cbac3ae..f8a85004f0b6 100644 --- a/services/cloudhsm/pom.xml +++ b/services/cloudhsm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT cloudhsm AWS Java SDK :: Services :: AWS CloudHSM diff --git a/services/cloudhsm/src/main/resources/codegen-resources/customization.config b/services/cloudhsm/src/main/resources/codegen-resources/customization.config index 7477c0a69fc1..a29fb9630a57 100644 --- a/services/cloudhsm/src/main/resources/codegen-resources/customization.config +++ b/services/cloudhsm/src/main/resources/codegen-resources/customization.config @@ -1,22 +1,34 @@ { - "shapeModifiers": { - "ClientVersion": { - 
"modify": [ - { - "5.1": { "emitEnumName": "FIVE_ONE" } + "shapeModifiers": { + "ClientVersion": { + "modify": [ + { + "5.1": { + "emitEnumName": "FIVE_ONE" + } + }, + { + "5.3": { + "emitEnumName": "FIVE_THREE" + } + } + ] }, - { - "5.3": { "emitEnumName": "FIVE_THREE" } + "CloudHsmServiceException": { + "exclude": [ + "retryable" + ] } - ] }, - "CloudHsmServiceException": { - "exclude": [ "retryable" ] - } - }, - "excludedSimpleMethods" : [ - "describeHsm", - "describeLunaClient" - ], - "verifiedSimpleMethods" : ["listAvailableZones", "listHapgs", "listHsms", "listLunaClients"] -} \ No newline at end of file + "excludedSimpleMethods": [ + "describeHsm", + "describeLunaClient" + ], + "verifiedSimpleMethods": [ + "listAvailableZones", + "listHapgs", + "listHsms", + "listLunaClients" + ], + "useSraAuth": true +} diff --git a/services/cloudhsmv2/pom.xml b/services/cloudhsmv2/pom.xml index 16242298ccb2..0e5968238494 100644 --- a/services/cloudhsmv2/pom.xml +++ b/services/cloudhsmv2/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 cloudhsmv2 diff --git a/services/cloudsearch/pom.xml b/services/cloudsearch/pom.xml index 42f825e86895..e40e871a87dc 100644 --- a/services/cloudsearch/pom.xml +++ b/services/cloudsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT cloudsearch AWS Java SDK :: Services :: Amazon CloudSearch diff --git a/services/cloudsearch/src/main/resources/codegen-resources/customization.config b/services/cloudsearch/src/main/resources/codegen-resources/customization.config index 900a3f8c3872..f9bf1edc1138 100644 --- a/services/cloudsearch/src/main/resources/codegen-resources/customization.config +++ b/services/cloudsearch/src/main/resources/codegen-resources/customization.config @@ -1,6 +1,7 @@ { - "verifiedSimpleMethods" : [ + "verifiedSimpleMethods": [ "describeDomains", "listDomainNames" - ] + ], + "useSraAuth": true } diff --git 
a/services/cloudsearchdomain/pom.xml b/services/cloudsearchdomain/pom.xml index c926e48abbf9..7085d46e1a4c 100644 --- a/services/cloudsearchdomain/pom.xml +++ b/services/cloudsearchdomain/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT cloudsearchdomain AWS Java SDK :: Services :: Amazon CloudSearch Domain diff --git a/services/cloudsearchdomain/src/main/resources/codegen-resources/customization.config b/services/cloudsearchdomain/src/main/resources/codegen-resources/customization.config index bb663cbc1f71..295ab7f144a0 100644 --- a/services/cloudsearchdomain/src/main/resources/codegen-resources/customization.config +++ b/services/cloudsearchdomain/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "shapeModifiers" : { + "shapeModifiers": { "UploadDocumentsRequest": { "inject": [ { @@ -11,7 +11,6 @@ } ] }, - "SearchRequest": { "modify": [ { @@ -24,5 +23,6 @@ }, "interceptors": [ "software.amazon.awssdk.services.cloudsearchdomain.internal.SwitchToPostInterceptor" - ] + ], + "useSraAuth": true } diff --git a/services/cloudtrail/pom.xml b/services/cloudtrail/pom.xml index a740a063fa12..ed23717eff64 100644 --- a/services/cloudtrail/pom.xml +++ b/services/cloudtrail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT cloudtrail AWS Java SDK :: Services :: AWS CloudTrail diff --git a/services/cloudtraildata/pom.xml b/services/cloudtraildata/pom.xml index f8c893008648..2ad804115400 100644 --- a/services/cloudtraildata/pom.xml +++ b/services/cloudtraildata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT cloudtraildata AWS Java SDK :: Services :: Cloud Trail Data diff --git a/services/cloudtraildata/src/main/resources/codegen-resources/customization.config b/services/cloudtraildata/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ 
b/services/cloudtraildata/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/cloudwatch/pom.xml b/services/cloudwatch/pom.xml index 715250a6ab49..be681c00ee24 100644 --- a/services/cloudwatch/pom.xml +++ b/services/cloudwatch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT cloudwatch AWS Java SDK :: Services :: Amazon CloudWatch diff --git a/services/cloudwatch/src/main/resources/codegen-resources/customization.config b/services/cloudwatch/src/main/resources/codegen-resources/customization.config index d819241c3710..640e3ce37681 100644 --- a/services/cloudwatch/src/main/resources/codegen-resources/customization.config +++ b/services/cloudwatch/src/main/resources/codegen-resources/customization.config @@ -1,13 +1,14 @@ { - "verifiedSimpleMethods": [ - "describeAlarmHistory", - "describeAlarms", - "listDashboards", - "listMetrics" - ], - "excludedSimpleMethods": [ - "deleteDashboards", - "putDashboard", - "getDashboard" - ] + "verifiedSimpleMethods": [ + "describeAlarmHistory", + "describeAlarms", + "listDashboards", + "listMetrics" + ], + "excludedSimpleMethods": [ + "deleteDashboards", + "putDashboard", + "getDashboard" + ], + "useSraAuth": true } diff --git a/services/cloudwatch/src/main/resources/codegen-resources/service-2.json b/services/cloudwatch/src/main/resources/codegen-resources/service-2.json index d4c1b0bebb87..a33bb94f9b9b 100644 --- a/services/cloudwatch/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudwatch/src/main/resources/codegen-resources/service-2.json @@ -532,7 +532,8 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"InternalServiceFault"} ], - "documentation":"

    Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics.

    You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data.

    Each PutMetricData request is limited to 1 MB in size for HTTP POST requests. You can send a payload compressed by gzip. Each request is also limited to no more than 1000 different metrics.

    Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.

    You can use up to 30 dimensions per metric to further clarify what data the metric collects. Each dimension consists of a Name and Value pair. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide.

    You specify the time stamp to be associated with each data point. You can specify time stamps that are as much as two weeks before the current date, and as much as 2 hours after the current day and time.

    Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricData or GetMetricStatistics from the time they are submitted. Data points with time stamps between 3 and 24 hours ago can take as much as 2 hours to become available for for GetMetricData or GetMetricStatistics.

    CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

    • The SampleCount value of the statistic set is 1 and Min, Max, and Sum are all equal.

    • The Min and Max are equal, and Sum is equal to Min multiplied by SampleCount.

    " + "documentation":"

    Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics.

    You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data.

    Each PutMetricData request is limited to 1 MB in size for HTTP POST requests. You can send a payload compressed by gzip. Each request is also limited to no more than 1000 different metrics.

    Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.

    You can use up to 30 dimensions per metric to further clarify what data the metric collects. Each dimension consists of a Name and Value pair. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide.

    You specify the time stamp to be associated with each data point. You can specify time stamps that are as much as two weeks before the current date, and as much as 2 hours after the current day and time.

    Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricData or GetMetricStatistics from the time they are submitted. Data points with time stamps between 3 and 24 hours ago can take as much as 2 hours to become available for for GetMetricData or GetMetricStatistics.

    CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

    • The SampleCount value of the statistic set is 1 and Min, Max, and Sum are all equal.

    • The Min and Max are equal, and Sum is equal to Min multiplied by SampleCount.

    ", + "requestcompression":{"encodings":["gzip"]} }, "PutMetricStream":{ "name":"PutMetricStream", diff --git a/services/cloudwatchevents/pom.xml b/services/cloudwatchevents/pom.xml index 3a8aa667fb0c..fca1632d9c24 100644 --- a/services/cloudwatchevents/pom.xml +++ b/services/cloudwatchevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT cloudwatchevents AWS Java SDK :: Services :: Amazon CloudWatch Events diff --git a/services/cloudwatchlogs/pom.xml b/services/cloudwatchlogs/pom.xml index 93ee173a364b..a8d3bde185b4 100644 --- a/services/cloudwatchlogs/pom.xml +++ b/services/cloudwatchlogs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT cloudwatchlogs AWS Java SDK :: Services :: Amazon CloudWatch Logs diff --git a/services/cloudwatchlogs/src/main/resources/codegen-resources/customization.config b/services/cloudwatchlogs/src/main/resources/codegen-resources/customization.config index f61c7f5add4a..a7c722d97c77 100644 --- a/services/cloudwatchlogs/src/main/resources/codegen-resources/customization.config +++ b/services/cloudwatchlogs/src/main/resources/codegen-resources/customization.config @@ -1,17 +1,18 @@ { - "excludedSimpleMethods" : [ - "deleteResourcePolicy", - "putResourcePolicy" - ], - "verifiedSimpleMethods" : [ - "describeDestinations", - "describeExportTasks", - "describeLogGroups", - "describeMetricFilters", - "describeQueries", - "describeResourcePolicies" - ], - "paginationCustomization": { - "GetLogEvents" : "LastPageHasPreviousToken" - } -} \ No newline at end of file + "excludedSimpleMethods": [ + "deleteResourcePolicy", + "putResourcePolicy" + ], + "verifiedSimpleMethods": [ + "describeDestinations", + "describeExportTasks", + "describeLogGroups", + "describeMetricFilters", + "describeQueries", + "describeResourcePolicies" + ], + "paginationCustomization": { + "GetLogEvents": "LastPageHasPreviousToken" + }, + "useSraAuth": true +} diff --git 
a/services/codeartifact/pom.xml b/services/codeartifact/pom.xml index 0c934b275d79..c862b6c16f3f 100644 --- a/services/codeartifact/pom.xml +++ b/services/codeartifact/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT codeartifact AWS Java SDK :: Services :: Codeartifact diff --git a/services/codebuild/pom.xml b/services/codebuild/pom.xml index de991513e12e..013b409b84e0 100644 --- a/services/codebuild/pom.xml +++ b/services/codebuild/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT codebuild AWS Java SDK :: Services :: AWS Code Build diff --git a/services/codebuild/src/main/resources/codegen-resources/customization.config b/services/codebuild/src/main/resources/codegen-resources/customization.config index fb06b01acd05..10a3d064dabe 100644 --- a/services/codebuild/src/main/resources/codegen-resources/customization.config +++ b/services/codebuild/src/main/resources/codegen-resources/customization.config @@ -1,8 +1,9 @@ { - "verifiedSimpleMethods" : [ + "verifiedSimpleMethods": [ "listBuilds", "listCuratedEnvironmentImages", "listProjects", "listSourceCredentials" - ] + ], + "useSraAuth": true } diff --git a/services/codecatalyst/pom.xml b/services/codecatalyst/pom.xml index aeaee6fbb7a5..0cc0c3e69c2c 100644 --- a/services/codecatalyst/pom.xml +++ b/services/codecatalyst/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT codecatalyst AWS Java SDK :: Services :: Code Catalyst diff --git a/services/codecommit/pom.xml b/services/codecommit/pom.xml index 2a1e610750f0..fef113c8423f 100644 --- a/services/codecommit/pom.xml +++ b/services/codecommit/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT codecommit AWS Java SDK :: Services :: AWS CodeCommit diff --git a/services/codedeploy/pom.xml b/services/codedeploy/pom.xml index 2e077b300596..374030d8060d 100644 --- a/services/codedeploy/pom.xml 
+++ b/services/codedeploy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT codedeploy AWS Java SDK :: Services :: AWS CodeDeploy diff --git a/services/codeguruprofiler/pom.xml b/services/codeguruprofiler/pom.xml index 5d4a44701d32..fbb23156301d 100644 --- a/services/codeguruprofiler/pom.xml +++ b/services/codeguruprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT codeguruprofiler AWS Java SDK :: Services :: CodeGuruProfiler diff --git a/services/codeguruprofiler/src/main/resources/codegen-resources/customization.config b/services/codeguruprofiler/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/codeguruprofiler/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/codegurureviewer/pom.xml b/services/codegurureviewer/pom.xml index 2c7d1c8430bf..2056f0393a45 100644 --- a/services/codegurureviewer/pom.xml +++ b/services/codegurureviewer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT codegurureviewer AWS Java SDK :: Services :: CodeGuru Reviewer diff --git a/services/codegurusecurity/pom.xml b/services/codegurusecurity/pom.xml index 033d17010bd0..7266cd6c9f93 100644 --- a/services/codegurusecurity/pom.xml +++ b/services/codegurusecurity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT codegurusecurity AWS Java SDK :: Services :: Code Guru Security diff --git a/services/codepipeline/pom.xml b/services/codepipeline/pom.xml index b8cd33eee716..3eb0adedd077 100644 --- a/services/codepipeline/pom.xml +++ b/services/codepipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT codepipeline AWS Java SDK :: Services :: AWS CodePipeline diff --git 
a/services/codepipeline/src/main/resources/codegen-resources/service-2.json b/services/codepipeline/src/main/resources/codegen-resources/service-2.json index 8f1c52d91b5e..135805ebe55a 100644 --- a/services/codepipeline/src/main/resources/codegen-resources/service-2.json +++ b/services/codepipeline/src/main/resources/codegen-resources/service-2.json @@ -526,7 +526,8 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"ConflictException"}, - {"shape":"PipelineNotFoundException"} + {"shape":"PipelineNotFoundException"}, + {"shape":"ConcurrentPipelineExecutionsLimitExceededException"} ], "documentation":"

    Starts the specified pipeline. Specifically, it begins processing the latest commit to the source location specified as part of the pipeline.

    " }, @@ -865,6 +866,10 @@ "namespace":{ "shape":"ActionNamespace", "documentation":"

    The variable namespace associated with the action. All variables produced as output by this action fall under this namespace.

    " + }, + "timeoutInMinutes":{ + "shape":"ActionTimeout", + "documentation":"

    A timeout duration in minutes that can be applied against the ActionType’s default timeout value specified in Quotas for CodePipeline . This attribute is available only to the manual approval ActionType.

    " } }, "documentation":"

    Represents information about an action declaration.

    " @@ -946,6 +951,10 @@ "shape":"Timestamp", "documentation":"

    The last update time of the action execution.

    " }, + "updatedBy":{ + "shape":"LastUpdatedBy", + "documentation":"

    The ARN of the user who changed the pipeline execution details.

    " + }, "status":{ "shape":"ActionExecutionStatus", "documentation":"

    The status of the action execution. Status categories are InProgress, Succeeded, and Failed.

    " @@ -971,6 +980,10 @@ "pipelineExecutionId":{ "shape":"PipelineExecutionId", "documentation":"

    The pipeline execution ID used to filter action execution history.

    " + }, + "latestInPipelineExecution":{ + "shape":"LatestInPipelineExecutionFilter", + "documentation":"

    The latest execution in the pipeline.

    Filtering on the latest execution is available for executions run on or after February 08, 2024.

    " } }, "documentation":"

    Filter values for the action execution.

    " @@ -1039,7 +1052,8 @@ "externalExecutionUrl":{ "shape":"Url", "documentation":"

    The deepest external link to the external resource (for example, a repository URL or deployment endpoint) that is used when running the action.

    " - } + }, + "errorDetails":{"shape":"ErrorDetails"} }, "documentation":"

    Execution result information, such as the external execution ID.

    " }, @@ -1144,6 +1158,12 @@ "type":"list", "member":{"shape":"ActionState"} }, + "ActionTimeout":{ + "type":"integer", + "box":true, + "max":86400, + "min":5 + }, "ActionType":{ "type":"structure", "required":[ @@ -1698,6 +1718,14 @@ "documentation":"

    Unable to modify the tag due to a simultaneous update request.

    ", "exception":true }, + "ConcurrentPipelineExecutionsLimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "documentation":"

    The pipeline has reached the limit for concurrent pipeline executions.

    ", + "exception":true + }, "ConflictException":{ "type":"structure", "members":{ @@ -2024,6 +2052,14 @@ "max":1500, "min":1 }, + "ExecutionMode":{ + "type":"string", + "enum":[ + "QUEUED", + "SUPERSEDED", + "PARALLEL" + ] + }, "ExecutionSummary":{ "type":"string", "max":2048, @@ -2278,6 +2314,32 @@ }, "documentation":"

    Represents the output of a GetThirdPartyJobDetails action.

    " }, + "GitBranchFilterCriteria":{ + "type":"structure", + "members":{ + "includes":{ + "shape":"GitBranchPatternList", + "documentation":"

    The list of patterns of Git branches that, when a commit is pushed, are to be included as criteria that starts the pipeline.

    " + }, + "excludes":{ + "shape":"GitBranchPatternList", + "documentation":"

    The list of patterns of Git branches that, when a commit is pushed, are to be excluded from starting the pipeline.

    " + } + }, + "documentation":"

    The Git repository branches specified as filter criteria to start the pipeline.

    " + }, + "GitBranchNamePattern":{ + "type":"string", + "max":255, + "min":1, + "pattern":".*" + }, + "GitBranchPatternList":{ + "type":"list", + "member":{"shape":"GitBranchNamePattern"}, + "max":8, + "min":1 + }, "GitConfiguration":{ "type":"structure", "required":["sourceActionName"], @@ -2288,10 +2350,78 @@ }, "push":{ "shape":"GitPushFilterList", - "documentation":"

    The field where the repository event that will start the pipeline, such as pushing Git tags, is specified with details.

    Git tags is the only supported event type.

    " + "documentation":"

    The field where the repository event that will start the pipeline, such as pushing Git tags, is specified with details.

    " + }, + "pullRequest":{ + "shape":"GitPullRequestFilterList", + "documentation":"

    The field where the repository event that will start the pipeline is specified as pull requests.

    " } }, - "documentation":"

    A type of trigger configuration for Git-based source actions.

    You can specify the Git configuration trigger type for all third-party Git-based source actions that are supported by the CodeStarSourceConnection action type.

    V2 type pipelines, along with triggers on Git tags and pipeline-level variables, are not currently supported for CloudFormation and CDK resources in CodePipeline. For more information about V2 type pipelines, see Pipeline types in the CodePipeline User Guide.

    " + "documentation":"

    A type of trigger configuration for Git-based source actions.

    You can specify the Git configuration trigger type for all third-party Git-based source actions that are supported by the CodeStarSourceConnection action type.

    " + }, + "GitFilePathFilterCriteria":{ + "type":"structure", + "members":{ + "includes":{ + "shape":"GitFilePathPatternList", + "documentation":"

    The list of patterns of Git repository file paths that, when a commit is pushed, are to be included as criteria that starts the pipeline.

    " + }, + "excludes":{ + "shape":"GitFilePathPatternList", + "documentation":"

    The list of patterns of Git repository file paths that, when a commit is pushed, are to be excluded from starting the pipeline.

    " + } + }, + "documentation":"

    The Git repository file paths specified as filter criteria to start the pipeline.

    " + }, + "GitFilePathPattern":{ + "type":"string", + "max":255, + "min":1, + "pattern":".*" + }, + "GitFilePathPatternList":{ + "type":"list", + "member":{"shape":"GitFilePathPattern"}, + "max":8, + "min":1 + }, + "GitPullRequestEventType":{ + "type":"string", + "enum":[ + "OPEN", + "UPDATED", + "CLOSED" + ] + }, + "GitPullRequestEventTypeList":{ + "type":"list", + "member":{"shape":"GitPullRequestEventType"}, + "max":3, + "min":1 + }, + "GitPullRequestFilter":{ + "type":"structure", + "members":{ + "events":{ + "shape":"GitPullRequestEventTypeList", + "documentation":"

    The field that specifies which pull request events to filter on (opened, updated, closed) for the trigger configuration.

    " + }, + "branches":{ + "shape":"GitBranchFilterCriteria", + "documentation":"

    The field that specifies to filter on branches for the pull request trigger configuration.

    " + }, + "filePaths":{ + "shape":"GitFilePathFilterCriteria", + "documentation":"

    The field that specifies to filter on file paths for the pull request trigger configuration.

    " + } + }, + "documentation":"

    The event criteria for the pull request trigger configuration, such as the lists of branches or file paths to include and exclude.

    " + }, + "GitPullRequestFilterList":{ + "type":"list", + "member":{"shape":"GitPullRequestFilter"}, + "max":3, + "min":1 }, "GitPushFilter":{ "type":"structure", @@ -2299,6 +2429,14 @@ "tags":{ "shape":"GitTagFilterCriteria", "documentation":"

    The field that contains the details for the Git tags trigger configuration.

    " + }, + "branches":{ + "shape":"GitBranchFilterCriteria", + "documentation":"

    The field that specifies to filter on branches for the push trigger configuration.

    " + }, + "filePaths":{ + "shape":"GitFilePathFilterCriteria", + "documentation":"

    The field that specifies to filter on file paths for the push trigger configuration.

    " } }, "documentation":"

    The event criteria that specify when a specified repository event will start the pipeline for the specified trigger configuration, such as the lists of Git tags to include and exclude.

    " @@ -2306,7 +2444,7 @@ "GitPushFilterList":{ "type":"list", "member":{"shape":"GitPushFilter"}, - "max":1, + "max":3, "min":1 }, "GitTagFilterCriteria":{ @@ -2598,6 +2736,24 @@ "LastChangedAt":{"type":"timestamp"}, "LastChangedBy":{"type":"string"}, "LastUpdatedBy":{"type":"string"}, + "LatestInPipelineExecutionFilter":{ + "type":"structure", + "required":[ + "pipelineExecutionId", + "startTimeRange" + ], + "members":{ + "pipelineExecutionId":{ + "shape":"PipelineExecutionId", + "documentation":"

    The execution ID for the latest execution in the pipeline.

    " + }, + "startTimeRange":{ + "shape":"StartTimeRange", + "documentation":"

    The start time to filter on for the latest execution in the pipeline. Valid options:

    • All

    • Latest

    " + } + }, + "documentation":"

    The field that specifies to filter on the latest execution in the pipeline.

    Filtering on the latest execution is available for executions run on or after February 08, 2024.

    " + }, "LimitExceededException":{ "type":"structure", "members":{ @@ -2990,17 +3146,21 @@ "shape":"PipelineVersion", "documentation":"

    The version number of the pipeline. A new pipeline always has a version number of 1. This number is incremented when a pipeline is updated.

    " }, + "executionMode":{ + "shape":"ExecutionMode", + "documentation":"

    The method that the pipeline will use to handle multiple executions. The default mode is SUPERSEDED.

    " + }, "pipelineType":{ "shape":"PipelineType", - "documentation":"

    CodePipeline provides the following pipeline types, which differ in characteristics and price, so that you can tailor your pipeline features and cost to the needs of your applications.

    • V1 type pipelines have a JSON structure that contains standard pipeline, stage, and action-level parameters.

    • V2 type pipelines have the same structure as a V1 type, along with additional parameters for release safety and trigger configuration.

    Including V2 parameters, such as triggers on Git tags, in the pipeline JSON when creating or updating a pipeline will result in the pipeline having the V2 type of pipeline and the associated costs.

    For information about pricing for CodePipeline, see Pricing.

    For information about which type of pipeline to choose, see What type of pipeline is right for me?.

    V2 type pipelines, along with triggers on Git tags and pipeline-level variables, are not currently supported for CloudFormation and CDK resources in CodePipeline. For more information about V2 type pipelines, see Pipeline types in the CodePipeline User Guide.

    " - }, - "triggers":{ - "shape":"PipelineTriggerDeclarationList", - "documentation":"

    The trigger configuration specifying a type of event, such as Git tags, that starts the pipeline.

    When a trigger configuration is specified, default change detection for repository and branch commits is disabled.

    " + "documentation":"

    CodePipeline provides the following pipeline types, which differ in characteristics and price, so that you can tailor your pipeline features and cost to the needs of your applications.

    • V1 type pipelines have a JSON structure that contains standard pipeline, stage, and action-level parameters.

    • V2 type pipelines have the same structure as a V1 type, along with additional parameters for release safety and trigger configuration.

    Including V2 parameters, such as triggers on Git tags, in the pipeline JSON when creating or updating a pipeline will result in the pipeline having the V2 type of pipeline and the associated costs.

    For information about pricing for CodePipeline, see Pricing.

    For information about which type of pipeline to choose, see What type of pipeline is right for me?.

    " }, "variables":{ "shape":"PipelineVariableDeclarationList", "documentation":"

    A list that defines the pipeline variables for a pipeline resource. Variable names can have alphanumeric and underscore characters, and the values must match [A-Za-z0-9@\\-_]+.

    " + }, + "triggers":{ + "shape":"PipelineTriggerDeclarationList", + "documentation":"

    The trigger configuration specifying a type of event, such as Git tags, that starts the pipeline.

    When a trigger configuration is specified, default change detection for repository and branch commits is disabled.

    " } }, "documentation":"

    Represents the structure of actions and stages to be performed in the pipeline.

    " @@ -3032,10 +3192,14 @@ "shape":"ArtifactRevisionList", "documentation":"

    A list of ArtifactRevision objects included in a pipeline execution.

    " }, - "trigger":{"shape":"ExecutionTrigger"}, "variables":{ "shape":"ResolvedPipelineVariableList", "documentation":"

    A list of pipeline variables used for the pipeline execution.

    " + }, + "trigger":{"shape":"ExecutionTrigger"}, + "executionMode":{ + "shape":"ExecutionMode", + "documentation":"

    The method that the pipeline will use to handle multiple executions. The default mode is SUPERSEDED.

    " } }, "documentation":"

    Represents information about an execution of a pipeline.

    " @@ -3102,6 +3266,10 @@ "stopTrigger":{ "shape":"StopExecutionTrigger", "documentation":"

    The interaction that stopped a pipeline execution.

    " + }, + "executionMode":{ + "shape":"ExecutionMode", + "documentation":"

    The method that the pipeline will use to handle multiple executions. The default mode is SUPERSEDED.

    " } }, "documentation":"

    Summary information about a pipeline execution.

    " @@ -3173,7 +3341,11 @@ }, "pipelineType":{ "shape":"PipelineType", - "documentation":"

    CodePipeline provides the following pipeline types, which differ in characteristics and price, so that you can tailor your pipeline features and cost to the needs of your applications.

    • V1 type pipelines have a JSON structure that contains standard pipeline, stage, and action-level parameters.

    • V2 type pipelines have the same structure as a V1 type, along with additional parameters for release safety and trigger configuration.

    Including V2 parameters, such as triggers on Git tags, in the pipeline JSON when creating or updating a pipeline will result in the pipeline having the V2 type of pipeline and the associated costs.

    For information about pricing for CodePipeline, see Pricing.

    For information about which type of pipeline to choose, see What type of pipeline is right for me?.

    V2 type pipelines, along with triggers on Git tags and pipeline-level variables, are not currently supported for CloudFormation and CDK resources in CodePipeline. For more information about V2 type pipelines, see Pipeline types in the CodePipeline User Guide.

    " + "documentation":"

    CodePipeline provides the following pipeline types, which differ in characteristics and price, so that you can tailor your pipeline features and cost to the needs of your applications.

    • V1 type pipelines have a JSON structure that contains standard pipeline, stage, and action-level parameters.

    • V2 type pipelines have the same structure as a V1 type, along with additional parameters for release safety and trigger configuration.

    Including V2 parameters, such as triggers on Git tags, in the pipeline JSON when creating or updating a pipeline will result in the pipeline having the V2 type of pipeline and the associated costs.

    For information about pricing for CodePipeline, see Pricing.

    For information about which type of pipeline to choose, see What type of pipeline is right for me?.

    " + }, + "executionMode":{ + "shape":"ExecutionMode", + "documentation":"

    The method that the pipeline will use to handle multiple executions. The default mode is SUPERSEDED.

    " }, "created":{ "shape":"Timestamp", @@ -3202,12 +3374,12 @@ "documentation":"

    Provides the filter criteria and the source stage for the repository event that starts the pipeline, such as Git tags.

    " } }, - "documentation":"

    Represents information about the specified trigger configuration, such as the filter criteria and the source stage for the action that contains the trigger.

    This is only supported for the CodeStarSourceConnection action type.

    When a trigger configuration is specified, default change detection for repository and branch commits is disabled.

    V2 type pipelines, along with triggers on Git tags and pipeline-level variables, are not currently supported for CloudFormation and CDK resources in CodePipeline. For more information about V2 type pipelines, see Pipeline types in the CodePipeline User Guide.

    " + "documentation":"

    Represents information about the specified trigger configuration, such as the filter criteria and the source stage for the action that contains the trigger.

    This is only supported for the CodeStarSourceConnection action type.

    When a trigger configuration is specified, default change detection for repository and branch commits is disabled.

    " }, "PipelineTriggerDeclarationList":{ "type":"list", "member":{"shape":"PipelineTriggerDeclaration"}, - "max":20 + "max":50 }, "PipelineTriggerProviderType":{ "type":"string", @@ -3236,7 +3408,7 @@ "documentation":"

    The value of a pipeline-level variable.

    " } }, - "documentation":"

    A pipeline-level variable used for a pipeline execution.

    V2 type pipelines, along with triggers on Git tags and pipeline-level variables, are not currently supported for CloudFormation and CDK resources in CodePipeline. For more information about V2 type pipelines, see Pipeline types in the CodePipeline User Guide.

    " + "documentation":"

    A pipeline-level variable used for a pipeline execution.

    " }, "PipelineVariableDeclaration":{ "type":"structure", @@ -3255,7 +3427,7 @@ "documentation":"

    The description of a pipeline-level variable. It's used to add additional context about the variable, and not being used at time when pipeline executes.

    " } }, - "documentation":"

    A variable declared at the pipeline level.

    V2 type pipelines, along with triggers on Git tags and pipeline-level variables, are not currently supported for CloudFormation and CDK resources in CodePipeline. For more information about V2 type pipelines, see Pipeline types in the CodePipeline User Guide.

    " + "documentation":"

    A variable declared at the pipeline level.

    " }, "PipelineVariableDeclarationList":{ "type":"list", @@ -3879,6 +4051,10 @@ }, "documentation":"

    Represents information about the run of a stage.

    " }, + "StageExecutionList":{ + "type":"list", + "member":{"shape":"StageExecution"} + }, "StageExecutionStatus":{ "type":"string", "enum":[ @@ -3925,6 +4101,10 @@ "documentation":"

    The name of the stage.

    " }, "inboundExecution":{"shape":"StageExecution"}, + "inboundExecutions":{ + "shape":"StageExecutionList", + "documentation":"

    The inbound executions for a stage.

    " + }, "inboundTransitionState":{ "shape":"TransitionState", "documentation":"

    The state of the inbound transition, which is either enabled or disabled.

    " @@ -3985,6 +4165,13 @@ }, "documentation":"

    Represents the output of a StartPipelineExecution action.

    " }, + "StartTimeRange":{ + "type":"string", + "enum":[ + "Latest", + "All" + ] + }, "StopExecutionTrigger":{ "type":"structure", "members":{ diff --git a/services/codestar/pom.xml b/services/codestar/pom.xml index 45a199d14ba2..f8da16e4da8a 100644 --- a/services/codestar/pom.xml +++ b/services/codestar/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT codestar AWS Java SDK :: Services :: AWS CodeStar diff --git a/services/codestarconnections/pom.xml b/services/codestarconnections/pom.xml index dca786972499..cc4c6f94f471 100644 --- a/services/codestarconnections/pom.xml +++ b/services/codestarconnections/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT codestarconnections AWS Java SDK :: Services :: CodeStar connections diff --git a/services/codestarnotifications/pom.xml b/services/codestarnotifications/pom.xml index 721cd3ec7a45..a47a0a2129e3 100644 --- a/services/codestarnotifications/pom.xml +++ b/services/codestarnotifications/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT codestarnotifications AWS Java SDK :: Services :: Codestar Notifications diff --git a/services/cognitoidentity/pom.xml b/services/cognitoidentity/pom.xml index e869d7c13a54..b4cd5c7a5bc8 100644 --- a/services/cognitoidentity/pom.xml +++ b/services/cognitoidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT cognitoidentity AWS Java SDK :: Services :: Amazon Cognito Identity diff --git a/services/cognitoidentityprovider/pom.xml b/services/cognitoidentityprovider/pom.xml index c6dea1ff7e47..8f9871a0dbeb 100644 --- a/services/cognitoidentityprovider/pom.xml +++ b/services/cognitoidentityprovider/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT cognitoidentityprovider AWS Java SDK :: Services :: Amazon Cognito Identity Provider Service diff --git 
a/services/cognitoidentityprovider/src/main/resources/codegen-resources/customization.config b/services/cognitoidentityprovider/src/main/resources/codegen-resources/customization.config index 977593026018..3f8aaed78a98 100644 --- a/services/cognitoidentityprovider/src/main/resources/codegen-resources/customization.config +++ b/services/cognitoidentityprovider/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,6 @@ { "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true, "excludedSimpleMethods" : [ "associateSoftwareToken" ], diff --git a/services/cognitosync/pom.xml b/services/cognitosync/pom.xml index 346d7f61c34d..84ce96e0606e 100644 --- a/services/cognitosync/pom.xml +++ b/services/cognitosync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT cognitosync AWS Java SDK :: Services :: Amazon Cognito Sync diff --git a/services/cognitosync/src/main/resources/codegen-resources/customization.config b/services/cognitosync/src/main/resources/codegen-resources/customization.config index f5a8b5b39a27..c1321e251d9e 100644 --- a/services/cognitosync/src/main/resources/codegen-resources/customization.config +++ b/services/cognitosync/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,6 @@ { - "verifiedSimpleMethods" : [ + "verifiedSimpleMethods": [ "listIdentityPoolUsage" - ] + ], + "useSraAuth": true } diff --git a/services/comprehend/pom.xml b/services/comprehend/pom.xml index b72fcc5d3115..f9dcb1d84fac 100644 --- a/services/comprehend/pom.xml +++ b/services/comprehend/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 comprehend diff --git a/services/comprehendmedical/pom.xml b/services/comprehendmedical/pom.xml index 9eab56c14fc8..baa83c19cea9 100644 --- a/services/comprehendmedical/pom.xml +++ b/services/comprehendmedical/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 
comprehendmedical AWS Java SDK :: Services :: ComprehendMedical diff --git a/services/comprehendmedical/src/main/resources/codegen-resources/customization.config b/services/comprehendmedical/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/comprehendmedical/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/computeoptimizer/pom.xml b/services/computeoptimizer/pom.xml index 71353c9343d2..33e30b9b1890 100644 --- a/services/computeoptimizer/pom.xml +++ b/services/computeoptimizer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT computeoptimizer AWS Java SDK :: Services :: Compute Optimizer diff --git a/services/config/pom.xml b/services/config/pom.xml index b184e25b6eb4..67ad75db6a63 100644 --- a/services/config/pom.xml +++ b/services/config/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT config AWS Java SDK :: Services :: AWS Config diff --git a/services/config/src/main/resources/codegen-resources/customization.config b/services/config/src/main/resources/codegen-resources/customization.config index d113522aaf3f..a2279548c021 100644 --- a/services/config/src/main/resources/codegen-resources/customization.config +++ b/services/config/src/main/resources/codegen-resources/customization.config @@ -1,22 +1,23 @@ { - "verifiedSimpleMethods": [ - "describeAggregationAuthorizations", - "describeComplianceByConfigRule", - "describeComplianceByResource", - "describeConfigRuleEvaluationStatus", - "describeConfigRules", - "describeConfigurationAggregators", - "describeConfigurationRecorderStatus", - "describeConfigurationRecorders", - "describeDeliveryChannelStatus", - "describeDeliveryChannels", - "describePendingAggregationRequests", - "describeRetentionConfigurations", - "getComplianceSummaryByConfigRule", - 
"getComplianceSummaryByResourceType", - "getDiscoveredResourceCounts" - ], - "excludedSimpleMethods": [ - "startConfigRulesEvaluation" - ] + "verifiedSimpleMethods": [ + "describeAggregationAuthorizations", + "describeComplianceByConfigRule", + "describeComplianceByResource", + "describeConfigRuleEvaluationStatus", + "describeConfigRules", + "describeConfigurationAggregators", + "describeConfigurationRecorderStatus", + "describeConfigurationRecorders", + "describeDeliveryChannelStatus", + "describeDeliveryChannels", + "describePendingAggregationRequests", + "describeRetentionConfigurations", + "getComplianceSummaryByConfigRule", + "getComplianceSummaryByResourceType", + "getDiscoveredResourceCounts" + ], + "excludedSimpleMethods": [ + "startConfigRulesEvaluation" + ], + "useSraAuth": true } diff --git a/services/config/src/main/resources/codegen-resources/service-2.json b/services/config/src/main/resources/codegen-resources/service-2.json index ccb94f485155..6219aeebed45 100644 --- a/services/config/src/main/resources/codegen-resources/service-2.json +++ b/services/config/src/main/resources/codegen-resources/service-2.json @@ -522,7 +522,7 @@ {"shape":"InvalidLimitException"}, {"shape":"OrganizationAccessDeniedException"} ], - "documentation":"

    Returns a list of organization Config rules.

    When you specify the limit and the next token, you receive a paginated response.

    Limit and next token are not applicable if you specify organization Config rule names. It is only applicable, when you request all the organization Config rules.

    For accounts within an organzation

    If you deploy an organizational rule or conformance pack in an organization administrator account, and then establish a delegated administrator and deploy an organizational rule or conformance pack in the delegated administrator account, you won't be able to see the organizational rule or conformance pack in the organization administrator account from the delegated administrator account or see the organizational rule or conformance pack in the delegated administrator account from organization administrator account. The DescribeOrganizationConfigRules and DescribeOrganizationConformancePacks APIs can only see and interact with the organization-related resource that were deployed from within the account calling those APIs.

    " + "documentation":"

    Returns a list of organization Config rules.

    When you specify the limit and the next token, you receive a paginated response.

    Limit and next token are not applicable if you specify organization Config rule names. It is only applicable, when you request all the organization Config rules.

    For accounts within an organization

    If you deploy an organizational rule or conformance pack in an organization administrator account, and then establish a delegated administrator and deploy an organizational rule or conformance pack in the delegated administrator account, you won't be able to see the organizational rule or conformance pack in the organization administrator account from the delegated administrator account or see the organizational rule or conformance pack in the delegated administrator account from organization administrator account. The DescribeOrganizationConfigRules and DescribeOrganizationConformancePacks APIs can only see and interact with the organization-related resource that were deployed from within the account calling those APIs.

    " }, "DescribeOrganizationConformancePackStatuses":{ "name":"DescribeOrganizationConformancePackStatuses", @@ -554,7 +554,7 @@ {"shape":"InvalidLimitException"}, {"shape":"OrganizationAccessDeniedException"} ], - "documentation":"

    Returns a list of organization conformance packs.

    When you specify the limit and the next token, you receive a paginated response.

    Limit and next token are not applicable if you specify organization conformance packs names. They are only applicable, when you request all the organization conformance packs.

    For accounts within an organzation

    If you deploy an organizational rule or conformance pack in an organization administrator account, and then establish a delegated administrator and deploy an organizational rule or conformance pack in the delegated administrator account, you won't be able to see the organizational rule or conformance pack in the organization administrator account from the delegated administrator account or see the organizational rule or conformance pack in the delegated administrator account from organization administrator account. The DescribeOrganizationConfigRules and DescribeOrganizationConformancePacks APIs can only see and interact with the organization-related resource that were deployed from within the account calling those APIs.

    " + "documentation":"

    Returns a list of organization conformance packs.

    When you specify the limit and the next token, you receive a paginated response.

    Limit and next token are not applicable if you specify organization conformance packs names. They are only applicable, when you request all the organization conformance packs.

    For accounts within an organization

    If you deploy an organizational rule or conformance pack in an organization administrator account, and then establish a delegated administrator and deploy an organizational rule or conformance pack in the delegated administrator account, you won't be able to see the organizational rule or conformance pack in the organization administrator account from the delegated administrator account or see the organizational rule or conformance pack in the delegated administrator account from organization administrator account. The DescribeOrganizationConfigRules and DescribeOrganizationConformancePacks APIs can only see and interact with the organization-related resource that were deployed from within the account calling those APIs.

    " }, "DescribePendingAggregationRequests":{ "name":"DescribePendingAggregationRequests", @@ -1178,7 +1178,7 @@ {"shape":"InsufficientPermissionsException"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

    Adds or updates the remediation configuration with a specific Config rule with the selected target or action. The API creates the RemediationConfiguration object for the Config rule. The Config rule must already exist for you to add a remediation configuration. The target (SSM document) must exist and have permissions to use the target.

    If you make backward incompatible changes to the SSM document, you must call this again to ensure the remediations can run.

    This API does not support adding remediation configurations for service-linked Config Rules such as Organization Config rules, the rules deployed by conformance packs, and rules deployed by Amazon Web Services Security Hub.

    For manual remediation configuration, you need to provide a value for automationAssumeRole or use a value in the assumeRolefield to remediate your resources. The SSM automation document can use either as long as it maps to a valid parameter.

    However, for automatic remediation configuration, the only valid assumeRole field value is AutomationAssumeRole and you need to provide a value for AutomationAssumeRole to remediate your resources.

    " + "documentation":"

    Adds or updates the remediation configuration with a specific Config rule with the selected target or action. The API creates the RemediationConfiguration object for the Config rule. The Config rule must already exist for you to add a remediation configuration. The target (SSM document) must exist and have permissions to use the target.

    Be aware of backward incompatible changes

    If you make backward incompatible changes to the SSM document, you must call this again to ensure the remediations can run.

    This API does not support adding remediation configurations for service-linked Config Rules such as Organization Config rules, the rules deployed by conformance packs, and rules deployed by Amazon Web Services Security Hub.

    Required fields

    For manual remediation configuration, you need to provide a value for automationAssumeRole or use a value in the assumeRolefield to remediate your resources. The SSM automation document can use either as long as it maps to a valid parameter.

    However, for automatic remediation configuration, the only valid assumeRole field value is AutomationAssumeRole and you need to provide a value for AutomationAssumeRole to remediate your resources.

    Auto remediation can be initiated even for compliant resources

    If you enable auto remediation for a specific Config rule using the PutRemediationConfigurations API or the Config console, it initiates the remediation process for all non-compliant resources for that specific rule. The auto remediation process relies on the compliance data snapshot which is captured on a periodic basis. Any non-compliant resource that is updated between the snapshot schedule will continue to be remediated based on the last known compliance data snapshot.

    This means that in some cases auto remediation can be initiated even for compliant resources, since the bootstrap processor uses a database that can have stale evaluation results based on the last known compliance data snapshot.

    " }, "PutRemediationExceptions":{ "name":"PutRemediationExceptions", @@ -1192,7 +1192,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InsufficientPermissionsException"} ], - "documentation":"

    A remediation exception is when a specified resource is no longer considered for auto-remediation. This API adds a new exception or updates an existing exception for a specified resource with a specified Config rule.

    Config generates a remediation exception when a problem occurs running a remediation action for a specified resource. Remediation exceptions blocks auto-remediation until the exception is cleared.

    When placing an exception on an Amazon Web Services resource, it is recommended that remediation is set as manual remediation until the given Config rule for the specified resource evaluates the resource as NON_COMPLIANT. Once the resource has been evaluated as NON_COMPLIANT, you can add remediation exceptions and change the remediation type back from Manual to Auto if you want to use auto-remediation. Otherwise, using auto-remediation before a NON_COMPLIANT evaluation result can delete resources before the exception is applied.

    Placing an exception can only be performed on resources that are NON_COMPLIANT. If you use this API for COMPLIANT resources or resources that are NOT_APPLICABLE, a remediation exception will not be generated. For more information on the conditions that initiate the possible Config evaluation results, see Concepts | Config Rules in the Config Developer Guide.

    " + "documentation":"

    A remediation exception is when a specified resource is no longer considered for auto-remediation. This API adds a new exception or updates an existing exception for a specified resource with a specified Config rule.

    Exceptions block auto remediation

    Config generates a remediation exception when a problem occurs running a remediation action for a specified resource. Remediation exceptions block auto-remediation until the exception is cleared.

    Manual remediation is recommended when placing an exception

    When placing an exception on an Amazon Web Services resource, it is recommended that remediation is set as manual remediation until the given Config rule for the specified resource evaluates the resource as NON_COMPLIANT. Once the resource has been evaluated as NON_COMPLIANT, you can add remediation exceptions and change the remediation type back from Manual to Auto if you want to use auto-remediation. Otherwise, using auto-remediation before a NON_COMPLIANT evaluation result can delete resources before the exception is applied.

    Exceptions can only be performed on non-compliant resources

    Placing an exception can only be performed on resources that are NON_COMPLIANT. If you use this API for COMPLIANT resources or resources that are NOT_APPLICABLE, a remediation exception will not be generated. For more information on the conditions that initiate the possible Config evaluation results, see Concepts | Config Rules in the Config Developer Guide.

    Auto remediation can be initiated even for compliant resources

    If you enable auto remediation for a specific Config rule using the PutRemediationConfigurations API or the Config console, it initiates the remediation process for all non-compliant resources for that specific rule. The auto remediation process relies on the compliance data snapshot which is captured on a periodic basis. Any non-compliant resource that is updated between the snapshot schedule will continue to be remediated based on the last known compliance data snapshot.

    This means that in some cases auto remediation can be initiated even for compliant resources, since the bootstrap processor uses a database that can have stale evaluation results based on the last known compliance data snapshot.

    " }, "PutResourceConfig":{ "name":"PutResourceConfig", @@ -4190,7 +4190,7 @@ "documentation":"

    A comma-separated list of resource types to exclude from recording by the configuration recorder.

    " } }, - "documentation":"

    Specifies whether the configuration recorder excludes certain resource types from being recorded. Use the resourceTypes field to enter a comma-separated list of resource types you want to exclude from recording.

    By default, when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, Config starts recording resources of that type automatically.

    How to use the exclusion recording strategy

    To use this option, you must set the useOnly field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES.

    Config will then record configuration changes for all supported resource types, except the resource types that you specify to exclude from being recorded.

    Global resource types and the exclusion recording strategy

    Unless specifically listed as exclusions, AWS::RDS::GlobalCluster will be recorded automatically in all supported Config Regions were the configuration recorder is enabled.

    IAM users, groups, roles, and customer managed policies will be recorded in the Region where you set up the configuration recorder if that is a Region where Config was available before February 2022. You cannot be record the global IAM resouce types in Regions supported by Config after February 2022. This list where you cannot record the global IAM resource types includes the following Regions:

    • Asia Pacific (Hyderabad)

    • Asia Pacific (Melbourne)

    • Europe (Spain)

    • Europe (Zurich)

    • Israel (Tel Aviv)

    • Middle East (UAE)

    " + "documentation":"

    Specifies whether the configuration recorder excludes certain resource types from being recorded. Use the resourceTypes field to enter a comma-separated list of resource types you want to exclude from recording.

    By default, when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, Config starts recording resources of that type automatically.

    How to use the exclusion recording strategy

    To use this option, you must set the useOnly field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES.

    Config will then record configuration changes for all supported resource types, except the resource types that you specify to exclude from being recorded.

    Global resource types and the exclusion recording strategy

    Unless specifically listed as exclusions, AWS::RDS::GlobalCluster will be recorded automatically in all supported Config Regions where the configuration recorder is enabled.

    IAM users, groups, roles, and customer managed policies will be recorded in the Region where you set up the configuration recorder if that is a Region where Config was available before February 2022. You cannot record the global IAM resource types in Regions supported by Config after February 2022. This list where you cannot record the global IAM resource types includes the following Regions:

    • Asia Pacific (Hyderabad)

    • Asia Pacific (Melbourne)

    • Canada West (Calgary)

    • Europe (Spain)

    • Europe (Zurich)

    • Israel (Tel Aviv)

    • Middle East (UAE)

    " }, "ExecutionControls":{ "type":"structure", @@ -6638,7 +6638,7 @@ }, "includeGlobalResourceTypes":{ "shape":"IncludeGlobalResourceTypes", - "documentation":"

    This option is a bundle which only applies to the global IAM resource types: IAM users, groups, roles, and customer managed policies. These global IAM resource types can only be recorded by Config in Regions where Config was available before February 2022. You cannot be record the global IAM resouce types in Regions supported by Config after February 2022. This list where you cannot record the global IAM resource types includes the following Regions:

    • Asia Pacific (Hyderabad)

    • Asia Pacific (Melbourne)

    • Europe (Spain)

    • Europe (Zurich)

    • Israel (Tel Aviv)

    • Middle East (UAE)

    Aurora global clusters are recorded in all enabled Regions

    The AWS::RDS::GlobalCluster resource type will be recorded in all supported Config Regions where the configuration recorder is enabled, even if includeGlobalResourceTypes is not set to true. The includeGlobalResourceTypes option is a bundle which only applies to IAM users, groups, roles, and customer managed policies.

    If you do not want to record AWS::RDS::GlobalCluster in all enabled Regions, use one of the following recording strategies:

    1. Record all current and future resource types with exclusions (EXCLUSION_BY_RESOURCE_TYPES), or

    2. Record specific resource types (INCLUSION_BY_RESOURCE_TYPES).

    For more information, see Selecting Which Resources are Recorded in the Config developer guide.

    Before you set this field to true, set the allSupported field of RecordingGroup to true. Optionally, you can set the useOnly field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES.

    Overriding fields

    If you set this field to false but list global IAM resource types in the resourceTypes field of RecordingGroup, Config will still record configuration changes for those specified resource types regardless of if you set the includeGlobalResourceTypes field to false.

    If you do not want to record configuration changes to the global IAM resource types (IAM users, groups, roles, and customer managed policies), make sure to not list them in the resourceTypes field in addition to setting the includeGlobalResourceTypes field to false.

    " + "documentation":"

    This option is a bundle which only applies to the global IAM resource types: IAM users, groups, roles, and customer managed policies. These global IAM resource types can only be recorded by Config in Regions where Config was available before February 2022. You cannot record the global IAM resource types in Regions supported by Config after February 2022. This list where you cannot record the global IAM resource types includes the following Regions:

    • Asia Pacific (Hyderabad)

    • Asia Pacific (Melbourne)

    • Canada West (Calgary)

    • Europe (Spain)

    • Europe (Zurich)

    • Israel (Tel Aviv)

    • Middle East (UAE)

    Aurora global clusters are recorded in all enabled Regions

    The AWS::RDS::GlobalCluster resource type will be recorded in all supported Config Regions where the configuration recorder is enabled, even if includeGlobalResourceTypes is set to false. The includeGlobalResourceTypes option is a bundle which only applies to IAM users, groups, roles, and customer managed policies.

    If you do not want to record AWS::RDS::GlobalCluster in all enabled Regions, use one of the following recording strategies:

    1. Record all current and future resource types with exclusions (EXCLUSION_BY_RESOURCE_TYPES), or

    2. Record specific resource types (INCLUSION_BY_RESOURCE_TYPES).

    For more information, see Selecting Which Resources are Recorded in the Config developer guide.

    includeGlobalResourceTypes and the exclusion recording strategy

    The includeGlobalResourceTypes field has no impact on the EXCLUSION_BY_RESOURCE_TYPES recording strategy. This means that the global IAM resource types (IAM users, groups, roles, and customer managed policies) will not be automatically added as exclusions for exclusionByResourceTypes when includeGlobalResourceTypes is set to false.

    The includeGlobalResourceTypes field should only be used to modify the AllSupported field, as the default for the AllSupported field is to record configuration changes for all supported resource types excluding the global IAM resource types. To include the global IAM resource types when AllSupported is set to true, make sure to set includeGlobalResourceTypes to true.

    To exclude the global IAM resource types for the EXCLUSION_BY_RESOURCE_TYPES recording strategy, you need to manually add them to the resourceTypes field of exclusionByResourceTypes.

    Required and optional fields

    Before you set this field to true, set the allSupported field of RecordingGroup to true. Optionally, you can set the useOnly field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES.

    Overriding fields

    If you set this field to false but list global IAM resource types in the resourceTypes field of RecordingGroup, Config will still record configuration changes for those specified resource types regardless of if you set the includeGlobalResourceTypes field to false.

    If you do not want to record configuration changes to the global IAM resource types (IAM users, groups, roles, and customer managed policies), make sure to not list them in the resourceTypes field in addition to setting the includeGlobalResourceTypes field to false.

    " }, "resourceTypes":{ "shape":"ResourceTypeList", @@ -6650,7 +6650,7 @@ }, "recordingStrategy":{ "shape":"RecordingStrategy", - "documentation":"

    An object that specifies the recording strategy for the configuration recorder.

    • If you set the useOnly field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES, Config records configuration changes for all supported resource types, excluding the global IAM resource types. You also must set the allSupported field of RecordingGroup to true. When Config adds support for a new resource type, Config automatically starts recording resources of that type.

    • If you set the useOnly field of RecordingStrategy to INCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for only the resource types you specify in the resourceTypes field of RecordingGroup.

    • If you set the useOnly field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for all supported resource types except the resource types that you specify to exclude from being recorded in the resourceTypes field of ExclusionByResourceTypes.

    Required and optional fields

    The recordingStrategy field is optional when you set the allSupported field of RecordingGroup to true.

    The recordingStrategy field is optional when you list resource types in the resourceTypes field of RecordingGroup.

    The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes.

    Overriding fields

    If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request.

    For example, even if you set includeGlobalResourceTypes to false, global IAM resource types will still be automatically recorded in this option unless those resource types are specifically listed as exclusions in the resourceTypes field of exclusionByResourceTypes.

    Global resources types and the resource exclusion recording strategy

    By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy, when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, Config starts recording resources of that type automatically.

    Unless specifically listed as exclusions, AWS::RDS::GlobalCluster will be recorded automatically in all supported Config Regions were the configuration recorder is enabled.

    IAM users, groups, roles, and customer managed policies will be recorded in the Region where you set up the configuration recorder if that is a Region where Config was available before February 2022. You cannot be record the global IAM resouce types in Regions supported by Config after February 2022. This list where you cannot record the global IAM resource types includes the following Regions:

    • Asia Pacific (Hyderabad)

    • Asia Pacific (Melbourne)

    • Europe (Spain)

    • Europe (Zurich)

    • Israel (Tel Aviv)

    • Middle East (UAE)

    " + "documentation":"

    An object that specifies the recording strategy for the configuration recorder.

    • If you set the useOnly field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES, Config records configuration changes for all supported resource types, excluding the global IAM resource types. You also must set the allSupported field of RecordingGroup to true. When Config adds support for a new resource type, Config automatically starts recording resources of that type.

    • If you set the useOnly field of RecordingStrategy to INCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for only the resource types you specify in the resourceTypes field of RecordingGroup.

    • If you set the useOnly field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for all supported resource types except the resource types that you specify to exclude from being recorded in the resourceTypes field of ExclusionByResourceTypes.

    Required and optional fields

    The recordingStrategy field is optional when you set the allSupported field of RecordingGroup to true.

    The recordingStrategy field is optional when you list resource types in the resourceTypes field of RecordingGroup.

    The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes.

    Overriding fields

    If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request.

    For example, even if you set includeGlobalResourceTypes to false, global IAM resource types will still be automatically recorded in this option unless those resource types are specifically listed as exclusions in the resourceTypes field of exclusionByResourceTypes.

    Global resources types and the resource exclusion recording strategy

    By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy, when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, Config starts recording resources of that type automatically.

    Unless specifically listed as exclusions, AWS::RDS::GlobalCluster will be recorded automatically in all supported Config Regions where the configuration recorder is enabled.

    IAM users, groups, roles, and customer managed policies will be recorded in the Region where you set up the configuration recorder if that is a Region where Config was available before February 2022. You cannot record the global IAM resource types in Regions supported by Config after February 2022. This list where you cannot record the global IAM resource types includes the following Regions:

    • Asia Pacific (Hyderabad)

    • Asia Pacific (Melbourne)

    • Canada West (Calgary)

    • Europe (Spain)

    • Europe (Zurich)

    • Israel (Tel Aviv)

    • Middle East (UAE)

    " } }, "documentation":"

    Specifies which resource types Config records for configuration changes. By default, Config records configuration changes for all current and future supported resource types in the Amazon Web Services Region where you have enabled Config, excluding the global IAM resource types: IAM users, groups, roles, and customer managed policies.

    In the recording group, you specify whether you want to record all supported current and future supported resource types or to include or exclude specific resources types. For a list of supported resource types, see Supported Resource Types in the Config developer guide.

    If you don't want Config to record all current and future supported resource types (excluding the global IAM resource types), use one of the following recording strategies:

    1. Record all current and future resource types with exclusions (EXCLUSION_BY_RESOURCE_TYPES), or

    2. Record specific resource types (INCLUSION_BY_RESOURCE_TYPES).

    If you use the recording strategy to Record all current and future resource types (ALL_SUPPORTED_RESOURCE_TYPES), you can use the flag includeGlobalResourceTypes to include the global IAM resource types in your recording.

    Aurora global clusters are recorded in all enabled Regions

    The AWS::RDS::GlobalCluster resource type will be recorded in all supported Config Regions where the configuration recorder is enabled.

    If you do not want to record AWS::RDS::GlobalCluster in all enabled Regions, use the EXCLUSION_BY_RESOURCE_TYPES or INCLUSION_BY_RESOURCE_TYPES recording strategy.

    " @@ -6707,7 +6707,7 @@ "members":{ "useOnly":{ "shape":"RecordingStrategyType", - "documentation":"

    The recording strategy for the configuration recorder.

    • If you set this option to ALL_SUPPORTED_RESOURCE_TYPES, Config records configuration changes for all supported resource types, excluding the global IAM resource types. You also must set the allSupported field of RecordingGroup to true. When Config adds support for a new resource type, Config automatically starts recording resources of that type. For a list of supported resource types, see Supported Resource Types in the Config developer guide.

    • If you set this option to INCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for only the resource types that you specify in the resourceTypes field of RecordingGroup.

    • If you set this option to EXCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for all supported resource types, except the resource types that you specify to exclude from being recorded in the resourceTypes field of ExclusionByResourceTypes.

    Required and optional fields

    The recordingStrategy field is optional when you set the allSupported field of RecordingGroup to true.

    The recordingStrategy field is optional when you list resource types in the resourceTypes field of RecordingGroup.

    The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes.

    Overriding fields

    If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request.

    For example, even if you set includeGlobalResourceTypes to false, global IAM resource types will still be automatically recorded in this option unless those resource types are specifically listed as exclusions in the resourceTypes field of exclusionByResourceTypes.

    Global resource types and the exclusion recording strategy

    By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy, when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, Config starts recording resources of that type automatically.

    Unless specifically listed as exclusions, AWS::RDS::GlobalCluster will be recorded automatically in all supported Config Regions were the configuration recorder is enabled.

    IAM users, groups, roles, and customer managed policies will be recorded in the Region where you set up the configuration recorder if that is a Region where Config was available before February 2022. You cannot be record the global IAM resouce types in Regions supported by Config after February 2022. This list where you cannot record the global IAM resource types includes the following Regions:

    • Asia Pacific (Hyderabad)

    • Asia Pacific (Melbourne)

    • Europe (Spain)

    • Europe (Zurich)

    • Israel (Tel Aviv)

    • Middle East (UAE)

    " + "documentation":"

    The recording strategy for the configuration recorder.

    • If you set this option to ALL_SUPPORTED_RESOURCE_TYPES, Config records configuration changes for all supported resource types, excluding the global IAM resource types. You also must set the allSupported field of RecordingGroup to true. When Config adds support for a new resource type, Config automatically starts recording resources of that type. For a list of supported resource types, see Supported Resource Types in the Config developer guide.

    • If you set this option to INCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for only the resource types that you specify in the resourceTypes field of RecordingGroup.

    • If you set this option to EXCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for all supported resource types, except the resource types that you specify to exclude from being recorded in the resourceTypes field of ExclusionByResourceTypes.

    Required and optional fields

    The recordingStrategy field is optional when you set the allSupported field of RecordingGroup to true.

    The recordingStrategy field is optional when you list resource types in the resourceTypes field of RecordingGroup.

    The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes.

    Overriding fields

    If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request.

    For example, even if you set includeGlobalResourceTypes to false, global IAM resource types will still be automatically recorded in this option unless those resource types are specifically listed as exclusions in the resourceTypes field of exclusionByResourceTypes.

    Global resource types and the exclusion recording strategy

    By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy, when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, Config starts recording resources of that type automatically.

    Unless specifically listed as exclusions, AWS::RDS::GlobalCluster will be recorded automatically in all supported Config Regions where the configuration recorder is enabled.

    IAM users, groups, roles, and customer managed policies will be recorded in the Region where you set up the configuration recorder if that is a Region where Config was available before February 2022. You cannot record the global IAM resource types in Regions supported by Config after February 2022. This list where you cannot record the global IAM resource types includes the following Regions:

    • Asia Pacific (Hyderabad)

    • Asia Pacific (Melbourne)

    • Canada West (Calgary)

    • Europe (Spain)

    • Europe (Zurich)

    • Israel (Tel Aviv)

    • Middle East (UAE)

    " } }, "documentation":"

    Specifies the recording strategy of the configuration recorder.

    " @@ -8248,7 +8248,7 @@ "members":{ "DocumentName":{ "shape":"SSMDocumentName", - "documentation":"

    The name or Amazon Resource Name (ARN) of the SSM document to use to create a conformance pack. If you use the document name, Config checks only your account and Amazon Web Services Region for the SSM document. If you want to use an SSM document from another Region or account, you must provide the ARN.

    " + "documentation":"

    The name or Amazon Resource Name (ARN) of the SSM document to use to create a conformance pack. If you use the document name, Config checks only your account and Amazon Web Services Region for the SSM document.

    " }, "DocumentVersion":{ "shape":"SSMDocumentVersion", diff --git a/services/connect/pom.xml b/services/connect/pom.xml index 1d3d1949808e..c50dd98ca2b6 100644 --- a/services/connect/pom.xml +++ b/services/connect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT connect AWS Java SDK :: Services :: Connect diff --git a/services/connectcampaigns/pom.xml b/services/connectcampaigns/pom.xml index 1f8d9a232b51..a739c9a9e459 100644 --- a/services/connectcampaigns/pom.xml +++ b/services/connectcampaigns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT connectcampaigns AWS Java SDK :: Services :: Connect Campaigns diff --git a/services/connectcases/pom.xml b/services/connectcases/pom.xml index e46235b35e0a..7746c273d672 100644 --- a/services/connectcases/pom.xml +++ b/services/connectcases/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT connectcases AWS Java SDK :: Services :: Connect Cases diff --git a/services/connectcontactlens/pom.xml b/services/connectcontactlens/pom.xml index 341abb9ffd05..042f468d5e8e 100644 --- a/services/connectcontactlens/pom.xml +++ b/services/connectcontactlens/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT connectcontactlens AWS Java SDK :: Services :: Connect Contact Lens diff --git a/services/connectcontactlens/src/main/resources/codegen-resources/customization.config b/services/connectcontactlens/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/connectcontactlens/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/connectparticipant/pom.xml b/services/connectparticipant/pom.xml index aa4898152e7a..45f293f5c091 100644 --- a/services/connectparticipant/pom.xml +++ 
b/services/connectparticipant/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT connectparticipant AWS Java SDK :: Services :: ConnectParticipant diff --git a/services/connectparticipant/src/main/resources/codegen-resources/service-2.json b/services/connectparticipant/src/main/resources/codegen-resources/service-2.json index 1230bf27ab94..41f7f09550ff 100644 --- a/services/connectparticipant/src/main/resources/codegen-resources/service-2.json +++ b/services/connectparticipant/src/main/resources/codegen-resources/service-2.json @@ -29,7 +29,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ConflictException"} ], - "documentation":"

    Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API.

    ConnectionToken is used for invoking this API instead of ParticipantToken.

    The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

    " + "documentation":"

    Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API. A conflict exception is thrown when an attachment with that identifier is already being uploaded.

    ConnectionToken is used for invoking this API instead of ParticipantToken.

    The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

    " }, "CreateParticipantConnection":{ "name":"CreateParticipantConnection", @@ -110,7 +110,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

    Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat.

    ConnectionToken is used for invoking this API instead of ParticipantToken.

    The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

    " + "documentation":"

    Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat.

    If you have a process that consumes events in the transcript of a chat that has ended, note that chat transcripts contain the following event content types if the event has occurred during the chat session:

    • application/vnd.amazonaws.connect.event.participant.left

    • application/vnd.amazonaws.connect.event.participant.joined

    • application/vnd.amazonaws.connect.event.chat.ended

    • application/vnd.amazonaws.connect.event.transfer.succeeded

    • application/vnd.amazonaws.connect.event.transfer.failed

    ConnectionToken is used for invoking this API instead of ParticipantToken.

    The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

    " }, "SendEvent":{ "name":"SendEvent", @@ -127,7 +127,7 @@ {"shape":"ValidationException"}, {"shape":"ConflictException"} ], - "documentation":"

    Sends an event.

    ConnectionToken is used for invoking this API instead of ParticipantToken.

    The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

    " + "documentation":"

    The application/vnd.amazonaws.connect.event.connection.acknowledged ContentType will no longer be supported starting December 31, 2024. This event has been migrated to the CreateParticipantConnection API using the ConnectParticipant field.

    Sends an event. Message receipts are not supported when there are more than two active participants in the chat. Using the SendEvent API for message receipts when a supervisor is barged-in will result in a conflict exception.

    ConnectionToken is used for invoking this API instead of ParticipantToken.

    The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

    " }, "SendMessage":{ "name":"SendMessage", @@ -302,7 +302,7 @@ "members":{ "Message":{"shape":"Reason"} }, - "documentation":"

    An attachment with that identifier is already being uploaded.

    ", + "documentation":"

    The requested operation conflicts with the current state of a service resource associated with the request.

    ", "error":{"httpStatusCode":409}, "exception":true }, @@ -728,7 +728,7 @@ "members":{ "ContentType":{ "shape":"ChatContentType", - "documentation":"

    The content type of the request. Supported types are:

    • application/vnd.amazonaws.connect.event.typing

    • application/vnd.amazonaws.connect.event.connection.acknowledged

    • application/vnd.amazonaws.connect.event.message.delivered

    • application/vnd.amazonaws.connect.event.message.read

    " + "documentation":"

    The content type of the request. Supported types are:

    • application/vnd.amazonaws.connect.event.typing

    • application/vnd.amazonaws.connect.event.connection.acknowledged (will be deprecated on December 31, 2024)

    • application/vnd.amazonaws.connect.event.message.delivered

    • application/vnd.amazonaws.connect.event.message.read

    " }, "Content":{ "shape":"ChatContent", diff --git a/services/controltower/pom.xml b/services/controltower/pom.xml index 36b608da4141..0bea30e1daa7 100644 --- a/services/controltower/pom.xml +++ b/services/controltower/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT controltower AWS Java SDK :: Services :: Control Tower diff --git a/services/controltower/src/main/resources/codegen-resources/customization.config b/services/controltower/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/controltower/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/controltower/src/main/resources/codegen-resources/paginators-1.json b/services/controltower/src/main/resources/codegen-resources/paginators-1.json index 24f2f2bdf599..10d8dd60fc12 100644 --- a/services/controltower/src/main/resources/codegen-resources/paginators-1.json +++ b/services/controltower/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,17 @@ { "pagination": { + "ListBaselines": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "baselines" + }, + "ListEnabledBaselines": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "enabledBaselines" + }, "ListEnabledControls": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/services/controltower/src/main/resources/codegen-resources/service-2.json b/services/controltower/src/main/resources/codegen-resources/service-2.json index 1972b4fcbf56..bd8367787d45 100644 --- a/services/controltower/src/main/resources/codegen-resources/service-2.json +++ b/services/controltower/src/main/resources/codegen-resources/service-2.json @@ -50,6 +50,27 @@ "documentation":"

    Decommissions a landing zone. This API call starts an asynchronous operation that deletes Amazon Web Services Control Tower resources deployed in accounts managed by Amazon Web Services Control Tower.

    ", "idempotent":true }, + "DisableBaseline":{ + "name":"DisableBaseline", + "http":{ + "method":"POST", + "requestUri":"/disable-baseline", + "responseCode":200 + }, + "input":{"shape":"DisableBaselineInput"}, + "output":{"shape":"DisableBaselineOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Disable an EnabledBaseline resource on the specified Target. This API starts an asynchronous operation to remove all resources deployed as part of the baseline enablement. The resource will vary depending on the enabled baseline.

    ", + "idempotent":true + }, "DisableControl":{ "name":"DisableControl", "http":{ @@ -68,7 +89,27 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    This API call turns off a control. It starts an asynchronous operation that deletes Amazon Web Services resources on the specified organizational unit and the accounts it contains. The resources will vary according to the control that you specify. For usage examples, see the Amazon Web Services Control Tower User Guide .

    " + "documentation":"

    This API call turns off a control. It starts an asynchronous operation that deletes AWS resources on the specified organizational unit and the accounts it contains. The resources will vary according to the control that you specify. For usage examples, see the Amazon Web Services Control Tower User Guide .

    " + }, + "EnableBaseline":{ + "name":"EnableBaseline", + "http":{ + "method":"POST", + "requestUri":"/enable-baseline", + "responseCode":200 + }, + "input":{"shape":"EnableBaselineInput"}, + "output":{"shape":"EnableBaselineOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Enable (apply) a Baseline to a Target. This API starts an asynchronous operation to deploy resources specified by the Baseline to the specified Target.

    " }, "EnableControl":{ "name":"EnableControl", @@ -90,6 +131,42 @@ ], "documentation":"

    This API call activates a control. It starts an asynchronous operation that creates Amazon Web Services resources on the specified organizational unit and the accounts it contains. The resources created will vary according to the control that you specify. For usage examples, see the Amazon Web Services Control Tower User Guide .

    " }, + "GetBaseline":{ + "name":"GetBaseline", + "http":{ + "method":"POST", + "requestUri":"/get-baseline", + "responseCode":200 + }, + "input":{"shape":"GetBaselineInput"}, + "output":{"shape":"GetBaselineOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Retrieve details about an existing Baseline resource by specifying its identifier.

    " + }, + "GetBaselineOperation":{ + "name":"GetBaselineOperation", + "http":{ + "method":"POST", + "requestUri":"/get-baseline-operation", + "responseCode":200 + }, + "input":{"shape":"GetBaselineOperationInput"}, + "output":{"shape":"GetBaselineOperationOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Returns the details of an asynchronous baseline operation, as initiated by any of these APIs: EnableBaseline, DisableBaseline, UpdateEnabledBaseline, ResetEnabledBaseline. A status message is displayed in case of operation failure.

    " + }, "GetControlOperation":{ "name":"GetControlOperation", "http":{ @@ -108,6 +185,24 @@ ], "documentation":"

    Returns the status of a particular EnableControl or DisableControl operation. Displays a message in case of error. Details for an operation are available for 90 days. For usage examples, see the Amazon Web Services Control Tower User Guide .

    " }, + "GetEnabledBaseline":{ + "name":"GetEnabledBaseline", + "http":{ + "method":"POST", + "requestUri":"/get-enabled-baseline", + "responseCode":200 + }, + "input":{"shape":"GetEnabledBaselineInput"}, + "output":{"shape":"GetEnabledBaselineOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Retrieve details of an EnabledBaseline resource by specifying its identifier.

    " + }, "GetEnabledControl":{ "name":"GetEnabledControl", "http":{ @@ -162,6 +257,40 @@ ], "documentation":"

    Returns the status of the specified landing zone operation. Details for an operation are available for 60 days.

    " }, + "ListBaselines":{ + "name":"ListBaselines", + "http":{ + "method":"POST", + "requestUri":"/list-baselines", + "responseCode":200 + }, + "input":{"shape":"ListBaselinesInput"}, + "output":{"shape":"ListBaselinesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns a summary list of all available baselines.

    " + }, + "ListEnabledBaselines":{ + "name":"ListEnabledBaselines", + "http":{ + "method":"POST", + "requestUri":"/list-enabled-baselines", + "responseCode":200 + }, + "input":{"shape":"ListEnabledBaselinesInput"}, + "output":{"shape":"ListEnabledBaselinesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns a list of summaries describing EnabledBaseline resources. You can filter the list by the corresponding Baseline or Target of the EnabledBaseline resources.

    " + }, "ListEnabledControls":{ "name":"ListEnabledControls", "http":{ @@ -213,6 +342,26 @@ ], "documentation":"

    Returns a list of tags associated with the resource. For usage examples, see the Amazon Web Services Control Tower User Guide .

    " }, + "ResetEnabledBaseline":{ + "name":"ResetEnabledBaseline", + "http":{ + "method":"POST", + "requestUri":"/reset-enabled-baseline", + "responseCode":200 + }, + "input":{"shape":"ResetEnabledBaselineInput"}, + "output":{"shape":"ResetEnabledBaselineOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Re-enables an EnabledBaseline resource. For example, this API can re-apply the existing Baseline after a new member account is moved to the target OU.

    " + }, "ResetLandingZone":{ "name":"ResetLandingZone", "http":{ @@ -264,6 +413,26 @@ ], "documentation":"

    Removes tags from a resource. For usage examples, see the Amazon Web Services Control Tower User Guide .

    " }, + "UpdateEnabledBaseline":{ + "name":"UpdateEnabledBaseline", + "http":{ + "method":"POST", + "requestUri":"/update-enabled-baseline", + "responseCode":200 + }, + "input":{"shape":"UpdateEnabledBaselineInput"}, + "output":{"shape":"UpdateEnabledBaselineOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Updates an EnabledBaseline resource's applied parameters or version.

    " + }, "UpdateEnabledControl":{ "name":"UpdateEnabledControl", "http":{ @@ -324,6 +493,89 @@ "min":20, "pattern":"^arn:aws[0-9a-zA-Z_\\-:\\/]+$" }, + "BaselineArn":{ + "type":"string", + "pattern":"^arn:[a-z-]+:controltower:[a-z0-9-]*:[0-9]{0,12}:baseline/[A-Z0-9]{16}$" + }, + "BaselineOperation":{ + "type":"structure", + "members":{ + "endTime":{ + "shape":"Timestamp", + "documentation":"

    The end time of the operation (if applicable), in ISO 8601 format.

    " + }, + "operationIdentifier":{ + "shape":"OperationIdentifier", + "documentation":"

    The identifier of the specified operation.

    " + }, + "operationType":{ + "shape":"BaselineOperationType", + "documentation":"

    An enumerated type (enum) with possible values of ENABLE_BASELINE, DISABLE_BASELINE, UPDATE_ENABLED_BASELINE, or RESET_ENABLED_BASELINE.

    " + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

    The start time of the operation, in ISO 8601 format.

    " + }, + "status":{ + "shape":"BaselineOperationStatus", + "documentation":"

    An enumerated type (enum) with possible values of SUCCEEDED, FAILED, or IN_PROGRESS.

    " + }, + "statusMessage":{ + "shape":"String", + "documentation":"

    A status message that gives more information about the operation's status, if applicable.

    " + } + }, + "documentation":"

    An object of shape BaselineOperation, returning details about the specified Baseline operation ID.

    " + }, + "BaselineOperationStatus":{ + "type":"string", + "enum":[ + "SUCCEEDED", + "FAILED", + "IN_PROGRESS" + ] + }, + "BaselineOperationType":{ + "type":"string", + "enum":[ + "ENABLE_BASELINE", + "DISABLE_BASELINE", + "UPDATE_ENABLED_BASELINE", + "RESET_ENABLED_BASELINE" + ] + }, + "BaselineSummary":{ + "type":"structure", + "required":[ + "arn", + "name" + ], + "members":{ + "arn":{ + "shape":"String", + "documentation":"

    The full ARN of a Baseline.

    " + }, + "description":{ + "shape":"String", + "documentation":"

    A summary description of a Baseline.

    " + }, + "name":{ + "shape":"String", + "documentation":"

    The human-readable name of a Baseline.

    " + } + }, + "documentation":"

    Returns a summary of information about a Baseline object.

    " + }, + "BaselineVersion":{ + "type":"string", + "max":10, + "min":1, + "pattern":"^\\d+(?:\\.\\d+){0,2}$" + }, + "Baselines":{ + "type":"list", + "member":{"shape":"BaselineSummary"} + }, "ConflictException":{ "type":"structure", "required":["message"], @@ -394,7 +646,7 @@ "members":{ "manifest":{ "shape":"Manifest", - "documentation":"

    The manifest JSON file is a text file that describes your Amazon Web Services resources. For examples, review Launch your landing zone.

    " + "documentation":"

    The manifest.yaml file is a text file that describes your Amazon Web Services resources. For examples, review The manifest file.

    " }, "tags":{ "shape":"TagMap", @@ -443,6 +695,26 @@ } } }, + "DisableBaselineInput":{ + "type":"structure", + "required":["enabledBaselineIdentifier"], + "members":{ + "enabledBaselineIdentifier":{ + "shape":"Arn", + "documentation":"

    Identifier of the EnabledBaseline resource to be deactivated, in ARN format.

    " + } + } + }, + "DisableBaselineOutput":{ + "type":"structure", + "required":["operationIdentifier"], + "members":{ + "operationIdentifier":{ + "shape":"OperationIdentifier", + "documentation":"

    The ID (in UUID format) of the asynchronous DisableBaseline operation. This operationIdentifier is used to track status through calls to the GetBaselineOperation API.

    " + } + } + }, "DisableControlInput":{ "type":"structure", "required":[ @@ -452,7 +724,7 @@ "members":{ "controlIdentifier":{ "shape":"ControlIdentifier", - "documentation":"

    The ARN of the control. Only Strongly recommended and Elective controls are permitted, with the exception of the landing zone Region deny control. For information on how to find the controlIdentifier, see the overview page.

    " + "documentation":"

    The ARN of the control. Only Strongly recommended and Elective controls are permitted, with the exception of the Region deny control. For information on how to find the controlIdentifier, see the overview page.

    " }, "targetIdentifier":{ "shape":"TargetIdentifier", @@ -495,6 +767,53 @@ }, "documentation":"

    The drift summary of the enabled control.

    Amazon Web Services Control Tower expects the enabled control configuration to include all supported and governed Regions. If the enabled control differs from the expected configuration, it is defined to be in a state of drift. You can repair this drift by resetting the enabled control.

    " }, + "EnableBaselineInput":{ + "type":"structure", + "required":[ + "baselineIdentifier", + "baselineVersion", + "targetIdentifier" + ], + "members":{ + "baselineIdentifier":{ + "shape":"Arn", + "documentation":"

    The ARN of the baseline to be enabled.

    " + }, + "baselineVersion":{ + "shape":"BaselineVersion", + "documentation":"

    The specific version to be enabled of the specified baseline.

    " + }, + "parameters":{ + "shape":"EnabledBaselineParameters", + "documentation":"

    A list of key-value objects that specify enablement parameters, where key is a string and value is a document of any type.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    Tags associated with input to EnableBaseline.

    " + }, + "targetIdentifier":{ + "shape":"Arn", + "documentation":"

    The ARN of the target on which the baseline will be enabled. Only OUs are supported as targets.

    " + } + } + }, + "EnableBaselineOutput":{ + "type":"structure", + "required":[ + "arn", + "operationIdentifier" + ], + "members":{ + "arn":{ + "shape":"Arn", + "documentation":"

    The ARN of the EnabledBaseline resource.

    " + }, + "operationIdentifier":{ + "shape":"OperationIdentifier", + "documentation":"

    The ID (in UUID format) of the asynchronous EnableBaseline operation. This operationIdentifier is used to track status through calls to the GetBaselineOperation API.

    " + } + } + }, "EnableControlInput":{ "type":"structure", "required":[ @@ -504,11 +823,11 @@ "members":{ "controlIdentifier":{ "shape":"ControlIdentifier", - "documentation":"

    The ARN of the control. Only Strongly recommended and Elective controls are permitted, with the exception of the landing zone Region deny control. For information on how to find the controlIdentifier, see the overview page.

    " + "documentation":"

    The ARN of the control. Only Strongly recommended and Elective controls are permitted, with the exception of the Region deny control. For information on how to find the controlIdentifier, see the overview page.

    " }, "parameters":{ "shape":"EnabledControlParameters", - "documentation":"

    An array of EnabledControlParameter objects

    " + "documentation":"

    A list of input parameter values, which are specified to configure the control when you enable it.

    " }, "tags":{ "shape":"TagMap", @@ -534,6 +853,148 @@ } } }, + "EnabledBaselineBaselineIdentifiers":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":5, + "min":1 + }, + "EnabledBaselineDetails":{ + "type":"structure", + "required":[ + "arn", + "baselineIdentifier", + "statusSummary", + "targetIdentifier" + ], + "members":{ + "arn":{ + "shape":"Arn", + "documentation":"

    The ARN of the EnabledBaseline resource.

    " + }, + "baselineIdentifier":{ + "shape":"String", + "documentation":"

    The specific Baseline enabled as part of the EnabledBaseline resource.

    " + }, + "baselineVersion":{ + "shape":"String", + "documentation":"

    The enabled version of the Baseline.

    " + }, + "parameters":{ + "shape":"EnabledBaselineParameterSummaries", + "documentation":"

    Shows the parameters that are applied when enabling this Baseline.

    " + }, + "statusSummary":{"shape":"EnablementStatusSummary"}, + "targetIdentifier":{ + "shape":"String", + "documentation":"

    The target on which to enable the Baseline.

    " + } + }, + "documentation":"

    Details of the EnabledBaseline resource.

    " + }, + "EnabledBaselineFilter":{ + "type":"structure", + "members":{ + "baselineIdentifiers":{ + "shape":"EnabledBaselineBaselineIdentifiers", + "documentation":"

    Identifiers for the Baseline objects returned as part of the filter operation.

    " + }, + "targetIdentifiers":{ + "shape":"EnabledBaselineTargetIdentifiers", + "documentation":"

    Identifiers for the targets of the Baseline filter operation.

    " + } + }, + "documentation":"

    A filter applied on the ListEnabledBaseline operation. Allowed filters are baselineIdentifiers and targetIdentifiers. The filter can be applied for either, or both.

    " + }, + "EnabledBaselineParameter":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{ + "shape":"String", + "documentation":"

    A string denoting the parameter key.

    " + }, + "value":{ + "shape":"EnabledBaselineParameterDocument", + "documentation":"

    A low-level Document object of any type (for example, a Java Object).

    " + } + }, + "documentation":"

    A key-value parameter to an EnabledBaseline resource.

    " + }, + "EnabledBaselineParameterDocument":{ + "type":"structure", + "members":{ + }, + "document":true + }, + "EnabledBaselineParameterSummaries":{ + "type":"list", + "member":{"shape":"EnabledBaselineParameterSummary"} + }, + "EnabledBaselineParameterSummary":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{ + "shape":"String", + "documentation":"

    A string denoting the parameter key.

    " + }, + "value":{ + "shape":"EnabledBaselineParameterDocument", + "documentation":"

    A low-level document object of any type (for example, a Java Object).

    " + } + }, + "documentation":"

    Summary of an applied parameter to an EnabledBaseline resource.

    " + }, + "EnabledBaselineParameters":{ + "type":"list", + "member":{"shape":"EnabledBaselineParameter"} + }, + "EnabledBaselineSummary":{ + "type":"structure", + "required":[ + "arn", + "baselineIdentifier", + "statusSummary", + "targetIdentifier" + ], + "members":{ + "arn":{ + "shape":"Arn", + "documentation":"

    The ARN of the EnabledBaseline resource.

    " + }, + "baselineIdentifier":{ + "shape":"String", + "documentation":"

    The specific baseline that is enabled as part of the EnabledBaseline resource.

    " + }, + "baselineVersion":{ + "shape":"String", + "documentation":"

    The enabled version of the baseline.

    " + }, + "statusSummary":{"shape":"EnablementStatusSummary"}, + "targetIdentifier":{ + "shape":"String", + "documentation":"

    The target upon which the baseline is enabled.

    " + } + }, + "documentation":"

    Returns a summary of information about an EnabledBaseline object.

    " + }, + "EnabledBaselineTargetIdentifiers":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":5, + "min":1 + }, + "EnabledBaselines":{ + "type":"list", + "member":{"shape":"EnabledBaselineSummary"} + }, "EnabledControlDetails":{ "type":"structure", "members":{ @@ -577,14 +1038,14 @@ "members":{ "key":{ "shape":"String", - "documentation":"

    The key of a key/value pair. It is of type string.

    " + "documentation":"

    The key of a key/value pair.

    " }, "value":{ "shape":"Document", - "documentation":"

    The value of a key/value pair. It can be of type array string, number, object, or boolean.

    " + "documentation":"

    The value of a key/value pair.

    " } }, - "documentation":"

    A set of parameters that configure the behavior of the enabled control. A key/value pair, where Key is of type String and Value is of type Document.

    " + "documentation":"

    A key/value pair, where Key is of type String and Value is of type Document.

    " }, "EnabledControlParameterSummaries":{ "type":"list", @@ -664,6 +1125,57 @@ }, "documentation":"

    The deployment summary of the enabled control.

    " }, + "GetBaselineInput":{ + "type":"structure", + "required":["baselineIdentifier"], + "members":{ + "baselineIdentifier":{ + "shape":"BaselineArn", + "documentation":"

    The ARN of the Baseline resource to be retrieved.

    " + } + } + }, + "GetBaselineOperationInput":{ + "type":"structure", + "required":["operationIdentifier"], + "members":{ + "operationIdentifier":{ + "shape":"OperationIdentifier", + "documentation":"

    The operation ID returned from mutating asynchronous APIs (Enable, Disable, Update, Reset).

    " + } + } + }, + "GetBaselineOperationOutput":{ + "type":"structure", + "required":["baselineOperation"], + "members":{ + "baselineOperation":{ + "shape":"BaselineOperation", + "documentation":"

    A baselineOperation object that shows information about the specified operation ID.

    " + } + } + }, + "GetBaselineOutput":{ + "type":"structure", + "required":[ + "arn", + "name" + ], + "members":{ + "arn":{ + "shape":"BaselineArn", + "documentation":"

    The baseline ARN.

    " + }, + "description":{ + "shape":"String", + "documentation":"

    A description of the baseline.

    " + }, + "name":{ + "shape":"String", + "documentation":"

    A user-friendly name for the baseline.

    " + } + } + }, "GetControlOperationInput":{ "type":"structure", "required":["operationIdentifier"], @@ -684,6 +1196,25 @@ } } }, + "GetEnabledBaselineInput":{ + "type":"structure", + "required":["enabledBaselineIdentifier"], + "members":{ + "enabledBaselineIdentifier":{ + "shape":"Arn", + "documentation":"

    Identifier of the EnabledBaseline resource to be retrieved, in ARN format.

    " + } + } + }, + "GetEnabledBaselineOutput":{ + "type":"structure", + "members":{ + "enabledBaselineDetails":{ + "shape":"EnabledBaselineDetails", + "documentation":"

    Details of the EnabledBaseline resource.

    " + } + } + }, "GetEnabledControlInput":{ "type":"structure", "required":["enabledControlIdentifier"], @@ -781,11 +1312,11 @@ }, "manifest":{ "shape":"Manifest", - "documentation":"

    The landing zone manifest JSON text file that specifies the landing zone configurations.

    " + "documentation":"

    The landing zone manifest.yaml text file that specifies the landing zone configurations.

    " }, "status":{ "shape":"LandingZoneStatus", - "documentation":"

    The landing zone deployment status.

    " + "documentation":"

    The landing zone deployment status. One of ACTIVE, PROCESSING, FAILED.

    " }, "version":{ "shape":"LandingZoneVersion", @@ -878,6 +1409,80 @@ "min":3, "pattern":"^\\d+.\\d+$" }, + "ListBaselinesInput":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListBaselinesMaxResults", + "documentation":"

    The maximum number of results to be shown.

    " + }, + "nextToken":{ + "shape":"String", + "documentation":"

    A pagination token.

    " + } + } + }, + "ListBaselinesMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":4 + }, + "ListBaselinesOutput":{ + "type":"structure", + "required":["baselines"], + "members":{ + "baselines":{ + "shape":"Baselines", + "documentation":"

    A list of Baseline object details.

    " + }, + "nextToken":{ + "shape":"String", + "documentation":"

    A pagination token.

    " + } + } + }, + "ListEnabledBaselinesInput":{ + "type":"structure", + "members":{ + "filter":{ + "shape":"EnabledBaselineFilter", + "documentation":"

    A filter applied on the ListEnabledBaseline operation. Allowed filters are baselineIdentifiers and targetIdentifiers. The filter can be applied for either, or both.

    " + }, + "maxResults":{ + "shape":"ListEnabledBaselinesMaxResults", + "documentation":"

    The maximum number of results to be shown.

    " + }, + "nextToken":{ + "shape":"ListEnabledBaselinesNextToken", + "documentation":"

    A pagination token.

    " + } + } + }, + "ListEnabledBaselinesMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":5 + }, + "ListEnabledBaselinesNextToken":{ + "type":"string", + "pattern":"\\S+" + }, + "ListEnabledBaselinesOutput":{ + "type":"structure", + "required":["enabledBaselines"], + "members":{ + "enabledBaselines":{ + "shape":"EnabledBaselines", + "documentation":"

    Returns a list of summaries of EnabledBaseline resources.

    " + }, + "nextToken":{ + "shape":"ListEnabledBaselinesNextToken", + "documentation":"

    A pagination token.

    " + } + } + }, "ListEnabledControlsInput":{ "type":"structure", "required":["targetIdentifier"], @@ -1004,6 +1609,26 @@ "max":50, "min":1 }, + "ResetEnabledBaselineInput":{ + "type":"structure", + "required":["enabledBaselineIdentifier"], + "members":{ + "enabledBaselineIdentifier":{ + "shape":"Arn", + "documentation":"

    Specifies the ID of the EnabledBaseline resource to be re-enabled, in ARN format.

    " + } + } + }, + "ResetEnabledBaselineOutput":{ + "type":"structure", + "required":["operationIdentifier"], + "members":{ + "operationIdentifier":{ + "shape":"OperationIdentifier", + "documentation":"

    The ID (in UUID format) of the asynchronous ResetEnabledBaseline operation. This operationIdentifier is used to track status through calls to the GetBaselineOperation API.

    " + } + } + }, "ResetLandingZoneInput":{ "type":"structure", "required":["landingZoneIdentifier"], @@ -1123,13 +1748,13 @@ }, "retryAfterSeconds":{ "shape":"Integer", - "documentation":"

    The number of seconds to wait before retrying.

    ", + "documentation":"

    The number of seconds the caller should wait before retrying.

    ", "location":"header", "locationName":"Retry-After" }, "serviceCode":{ "shape":"String", - "documentation":"

    The ID of the service that is associated with the error.

    " + "documentation":"

    The ID of the service that is associated with the error.

    " } }, "documentation":"

    The request was denied due to request throttling.

    ", @@ -1170,6 +1795,37 @@ "members":{ } }, + "UpdateEnabledBaselineInput":{ + "type":"structure", + "required":[ + "baselineVersion", + "enabledBaselineIdentifier" + ], + "members":{ + "baselineVersion":{ + "shape":"BaselineVersion", + "documentation":"

    Specifies the new Baseline version, to which the EnabledBaseline should be updated.

    " + }, + "enabledBaselineIdentifier":{ + "shape":"Arn", + "documentation":"

    Specifies the EnabledBaseline resource to be updated.

    " + }, + "parameters":{ + "shape":"EnabledBaselineParameters", + "documentation":"

    Parameters to apply when making an update.

    " + } + } + }, + "UpdateEnabledBaselineOutput":{ + "type":"structure", + "required":["operationIdentifier"], + "members":{ + "operationIdentifier":{ + "shape":"OperationIdentifier", + "documentation":"

    The ID (in UUID format) of the asynchronous UpdateEnabledBaseline operation. This operationIdentifier is used to track status through calls to the GetBaselineOperation API.

    " + } + } + }, "UpdateEnabledControlInput":{ "type":"structure", "required":[ @@ -1211,7 +1867,7 @@ }, "manifest":{ "shape":"Manifest", - "documentation":"

    The manifest JSON file is a text file that describes your Amazon Web Services resources. For examples, review Launch your landing zone.

    " + "documentation":"

    The manifest.yaml file is a text file that describes your Amazon Web Services resources. For examples, review The manifest file.

    " }, "version":{ "shape":"LandingZoneVersion", diff --git a/services/costandusagereport/pom.xml b/services/costandusagereport/pom.xml index c224141a79c5..573d1c295dd3 100644 --- a/services/costandusagereport/pom.xml +++ b/services/costandusagereport/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT costandusagereport AWS Java SDK :: Services :: AWS Cost and Usage Report diff --git a/services/costexplorer/pom.xml b/services/costexplorer/pom.xml index 8321cbd9f0e9..f11cd1f21cf6 100644 --- a/services/costexplorer/pom.xml +++ b/services/costexplorer/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 costexplorer diff --git a/services/costexplorer/src/main/resources/codegen-resources/customization.config b/services/costexplorer/src/main/resources/codegen-resources/customization.config index 09691b4543c1..e4d57b5ab148 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/customization.config +++ b/services/costexplorer/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,6 @@ { - "excludedSimpleMethods" : ["getCostAndUsage"] + "excludedSimpleMethods": [ + "getCostAndUsage" + ], + "useSraAuth": true } diff --git a/services/costoptimizationhub/pom.xml b/services/costoptimizationhub/pom.xml index 9416f71c2c40..5d1439234a72 100644 --- a/services/costoptimizationhub/pom.xml +++ b/services/costoptimizationhub/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT costoptimizationhub AWS Java SDK :: Services :: Cost Optimization Hub diff --git a/services/costoptimizationhub/src/main/resources/codegen-resources/customization.config b/services/costoptimizationhub/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/costoptimizationhub/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + 
"useSraAuth": true +} diff --git a/services/costoptimizationhub/src/main/resources/codegen-resources/service-2.json b/services/costoptimizationhub/src/main/resources/codegen-resources/service-2.json index d2006d8af591..664c769f5938 100644 --- a/services/costoptimizationhub/src/main/resources/codegen-resources/service-2.json +++ b/services/costoptimizationhub/src/main/resources/codegen-resources/service-2.json @@ -844,7 +844,7 @@ "members":{ "accountId":{ "shape":"AccountId", - "documentation":"

    The enrollment status of a specific account ID in the organization.

    " + "documentation":"

    The account ID of a member account in the organization.

    " }, "includeOrganizationInfo":{ "shape":"PrimitiveBoolean", @@ -863,9 +863,13 @@ "ListEnrollmentStatusesResponse":{ "type":"structure", "members":{ + "includeMemberAccounts":{ + "shape":"Boolean", + "documentation":"

    The enrollment status of all member accounts in the organization if the account is the management account.

    " + }, "items":{ "shape":"AccountEnrollmentStatuses", - "documentation":"

    The account enrollment statuses.

    " + "documentation":"

    The enrollment status of a specific account ID, including creation and last updated timestamps.

    " }, "nextToken":{ "shape":"String", diff --git a/services/customerprofiles/pom.xml b/services/customerprofiles/pom.xml index 5d586b960e68..03d9d02dd6ce 100644 --- a/services/customerprofiles/pom.xml +++ b/services/customerprofiles/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT customerprofiles AWS Java SDK :: Services :: Customer Profiles diff --git a/services/databasemigration/pom.xml b/services/databasemigration/pom.xml index 723dc1ca7a96..a4edd29066ec 100644 --- a/services/databasemigration/pom.xml +++ b/services/databasemigration/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT databasemigration AWS Java SDK :: Services :: AWS Database Migration Service diff --git a/services/databasemigration/src/main/resources/codegen-resources/customization.config b/services/databasemigration/src/main/resources/codegen-resources/customization.config index c746548fecc0..cee5fb159fe9 100644 --- a/services/databasemigration/src/main/resources/codegen-resources/customization.config +++ b/services/databasemigration/src/main/resources/codegen-resources/customization.config @@ -1,19 +1,20 @@ { - "verifiedSimpleMethods": [ - "describeAccountAttributes", - "describeCertificates", - "describeConnections", - "describeEndpointTypes", - "describeEndpoints", - "describeEventCategories", - "describeEventSubscriptions", - "describeEvents", - "describeOrderableReplicationInstances", - "describeReplicationInstances", - "describeReplicationSubnetGroups", - "describeReplicationTasks" - ], - "excludedSimpleMethods": [ - "describeReplicationTaskAssessmentResults" - ] + "verifiedSimpleMethods": [ + "describeAccountAttributes", + "describeCertificates", + "describeConnections", + "describeEndpointTypes", + "describeEndpoints", + "describeEventCategories", + "describeEventSubscriptions", + "describeEvents", + "describeOrderableReplicationInstances", + "describeReplicationInstances", + 
"describeReplicationSubnetGroups", + "describeReplicationTasks" + ], + "excludedSimpleMethods": [ + "describeReplicationTaskAssessmentResults" + ], + "useSraAuth": true } diff --git a/services/databrew/pom.xml b/services/databrew/pom.xml index b5699abab583..3650dd5378c8 100644 --- a/services/databrew/pom.xml +++ b/services/databrew/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT databrew AWS Java SDK :: Services :: Data Brew diff --git a/services/dataexchange/pom.xml b/services/dataexchange/pom.xml index 430a453035f7..cc5d3800a138 100644 --- a/services/dataexchange/pom.xml +++ b/services/dataexchange/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT dataexchange AWS Java SDK :: Services :: DataExchange diff --git a/services/datapipeline/pom.xml b/services/datapipeline/pom.xml index e80659b43193..b813d12f2e6c 100644 --- a/services/datapipeline/pom.xml +++ b/services/datapipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT datapipeline AWS Java SDK :: Services :: AWS Data Pipeline diff --git a/services/datasync/pom.xml b/services/datasync/pom.xml index b066fa0d02f8..6a4846faeb05 100644 --- a/services/datasync/pom.xml +++ b/services/datasync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT datasync AWS Java SDK :: Services :: DataSync diff --git a/services/datasync/src/main/resources/codegen-resources/customization.config b/services/datasync/src/main/resources/codegen-resources/customization.config index 296e08b3695c..750e78ea924c 100644 --- a/services/datasync/src/main/resources/codegen-resources/customization.config +++ b/services/datasync/src/main/resources/codegen-resources/customization.config @@ -1,9 +1,10 @@ { - "verifiedSimpleMethods" : [ + "verifiedSimpleMethods": [ "listAgents", "listLocations", "listTaskExecutions", "listTasks" ], - "generateEndpointClientTests": true 
+ "generateEndpointClientTests": true, + "useSraAuth": true } diff --git a/services/datazone/pom.xml b/services/datazone/pom.xml index 4942ab2b8e9d..509344d0190a 100644 --- a/services/datazone/pom.xml +++ b/services/datazone/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT datazone AWS Java SDK :: Services :: Data Zone diff --git a/services/dax/pom.xml b/services/dax/pom.xml index 5c2bc3aae5d5..c9ac99f09a00 100644 --- a/services/dax/pom.xml +++ b/services/dax/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT dax AWS Java SDK :: Services :: Amazon DynamoDB Accelerator (DAX) diff --git a/services/detective/pom.xml b/services/detective/pom.xml index a974bcb4ffef..df9b552f4b72 100644 --- a/services/detective/pom.xml +++ b/services/detective/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT detective AWS Java SDK :: Services :: Detective diff --git a/services/detective/src/main/resources/codegen-resources/service-2.json b/services/detective/src/main/resources/codegen-resources/service-2.json index d42856e1462d..3bfa54668f4f 100644 --- a/services/detective/src/main/resources/codegen-resources/service-2.json +++ b/services/detective/src/main/resources/codegen-resources/service-2.json @@ -74,7 +74,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

    Creates a new behavior graph for the calling account, and sets that account as the administrator account. This operation is called by the account that is enabling Detective.

    Before you try to enable Detective, make sure that your account has been enrolled in Amazon GuardDuty for at least 48 hours. If you do not meet this requirement, you cannot enable Detective. If you do meet the GuardDuty prerequisite, then when you make the request to enable Detective, it checks whether your data volume is within the Detective quota. If it exceeds the quota, then you cannot enable Detective.

    The operation also enables Detective for the calling account in the currently selected Region. It returns the ARN of the new behavior graph.

    CreateGraph triggers a process to create the corresponding data tables for the new behavior graph.

    An account can only be the administrator account for one behavior graph within a Region. If the same account calls CreateGraph with the same administrator account, it always returns the same behavior graph ARN. It does not create a new behavior graph.

    " + "documentation":"

    Creates a new behavior graph for the calling account, and sets that account as the administrator account. This operation is called by the account that is enabling Detective.

    The operation also enables Detective for the calling account in the currently selected Region. It returns the ARN of the new behavior graph.

    CreateGraph triggers a process to create the corresponding data tables for the new behavior graph.

    An account can only be the administrator account for one behavior graph within a Region. If the same account calls CreateGraph with the same administrator account, it always returns the same behavior graph ARN. It does not create a new behavior graph.

    " }, "CreateMembers":{ "name":"CreateMembers", @@ -201,7 +201,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Returns the investigation results of an investigation for a behavior graph.

    " + "documentation":"

    Detective investigations lets you investigate IAM users and IAM roles using indicators of compromise. An indicator of compromise (IOC) is an artifact observed in or on a network, system, or environment that can (with a high level of confidence) identify malicious activity or a security incident. GetInvestigation returns the investigation results of an investigation for a behavior graph.

    " }, "GetMembers":{ "name":"GetMembers", @@ -265,7 +265,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Get the indicators from an investigation

    " + "documentation":"

    Gets the indicators from an investigation. You can use the information from the indicators to determine if an IAM user and/or IAM role is involved in an unusual activity that could indicate malicious behavior and its impact.

    " }, "ListInvestigations":{ "name":"ListInvestigations", @@ -282,7 +282,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    List all Investigations.

    " + "documentation":"

    Detective investigations lets you investigate IAM users and IAM roles using indicators of compromise. An indicator of compromise (IOC) is an artifact observed in or on a network, system, or environment that can (with a high level of confidence) identify malicious activity or a security incident. ListInvestigations lists all active Detective investigations.

    " }, "ListInvitations":{ "name":"ListInvitations", @@ -379,7 +379,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    initiate an investigation on an entity in a graph

    " + "documentation":"

    Detective investigations lets you investigate IAM users and IAM roles using indicators of compromise. An indicator of compromise (IOC) is an artifact observed in or on a network, system, or environment that can (with a high level of confidence) identify malicious activity or a security incident. StartInvestigation initiates an investigation on an entity in a behavior graph.

    " }, "StartMonitoringMember":{ "name":"StartMonitoringMember", @@ -462,7 +462,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Update the state of an investigation.

    " + "documentation":"

    Updates the state of an investigation.

    " }, "UpdateOrganizationConfiguration":{ "name":"UpdateOrganizationConfiguration", @@ -787,7 +787,7 @@ }, "EndInclusive":{ "shape":"Timestamp", - "documentation":"

    A timestamp representing the end date of the time period until when data is filtered , including the end date.

    " + "documentation":"

    A timestamp representing the end date of the time period until when data is filtered, including the end date.

    " } }, "documentation":"

    Contains details on the time range used to filter data.

    " @@ -962,7 +962,7 @@ "members":{ "GraphArn":{ "shape":"GraphArn", - "documentation":"

    The ARN of the behavior graph.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the behavior graph.

    " }, "InvestigationId":{ "shape":"InvestigationId", @@ -975,7 +975,7 @@ "members":{ "GraphArn":{ "shape":"GraphArn", - "documentation":"

    The ARN of the behavior graph.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the behavior graph.

    " }, "InvestigationId":{ "shape":"InvestigationId", @@ -983,35 +983,35 @@ }, "EntityArn":{ "shape":"EntityArn", - "documentation":"

    The unique Amazon Resource Name (ARN) of the IAM user and IAM role.

    " + "documentation":"

    The unique Amazon Resource Name (ARN). Detective supports IAM user ARNs and IAM role ARNs.

    " }, "EntityType":{ "shape":"EntityType", - "documentation":"

    Type of entity. For example, Amazon Web Services accounts, such as IAM user and role.

    " + "documentation":"

    Type of entity. For example, Amazon Web Services accounts, such as an IAM user and/or IAM role.

    " }, "CreatedTime":{ "shape":"Timestamp", - "documentation":"

    The UTC time stamp of the creation time of the investigation report.

    " + "documentation":"

    The creation time of the investigation report in UTC time stamp format.

    " }, "ScopeStartTime":{ "shape":"Timestamp", - "documentation":"

    The start date and time for the scope time set to generate the investigation report.

    " + "documentation":"

    The start date and time used to set the scope time within which you want to generate the investigation report. The value is an UTC ISO8601 formatted string. For example, 2021-08-18T16:35:56.284Z.

    " }, "ScopeEndTime":{ "shape":"Timestamp", - "documentation":"

    The data and time when the investigation began. The value is an UTC ISO8601 formatted string. For example, 2021-08-18T16:35:56.284Z.

    " + "documentation":"

    The data and time when the investigation began. The value is an UTC ISO8601 formatted string. For example, 2021-08-18T16:35:56.284Z.

    " }, "Status":{ "shape":"Status", - "documentation":"

    Status based on the completion status of the investigation.

    " + "documentation":"

    The status based on the completion status of the investigation.

    " }, "Severity":{ "shape":"Severity", - "documentation":"

    Severity based on the likelihood and impact of the indicators of compromise discovered in the investigation.

    " + "documentation":"

    The severity assigned is based on the likelihood and impact of the indicators of compromise discovered in the investigation.

    " }, "State":{ "shape":"State", - "documentation":"

    The current state of the investigation. An archived investigation indicates you have completed reviewing the investigation.

    " + "documentation":"

    The current state of the investigation. An archived investigation indicates that you have completed reviewing the investigation.

    " } } }, @@ -1083,7 +1083,7 @@ "members":{ "StartingIpAddress":{ "shape":"IpAddress", - "documentation":"

    IP address where the resource was first used in the impossible travel

    " + "documentation":"

    IP address where the resource was first used in the impossible travel.

    " }, "EndingIpAddress":{ "shape":"IpAddress", @@ -1091,7 +1091,7 @@ }, "StartingLocation":{ "shape":"Location", - "documentation":"

    Location where the resource was first used in the impossible travel

    " + "documentation":"

    Location where the resource was first used in the impossible travel.

    " }, "EndingLocation":{ "shape":"Location", @@ -1109,14 +1109,14 @@ "members":{ "IndicatorType":{ "shape":"IndicatorType", - "documentation":"

    The type of indicator.

    " + "documentation":"

    The type of indicator.

    " }, "IndicatorDetail":{ "shape":"IndicatorDetail", - "documentation":"

    Details about the indicator of compromise.

    " + "documentation":"

    Details about the indicators of compromise that are used to determine if a resource is involved in a security incident. An indicator of compromise (IOC) is an artifact observed in or on a network, system, or environment that can (with a high level of confidence) identify malicious activity or a security incident.

    " } }, - "documentation":"

    Investigations triages indicators of compromises such as a finding and surfaces only the most critical and suspicious issues, so you can focus on high-level investigations.

    " + "documentation":"

    Detective investigations triages indicators of compromises such as a finding and surfaces only the most critical and suspicious issues, so you can focus on high-level investigations. An Indicator lets you determine if an Amazon Web Services resource is involved in unusual activity that could indicate malicious behavior and its impact.

    " }, "IndicatorDetail":{ "type":"structure", @@ -1131,7 +1131,7 @@ }, "FlaggedIpAddressDetail":{ "shape":"FlaggedIpAddressDetail", - "documentation":"

    Suspicious IP addresses that are flagged, which indicates critical or severe threats based on threat intelligence by Detective. This indicator is derived from AWS threat intelligence.

    " + "documentation":"

    Suspicious IP addresses that are flagged, which indicates critical or severe threats based on threat intelligence by Detective. This indicator is derived from Amazon Web Services threat intelligence.

    " }, "NewGeolocationDetail":{ "shape":"NewGeolocationDetail", @@ -1154,7 +1154,7 @@ "documentation":"

    Contains details about related finding groups.

    " } }, - "documentation":"

    Details about the indicators of compromise which are used to determine if a resource is involved in a security incident.

    " + "documentation":"

    Details about the indicators of compromise which are used to determine if a resource is involved in a security incident. An indicator of compromise (IOC) is an artifact observed in or on a network, system, or environment that can (with a high level of confidence) identify malicious activity or a security incident. For the list of indicators of compromise that are generated by Detective investigations, see Detective investigations.

    " }, "IndicatorType":{ "type":"string", @@ -1203,7 +1203,7 @@ }, "CreatedTime":{ "shape":"Timestamp", - "documentation":"

    The UTC time stamp of the creation time of the investigation report.

    " + "documentation":"

    The time stamp of the creation time of the investigation report. The value is an UTC ISO8601 formatted string. For example, 2021-08-18T16:35:56.284Z.

    " }, "EntityArn":{ "shape":"EntityArn", @@ -1214,7 +1214,7 @@ "documentation":"

    Type of entity. For example, Amazon Web Services accounts, such as IAM user and role.

    " } }, - "documentation":"

    Details about the investigation related to a potential security event identified by Detective

    " + "documentation":"

    Details about the investigation related to a potential security event identified by Detective.

    " }, "InvestigationDetails":{ "type":"list", @@ -1306,7 +1306,7 @@ "members":{ "GraphArn":{ "shape":"GraphArn", - "documentation":"

    The ARN of the behavior graph.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the behavior graph.

    " }, "InvestigationId":{ "shape":"InvestigationId", @@ -1314,15 +1314,15 @@ }, "IndicatorType":{ "shape":"IndicatorType", - "documentation":"

    See Detective investigations..

    " + "documentation":"

    For the list of indicators of compromise that are generated by Detective investigations, see Detective investigations.

    " }, "NextToken":{ "shape":"AiPaginationToken", - "documentation":"

    List if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

    Each pagination token expires after 24 hours. Using an expired pagination token will return a Validation Exception error.

    " + "documentation":"

    Lists if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

    Each pagination token expires after 24 hours. Using an expired pagination token will return a Validation Exception error.

    " }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

    List the maximum number of indicators in a page.

    " + "documentation":"

    Lists the maximum number of indicators in a page.

    " } } }, @@ -1331,7 +1331,7 @@ "members":{ "GraphArn":{ "shape":"GraphArn", - "documentation":"

    The ARN of the behavior graph.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the behavior graph.

    " }, "InvestigationId":{ "shape":"InvestigationId", @@ -1339,11 +1339,11 @@ }, "NextToken":{ "shape":"AiPaginationToken", - "documentation":"

    List if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

    Each pagination token expires after 24 hours. Using an expired pagination token will return a Validation Exception error.

    " + "documentation":"

    Lists if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

    Each pagination token expires after 24 hours. Using an expired pagination token will return a Validation Exception error.

    " }, "Indicators":{ "shape":"Indicators", - "documentation":"

    Indicators of compromise listed based on severity.

    " + "documentation":"

    Lists the indicators of compromise.

    " } } }, @@ -1353,19 +1353,19 @@ "members":{ "GraphArn":{ "shape":"GraphArn", - "documentation":"

    The ARN of the behavior graph.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the behavior graph.

    " }, "NextToken":{ "shape":"AiPaginationToken", - "documentation":"

    List if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

    Each pagination token expires after 24 hours. Using an expired pagination token will return a Validation Exception error.

    " + "documentation":"

    Lists if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

    Each pagination token expires after 24 hours. Using an expired pagination token will return a Validation Exception error.

    " }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

    List the maximum number of investigations in a page.

    " + "documentation":"

    Lists the maximum number of investigations in a page.

    " }, "FilterCriteria":{ "shape":"FilterCriteria", - "documentation":"

    Filter the investigation results based on a criteria.

    " + "documentation":"

    Filters the investigation results based on a criteria.

    " }, "SortCriteria":{ "shape":"SortCriteria", @@ -1378,11 +1378,11 @@ "members":{ "InvestigationDetails":{ "shape":"InvestigationDetails", - "documentation":"

    Investigations details lists the summary of uncommon behavior or malicious activity which indicates a compromise.

    " + "documentation":"

    Lists the summary of uncommon behavior or malicious activity which indicates a compromise.

    " }, "NextToken":{ "shape":"AiPaginationToken", - "documentation":"

    List if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

    Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

    " + "documentation":"

    Lists if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

    Each pagination token expires after 24 hours.

    " } } }, @@ -1635,7 +1635,7 @@ }, "IsNewForEntireAccount":{ "shape":"IsNewForEntireAccount", - "documentation":"

    Checks if the ASO is for new for the entire account.

    " + "documentation":"

    Checks if the Autonomous System Organization (ASO) is new for the entire account.

    " } }, "documentation":"

    Details new Autonomous System Organizations (ASOs) used either at the resource or account level.

    " @@ -1653,7 +1653,7 @@ }, "IsNewForEntireAccount":{ "shape":"IsNewForEntireAccount", - "documentation":"

    Checks if the gelocation is new for the entire account.

    " + "documentation":"

    Checks if the geolocation is new for the entire account.

    " } }, "documentation":"

    Details new geolocations used either at the resource or account level. For example, lists an observed geolocation that is an infrequent or unused location based on previous user activity.

    " @@ -1698,7 +1698,7 @@ "members":{ "Arn":{ "shape":"EntityArn", - "documentation":"

    The ARN of the related finding.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the related finding.

    " }, "Type":{ "shape":"Type", @@ -1750,7 +1750,7 @@ "documentation":"

    The type of resource that has exceeded the service quota.

    " } }, - "documentation":"

    This request cannot be completed for one of the following reasons.

    • The request would cause the number of member accounts in the behavior graph to exceed the maximum allowed. A behavior graph cannot have more than 1200 member accounts.

    • The request would cause the data rate for the behavior graph to exceed the maximum allowed.

    • Detective is unable to verify the data rate for the member account. This is usually because the member account is not enrolled in Amazon GuardDuty.

    ", + "documentation":"

    This request cannot be completed for one of the following reasons.

    • This request cannot be completed if it would cause the number of member accounts in the behavior graph to exceed the maximum allowed. A behavior graph cannot have more than 1,200 member accounts.

    • This request cannot be completed if the current volume ingested is above the limit of 10 TB per day. Detective will not allow you to add additional member accounts.

    ", "error":{"httpStatusCode":402}, "exception":true }, @@ -1796,7 +1796,7 @@ "members":{ "GraphArn":{ "shape":"GraphArn", - "documentation":"

    The ARN of the behavior graph.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the behavior graph.

    " }, "EntityArn":{ "shape":"EntityArn", @@ -1808,7 +1808,7 @@ }, "ScopeEndTime":{ "shape":"Timestamp", - "documentation":"

    The data and time when the investigation began. The value is an UTC ISO8601 formatted string. For example, 2021-08-18T16:35:56.284Z.

    " + "documentation":"

    The data and time when the investigation ended. The value is an UTC ISO8601 formatted string. For example, 2021-08-18T16:35:56.284Z.

    " } } }, @@ -1881,11 +1881,11 @@ }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

    The IP address where the TTP was observed.

    " + "documentation":"

    The IP address where the tactics, techniques, and procedure (TTP) was observed.

    " }, "APIName":{ "shape":"APIName", - "documentation":"

    The name of the API where the TTP was observed.

    " + "documentation":"

    The name of the API where the tactics, techniques, and procedure (TTP) was observed.

    " }, "APISuccessCount":{ "shape":"APISuccessCount", @@ -2061,7 +2061,7 @@ "members":{ "GraphArn":{ "shape":"GraphArn", - "documentation":"

    The ARN of the behavior graph.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the behavior graph.

    " }, "InvestigationId":{ "shape":"InvestigationId", @@ -2116,5 +2116,5 @@ "value":{"shape":"DatasourcePackageUsageInfo"} } }, - "documentation":"

    Detective uses machine learning and purpose-built visualizations to help you to analyze and investigate security issues across your Amazon Web Services (Amazon Web Services) workloads. Detective automatically extracts time-based events such as login attempts, API calls, and network traffic from CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by Amazon GuardDuty.

    The Detective API primarily supports the creation and management of behavior graphs. A behavior graph contains the extracted data from a set of member accounts, and is created and managed by an administrator account.

    To add a member account to the behavior graph, the administrator account sends an invitation to the account. When the account accepts the invitation, it becomes a member account in the behavior graph.

    Detective is also integrated with Organizations. The organization management account designates the Detective administrator account for the organization. That account becomes the administrator account for the organization behavior graph. The Detective administrator account is also the delegated administrator account for Detective in Organizations.

    The Detective administrator account can enable any organization account as a member account in the organization behavior graph. The organization accounts do not receive invitations. The Detective administrator account can also invite other accounts to the organization behavior graph.

    Every behavior graph is specific to a Region. You can only use the API to manage behavior graphs that belong to the Region that is associated with the currently selected endpoint.

    The administrator account for a behavior graph can use the Detective API to do the following:

    • Enable and disable Detective. Enabling Detective creates a new behavior graph.

    • View the list of member accounts in a behavior graph.

    • Add member accounts to a behavior graph.

    • Remove member accounts from a behavior graph.

    • Apply tags to a behavior graph.

    The organization management account can use the Detective API to select the delegated administrator for Detective.

    The Detective administrator account for an organization can use the Detective API to do the following:

    • Perform all of the functions of an administrator account.

    • Determine whether to automatically enable new organization accounts as member accounts in the organization behavior graph.

    An invited member account can use the Detective API to do the following:

    • View the list of behavior graphs that they are invited to.

    • Accept an invitation to contribute to a behavior graph.

    • Decline an invitation to contribute to a behavior graph.

    • Remove their account from a behavior graph.

    All API actions are logged as CloudTrail events. See Logging Detective API Calls with CloudTrail.

    We replaced the term \"master account\" with the term \"administrator account.\" An administrator account is used to centrally manage multiple accounts. In the case of Detective, the administrator account manages the accounts in their behavior graph.

    " + "documentation":"

    Detective uses machine learning and purpose-built visualizations to help you to analyze and investigate security issues across your Amazon Web Services (Amazon Web Services) workloads. Detective automatically extracts time-based events such as login attempts, API calls, and network traffic from CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by Amazon GuardDuty.

    The Detective API primarily supports the creation and management of behavior graphs. A behavior graph contains the extracted data from a set of member accounts, and is created and managed by an administrator account.

    To add a member account to the behavior graph, the administrator account sends an invitation to the account. When the account accepts the invitation, it becomes a member account in the behavior graph.

    Detective is also integrated with Organizations. The organization management account designates the Detective administrator account for the organization. That account becomes the administrator account for the organization behavior graph. The Detective administrator account is also the delegated administrator account for Detective in Organizations.

    The Detective administrator account can enable any organization account as a member account in the organization behavior graph. The organization accounts do not receive invitations. The Detective administrator account can also invite other accounts to the organization behavior graph.

    Every behavior graph is specific to a Region. You can only use the API to manage behavior graphs that belong to the Region that is associated with the currently selected endpoint.

    The administrator account for a behavior graph can use the Detective API to do the following:

    • Enable and disable Detective. Enabling Detective creates a new behavior graph.

    • View the list of member accounts in a behavior graph.

    • Add member accounts to a behavior graph.

    • Remove member accounts from a behavior graph.

    • Apply tags to a behavior graph.

    The organization management account can use the Detective API to select the delegated administrator for Detective.

    The Detective administrator account for an organization can use the Detective API to do the following:

    • Perform all of the functions of an administrator account.

    • Determine whether to automatically enable new organization accounts as member accounts in the organization behavior graph.

    An invited member account can use the Detective API to do the following:

    • View the list of behavior graphs that they are invited to.

    • Accept an invitation to contribute to a behavior graph.

    • Decline an invitation to contribute to a behavior graph.

    • Remove their account from a behavior graph.

    All API actions are logged as CloudTrail events. See Logging Detective API Calls with CloudTrail.

    We replaced the term \"master account\" with the term \"administrator account\". An administrator account is used to centrally manage multiple accounts. In the case of Detective, the administrator account manages the accounts in their behavior graph.

    " } diff --git a/services/devicefarm/pom.xml b/services/devicefarm/pom.xml index 5429f88f1415..5caee3c78047 100644 --- a/services/devicefarm/pom.xml +++ b/services/devicefarm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT devicefarm AWS Java SDK :: Services :: AWS Device Farm diff --git a/services/devopsguru/pom.xml b/services/devopsguru/pom.xml index 9f31659717eb..f66971846c58 100644 --- a/services/devopsguru/pom.xml +++ b/services/devopsguru/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT devopsguru AWS Java SDK :: Services :: Dev Ops Guru diff --git a/services/directconnect/pom.xml b/services/directconnect/pom.xml index 6fd006dad494..4b30448d08fb 100644 --- a/services/directconnect/pom.xml +++ b/services/directconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT directconnect AWS Java SDK :: Services :: AWS Direct Connect diff --git a/services/directory/pom.xml b/services/directory/pom.xml index 813eaadf8e2c..0e32efd16c5d 100644 --- a/services/directory/pom.xml +++ b/services/directory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT directory AWS Java SDK :: Services :: AWS Directory Service diff --git a/services/dlm/pom.xml b/services/dlm/pom.xml index 8d4e6d997bf8..a6a312f1bf4c 100644 --- a/services/dlm/pom.xml +++ b/services/dlm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT dlm AWS Java SDK :: Services :: DLM diff --git a/services/docdb/pom.xml b/services/docdb/pom.xml index 9ef77cf21c90..e98b428f024a 100644 --- a/services/docdb/pom.xml +++ b/services/docdb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT docdb AWS Java SDK :: Services :: DocDB diff --git a/services/docdbelastic/pom.xml b/services/docdbelastic/pom.xml index 4c8d64543609..a6bb2d03290a 100644 --- 
a/services/docdbelastic/pom.xml +++ b/services/docdbelastic/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT docdbelastic AWS Java SDK :: Services :: Doc DB Elastic diff --git a/services/docdbelastic/src/main/resources/codegen-resources/customization.config b/services/docdbelastic/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/docdbelastic/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/drs/pom.xml b/services/drs/pom.xml index a90436baf59a..c201d16062c0 100644 --- a/services/drs/pom.xml +++ b/services/drs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT drs AWS Java SDK :: Services :: Drs diff --git a/services/dynamodb/pom.xml b/services/dynamodb/pom.xml index 584e4c60109c..9c9c2f549b52 100644 --- a/services/dynamodb/pom.xml +++ b/services/dynamodb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT dynamodb AWS Java SDK :: Services :: Amazon DynamoDB diff --git a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json index 5907221872ee..c4d7cbde3f46 100644 --- a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json +++ b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json @@ -893,7 +893,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

    Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

    This operation only applies to Version 2019.11.21 (Current) of global tables.

    You can only perform one of the following operations at once:

    • Modify the provisioned throughput settings of the table.

    • Remove a global secondary index from the table.

    • Create a new global secondary index on the table. After the index begins backfilling, you can use UpdateTable to perform other operations.

    UpdateTable is an asynchronous operation; while it is executing, the table status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.

    ", + "documentation":"

    Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

    This operation only applies to Version 2019.11.21 (Current) of global tables.

    You can only perform one of the following operations at once:

    • Modify the provisioned throughput settings of the table.

    • Remove a global secondary index from the table.

    • Create a new global secondary index on the table. After the index begins backfilling, you can use UpdateTable to perform other operations.

    UpdateTable is an asynchronous operation; while it's executing, the table status changes from ACTIVE to UPDATING. While it's UPDATING, you can't issue another UpdateTable request on the base table nor any replicas. When the table returns to the ACTIVE state, the UpdateTable operation is complete.

    ", "endpointdiscovery":{ } }, diff --git a/services/ebs/pom.xml b/services/ebs/pom.xml index dc94152c3cb7..f3d15bad65d5 100644 --- a/services/ebs/pom.xml +++ b/services/ebs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ebs AWS Java SDK :: Services :: EBS diff --git a/services/ec2/pom.xml b/services/ec2/pom.xml index cfb0840716be..5817ce42039c 100644 --- a/services/ec2/pom.xml +++ b/services/ec2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ec2 AWS Java SDK :: Services :: Amazon EC2 diff --git a/services/ec2instanceconnect/pom.xml b/services/ec2instanceconnect/pom.xml index ccb14cf838e8..f7f923ec9ec3 100644 --- a/services/ec2instanceconnect/pom.xml +++ b/services/ec2instanceconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ec2instanceconnect AWS Java SDK :: Services :: EC2 Instance Connect diff --git a/services/ec2instanceconnect/src/main/resources/codegen-resources/customization.config b/services/ec2instanceconnect/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/ec2instanceconnect/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/ecr/pom.xml b/services/ecr/pom.xml index de5dbe32e519..107ae7573501 100644 --- a/services/ecr/pom.xml +++ b/services/ecr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ecr AWS Java SDK :: Services :: Amazon EC2 Container Registry diff --git a/services/ecr/src/main/resources/codegen-resources/customization.config b/services/ecr/src/main/resources/codegen-resources/customization.config index 8ff796031895..090cacbeec00 100644 --- a/services/ecr/src/main/resources/codegen-resources/customization.config +++ b/services/ecr/src/main/resources/codegen-resources/customization.config 
@@ -1,6 +1,7 @@ { - "verifiedSimpleMethods" : [ + "verifiedSimpleMethods": [ "describeRepositories", "getAuthorizationToken" - ] + ], + "useSraAuth": true } diff --git a/services/ecrpublic/pom.xml b/services/ecrpublic/pom.xml index a8db25518653..ffa169d3a977 100644 --- a/services/ecrpublic/pom.xml +++ b/services/ecrpublic/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ecrpublic AWS Java SDK :: Services :: ECR PUBLIC diff --git a/services/ecrpublic/src/main/resources/codegen-resources/customization.config b/services/ecrpublic/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/ecrpublic/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/ecs/pom.xml b/services/ecs/pom.xml index 72ad8e1b16fb..eeedee5f9c03 100644 --- a/services/ecs/pom.xml +++ b/services/ecs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ecs AWS Java SDK :: Services :: Amazon EC2 Container Service diff --git a/services/ecs/src/main/resources/codegen-resources/customization.config b/services/ecs/src/main/resources/codegen-resources/customization.config index b0f923be73f8..ff9d8d650d5d 100644 --- a/services/ecs/src/main/resources/codegen-resources/customization.config +++ b/services/ecs/src/main/resources/codegen-resources/customization.config @@ -1,19 +1,20 @@ { - "verifiedSimpleMethods": [ - "createCluster", - "listContainerInstances", - "listServices", - "listTasks", - "describeClusters", - "listAccountSettings", - "listClusters", - "listTaskDefinitionFamilies", - "listTaskDefinitions" - ], - "excludedSimpleMethods": [ - "discoverPollEndpoint", - "registerContainerInstance", - "submitContainerStateChange", - "submitTaskStateChange" - ] + "verifiedSimpleMethods": [ + "createCluster", + "listContainerInstances", + "listServices", + "listTasks", + 
"describeClusters", + "listAccountSettings", + "listClusters", + "listTaskDefinitionFamilies", + "listTaskDefinitions" + ], + "excludedSimpleMethods": [ + "discoverPollEndpoint", + "registerContainerInstance", + "submitContainerStateChange", + "submitTaskStateChange" + ], + "useSraAuth": true } diff --git a/services/ecs/src/main/resources/codegen-resources/service-2.json b/services/ecs/src/main/resources/codegen-resources/service-2.json index d45ef92ba01e..9e82e520ba38 100644 --- a/services/ecs/src/main/resources/codegen-resources/service-2.json +++ b/services/ecs/src/main/resources/codegen-resources/service-2.json @@ -572,7 +572,7 @@ {"shape":"ClientException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

    Modifies an account setting. Account settings are set on a per-Region basis.

    If you change the root user account setting, the default settings are reset for users and roles that do not have specified individual account settings. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide.

    When you specify serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    When you specify awsvpcTrunking, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched have the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

    When you specify containerInsights, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide.

    Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.

    When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to configure the wait time to retire a Fargate task. For information about the Fargate tasks maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.

    The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

    " + "documentation":"

    Modifies an account setting. Account settings are set on a per-Region basis.

    If you change the root user account setting, the default settings are reset for users and roles that do not have specified individual account settings. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide.

    " }, "PutAccountSettingDefault":{ "name":"PutAccountSettingDefault", @@ -4404,7 +4404,7 @@ "members":{ "name":{ "shape":"SettingName", - "documentation":"

    The resource name for which to modify the account setting. If you specify serviceLongArnFormat, the ARN for your Amazon ECS services is affected. If you specify taskLongArnFormat, the ARN and resource ID for your Amazon ECS tasks is affected. If you specify containerInstanceLongArnFormat, the ARN and resource ID for your Amazon ECS container instances is affected. If you specify awsvpcTrunking, the ENI limit for your Amazon ECS container instances is affected. If you specify containerInsights, the default setting for Amazon Web Services CloudWatch Container Insights for your clusters is affected. If you specify tagResourceAuthorization, the opt-in option for tagging resources on creation is affected. For information about the opt-in timeline, see Tagging authorization timeline in the Amazon ECS Developer Guide. If you specify fargateTaskRetirementWaitPeriod, the default wait time to retire a Fargate task due to required maintenance is affected.

    When you specify fargateFIPSMode for the name and enabled for the value, Fargate uses FIPS-140 compliant cryptographic algorithms on your tasks. For more information about FIPS-140 compliance with Fargate, see Amazon Web Services Fargate Federal Information Processing Standard (FIPS) 140-2 compliance in the Amazon Elastic Container Service Developer Guide.

    When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to set the wait time to retire a Fargate task to the default. For information about the Fargate tasks maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.

    The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

    " + "documentation":"

    The resource name for which to modify the account setting.

    The following are the valid values for the account setting name.

    • serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • taskLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • containerInstanceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • awsvpcTrunking - When modified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched have the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

    • containerInsights - When modified, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide.

    • dualStackIPv6 - When turned on, when using a VPC in dual stack mode, your tasks using the awsvpc network mode can have an IPv6 address assigned. For more information on using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks launched on Fargate, see Using a VPC in dual-stack mode.

    • fargateFIPSMode - If you specify fargateFIPSMode, Fargate FIPS 140 compliance is affected.

    • fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to configure the wait time to retire a Fargate task. For information about the Fargate tasks maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.

    • tagResourceAuthorization - Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.

    • guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

    " }, "value":{ "shape":"String", @@ -4430,7 +4430,7 @@ "members":{ "name":{ "shape":"SettingName", - "documentation":"

    The Amazon ECS resource name for which to modify the account setting. If you specify serviceLongArnFormat, the ARN for your Amazon ECS services is affected. If you specify taskLongArnFormat, the ARN and resource ID for your Amazon ECS tasks is affected. If you specify containerInstanceLongArnFormat, the ARN and resource ID for your Amazon ECS container instances is affected. If you specify awsvpcTrunking, the elastic network interface (ENI) limit for your Amazon ECS container instances is affected. If you specify containerInsights, the default setting for Amazon Web Services CloudWatch Container Insights for your clusters is affected. If you specify fargateFIPSMode, Fargate FIPS 140 compliance is affected. If you specify tagResourceAuthorization, the opt-in option for tagging resources on creation is affected. For information about the opt-in timeline, see Tagging authorization timeline in the Amazon ECS Developer Guide. If you specify fargateTaskRetirementWaitPeriod, the wait time to retire a Fargate task is affected.

    The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

    " + "documentation":"

    The Amazon ECS account setting name to modify.

    The following are the valid values for the account setting name.

    • serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • taskLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • containerInstanceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • awsvpcTrunking - When modified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched have the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

    • containerInsights - When modified, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide.

    • dualStackIPv6 - When turned on, when using a VPC in dual stack mode, your tasks using the awsvpc network mode can have an IPv6 address assigned. For more information on using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks launched on Fargate, see Using a VPC in dual-stack mode.

    • fargateFIPSMode - If you specify fargateFIPSMode, Fargate FIPS 140 compliance is affected.

    • fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to configure the wait time to retire a Fargate task. For information about the Fargate tasks maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.

    • tagResourceAuthorization - Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.

    • guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

    " }, "value":{ "shape":"String", @@ -4833,7 +4833,7 @@ }, "failures":{ "shape":"Failures", - "documentation":"

    Any failures associated with the call.

    " + "documentation":"

    Any failures associated with the call.

    For information about how to address failures, see Service event messages and API failure reasons in the Amazon Elastic Container Service Developer Guide.

    " } } }, diff --git a/services/efs/pom.xml b/services/efs/pom.xml index bac611b1d908..6fea6441864a 100644 --- a/services/efs/pom.xml +++ b/services/efs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT efs AWS Java SDK :: Services :: Amazon Elastic File System diff --git a/services/eks/pom.xml b/services/eks/pom.xml index 24aab8e10090..2fc6631f6d2c 100644 --- a/services/eks/pom.xml +++ b/services/eks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT eks AWS Java SDK :: Services :: EKS diff --git a/services/eks/src/main/resources/codegen-resources/customization.config b/services/eks/src/main/resources/codegen-resources/customization.config index 6a19a2f5c394..a12a922892bf 100644 --- a/services/eks/src/main/resources/codegen-resources/customization.config +++ b/services/eks/src/main/resources/codegen-resources/customization.config @@ -2,5 +2,6 @@ "verifiedSimpleMethods": [ "listClusters" ], - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/eksauth/pom.xml b/services/eksauth/pom.xml index 2a5707d661a2..eae14db4721f 100644 --- a/services/eksauth/pom.xml +++ b/services/eksauth/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT eksauth AWS Java SDK :: Services :: EKS Auth diff --git a/services/elasticache/pom.xml b/services/elasticache/pom.xml index cb69a07b07e0..9690985dcbd3 100644 --- a/services/elasticache/pom.xml +++ b/services/elasticache/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT elasticache AWS Java SDK :: Services :: Amazon ElastiCache diff --git a/services/elasticbeanstalk/pom.xml b/services/elasticbeanstalk/pom.xml index 836a6fc61916..8e1788e7007d 100644 --- a/services/elasticbeanstalk/pom.xml +++ b/services/elasticbeanstalk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 
2.24.10-SNAPSHOT elasticbeanstalk AWS Java SDK :: Services :: AWS Elastic Beanstalk diff --git a/services/elasticbeanstalk/src/main/resources/codegen-resources/customization.config b/services/elasticbeanstalk/src/main/resources/codegen-resources/customization.config index f0f2660d2737..2d5e2d16e58a 100644 --- a/services/elasticbeanstalk/src/main/resources/codegen-resources/customization.config +++ b/services/elasticbeanstalk/src/main/resources/codegen-resources/customization.config @@ -4,7 +4,6 @@ // 'Builder' class, which is invalid Java "Builder" : "PlatformBuilder" }, - "shapeModifiers" : { "CreatePlatformVersionResult" : { "modify": [ @@ -43,5 +42,6 @@ "describeEvents", "listAvailableSolutionStacks", "listPlatformVersions" - ] + ], + "useSraAuth": true } diff --git a/services/elasticbeanstalk/src/main/resources/codegen-resources/customization.config.out b/services/elasticbeanstalk/src/main/resources/codegen-resources/customization.config.out deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/services/elasticinference/pom.xml b/services/elasticinference/pom.xml index 3f1bfb222225..ade11596359e 100644 --- a/services/elasticinference/pom.xml +++ b/services/elasticinference/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT elasticinference AWS Java SDK :: Services :: Elastic Inference diff --git a/services/elasticinference/src/main/resources/codegen-resources/customization.config b/services/elasticinference/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/elasticinference/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/elasticloadbalancing/pom.xml b/services/elasticloadbalancing/pom.xml index 52f7e174e971..6d558113ec7e 100644 --- a/services/elasticloadbalancing/pom.xml +++ b/services/elasticloadbalancing/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT elasticloadbalancing AWS Java SDK :: Services :: Elastic Load Balancing diff --git a/services/elasticloadbalancing/src/main/resources/codegen-resources/customization.config b/services/elasticloadbalancing/src/main/resources/codegen-resources/customization.config index 55c6f1f439ea..e95e265c2893 100644 --- a/services/elasticloadbalancing/src/main/resources/codegen-resources/customization.config +++ b/services/elasticloadbalancing/src/main/resources/codegen-resources/customization.config @@ -1,14 +1,15 @@ { - "verifiedSimpleMethods": [ - "describeAccountLimits", - "describeLoadBalancerPolicies", - "describeLoadBalancerPolicyTypes", - "describeLoadBalancers" - ], - "renameShapes": { - "AccessPointNotFoundException": "LoadBalancerNotFoundException", - "DuplicateAccessPointNameException": "DuplicateLoadBalancerNameException", - "TooManyAccessPointsException": "TooManyLoadBalancersException", - "InvalidEndPointException": "InvalidInstanceException" - } + "verifiedSimpleMethods": [ + "describeAccountLimits", + "describeLoadBalancerPolicies", + "describeLoadBalancerPolicyTypes", + "describeLoadBalancers" + ], + "renameShapes": { + "AccessPointNotFoundException": "LoadBalancerNotFoundException", + "DuplicateAccessPointNameException": "DuplicateLoadBalancerNameException", + "TooManyAccessPointsException": "TooManyLoadBalancersException", + "InvalidEndPointException": "InvalidInstanceException" + }, + "useSraAuth": true } diff --git a/services/elasticloadbalancingv2/pom.xml b/services/elasticloadbalancingv2/pom.xml index 192406c6bf2b..e790874e7740 100644 --- a/services/elasticloadbalancingv2/pom.xml +++ b/services/elasticloadbalancingv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT elasticloadbalancingv2 AWS Java SDK :: Services :: Elastic Load Balancing V2 diff --git a/services/elasticsearch/pom.xml b/services/elasticsearch/pom.xml index 
c065aab19de3..684e4a24c9a8 100644 --- a/services/elasticsearch/pom.xml +++ b/services/elasticsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT elasticsearch AWS Java SDK :: Services :: Amazon Elasticsearch Service diff --git a/services/elastictranscoder/pom.xml b/services/elastictranscoder/pom.xml index 5467da0e5197..aa3ab0d15cc9 100644 --- a/services/elastictranscoder/pom.xml +++ b/services/elastictranscoder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT elastictranscoder AWS Java SDK :: Services :: Amazon Elastic Transcoder diff --git a/services/elastictranscoder/src/main/resources/codegen-resources/customization.config b/services/elastictranscoder/src/main/resources/codegen-resources/customization.config index 5ce889c1817a..c180147a6f9f 100644 --- a/services/elastictranscoder/src/main/resources/codegen-resources/customization.config +++ b/services/elastictranscoder/src/main/resources/codegen-resources/customization.config @@ -1,9 +1,10 @@ { - "verifiedSimpleMethods": [ - "listPipelines", - "listPresets" - ], - "deprecatedOperations": [ - "TestRole" - ] + "verifiedSimpleMethods": [ + "listPipelines", + "listPresets" + ], + "deprecatedOperations": [ + "TestRole" + ], + "useSraAuth": true } diff --git a/services/emr/pom.xml b/services/emr/pom.xml index d5b0f41e99b3..d252fdd6d5e4 100644 --- a/services/emr/pom.xml +++ b/services/emr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT emr AWS Java SDK :: Services :: Amazon EMR diff --git a/services/emr/src/main/resources/codegen-resources/customization.config b/services/emr/src/main/resources/codegen-resources/customization.config index 1e066e00e14d..1019325a72b7 100644 --- a/services/emr/src/main/resources/codegen-resources/customization.config +++ b/services/emr/src/main/resources/codegen-resources/customization.config @@ -1,25 +1,26 @@ { - "verifiedSimpleMethods": [ - 
"listClusters", - "listSecurityConfigurations" - ], - "shapeModifiers": { - "InvalidRequestException": { - "modify": [ - { - "ErrorCode": { - "emitPropertyName": "emrErrorCode" - } + "verifiedSimpleMethods": [ + "listClusters", + "listSecurityConfigurations" + ], + "shapeModifiers": { + "InvalidRequestException": { + "modify": [ + { + "ErrorCode": { + "emitPropertyName": "emrErrorCode" + } + } + ] } - ] - } - }, - "excludedSimpleMethods": [ - "cancelSteps", - "modifyInstanceGroups", - "describeJobFlows" - ], - "deprecatedOperations": [ - "DescribeJobFlows" - ] + }, + "excludedSimpleMethods": [ + "cancelSteps", + "modifyInstanceGroups", + "describeJobFlows" + ], + "deprecatedOperations": [ + "DescribeJobFlows" + ], + "useSraAuth": true } diff --git a/services/emr/src/main/resources/codegen-resources/service-2.json b/services/emr/src/main/resources/codegen-resources/service-2.json index 4fba715453ad..63b45ccdd1c8 100644 --- a/services/emr/src/main/resources/codegen-resources/service-2.json +++ b/services/emr/src/main/resources/codegen-resources/service-2.json @@ -652,7 +652,19 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

    SetTerminationProtection locks a cluster (job flow) so the Amazon EC2 instances in the cluster cannot be terminated by user intervention, an API call, or in the event of a job-flow error. The cluster still terminates upon successful completion of the job flow. Calling SetTerminationProtection on a cluster is similar to calling the Amazon EC2 DisableAPITermination API on all Amazon EC2 instances in a cluster.

    SetTerminationProtection is used to prevent accidental termination of a cluster and to ensure that in the event of an error, the instances persist so that you can recover any data stored in their ephemeral instance storage.

    To terminate a cluster that has been locked by setting SetTerminationProtection to true, you must first unlock the job flow by a subsequent call to SetTerminationProtection in which you set the value to false.

    For more information, seeManaging Cluster Termination in the Amazon EMR Management Guide.

    " + "documentation":"

    SetTerminationProtection locks a cluster (job flow) so the Amazon EC2 instances in the cluster cannot be terminated by user intervention, an API call, or in the event of a job-flow error. The cluster still terminates upon successful completion of the job flow. Calling SetTerminationProtection on a cluster is similar to calling the Amazon EC2 DisableAPITermination API on all Amazon EC2 instances in a cluster.

    SetTerminationProtection is used to prevent accidental termination of a cluster and to ensure that in the event of an error, the instances persist so that you can recover any data stored in their ephemeral instance storage.

    To terminate a cluster that has been locked by setting SetTerminationProtection to true, you must first unlock the job flow by a subsequent call to SetTerminationProtection in which you set the value to false.

    For more information, see Managing Cluster Termination in the Amazon EMR Management Guide.

    " + }, + "SetUnhealthyNodeReplacement":{ + "name":"SetUnhealthyNodeReplacement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetUnhealthyNodeReplacementInput"}, + "errors":[ + {"shape":"InternalServerError"} + ], + "documentation":"

    Specify whether to enable unhealthy node replacement, which lets Amazon EMR gracefully replace core nodes on a cluster if any nodes become unhealthy. For example, a node becomes unhealthy if disk usage is above 90%. If unhealthy node replacement is on and TerminationProtected is off, Amazon EMR immediately terminates the unhealthy core nodes. To use unhealthy node replacement and retain unhealthy core nodes, use SetTerminationProtection to turn on termination protection. In such cases, Amazon EMR adds the unhealthy nodes to a denylist, reducing job interruptions and failures.

    If unhealthy node replacement is on, Amazon EMR notifies YARN and other applications on the cluster to stop scheduling tasks with these nodes, moves the data, and then terminates the nodes.

    For more information, see graceful node replacement in the Amazon EMR Management Guide.

    " }, "SetVisibleToAllUsers":{ "name":"SetVisibleToAllUsers", @@ -1238,6 +1250,10 @@ "shape":"Boolean", "documentation":"

    Indicates whether Amazon EMR will lock the cluster to prevent the Amazon EC2 instances from being terminated by an API call or user intervention, or in the event of a cluster error.

    " }, + "UnhealthyNodeReplacement":{ + "shape":"BooleanObject", + "documentation":"

    Indicates whether Amazon EMR should gracefully replace Amazon EC2 core instances that have degraded within the cluster.

    " + }, "VisibleToAllUsers":{ "shape":"Boolean", "documentation":"

    Indicates whether the cluster is visible to IAM principals in the Amazon Web Services account associated with the cluster. When true, IAM principals in the Amazon Web Services account can perform Amazon EMR cluster actions on the cluster that their IAM policies allow. When false, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform Amazon EMR actions, regardless of IAM permissions policies attached to other IAM principals.

    The default value is true if a value is not provided when creating a cluster using the Amazon EMR API RunJobFlow command, the CLI create-cluster command, or the Amazon Web Services Management Console.

    " @@ -3359,6 +3375,10 @@ "shape":"Boolean", "documentation":"

    Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.

    " }, + "UnhealthyNodeReplacement":{ + "shape":"BooleanObject", + "documentation":"

    Indicates whether Amazon EMR should gracefully replace core nodes that have degraded within the cluster.

    " + }, "HadoopVersion":{ "shape":"XmlStringMaxLen256", "documentation":"

    Applies only to Amazon EMR release versions earlier than 4.0. The Hadoop version for the cluster. Valid inputs are \"0.18\" (no longer maintained), \"0.20\" (no longer maintained), \"0.20.205\" (no longer maintained), \"1.0.3\", \"2.2.0\", or \"2.4.0\". If you do not set this value, the default of 0.18 is used, unless the AmiVersion parameter is set in the RunJobFlow call, in which case the default version of Hadoop for that AMI version is used.

    " @@ -3450,6 +3470,10 @@ "shape":"Boolean", "documentation":"

    Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job-flow error.

    " }, + "UnhealthyNodeReplacement":{ + "shape":"BooleanObject", + "documentation":"

    Indicates whether Amazon EMR should gracefully replace core nodes that have degraded within the cluster.

    " + }, "HadoopVersion":{ "shape":"XmlStringMaxLen256", "documentation":"

    The Hadoop version for the cluster.

    " @@ -4895,6 +4919,23 @@ }, "documentation":"

    The input argument to the TerminationProtection operation.

    " }, + "SetUnhealthyNodeReplacementInput":{ + "type":"structure", + "required":[ + "JobFlowIds", + "UnhealthyNodeReplacement" + ], + "members":{ + "JobFlowIds":{ + "shape":"XmlStringList", + "documentation":"

    The list of strings that uniquely identify the clusters for which to turn on unhealthy node replacement. You can get these identifiers by running the RunJobFlow or the DescribeJobFlows operations.

    " + }, + "UnhealthyNodeReplacement":{ + "shape":"BooleanObject", + "documentation":"

    Indicates whether to turn on or turn off graceful unhealthy node replacement.

    " + } + } + }, "SetVisibleToAllUsersInput":{ "type":"structure", "required":[ diff --git a/services/emrcontainers/pom.xml b/services/emrcontainers/pom.xml index 5f178784a6af..be5174e0c5e2 100644 --- a/services/emrcontainers/pom.xml +++ b/services/emrcontainers/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT emrcontainers AWS Java SDK :: Services :: EMR Containers diff --git a/services/emrcontainers/src/main/resources/codegen-resources/customization.config b/services/emrcontainers/src/main/resources/codegen-resources/customization.config index fb50d0a7a6eb..49a9cfc3d510 100644 --- a/services/emrcontainers/src/main/resources/codegen-resources/customization.config +++ b/services/emrcontainers/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/emrserverless/pom.xml b/services/emrserverless/pom.xml index c96bb213a790..bd479a193703 100644 --- a/services/emrserverless/pom.xml +++ b/services/emrserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT emrserverless AWS Java SDK :: Services :: EMR Serverless diff --git a/services/entityresolution/pom.xml b/services/entityresolution/pom.xml index 410dcaf5db29..c249e7ad1072 100644 --- a/services/entityresolution/pom.xml +++ b/services/entityresolution/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT entityresolution AWS Java SDK :: Services :: Entity Resolution diff --git a/services/entityresolution/src/main/resources/codegen-resources/customization.config b/services/entityresolution/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/entityresolution/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff 
--git a/services/eventbridge/pom.xml b/services/eventbridge/pom.xml index 088622d06c01..751eea4dcedb 100644 --- a/services/eventbridge/pom.xml +++ b/services/eventbridge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT eventbridge AWS Java SDK :: Services :: EventBridge diff --git a/services/eventbridge/src/main/resources/codegen-resources/customization.config b/services/eventbridge/src/main/resources/codegen-resources/customization.config index dbe25a828440..836fbeefbb22 100644 --- a/services/eventbridge/src/main/resources/codegen-resources/customization.config +++ b/services/eventbridge/src/main/resources/codegen-resources/customization.config @@ -1,6 +1,7 @@ { - "enableEndpointAuthSchemeParams": true, - "allowedEndpointAuthSchemeParams": [ - "EndpointId" - ] + "enableEndpointAuthSchemeParams": true, + "allowedEndpointAuthSchemeParams": [ + "EndpointId" + ], + "useSraAuth": true } diff --git a/services/evidently/pom.xml b/services/evidently/pom.xml index 5e64243c8eb2..dc96e6ed39b2 100644 --- a/services/evidently/pom.xml +++ b/services/evidently/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT evidently AWS Java SDK :: Services :: Evidently diff --git a/services/finspace/pom.xml b/services/finspace/pom.xml index cbca71a5420f..b2eb9fed40e4 100644 --- a/services/finspace/pom.xml +++ b/services/finspace/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT finspace AWS Java SDK :: Services :: Finspace diff --git a/services/finspacedata/pom.xml b/services/finspacedata/pom.xml index 58b4f7ee9ed1..10b7ce4939dd 100644 --- a/services/finspacedata/pom.xml +++ b/services/finspacedata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT finspacedata AWS Java SDK :: Services :: Finspace Data diff --git a/services/firehose/pom.xml b/services/firehose/pom.xml index cbff1e8783c6..40cf9a00f5da 100644 --- 
a/services/firehose/pom.xml +++ b/services/firehose/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT firehose AWS Java SDK :: Services :: Amazon Kinesis Firehose diff --git a/services/firehose/src/main/resources/codegen-resources/service-2.json b/services/firehose/src/main/resources/codegen-resources/service-2.json index e7117e9376cc..db142d3f0474 100644 --- a/services/firehose/src/main/resources/codegen-resources/service-2.json +++ b/services/firehose/src/main/resources/codegen-resources/service-2.json @@ -27,7 +27,7 @@ {"shape":"ResourceInUseException"}, {"shape":"InvalidKMSResourceException"} ], - "documentation":"

    Creates a Kinesis Data Firehose delivery stream.

    By default, you can create up to 50 delivery streams per Amazon Web Services Region.

    This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.

    If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.

    A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter.

    To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled.

    A delivery stream is configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration.

    When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.

    A few notes about Amazon Redshift as a destination:

    • An Amazon Redshift destination requires an S3 bucket as intermediate location. Kinesis Data Firehose first delivers data to Amazon S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration parameter.

    • The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.

    • We strongly recommend that you use the user name and password you provide exclusively with Kinesis Data Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions.

    Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.

    " + "documentation":"

    Creates a Firehose delivery stream.

    By default, you can create up to 50 delivery streams per Amazon Web Services Region.

    This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.

    If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.

    A Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter.

    To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled.

    A delivery stream is configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration.

    When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.

    A few notes about Amazon Redshift as a destination:

    • An Amazon Redshift destination requires an S3 bucket as intermediate location. Firehose first delivers data to Amazon S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration parameter.

    • The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.

    • We strongly recommend that you use the user name and password you provide exclusively with Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions.

    Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Firehose Access to an Amazon S3 Destination in the Amazon Firehose Developer Guide.

    " }, "DeleteDeliveryStream":{ "name":"DeleteDeliveryStream", @@ -41,7 +41,7 @@ {"shape":"ResourceInUseException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    Deletes a delivery stream and its data.

    To check the state of a delivery stream, use DescribeDeliveryStream. You can delete a delivery stream only if it is in one of the following states: ACTIVE, DELETING, CREATING_FAILED, or DELETING_FAILED. You can't delete a delivery stream that is in the CREATING state. While the deletion request is in process, the delivery stream is in the DELETING state.

    While the delivery stream is in the DELETING state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.

    " + "documentation":"

    Deletes a delivery stream and its data.

    You can delete a delivery stream only if it is in one of the following states: ACTIVE, DELETING, CREATING_FAILED, or DELETING_FAILED. You can't delete a delivery stream that is in the CREATING state. To check the state of a delivery stream, use DescribeDeliveryStream.

    DeleteDeliveryStream is an asynchronous API. When an API request to DeleteDeliveryStream succeeds, the delivery stream is marked for deletion, and it goes into the DELETING state. While the delivery stream is in the DELETING state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.

    Removal of a delivery stream that is in the DELETING state is a low priority operation for the service. A stream may remain in the DELETING state for several minutes. Therefore, as a best practice, applications should not wait for streams in the DELETING state to be removed.

    " }, "DescribeDeliveryStream":{ "name":"DescribeDeliveryStream", @@ -96,7 +96,7 @@ {"shape":"InvalidSourceException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

    Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.

    By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits.

    Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.

    You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.

    Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

    The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.

    If the PutRecord operation throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

    Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations.

    Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

    Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

    " + "documentation":"

    Writes a single data record into an Amazon Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.

    By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Firehose Limits.

    Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.

    You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.

    Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

    The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.

    If the PutRecord operation throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

    Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations.

    Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

    Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

    " }, "PutRecordBatch":{ "name":"PutRecordBatch", @@ -113,7 +113,7 @@ {"shape":"InvalidSourceException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

    Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.

    Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.

    For information about service quota, see Amazon Kinesis Data Firehose Quota.

    Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.

    You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.

    Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

    The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.

    A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error.

    If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.

    If PutRecordBatch throws ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

    Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations.

    Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

    Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

    " + "documentation":"

    Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.

    Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.

    For information about service quota, see Amazon Firehose Quota.

    Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.

    You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.

    Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

    The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.

    A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error.

    If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.

    If PutRecordBatch throws ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

    Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations.

    Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

    Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

    " }, "StartDeliveryStreamEncryption":{ "name":"StartDeliveryStreamEncryption", @@ -130,7 +130,7 @@ {"shape":"LimitExceededException"}, {"shape":"InvalidKMSResourceException"} ], - "documentation":"

    Enables server-side encryption (SSE) for the delivery stream.

    This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

    To check the encryption status of a delivery stream, use DescribeDeliveryStream.

    Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.

    For the KMS grant creation to be successful, Kinesis Data Firehose APIs StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old.

    If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.

    If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations.

    You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source.

    The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

    " + "documentation":"

    Enables server-side encryption (SSE) for the delivery stream.

    This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

    To check the encryption status of a delivery stream, use DescribeDeliveryStream.

    Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.

    For the KMS grant creation to be successful, Firehose APIs StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old.

    If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.

    If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Firehose to invoke KMS encrypt and decrypt operations.

    You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source.

    The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

    " }, "StopDeliveryStreamEncryption":{ "name":"StopDeliveryStreamEncryption", @@ -146,7 +146,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Disables server-side encryption (SSE) for the delivery stream.

    This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

    To check the encryption state of a delivery stream, use DescribeDeliveryStream.

    If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption, Kinesis Data Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination.

    The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

    " + "documentation":"

    Disables server-side encryption (SSE) for the delivery stream.

    This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

    To check the encryption state of a delivery stream, use DescribeDeliveryStream.

    If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption, Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination.

    The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

    " }, "TagDeliveryStream":{ "name":"TagDeliveryStream", @@ -194,7 +194,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

    Updates the specified destination of the specified delivery stream.

    Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.

    Switching between Amazon OpenSearch Service and other services is not supported. For an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch Service destination.

    If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination.

    If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified.

    Kinesis Data Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.

    " + "documentation":"

    Updates the specified destination of the specified delivery stream.

    Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.

    Switching between Amazon OpenSearch Service and other services is not supported. For an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch Service destination.

    If the destination type is the same, Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination.

    If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Firehose does not merge any parameters. In this case, all parameters must be specified.

    Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.

    " } }, "shapes":{ @@ -244,7 +244,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.

    " }, "CollectionEndpoint":{ "shape":"AmazonOpenSearchServerlessCollectionEndpoint", @@ -260,11 +260,11 @@ }, "RetryOptions":{ "shape":"AmazonOpenSearchServerlessRetryOptions", - "documentation":"

    The retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).

    " + "documentation":"

    The retry behavior in case Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).

    " }, "S3BackupMode":{ "shape":"AmazonOpenSearchServerlessS3BackupMode", - "documentation":"

    Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.

    " + "documentation":"

    Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.

    " }, "S3Configuration":{"shape":"S3DestinationConfiguration"}, "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, @@ -312,7 +312,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.

    " }, "CollectionEndpoint":{ "shape":"AmazonOpenSearchServerlessCollectionEndpoint", @@ -328,7 +328,7 @@ }, "RetryOptions":{ "shape":"AmazonOpenSearchServerlessRetryOptions", - "documentation":"

    The retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).

    " + "documentation":"

    The retry behavior in case Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).

    " }, "S3Update":{"shape":"S3DestinationUpdate"}, "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, @@ -352,10 +352,10 @@ "members":{ "DurationInSeconds":{ "shape":"AmazonOpenSearchServerlessRetryDurationInSeconds", - "documentation":"

    After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

    " + "documentation":"

    After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

    " } }, - "documentation":"

    Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service.

    " + "documentation":"

    Configures retry behavior in case Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service.

    " }, "AmazonOpenSearchServerlessS3BackupMode":{ "type":"string", @@ -404,7 +404,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.

    " }, "DomainARN":{ "shape":"AmazonopensearchserviceDomainARN", @@ -420,7 +420,7 @@ }, "TypeName":{ "shape":"AmazonopensearchserviceTypeName", - "documentation":"

    The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time.

    " + "documentation":"

    The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during run time.

    " }, "IndexRotationPeriod":{ "shape":"AmazonopensearchserviceIndexRotationPeriod", @@ -432,11 +432,11 @@ }, "RetryOptions":{ "shape":"AmazonopensearchserviceRetryOptions", - "documentation":"

    The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).

    " + "documentation":"

    The retry behavior in case Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).

    " }, "S3BackupMode":{ "shape":"AmazonopensearchserviceS3BackupMode", - "documentation":"

    Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.

    " + "documentation":"

    Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.

    " }, "S3Configuration":{"shape":"S3DestinationConfiguration"}, "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, @@ -444,7 +444,7 @@ "VpcConfiguration":{"shape":"VpcConfiguration"}, "DocumentIdOptions":{ "shape":"DocumentIdOptions", - "documentation":"

    Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

    " + "documentation":"

    Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

    " } }, "documentation":"

    Describes the configuration of a destination in Amazon OpenSearch Service

    " @@ -462,7 +462,7 @@ }, "ClusterEndpoint":{ "shape":"AmazonopensearchserviceClusterEndpoint", - "documentation":"

    The endpoint to use when communicating with the cluster. Kinesis Data Firehose uses either this ClusterEndpoint or the DomainARN field to send data to Amazon OpenSearch Service.

    " + "documentation":"

    The endpoint to use when communicating with the cluster. Firehose uses either this ClusterEndpoint or the DomainARN field to send data to Amazon OpenSearch Service.

    " }, "IndexName":{ "shape":"AmazonopensearchserviceIndexName", @@ -494,7 +494,7 @@ "VpcConfigurationDescription":{"shape":"VpcConfigurationDescription"}, "DocumentIdOptions":{ "shape":"DocumentIdOptions", - "documentation":"

    Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

    " + "documentation":"

    Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

    " } }, "documentation":"

    The destination description in Amazon OpenSearch Service.

    " @@ -504,7 +504,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.

    " }, "DomainARN":{ "shape":"AmazonopensearchserviceDomainARN", @@ -520,7 +520,7 @@ }, "TypeName":{ "shape":"AmazonopensearchserviceTypeName", - "documentation":"

    The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during runtime.

    If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Kinesis Data Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.

    " + "documentation":"

    The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during runtime.

    If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.

    " }, "IndexRotationPeriod":{ "shape":"AmazonopensearchserviceIndexRotationPeriod", @@ -532,14 +532,14 @@ }, "RetryOptions":{ "shape":"AmazonopensearchserviceRetryOptions", - "documentation":"

    The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).

    " + "documentation":"

    The retry behavior in case Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).

    " }, "S3Update":{"shape":"S3DestinationUpdate"}, "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, "DocumentIdOptions":{ "shape":"DocumentIdOptions", - "documentation":"

    Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

    " + "documentation":"

    Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

    " } }, "documentation":"

    Describes an update for a destination in Amazon OpenSearch Service.

    " @@ -576,10 +576,10 @@ "members":{ "DurationInSeconds":{ "shape":"AmazonopensearchserviceRetryDurationInSeconds", - "documentation":"

    After an initial failure to deliver to Amazon OpenSearch Service, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

    " + "documentation":"

    After an initial failure to deliver to Amazon OpenSearch Service, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

    " } }, - "documentation":"

    Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon OpenSearch Service.

    " + "documentation":"

    Configures retry behavior in case Firehose is unable to deliver documents to Amazon OpenSearch Service.

    " }, "AmazonopensearchserviceS3BackupMode":{ "type":"string", @@ -635,7 +635,7 @@ "documentation":"

    Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs, and vice versa.

    " } }, - "documentation":"

    Describes hints for the buffering to perform before delivering data to the destination. These options are treated as hints, and therefore Kinesis Data Firehose might choose to use different values when it is optimal. The SizeInMBs and IntervalInSeconds parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.

    " + "documentation":"

    Describes hints for the buffering to perform before delivering data to the destination. These options are treated as hints, and therefore Firehose might choose to use different values when it is optimal. The SizeInMBs and IntervalInSeconds parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.

    " }, "CloudWatchLoggingOptions":{ "type":"structure", @@ -715,7 +715,7 @@ }, "CopyOptions":{ "shape":"CopyOptions", - "documentation":"

    Optional parameters to use with the Amazon Redshift COPY command. For more information, see the \"Optional Parameters\" section of Amazon Redshift COPY command. Some possible examples that would apply to Kinesis Data Firehose are as follows:

    delimiter '\\t' lzop; - fields are delimited with \"\\t\" (TAB character) and compressed using lzop.

    delimiter '|' - fields are delimited with \"|\" (this is the default delimiter).

    delimiter '|' escape - the delimiter should be escaped.

    fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' - fields are fixed width in the source, with each width specified after every column in the table.

    JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path specified is the format of the data.

    For more examples, see Amazon Redshift COPY command examples.

    " + "documentation":"

    Optional parameters to use with the Amazon Redshift COPY command. For more information, see the \"Optional Parameters\" section of Amazon Redshift COPY command. Some possible examples that would apply to Firehose are as follows:

    delimiter '\\t' lzop; - fields are delimited with \"\\t\" (TAB character) and compressed using lzop.

    delimiter '|' - fields are delimited with \"|\" (this is the default delimiter).

    delimiter '|' escape - the delimiter should be escaped.

    fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' - fields are fixed width in the source, with each width specified after every column in the table.

    JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path specified is the format of the data.

    For more examples, see Amazon Redshift COPY command examples.

    " } }, "documentation":"

    Describes a COPY command for Amazon Redshift.

    " @@ -799,6 +799,11 @@ } } }, + "CustomTimeZone":{ + "type":"string", + "max":50, + "min":0 + }, "Data":{ "type":"blob", "max":1024000, @@ -813,18 +818,18 @@ }, "InputFormatConfiguration":{ "shape":"InputFormatConfiguration", - "documentation":"

    Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. This parameter is required if Enabled is set to true.

    " + "documentation":"

    Specifies the deserializer that you want Firehose to use to convert the format of your data from JSON. This parameter is required if Enabled is set to true.

    " }, "OutputFormatConfiguration":{ "shape":"OutputFormatConfiguration", - "documentation":"

    Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if Enabled is set to true.

    " + "documentation":"

    Specifies the serializer that you want Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if Enabled is set to true.

    " }, "Enabled":{ "shape":"BooleanObject", "documentation":"

    Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

    " } }, - "documentation":"

    Specifies that you want Kinesis Data Firehose to convert data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. Kinesis Data Firehose uses the serializer and deserializer that you specify, in addition to the column information from the Amazon Web Services Glue table, to deserialize your input data from JSON and then serialize it to the Parquet or ORC format. For more information, see Kinesis Data Firehose Record Format Conversion.

    " + "documentation":"

    Specifies that you want Firehose to convert data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. Firehose uses the serializer and deserializer that you specify, in addition to the column information from the Amazon Web Services Glue table, to deserialize your input data from JSON and then serialize it to the Parquet or ORC format. For more information, see Firehose Record Format Conversion.

    " }, "DataTableColumns":{ "type":"string", @@ -855,7 +860,7 @@ }, "AllowForceDelete":{ "shape":"BooleanObject", - "documentation":"

    Set this to true if you want to delete the delivery stream even if Kinesis Data Firehose is unable to retire the grant for the CMK. Kinesis Data Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Kinesis Data Firehose. If a failure to retire the grant happens due to an Amazon Web Services KMS issue, Kinesis Data Firehose keeps retrying the delete operation.

    The default value is false.

    " + "documentation":"

    Set this to true if you want to delete the delivery stream even if Firehose is unable to retire the grant for the CMK. Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Firehose. If a failure to retire the grant happens due to an Amazon Web Services KMS issue, Firehose keeps retrying the delete operation.

    The default value is false.

    " } } }, @@ -962,11 +967,11 @@ "members":{ "KeyARN":{ "shape":"AWSKMSKeyARN", - "documentation":"

    If you set KeyType to CUSTOMER_MANAGED_CMK, you must specify the Amazon Resource Name (ARN) of the CMK. If you set KeyType to Amazon Web Services_OWNED_CMK, Kinesis Data Firehose uses a service-account CMK.

    " + "documentation":"

    If you set KeyType to CUSTOMER_MANAGED_CMK, you must specify the Amazon Resource Name (ARN) of the CMK. If you set KeyType to Amazon Web Services_OWNED_CMK, Firehose uses a service-account CMK.

    " }, "KeyType":{ "shape":"KeyType", - "documentation":"

    Indicates the type of customer master key (CMK) to use for encryption. The default setting is Amazon Web Services_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Kinesis Data Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Kinesis Data Firehose service to use the customer managed CMK to perform encryption and decryption. Kinesis Data Firehose manages that grant.

    When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is encrypted with a customer managed CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement.

    You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Kinesis Data Firehose throws a LimitExceededException.

    To encrypt your delivery stream, use symmetric CMKs. Kinesis Data Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the Amazon Web Services Key Management Service developer guide.

    " + "documentation":"

    Indicates the type of customer master key (CMK) to use for encryption. The default setting is Amazon Web Services_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Firehose service to use the customer managed CMK to perform encryption and decryption. Firehose manages that grant.

    When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is encrypted with a customer managed CMK, Firehose schedules the grant it had on the old CMK for retirement.

    You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Firehose throws a LimitExceededException.

    To encrypt your delivery stream, use symmetric CMKs. Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the Amazon Web Services Key Management Service developer guide.

    " } }, "documentation":"

    Specifies the type and Amazon Resource Name (ARN) of the CMK to use for Server-Side Encryption (SSE).

    " @@ -1050,7 +1055,7 @@ }, "ExclusiveStartDestinationId":{ "shape":"DestinationId", - "documentation":"

    The ID of the destination to start returning the destination information. Kinesis Data Firehose supports one destination per delivery stream.

    " + "documentation":"

    The ID of the destination to start returning the destination information. Firehose supports one destination per delivery stream.

    " } } }, @@ -1074,14 +1079,14 @@ "members":{ "OpenXJsonSerDe":{ "shape":"OpenXJsonSerDe", - "documentation":"

    The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.

    " + "documentation":"

    The OpenX SerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.

    " }, "HiveJsonSerDe":{ "shape":"HiveJsonSerDe", - "documentation":"

    The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.

    " + "documentation":"

    The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.

    " } }, - "documentation":"

    The deserializer you want Kinesis Data Firehose to use for converting the input data from JSON. Kinesis Data Firehose then serializes the data to its final format using the Serializer. Kinesis Data Firehose supports two types of deserializers: the Apache Hive JSON SerDe and the OpenX JSON SerDe.

    " + "documentation":"

    The deserializer you want Firehose to use for converting the input data from JSON. Firehose then serializes the data to its final format using the Serializer. Firehose supports two types of deserializers: the Apache Hive JSON SerDe and the OpenX JSON SerDe.

    " }, "DestinationDescription":{ "type":"structure", @@ -1146,21 +1151,21 @@ "members":{ "DefaultDocumentIdFormat":{ "shape":"DefaultDocumentIdFormat", - "documentation":"

    When the FIREHOSE_DEFAULT option is chosen, Kinesis Data Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs.

    When the NO_DOCUMENT_ID option is chosen, Kinesis Data Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume less resources in the Amazon OpenSearch Service domain, resulting in improved performance.

    " + "documentation":"

    When the FIREHOSE_DEFAULT option is chosen, Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs.

    When the NO_DOCUMENT_ID option is chosen, Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume less resources in the Amazon OpenSearch Service domain, resulting in improved performance.

    " } }, - "documentation":"

    Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

    " + "documentation":"

    Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

    " }, "DynamicPartitioningConfiguration":{ "type":"structure", "members":{ "RetryOptions":{ "shape":"RetryOptions", - "documentation":"

    The retry behavior in case Kinesis Data Firehose is unable to deliver data to an Amazon S3 prefix.

    " + "documentation":"

    The retry behavior in case Firehose is unable to deliver data to an Amazon S3 prefix.

    " }, "Enabled":{ "shape":"BooleanObject", - "documentation":"

    Specifies that the dynamic partitioning is enabled for this Kinesis Data Firehose delivery stream.

    " + "documentation":"

    Specifies that the dynamic partitioning is enabled for this Firehose delivery stream.

    " } }, "documentation":"

    The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations.

    " @@ -1205,7 +1210,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

    " }, "DomainARN":{ "shape":"ElasticsearchDomainARN", @@ -1221,7 +1226,7 @@ }, "TypeName":{ "shape":"ElasticsearchTypeName", - "documentation":"

    The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time.

    For Elasticsearch 7.x, don't specify a TypeName.

    " + "documentation":"

    The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during run time.

    For Elasticsearch 7.x, don't specify a TypeName.

    " }, "IndexRotationPeriod":{ "shape":"ElasticsearchIndexRotationPeriod", @@ -1233,11 +1238,11 @@ }, "RetryOptions":{ "shape":"ElasticsearchRetryOptions", - "documentation":"

    The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).

    " + "documentation":"

    The retry behavior in case Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).

    " }, "S3BackupMode":{ "shape":"ElasticsearchS3BackupMode", - "documentation":"

    Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly.

    You can't change this backup mode after you create the delivery stream.

    " + "documentation":"

    Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly.

    You can't change this backup mode after you create the delivery stream.

    " }, "S3Configuration":{ "shape":"S3DestinationConfiguration", @@ -1257,7 +1262,7 @@ }, "DocumentIdOptions":{ "shape":"DocumentIdOptions", - "documentation":"

    Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

    " + "documentation":"

    Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

    " } }, "documentation":"

    Describes the configuration of a destination in Amazon ES.

    " @@ -1271,11 +1276,11 @@ }, "DomainARN":{ "shape":"ElasticsearchDomainARN", - "documentation":"

    The ARN of the Amazon ES domain. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

    Kinesis Data Firehose uses either ClusterEndpoint or DomainARN to send data to Amazon ES.

    " + "documentation":"

    The ARN of the Amazon ES domain. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

    Firehose uses either ClusterEndpoint or DomainARN to send data to Amazon ES.

    " }, "ClusterEndpoint":{ "shape":"ElasticsearchClusterEndpoint", - "documentation":"

    The endpoint to use when communicating with the cluster. Kinesis Data Firehose uses either this ClusterEndpoint or the DomainARN field to send data to Amazon ES.

    " + "documentation":"

    The endpoint to use when communicating with the cluster. Firehose uses either this ClusterEndpoint or the DomainARN field to send data to Amazon ES.

    " }, "IndexName":{ "shape":"ElasticsearchIndexName", @@ -1319,7 +1324,7 @@ }, "DocumentIdOptions":{ "shape":"DocumentIdOptions", - "documentation":"

    Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

    " + "documentation":"

    Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

    " } }, "documentation":"

    The destination description in Amazon ES.

    " @@ -1329,7 +1334,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

    " }, "DomainARN":{ "shape":"ElasticsearchDomainARN", @@ -1345,7 +1350,7 @@ }, "TypeName":{ "shape":"ElasticsearchTypeName", - "documentation":"

    The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during runtime.

    If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Kinesis Data Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.

    " + "documentation":"

    The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during runtime.

    If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.

    " }, "IndexRotationPeriod":{ "shape":"ElasticsearchIndexRotationPeriod", @@ -1357,7 +1362,7 @@ }, "RetryOptions":{ "shape":"ElasticsearchRetryOptions", - "documentation":"

    The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).

    " + "documentation":"

    The retry behavior in case Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).

    " }, "S3Update":{ "shape":"S3DestinationUpdate", @@ -1373,7 +1378,7 @@ }, "DocumentIdOptions":{ "shape":"DocumentIdOptions", - "documentation":"

    Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

    " + "documentation":"

    Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

    " } }, "documentation":"

    Describes an update for a destination in Amazon ES.

    " @@ -1410,10 +1415,10 @@ "members":{ "DurationInSeconds":{ "shape":"ElasticsearchRetryDurationInSeconds", - "documentation":"

    After an initial failure to deliver to Amazon ES, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

    " + "documentation":"

    After an initial failure to deliver to Amazon ES, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

    " } }, - "documentation":"

    Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES.

    " + "documentation":"

    Configures retry behavior in case Firehose is unable to deliver documents to Amazon ES.

    " }, "ElasticsearchS3BackupMode":{ "type":"string", @@ -1471,7 +1476,7 @@ }, "ErrorOutputPrefix":{ "shape":"ErrorOutputPrefix", - "documentation":"

    A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    " + "documentation":"

    A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    " }, "BufferingHints":{ "shape":"BufferingHints", @@ -1508,6 +1513,14 @@ "DynamicPartitioningConfiguration":{ "shape":"DynamicPartitioningConfiguration", "documentation":"

    The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations.

    " + }, + "FileExtension":{ + "shape":"FileExtension", + "documentation":"

    Specify a file extension. It will override the default file extension

    " + }, + "CustomTimeZone":{ + "shape":"CustomTimeZone", + "documentation":"

    The time zone you prefer. UTC is the default.

    " } }, "documentation":"

    Describes the configuration of a destination in Amazon S3.

    " @@ -1536,7 +1549,7 @@ }, "ErrorOutputPrefix":{ "shape":"ErrorOutputPrefix", - "documentation":"

    A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    " + "documentation":"

    A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    " }, "BufferingHints":{ "shape":"BufferingHints", @@ -1573,6 +1586,14 @@ "DynamicPartitioningConfiguration":{ "shape":"DynamicPartitioningConfiguration", "documentation":"

    The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations.

    " + }, + "FileExtension":{ + "shape":"FileExtension", + "documentation":"

    Specify a file extension. It will override the default file extension

    " + }, + "CustomTimeZone":{ + "shape":"CustomTimeZone", + "documentation":"

    The time zone you prefer. UTC is the default.

    " } }, "documentation":"

    Describes a destination in Amazon S3.

    " @@ -1594,7 +1615,7 @@ }, "ErrorOutputPrefix":{ "shape":"ErrorOutputPrefix", - "documentation":"

    A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    " + "documentation":"

    A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    " }, "BufferingHints":{ "shape":"BufferingHints", @@ -1631,6 +1652,14 @@ "DynamicPartitioningConfiguration":{ "shape":"DynamicPartitioningConfiguration", "documentation":"

    The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations.

    " + }, + "FileExtension":{ + "shape":"FileExtension", + "documentation":"

    Specify a file extension. It will override the default file extension

    " + }, + "CustomTimeZone":{ + "shape":"CustomTimeZone", + "documentation":"

    The time zone you prefer. UTC is the default.

    " } }, "documentation":"

    Describes an update for a destination in Amazon S3.

    " @@ -1653,6 +1682,12 @@ }, "documentation":"

    Provides details in case one of the following operations fails due to an error related to KMS: CreateDeliveryStream, DeleteDeliveryStream, StartDeliveryStreamEncryption, StopDeliveryStreamEncryption.

    " }, + "FileExtension":{ + "type":"string", + "max":128, + "min":0, + "pattern":"^$|\\.[0-9a-z!\\-_.*'()]+" + }, "HECAcknowledgmentTimeoutInSeconds":{ "type":"integer", "max":600, @@ -1682,10 +1717,10 @@ "members":{ "TimestampFormats":{ "shape":"ListOfNonEmptyStrings", - "documentation":"

    Indicates how you want Kinesis Data Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse timestamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.

    " + "documentation":"

    Indicates how you want Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse timestamps in epoch milliseconds. If you don't specify a format, Firehose uses java.sql.Timestamp::valueOf by default.

    " } }, - "documentation":"

    The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.

    " + "documentation":"

    The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.

    " }, "HttpEndpointAccessKey":{ "type":"string", @@ -1720,7 +1755,7 @@ "documentation":"

    Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).

    " } }, - "documentation":"

    Describes the buffering options that can be applied before data is delivered to the HTTP endpoint destination. Kinesis Data Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.

    " + "documentation":"

    Describes the buffering options that can be applied before data is delivered to the HTTP endpoint destination. Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.

    " }, "HttpEndpointBufferingIntervalInSeconds":{ "type":"integer", @@ -1802,7 +1837,7 @@ }, "BufferingHints":{ "shape":"HttpEndpointBufferingHints", - "documentation":"

    The buffering options that can be used before data is delivered to the specified destination. Kinesis Data Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.

    " + "documentation":"

    The buffering options that can be used before data is delivered to the specified destination. Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.

    " }, "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, "RequestConfiguration":{ @@ -1812,15 +1847,15 @@ "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, "RoleARN":{ "shape":"RoleARN", - "documentation":"

    Kinesis Data Firehose uses this IAM role for all the permissions that the delivery stream needs.

    " + "documentation":"

    Firehose uses this IAM role for all the permissions that the delivery stream needs.

    " }, "RetryOptions":{ "shape":"HttpEndpointRetryOptions", - "documentation":"

    Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

    " + "documentation":"

    Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

    " }, "S3BackupMode":{ "shape":"HttpEndpointS3BackupMode", - "documentation":"

    Describes the S3 bucket backup options for the data that Kinesis Data Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Kinesis Data Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

    " + "documentation":"

    Describes the S3 bucket backup options for the data that Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

    " }, "S3Configuration":{"shape":"S3DestinationConfiguration"} }, @@ -1835,7 +1870,7 @@ }, "BufferingHints":{ "shape":"HttpEndpointBufferingHints", - "documentation":"

    Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Kinesis Data Firehose teats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.

    " + "documentation":"

    Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.

    " }, "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, "RequestConfiguration":{ @@ -1845,15 +1880,15 @@ "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, "RoleARN":{ "shape":"RoleARN", - "documentation":"

    Kinesis Data Firehose uses this IAM role for all the permissions that the delivery stream needs.

    " + "documentation":"

    Firehose uses this IAM role for all the permissions that the delivery stream needs.

    " }, "RetryOptions":{ "shape":"HttpEndpointRetryOptions", - "documentation":"

    Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

    " + "documentation":"

    Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

    " }, "S3BackupMode":{ "shape":"HttpEndpointS3BackupMode", - "documentation":"

    Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Kinesis Data Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

    " + "documentation":"

    Describes the S3 bucket backup options for the data that Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

    " }, "S3DestinationDescription":{"shape":"S3DestinationDescription"} }, @@ -1868,7 +1903,7 @@ }, "BufferingHints":{ "shape":"HttpEndpointBufferingHints", - "documentation":"

    Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Kinesis Data Firehose teats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.

    " + "documentation":"

    Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.

    " }, "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, "RequestConfiguration":{ @@ -1878,15 +1913,15 @@ "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, "RoleARN":{ "shape":"RoleARN", - "documentation":"

    Kinesis Data Firehose uses this IAM role for all the permissions that the delivery stream needs.

    " + "documentation":"

    Firehose uses this IAM role for all the permissions that the delivery stream needs.

    " }, "RetryOptions":{ "shape":"HttpEndpointRetryOptions", - "documentation":"

    Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

    " + "documentation":"

    Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

    " }, "S3BackupMode":{ "shape":"HttpEndpointS3BackupMode", - "documentation":"

    Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Kinesis Data Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

    " + "documentation":"

    Describes the S3 bucket backup options for the data that Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

    " }, "S3Update":{"shape":"S3DestinationUpdate"} }, @@ -1903,7 +1938,7 @@ "members":{ "ContentEncoding":{ "shape":"ContentEncoding", - "documentation":"

    Kinesis Data Firehose uses the content encoding to compress the body of a request before sending the request to the destination. For more information, see Content-Encoding in MDN Web Docs, the official Mozilla documentation.

    " + "documentation":"

    Firehose uses the content encoding to compress the body of a request before sending the request to the destination. For more information, see Content-Encoding in MDN Web Docs, the official Mozilla documentation.

    " }, "CommonAttributes":{ "shape":"HttpEndpointCommonAttributesList", @@ -1922,10 +1957,10 @@ "members":{ "DurationInSeconds":{ "shape":"HttpEndpointRetryDurationInSeconds", - "documentation":"

    The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to the custom destination via HTTPS endpoint fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from the specified destination after each attempt.

    " + "documentation":"

    The total amount of time that Firehose spends on retries. This duration starts after the initial attempt to send data to the custom destination via HTTPS endpoint fails. It doesn't include the periods during which Firehose waits for acknowledgment from the specified destination after each attempt.

    " } }, - "documentation":"

    Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

    " + "documentation":"

    Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

    " }, "HttpEndpointS3BackupMode":{ "type":"string", @@ -1973,7 +2008,7 @@ "code":{"shape":"ErrorCode"}, "message":{"shape":"ErrorMessage"} }, - "documentation":"

    Kinesis Data Firehose throws this exception when an attempt to put records or to start or stop delivery stream encryption fails. This happens when the KMS service throws one of the following exception types: AccessDeniedException, InvalidStateException, DisabledException, or NotFoundException.

    ", + "documentation":"

    Firehose throws this exception when an attempt to put records or to start or stop delivery stream encryption fails. This happens when the KMS service throws one of the following exception types: AccessDeniedException, InvalidStateException, DisabledException, or NotFoundException.

    ", "exception":true }, "InvalidSourceException":{ @@ -2040,10 +2075,10 @@ }, "DeliveryStartTimestamp":{ "shape":"DeliveryStartTimestamp", - "documentation":"

    Kinesis Data Firehose starts retrieving records from the Kinesis data stream starting with this timestamp.

    " + "documentation":"

    Firehose starts retrieving records from the Kinesis data stream starting with this timestamp.

    " } }, - "documentation":"

    Details about a Kinesis data stream used as the source for a Kinesis Data Firehose delivery stream.

    " + "documentation":"

    Details about a Kinesis data stream used as the source for a Firehose delivery stream.

    " }, "LimitExceededException":{ "type":"structure", @@ -2207,10 +2242,10 @@ }, "DeliveryStartTimestamp":{ "shape":"DeliveryStartTimestamp", - "documentation":"

    Kinesis Data Firehose starts retrieving records from the topic within the Amazon MSK cluster starting with this timestamp.

    " + "documentation":"

    Firehose starts retrieving records from the topic within the Amazon MSK cluster starting with this timestamp.

    " } }, - "documentation":"

    Details about the Amazon MSK cluster used as the source for a Kinesis Data Firehose delivery stream.

    " + "documentation":"

    Details about the Amazon MSK cluster used as the source for a Firehose delivery stream.

    " }, "NoEncryptionConfig":{ "type":"string", @@ -2237,18 +2272,18 @@ "members":{ "ConvertDotsInJsonKeysToUnderscores":{ "shape":"BooleanObject", - "documentation":"

    When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is \"a.b\", you can define the column name to be \"a_b\" when using this option.

    The default is false.

    " + "documentation":"

    When set to true, specifies that the names of the keys include dots and that you want Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is \"a.b\", you can define the column name to be \"a_b\" when using this option.

    The default is false.

    " }, "CaseInsensitive":{ "shape":"BooleanObject", - "documentation":"

    When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.

    " + "documentation":"

    When set to true, which is the default, Firehose converts JSON keys to lowercase before deserializing them.

    " }, "ColumnToJsonKeyMappings":{ "shape":"ColumnToJsonKeyMappings", "documentation":"

    Maps column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to {\"ts\": \"timestamp\"} to map this key to a column named ts.

    " } }, - "documentation":"

    The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.

    " + "documentation":"

    The OpenX SerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.

    " }, "OrcCompression":{ "type":"string", @@ -2278,7 +2313,7 @@ }, "BlockSizeBytes":{ "shape":"BlockSizeBytes", - "documentation":"

    The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

    " + "documentation":"

    The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.

    " }, "RowIndexStride":{ "shape":"OrcRowIndexStride", @@ -2290,7 +2325,7 @@ }, "PaddingTolerance":{ "shape":"Proportion", - "documentation":"

    A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.

    For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.

    Kinesis Data Firehose ignores this parameter when OrcSerDe$EnablePadding is false.

    " + "documentation":"

    A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.

    For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.

    Firehose ignores this parameter when OrcSerDe$EnablePadding is false.

    " }, "Compression":{ "shape":"OrcCompression", @@ -2298,7 +2333,7 @@ }, "BloomFilterColumns":{ "shape":"ListOfNonEmptyStringsWithoutWhitespace", - "documentation":"

    The column names for which you want Kinesis Data Firehose to create bloom filters. The default is null.

    " + "documentation":"

    The column names for which you want Firehose to create bloom filters. The default is null.

    " }, "BloomFilterFalsePositiveProbability":{ "shape":"Proportion", @@ -2327,7 +2362,7 @@ "documentation":"

    Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. If both are non-null, the server rejects the request.

    " } }, - "documentation":"

    Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data before it writes it to Amazon S3. This parameter is required if Enabled is set to true.

    " + "documentation":"

    Specifies the serializer that you want Firehose to use to convert the format of your data before it writes it to Amazon S3. This parameter is required if Enabled is set to true.

    " }, "ParquetCompression":{ "type":"string", @@ -2346,7 +2381,7 @@ "members":{ "BlockSizeBytes":{ "shape":"BlockSizeBytes", - "documentation":"

    The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

    " + "documentation":"

    The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.

    " }, "PageSizeBytes":{ "shape":"ParquetPageSizeBytes", @@ -2418,7 +2453,7 @@ "documentation":"

    The processor parameters.

    " } }, - "documentation":"

    Describes a data processor.

    " + "documentation":"

    Describes a data processor.

    If you want to add a new line delimiter between records in objects that are delivered to Amazon S3, choose AppendDelimiterToRecord as a processor type. You don’t have to put a processor parameter when you select AppendDelimiterToRecord.

    " }, "ProcessorList":{ "type":"list", @@ -2458,7 +2493,8 @@ "BufferIntervalInSeconds", "SubRecordType", "Delimiter", - "CompressionFormat" + "CompressionFormat", + "DataMessageExtraction" ] }, "ProcessorParameterValue":{ @@ -2472,6 +2508,7 @@ "enum":[ "RecordDeAggregation", "Decompression", + "CloudWatchLogProcessing", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord" @@ -2629,7 +2666,7 @@ }, "RetryOptions":{ "shape":"RedshiftRetryOptions", - "documentation":"

    The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

    " + "documentation":"

    The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

    " }, "S3Configuration":{ "shape":"S3DestinationConfiguration", @@ -2682,7 +2719,7 @@ }, "RetryOptions":{ "shape":"RedshiftRetryOptions", - "documentation":"

    The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

    " + "documentation":"

    The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

    " }, "S3DestinationDescription":{ "shape":"S3DestinationDescription", @@ -2732,7 +2769,7 @@ }, "RetryOptions":{ "shape":"RedshiftRetryOptions", - "documentation":"

    The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

    " + "documentation":"

    The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

    " }, "S3Update":{ "shape":"S3DestinationUpdate", @@ -2767,10 +2804,10 @@ "members":{ "DurationInSeconds":{ "shape":"RedshiftRetryDurationInSeconds", - "documentation":"

    The length of time during which Kinesis Data Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Kinesis Data Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.

    " + "documentation":"

    The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.

    " } }, - "documentation":"

    Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift.

    " + "documentation":"

    Configures retry behavior in case Firehose is unable to deliver documents to Amazon Redshift.

    " }, "RedshiftS3BackupMode":{ "type":"string", @@ -2811,10 +2848,10 @@ "members":{ "DurationInSeconds":{ "shape":"RetryDurationInSeconds", - "documentation":"

    The period of time during which Kinesis Data Firehose retries to deliver data to the specified Amazon S3 prefix.

    " + "documentation":"

    The period of time during which Firehose retries to deliver data to the specified Amazon S3 prefix.

    " } }, - "documentation":"

    The retry behavior in case Kinesis Data Firehose is unable to deliver data to an Amazon S3 prefix.

    " + "documentation":"

    The retry behavior in case Firehose is unable to deliver data to an Amazon S3 prefix.

    " }, "RoleARN":{ "type":"string", @@ -2850,7 +2887,7 @@ }, "ErrorOutputPrefix":{ "shape":"ErrorOutputPrefix", - "documentation":"

    A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    " + "documentation":"

    A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    " }, "BufferingHints":{ "shape":"BufferingHints", @@ -2895,7 +2932,7 @@ }, "ErrorOutputPrefix":{ "shape":"ErrorOutputPrefix", - "documentation":"

    A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    " + "documentation":"

    A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    " }, "BufferingHints":{ "shape":"BufferingHints", @@ -2933,7 +2970,7 @@ }, "ErrorOutputPrefix":{ "shape":"ErrorOutputPrefix", - "documentation":"

    A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    " + "documentation":"

    A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

    " }, "BufferingHints":{ "shape":"BufferingHints", @@ -2959,7 +2996,7 @@ "members":{ "RoleARN":{ "shape":"NonEmptyStringWithoutWhitespace", - "documentation":"

    The role that Kinesis Data Firehose can use to access Amazon Web Services Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.

    If the SchemaConfiguration request parameter is used as part of invoking the CreateDeliveryStream API, then the RoleARN property is required and its value must be specified.

    " + "documentation":"

    The role that Firehose can use to access Amazon Web Services Glue. This role must be in the same account you use for Firehose. Cross-account roles aren't allowed.

    If the SchemaConfiguration request parameter is used as part of invoking the CreateDeliveryStream API, then the RoleARN property is required and its value must be specified.

    " }, "CatalogId":{ "shape":"NonEmptyStringWithoutWhitespace", @@ -2979,10 +3016,10 @@ }, "VersionId":{ "shape":"NonEmptyStringWithoutWhitespace", - "documentation":"

    Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST, Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.

    " + "documentation":"

    Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST, Firehose uses the most recent version. This means that any updates to the table are automatically picked up.

    " } }, - "documentation":"

    Specifies the schema to which you want Kinesis Data Firehose to configure your data before it writes it to Amazon S3. This parameter is required if Enabled is set to true.

    " + "documentation":"

    Specifies the schema to which you want Firehose to configure your data before it writes it to Amazon S3. This parameter is required if Enabled is set to true.

    " }, "SecurityGroupIdList":{ "type":"list", @@ -3002,7 +3039,7 @@ "documentation":"

    A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC.

    " } }, - "documentation":"

    The serializer that you want Kinesis Data Firehose to use to convert data to the target format before writing it to Amazon S3. Kinesis Data Firehose supports two types of serializers: the ORC SerDe and the Parquet SerDe.

    " + "documentation":"

    The serializer that you want Firehose to use to convert data to the target format before writing it to Amazon S3. Firehose supports two types of serializers: the ORC SerDe and the Parquet SerDe.

    " }, "ServiceUnavailableException":{ "type":"structure", @@ -3012,7 +3049,7 @@ "documentation":"

    A message that provides information about the error.

    " } }, - "documentation":"

    The service is unavailable. Back off and retry the operation. If you continue to see the exception, throughput limits for the delivery stream may have been exceeded. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits.

    ", + "documentation":"

    The service is unavailable. Back off and retry the operation. If you continue to see the exception, throughput limits for the delivery stream may have been exceeded. For more information about limits and how to request an increase, see Amazon Firehose Limits.

    ", "exception":true, "fault":true }, @@ -3117,7 +3154,7 @@ }, "RetryOptions":{ "shape":"SnowflakeRetryOptions", - "documentation":"

    The time period where Kinesis Data Firehose will retry sending data to the chosen HTTP endpoint.

    " + "documentation":"

    The time period where Firehose will retry sending data to the chosen HTTP endpoint.

    " }, "S3BackupMode":{ "shape":"SnowflakeS3BackupMode", @@ -3178,7 +3215,7 @@ }, "RetryOptions":{ "shape":"SnowflakeRetryOptions", - "documentation":"

    The time period where Kinesis Data Firehose will retry sending data to the chosen HTTP endpoint.

    " + "documentation":"

    The time period where Firehose will retry sending data to the chosen HTTP endpoint.

    " }, "S3BackupMode":{ "shape":"SnowflakeS3BackupMode", @@ -3243,7 +3280,7 @@ }, "RetryOptions":{ "shape":"SnowflakeRetryOptions", - "documentation":"

    Specify how long Kinesis Data Firehose retries sending data to the New Relic HTTP endpoint. After sending data, Kinesis Data Firehose first waits for an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment doesn’t arrive within the acknowledgment timeout period, Kinesis Data Firehose starts the retry duration counter. It keeps retrying until the retry duration expires. After that, Kinesis Data Firehose considers it a data delivery failure and backs up the data to your Amazon S3 bucket. Every time that Kinesis Data Firehose sends data to the HTTP endpoint (either the initial attempt or a retry), it restarts the acknowledgement timeout counter and waits for an acknowledgement from the HTTP endpoint. Even if the retry duration expires, Kinesis Data Firehose still waits for the acknowledgment until it receives it or the acknowledgement timeout period is reached. If the acknowledgment times out, Kinesis Data Firehose determines whether there's time left in the retry counter. If there is time left, it retries again and repeats the logic until it receives an acknowledgment or determines that the retry time has expired. If you don't want Kinesis Data Firehose to retry sending data, set this value to 0.

    " + "documentation":"

    Specify how long Firehose retries sending data to the Snowflake HTTP endpoint. After sending data, Firehose first waits for an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment doesn’t arrive within the acknowledgment timeout period, Firehose starts the retry duration counter. It keeps retrying until the retry duration expires. After that, Firehose considers it a data delivery failure and backs up the data to your Amazon S3 bucket. Every time that Firehose sends data to the HTTP endpoint (either the initial attempt or a retry), it restarts the acknowledgement timeout counter and waits for an acknowledgement from the HTTP endpoint. Even if the retry duration expires, Firehose still waits for the acknowledgment until it receives it or the acknowledgement timeout period is reached. If the acknowledgment times out, Firehose determines whether there's time left in the retry counter. If there is time left, it retries again and repeats the logic until it receives an acknowledgment or determines that the retry time has expired. If you don't want Firehose to retry sending data, set this value to 0.

    " }, "S3BackupMode":{ "shape":"SnowflakeS3BackupMode", @@ -3289,10 +3326,10 @@ "members":{ "DurationInSeconds":{ "shape":"SnowflakeRetryDurationInSeconds", - "documentation":"

    the time period where Kinesis Data Firehose will retry sending data to the chosen HTTP endpoint.

    " + "documentation":"

    the time period where Firehose will retry sending data to the chosen HTTP endpoint.

    " } }, - "documentation":"

    Specify how long Kinesis Data Firehose retries sending data to the New Relic HTTP endpoint. After sending data, Kinesis Data Firehose first waits for an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment doesn’t arrive within the acknowledgment timeout period, Kinesis Data Firehose starts the retry duration counter. It keeps retrying until the retry duration expires. After that, Kinesis Data Firehose considers it a data delivery failure and backs up the data to your Amazon S3 bucket. Every time that Kinesis Data Firehose sends data to the HTTP endpoint (either the initial attempt or a retry), it restarts the acknowledgement timeout counter and waits for an acknowledgement from the HTTP endpoint. Even if the retry duration expires, Kinesis Data Firehose still waits for the acknowledgment until it receives it or the acknowledgement timeout period is reached. If the acknowledgment times out, Kinesis Data Firehose determines whether there's time left in the retry counter. If there is time left, it retries again and repeats the logic until it receives an acknowledgment or determines that the retry time has expired. If you don't want Kinesis Data Firehose to retry sending data, set this value to 0.

    " + "documentation":"

    Specify how long Firehose retries sending data to the Snowflake HTTP endpoint. After sending data, Firehose first waits for an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment doesn’t arrive within the acknowledgment timeout period, Firehose starts the retry duration counter. It keeps retrying until the retry duration expires. After that, Firehose considers it a data delivery failure and backs up the data to your Amazon S3 bucket. Every time that Firehose sends data to the HTTP endpoint (either the initial attempt or a retry), it restarts the acknowledgement timeout counter and waits for an acknowledgement from the HTTP endpoint. Even if the retry duration expires, Firehose still waits for the acknowledgment until it receives it or the acknowledgement timeout period is reached. If the acknowledgment times out, Firehose determines whether there's time left in the retry counter. If there is time left, it retries again and repeats the logic until it receives an acknowledgment or determines that the retry time has expired. If you don't want Firehose to retry sending data, set this value to 0.

    " }, "SnowflakeRole":{ "type":"string", @@ -3362,7 +3399,7 @@ "documentation":"

    The configuration description for the Amazon MSK cluster to be used as the source for a delivery stream.

    " } }, - "documentation":"

    Details about a Kinesis data stream used as the source for a Kinesis Data Firehose delivery stream.

    " + "documentation":"

    Details about a Kinesis data stream used as the source for a Firehose delivery stream.

    " }, "SplunkBufferingHints":{ "type":"structure", @@ -3399,7 +3436,7 @@ "members":{ "HECEndpoint":{ "shape":"HECEndpoint", - "documentation":"

    The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.

    " + "documentation":"

    The HTTP Event Collector (HEC) endpoint to which Firehose sends your data.

    " }, "HECEndpointType":{ "shape":"HECEndpointType", @@ -3411,15 +3448,15 @@ }, "HECAcknowledgmentTimeoutInSeconds":{ "shape":"HECAcknowledgmentTimeoutInSeconds", - "documentation":"

    The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.

    " + "documentation":"

    The amount of time that Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Firehose either tries to send the data again or considers it an error, based on your retry settings.

    " }, "RetryOptions":{ "shape":"SplunkRetryOptions", - "documentation":"

    The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.

    " + "documentation":"

    The retry behavior in case Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.

    " }, "S3BackupMode":{ "shape":"SplunkS3BackupMode", - "documentation":"

    Defines how documents should be delivered to Amazon S3. When set to FailedEventsOnly, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly.

    You can update this backup mode from FailedEventsOnly to AllEvents. You can't update it from AllEvents to FailedEventsOnly.

    " + "documentation":"

    Defines how documents should be delivered to Amazon S3. When set to FailedEventsOnly, Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents, Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly.

    You can update this backup mode from FailedEventsOnly to AllEvents. You can't update it from AllEvents to FailedEventsOnly.

    " }, "S3Configuration":{ "shape":"S3DestinationConfiguration", @@ -3445,7 +3482,7 @@ "members":{ "HECEndpoint":{ "shape":"HECEndpoint", - "documentation":"

    The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.

    " + "documentation":"

    The HTTP Event Collector (HEC) endpoint to which Firehose sends your data.

    " }, "HECEndpointType":{ "shape":"HECEndpointType", @@ -3457,15 +3494,15 @@ }, "HECAcknowledgmentTimeoutInSeconds":{ "shape":"HECAcknowledgmentTimeoutInSeconds", - "documentation":"

    The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.

    " + "documentation":"

    The amount of time that Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Firehose either tries to send the data again or considers it an error, based on your retry settings.

    " }, "RetryOptions":{ "shape":"SplunkRetryOptions", - "documentation":"

    The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.

    " + "documentation":"

    The retry behavior in case Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.

    " }, "S3BackupMode":{ "shape":"SplunkS3BackupMode", - "documentation":"

    Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is FailedDocumentsOnly.

    " + "documentation":"

    Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly, Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is FailedDocumentsOnly.

    " }, "S3DestinationDescription":{ "shape":"S3DestinationDescription", @@ -3491,7 +3528,7 @@ "members":{ "HECEndpoint":{ "shape":"HECEndpoint", - "documentation":"

    The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.

    " + "documentation":"

    The HTTP Event Collector (HEC) endpoint to which Firehose sends your data.

    " }, "HECEndpointType":{ "shape":"HECEndpointType", @@ -3503,15 +3540,15 @@ }, "HECAcknowledgmentTimeoutInSeconds":{ "shape":"HECAcknowledgmentTimeoutInSeconds", - "documentation":"

    The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.

    " + "documentation":"

    The amount of time that Firehose waits to receive an acknowledgment from Splunk after it sends data. At the end of the timeout period, Firehose either tries to send the data again or considers it an error, based on your retry settings.

    " }, "RetryOptions":{ "shape":"SplunkRetryOptions", - "documentation":"

    The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.

    " + "documentation":"

    The retry behavior in case Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.

    " }, "S3BackupMode":{ "shape":"SplunkS3BackupMode", - "documentation":"

    Specifies how you want Kinesis Data Firehose to back up documents to Amazon S3. When set to FailedDocumentsOnly, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly.

    You can update this backup mode from FailedEventsOnly to AllEvents. You can't update it from AllEvents to FailedEventsOnly.

    " + "documentation":"

    Specifies how you want Firehose to back up documents to Amazon S3. When set to FailedDocumentsOnly, Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents, Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly.

    You can update this backup mode from FailedEventsOnly to AllEvents. You can't update it from AllEvents to FailedEventsOnly.

    " }, "S3Update":{ "shape":"S3DestinationUpdate", @@ -3542,10 +3579,10 @@ "members":{ "DurationInSeconds":{ "shape":"SplunkRetryDurationInSeconds", - "documentation":"

    The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from Splunk after each attempt.

    " + "documentation":"

    The total amount of time that Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Firehose waits for acknowledgment from Splunk after each attempt.

    " } }, - "documentation":"

    Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Splunk, or if it doesn't receive an acknowledgment from Splunk.

    " + "documentation":"

    Configures retry behavior in case Firehose is unable to deliver documents to Splunk, or if it doesn't receive an acknowledgment from Splunk.

    " }, "SplunkS3BackupMode":{ "type":"string", @@ -3765,15 +3802,15 @@ "members":{ "SubnetIds":{ "shape":"SubnetIdList", - "documentation":"

    The IDs of the subnets that you want Kinesis Data Firehose to use to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

    The number of ENIs that Kinesis Data Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Kinesis Data Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Kinesis Data Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.

    " + "documentation":"

    The IDs of the subnets that you want Firehose to use to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

    The number of ENIs that Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.

    " }, "RoleARN":{ "shape":"RoleARN", - "documentation":"

    The ARN of the IAM role that you want the delivery stream to use to create endpoints in the destination VPC. You can use your existing Kinesis Data Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Kinesis Data Firehose service principal and that it grants the following permissions:

    • ec2:DescribeVpcs

    • ec2:DescribeVpcAttribute

    • ec2:DescribeSubnets

    • ec2:DescribeSecurityGroups

    • ec2:DescribeNetworkInterfaces

    • ec2:CreateNetworkInterface

    • ec2:CreateNetworkInterfacePermission

    • ec2:DeleteNetworkInterface

    If you revoke these permissions after you create the delivery stream, Kinesis Data Firehose can't scale out by creating more ENIs when necessary. You might therefore see a degradation in performance.

    " + "documentation":"

    The ARN of the IAM role that you want the delivery stream to use to create endpoints in the destination VPC. You can use your existing Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:

    • ec2:DescribeVpcs

    • ec2:DescribeVpcAttribute

    • ec2:DescribeSubnets

    • ec2:DescribeSecurityGroups

    • ec2:DescribeNetworkInterfaces

    • ec2:CreateNetworkInterface

    • ec2:CreateNetworkInterfacePermission

    • ec2:DeleteNetworkInterface

    When you specify subnets for delivering data to the destination in a private VPC, make sure you have enough number of free IP addresses in chosen subnets. If there is no available free IP address in a specified subnet, Firehose cannot create or add ENIs for the data delivery in the private VPC, and the delivery will be degraded or fail.

    " }, "SecurityGroupIds":{ "shape":"SecurityGroupIdList", - "documentation":"

    The IDs of the security groups that you want Kinesis Data Firehose to use when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups here, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.

    " + "documentation":"

    The IDs of the security groups that you want Firehose to use when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups here, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.

    " } }, "documentation":"

    The details of the VPC of the Amazon OpenSearch or Amazon OpenSearch Serverless destination.

    " @@ -3789,15 +3826,15 @@ "members":{ "SubnetIds":{ "shape":"SubnetIdList", - "documentation":"

    The IDs of the subnets that Kinesis Data Firehose uses to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

    The number of ENIs that Kinesis Data Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Kinesis Data Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Kinesis Data Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.

    " + "documentation":"

    The IDs of the subnets that Firehose uses to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

    The number of ENIs that Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.

    " }, "RoleARN":{ "shape":"RoleARN", - "documentation":"

    The ARN of the IAM role that the delivery stream uses to create endpoints in the destination VPC. You can use your existing Kinesis Data Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Kinesis Data Firehose service principal and that it grants the following permissions:

    • ec2:DescribeVpcs

    • ec2:DescribeVpcAttribute

    • ec2:DescribeSubnets

    • ec2:DescribeSecurityGroups

    • ec2:DescribeNetworkInterfaces

    • ec2:CreateNetworkInterface

    • ec2:CreateNetworkInterfacePermission

    • ec2:DeleteNetworkInterface

    If you revoke these permissions after you create the delivery stream, Kinesis Data Firehose can't scale out by creating more ENIs when necessary. You might therefore see a degradation in performance.

    " + "documentation":"

    The ARN of the IAM role that the delivery stream uses to create endpoints in the destination VPC. You can use your existing Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:

    • ec2:DescribeVpcs

    • ec2:DescribeVpcAttribute

    • ec2:DescribeSubnets

    • ec2:DescribeSecurityGroups

    • ec2:DescribeNetworkInterfaces

    • ec2:CreateNetworkInterface

    • ec2:CreateNetworkInterfacePermission

    • ec2:DeleteNetworkInterface

    If you revoke these permissions after you create the delivery stream, Firehose can't scale out by creating more ENIs when necessary. You might therefore see a degradation in performance.

    " }, "SecurityGroupIds":{ "shape":"SecurityGroupIdList", - "documentation":"

    The IDs of the security groups that Kinesis Data Firehose uses when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.

    " + "documentation":"

    The IDs of the security groups that Firehose uses when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.

    " }, "VpcId":{ "shape":"NonEmptyStringWithoutWhitespace", @@ -3807,5 +3844,5 @@ "documentation":"

    The details of the VPC of the Amazon ES destination.

    " } }, - "documentation":"Amazon Kinesis Data Firehose API Reference

    Amazon Kinesis Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon OpenSearch Service, Amazon Redshift, Splunk, and various other supportd destinations.

    " + "documentation":"Amazon Data Firehose

    Amazon Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon OpenSearch Service, Amazon Redshift, Splunk, and various other supported destinations.

    " } diff --git a/services/fis/pom.xml b/services/fis/pom.xml index 2e3f3ca375da..d41b11df17a2 100644 --- a/services/fis/pom.xml +++ b/services/fis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT fis AWS Java SDK :: Services :: Fis diff --git a/services/fis/src/main/resources/codegen-resources/customization.config b/services/fis/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/fis/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/fms/pom.xml b/services/fms/pom.xml index 6852699fd8b6..abfc592a1157 100644 --- a/services/fms/pom.xml +++ b/services/fms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT fms AWS Java SDK :: Services :: FMS diff --git a/services/fms/src/main/resources/codegen-resources/customization.config b/services/fms/src/main/resources/codegen-resources/customization.config index f5fe1526e9c4..3c0c5caf5f11 100644 --- a/services/fms/src/main/resources/codegen-resources/customization.config +++ b/services/fms/src/main/resources/codegen-resources/customization.config @@ -1,8 +1,9 @@ { - "excludedSimpleMethods" : [ + "excludedSimpleMethods": [ "getAdminAccount", "getNotificationChannel", "listMemberAccounts", "listPolicies" - ] -} \ No newline at end of file + ], + "useSraAuth": true +} diff --git a/services/forecast/pom.xml b/services/forecast/pom.xml index f758e2977163..26daf9b0b8de 100644 --- a/services/forecast/pom.xml +++ b/services/forecast/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT forecast AWS Java SDK :: Services :: Forecast diff --git a/services/forecast/src/main/resources/codegen-resources/customization.config b/services/forecast/src/main/resources/codegen-resources/customization.config new file mode 100644 index 
000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/forecast/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/forecastquery/pom.xml b/services/forecastquery/pom.xml index 9f8216037895..ff9352950332 100644 --- a/services/forecastquery/pom.xml +++ b/services/forecastquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT forecastquery AWS Java SDK :: Services :: Forecastquery diff --git a/services/frauddetector/pom.xml b/services/frauddetector/pom.xml index 91d3e6c1894b..331c699f78c6 100644 --- a/services/frauddetector/pom.xml +++ b/services/frauddetector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT frauddetector AWS Java SDK :: Services :: FraudDetector diff --git a/services/frauddetector/src/main/resources/codegen-resources/customization.config b/services/frauddetector/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/frauddetector/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/freetier/pom.xml b/services/freetier/pom.xml index 2933ee5abf68..d0a0fc1a589a 100644 --- a/services/freetier/pom.xml +++ b/services/freetier/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT freetier AWS Java SDK :: Services :: Free Tier diff --git a/services/freetier/src/main/resources/codegen-resources/customization.config b/services/freetier/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/freetier/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/fsx/pom.xml b/services/fsx/pom.xml index 155049c9e2c6..4f5b759a6989 100644 --- 
a/services/fsx/pom.xml +++ b/services/fsx/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT fsx AWS Java SDK :: Services :: FSx diff --git a/services/gamelift/pom.xml b/services/gamelift/pom.xml index 443b24a9ca37..a70828cdf2d9 100644 --- a/services/gamelift/pom.xml +++ b/services/gamelift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT gamelift AWS Java SDK :: Services :: AWS GameLift diff --git a/services/glacier/pom.xml b/services/glacier/pom.xml index 5a0178285911..4b418f432824 100644 --- a/services/glacier/pom.xml +++ b/services/glacier/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT glacier AWS Java SDK :: Services :: Amazon Glacier diff --git a/services/globalaccelerator/pom.xml b/services/globalaccelerator/pom.xml index 25d0f1e02ebf..4c08b53e5623 100644 --- a/services/globalaccelerator/pom.xml +++ b/services/globalaccelerator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT globalaccelerator AWS Java SDK :: Services :: Global Accelerator diff --git a/services/glue/pom.xml b/services/glue/pom.xml index 07c946423330..463875af47f0 100644 --- a/services/glue/pom.xml +++ b/services/glue/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 glue diff --git a/services/glue/src/main/resources/codegen-resources/customization.config b/services/glue/src/main/resources/codegen-resources/customization.config index e5d2b586984e..0d85d066f3c5 100644 --- a/services/glue/src/main/resources/codegen-resources/customization.config +++ b/services/glue/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,6 @@ { - "excludedSimpleMethods" : ["*"] + "excludedSimpleMethods": [ + "*" + ], + "useSraAuth": true } diff --git a/services/grafana/pom.xml b/services/grafana/pom.xml index 1031731e53d3..fcd07422fce8 100644 --- 
a/services/grafana/pom.xml +++ b/services/grafana/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT grafana AWS Java SDK :: Services :: Grafana diff --git a/services/grafana/src/main/resources/codegen-resources/customization.config b/services/grafana/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/grafana/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/greengrass/pom.xml b/services/greengrass/pom.xml index 488549cbae0b..7bee9d6f8f1a 100644 --- a/services/greengrass/pom.xml +++ b/services/greengrass/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT greengrass AWS Java SDK :: Services :: AWS Greengrass diff --git a/services/greengrassv2/pom.xml b/services/greengrassv2/pom.xml index e5fa67c008c5..015e3cc082e0 100644 --- a/services/greengrassv2/pom.xml +++ b/services/greengrassv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT greengrassv2 AWS Java SDK :: Services :: Greengrass V2 diff --git a/services/greengrassv2/src/main/resources/codegen-resources/customization.config b/services/greengrassv2/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/greengrassv2/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/groundstation/pom.xml b/services/groundstation/pom.xml index a07473e2bed5..9ce6a0dcc7e9 100644 --- a/services/groundstation/pom.xml +++ b/services/groundstation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT groundstation AWS Java SDK :: Services :: GroundStation diff --git a/services/groundstation/src/main/resources/codegen-resources/customization.config 
b/services/groundstation/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/groundstation/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/guardduty/pom.xml b/services/guardduty/pom.xml index 7e97960669fb..1a8101353758 100644 --- a/services/guardduty/pom.xml +++ b/services/guardduty/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 guardduty diff --git a/services/guardduty/src/main/resources/codegen-resources/service-2.json b/services/guardduty/src/main/resources/codegen-resources/service-2.json index f4392f7fa2ee..508e0a24169c 100644 --- a/services/guardduty/src/main/resources/codegen-resources/service-2.json +++ b/services/guardduty/src/main/resources/codegen-resources/service-2.json @@ -494,7 +494,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

    Lists Amazon GuardDuty findings statistics for the specified detector ID.

    " + "documentation":"

    Lists Amazon GuardDuty findings statistics for the specified detector ID.

    There might be regional differences because some flags might not be available in all the Regions where GuardDuty is currently supported. For more information, see Regions and endpoints.

    " }, "GetIPSet":{ "name":"GetIPSet", @@ -720,7 +720,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

    Lists Amazon GuardDuty findings for the specified detector ID.

    " + "documentation":"

    Lists GuardDuty findings for the specified detector ID.

    There might be regional differences because some flags might not be available in all the Regions where GuardDuty is currently supported. For more information, see Regions and endpoints.

    " }, "ListIPSets":{ "name":"ListIPSets", @@ -934,7 +934,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

    Updates the Amazon GuardDuty detector specified by the detectorId.

    There might be regional differences because some data sources might not be available in all the Amazon Web Services Regions where GuardDuty is presently supported. For more information, see Regions and endpoints.

    " + "documentation":"

    Updates the GuardDuty detector specified by the detectorId.

    There might be regional differences because some data sources might not be available in all the Amazon Web Services Regions where GuardDuty is presently supported. For more information, see Regions and endpoints.

    " }, "UpdateFilter":{ "name":"UpdateFilter", @@ -3567,7 +3567,8 @@ "Email":{ "type":"string", "max":64, - "min":1 + "min":1, + "sensitive":true }, "EnableOrganizationAdminAccountRequest":{ "type":"structure", @@ -5686,7 +5687,7 @@ "type":"structure", "members":{ "IpAddressV4":{ - "shape":"String", + "shape":"SensitiveString", "documentation":"

    The IPv4 local address of the connection.

    ", "locationName":"ipAddressV4" } @@ -6089,7 +6090,7 @@ "locationName":"privateDnsName" }, "PrivateIpAddress":{ - "shape":"String", + "shape":"SensitiveString", "documentation":"

    The private IP address of the EC2 instance.

    ", "locationName":"privateIpAddress" }, @@ -6658,7 +6659,7 @@ "locationName":"privateDnsName" }, "PrivateIpAddress":{ - "shape":"String", + "shape":"SensitiveString", "documentation":"

    The private IP address of the EC2 instance.

    ", "locationName":"privateIpAddress" } @@ -6916,7 +6917,7 @@ "locationName":"geoLocation" }, "IpAddressV4":{ - "shape":"String", + "shape":"SensitiveString", "documentation":"

    The IPv4 remote address of the connection.

    ", "locationName":"ipAddressV4" }, @@ -7568,6 +7569,10 @@ "type":"list", "member":{"shape":"SecurityGroup"} }, + "SensitiveString":{ + "type":"string", + "sensitive":true + }, "Service":{ "type":"structure", "members":{ diff --git a/services/health/pom.xml b/services/health/pom.xml index 317ca5f9a64c..b3807c2f226f 100644 --- a/services/health/pom.xml +++ b/services/health/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT health AWS Java SDK :: Services :: AWS Health APIs and Notifications diff --git a/services/health/src/main/resources/codegen-resources/customization.config b/services/health/src/main/resources/codegen-resources/customization.config index e7e0e53e4a7b..287a16eb4a72 100644 --- a/services/health/src/main/resources/codegen-resources/customization.config +++ b/services/health/src/main/resources/codegen-resources/customization.config @@ -1,7 +1,8 @@ { - "verifiedSimpleMethods" : [ + "verifiedSimpleMethods": [ "describeEvents", "describeEntityAggregates", "describeEventTypes" - ] -} \ No newline at end of file + ], + "useSraAuth": true +} diff --git a/services/healthlake/pom.xml b/services/healthlake/pom.xml index 69b14c52bc03..ff9362b34239 100644 --- a/services/healthlake/pom.xml +++ b/services/healthlake/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT healthlake AWS Java SDK :: Services :: Health Lake diff --git a/services/healthlake/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/healthlake/src/main/resources/codegen-resources/endpoint-rule-set.json index 52c8404b6c90..b06ae2be7c17 100644 --- a/services/healthlake/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/healthlake/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ 
] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/services/healthlake/src/main/resources/codegen-resources/service-2.json b/services/healthlake/src/main/resources/codegen-resources/service-2.json index ce4e920381f3..bfc582a26178 100644 --- a/services/healthlake/src/main/resources/codegen-resources/service-2.json +++ b/services/healthlake/src/main/resources/codegen-resources/service-2.json @@ -546,7 +546,7 @@ "members":{ "ImportJobProperties":{ 
"shape":"ImportJobProperties", - "documentation":"

    The properties of the Import job request, including the ID, ARN, name, and the status of the job.

    " + "documentation":"

    The properties of the Import job request, including the ID, ARN, name, status of the job, and the progress report of the job.

    " } } }, @@ -613,6 +613,8 @@ "type":"string", "enum":["R4"] }, + "GenericDouble":{"type":"double"}, + "GenericLong":{"type":"long"}, "IamRoleArn":{ "type":"string", "max":2048, @@ -681,6 +683,10 @@ "documentation":"

    The input data configuration that was supplied when the Import job was created.

    " }, "JobOutputDataConfig":{"shape":"OutputDataConfig"}, + "JobProgressReport":{ + "shape":"JobProgressReport", + "documentation":"

    Displays the progress of the import job, including total resources scanned, total resources ingested, and total size of data ingested.

    " + }, "DataAccessRoleArn":{ "shape":"IamRoleArn", "documentation":"

    The Amazon Resource Name (ARN) that gives AWS HealthLake access to your input data.

    " @@ -690,7 +696,7 @@ "documentation":"

    An explanation of any errors that may have occurred during the FHIR import job.

    " } }, - "documentation":"

    Displays the properties of the import job, including the ID, Arn, Name, and the status of the data store.

    " + "documentation":"

    Displays the properties of the import job, including the ID, Arn, Name, the status of the job, and the progress report of the job.

    " }, "ImportJobPropertiesList":{ "type":"list", @@ -728,6 +734,44 @@ "min":1, "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-%@]*)$" }, + "JobProgressReport":{ + "type":"structure", + "members":{ + "TotalNumberOfScannedFiles":{ + "shape":"GenericLong", + "documentation":"

    The number of files scanned from input S3 bucket.

    " + }, + "TotalSizeOfScannedFilesInMB":{ + "shape":"GenericDouble", + "documentation":"

    The size (in MB) of the files scanned from the input S3 bucket.

    " + }, + "TotalNumberOfImportedFiles":{ + "shape":"GenericLong", + "documentation":"

    The number of files imported so far.

    " + }, + "TotalNumberOfResourcesScanned":{ + "shape":"GenericLong", + "documentation":"

    The number of resources scanned from the input S3 bucket.

    " + }, + "TotalNumberOfResourcesImported":{ + "shape":"GenericLong", + "documentation":"

    The number of resources imported so far.

    " + }, + "TotalNumberOfResourcesWithCustomerError":{ + "shape":"GenericLong", + "documentation":"

    The number of resources that failed due to customer error.

    " + }, + "TotalNumberOfFilesReadWithCustomerError":{ + "shape":"GenericLong", + "documentation":"

    The number of files that failed to be read from the input S3 bucket due to customer error.

    " + }, + "Throughput":{ + "shape":"GenericDouble", + "documentation":"

    The throughput (in MB/sec) of the import job.

    " + } + }, + "documentation":"

    The progress report of an import job.

    " + }, "JobStatus":{ "type":"string", "enum":[ @@ -882,7 +926,7 @@ "members":{ "ImportJobPropertiesList":{ "shape":"ImportJobPropertiesList", - "documentation":"

    The properties of a listed FHIR import jobs, including the ID, ARN, name, and the status of the job.

    " + "documentation":"

    The properties of a listed FHIR import jobs, including the ID, ARN, name, the status of the job, and the progress report of the job.

    " }, "NextToken":{ "shape":"NextToken", diff --git a/services/honeycode/pom.xml b/services/honeycode/pom.xml index 87d329bc5748..0668e433d711 100644 --- a/services/honeycode/pom.xml +++ b/services/honeycode/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT honeycode AWS Java SDK :: Services :: Honeycode diff --git a/services/honeycode/src/main/resources/codegen-resources/customization.config b/services/honeycode/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/honeycode/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/iam/pom.xml b/services/iam/pom.xml index 84787d6fcb38..cf19e072e982 100644 --- a/services/iam/pom.xml +++ b/services/iam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT iam AWS Java SDK :: Services :: AWS IAM diff --git a/services/iam/src/main/resources/codegen-resources/customization.config b/services/iam/src/main/resources/codegen-resources/customization.config index f73e03152d6c..6216df814396 100644 --- a/services/iam/src/main/resources/codegen-resources/customization.config +++ b/services/iam/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,6 @@ { "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true, "verifiedSimpleMethods": [ "createAccessKey", "deleteAccountPasswordPolicy", diff --git a/services/identitystore/pom.xml b/services/identitystore/pom.xml index 62a7b39fecee..2d02dd9078f8 100644 --- a/services/identitystore/pom.xml +++ b/services/identitystore/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT identitystore AWS Java SDK :: Services :: Identitystore diff --git a/services/imagebuilder/pom.xml b/services/imagebuilder/pom.xml index a9aa39dc90fc..64dd8ee57425 100644 --- a/services/imagebuilder/pom.xml +++ 
b/services/imagebuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT imagebuilder AWS Java SDK :: Services :: Imagebuilder diff --git a/services/imagebuilder/src/main/resources/codegen-resources/customization.config b/services/imagebuilder/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/imagebuilder/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/inspector/pom.xml b/services/inspector/pom.xml index 316018ff2542..7ccb0d92fe49 100644 --- a/services/inspector/pom.xml +++ b/services/inspector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT inspector AWS Java SDK :: Services :: Amazon Inspector Service diff --git a/services/inspector/src/main/resources/codegen-resources/customization.config b/services/inspector/src/main/resources/codegen-resources/customization.config index 9be5e5c24056..1e30b7396300 100644 --- a/services/inspector/src/main/resources/codegen-resources/customization.config +++ b/services/inspector/src/main/resources/codegen-resources/customization.config @@ -1,58 +1,59 @@ { - "verifiedSimpleMethods": [ - "describeCrossAccountAccessRole", - "listAssessmentRuns", - "listAssessmentTargets", - "listAssessmentTemplates", - "listEventSubscriptions", - "listFindings", - "listRulesPackages" - ], - "shapeModifiers": { - "AccessDeniedException": { - "modify": [ - { - "errorCode": { - "emitPropertyName": "inspectorErrorCode" - } + "verifiedSimpleMethods": [ + "describeCrossAccountAccessRole", + "listAssessmentRuns", + "listAssessmentTargets", + "listAssessmentTemplates", + "listEventSubscriptions", + "listFindings", + "listRulesPackages" + ], + "shapeModifiers": { + "AccessDeniedException": { + "modify": [ + { + "errorCode": { + "emitPropertyName": "inspectorErrorCode" + } + } + ] + }, + 
"InvalidCrossAccountRoleException": { + "modify": [ + { + "errorCode": { + "emitPropertyName": "inspectorErrorCode" + } + } + ] + }, + "InvalidInputException": { + "modify": [ + { + "errorCode": { + "emitPropertyName": "inspectorErrorCode" + } + } + ] + }, + "LimitExceededException": { + "modify": [ + { + "errorCode": { + "emitPropertyName": "inspectorErrorCode" + } + } + ] + }, + "NoSuchEntityException": { + "modify": [ + { + "errorCode": { + "emitPropertyName": "inspectorErrorCode" + } + } + ] } - ] }, - "InvalidCrossAccountRoleException": { - "modify": [ - { - "errorCode": { - "emitPropertyName": "inspectorErrorCode" - } - } - ] - }, - "InvalidInputException": { - "modify": [ - { - "errorCode": { - "emitPropertyName": "inspectorErrorCode" - } - } - ] - }, - "LimitExceededException": { - "modify": [ - { - "errorCode": { - "emitPropertyName": "inspectorErrorCode" - } - } - ] - }, - "NoSuchEntityException": { - "modify": [ - { - "errorCode": { - "emitPropertyName": "inspectorErrorCode" - } - } - ] - } - } + "useSraAuth": true } diff --git a/services/inspector2/pom.xml b/services/inspector2/pom.xml index 607752019baf..c145c5a09917 100644 --- a/services/inspector2/pom.xml +++ b/services/inspector2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT inspector2 AWS Java SDK :: Services :: Inspector2 diff --git a/services/inspector2/src/main/resources/codegen-resources/customization.config b/services/inspector2/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/inspector2/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/inspectorscan/pom.xml b/services/inspectorscan/pom.xml index 76511f735d67..406d4b1ae173 100644 --- a/services/inspectorscan/pom.xml +++ b/services/inspectorscan/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 
2.24.10-SNAPSHOT inspectorscan AWS Java SDK :: Services :: Inspector Scan diff --git a/services/inspectorscan/src/main/resources/codegen-resources/customization.config b/services/inspectorscan/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/inspectorscan/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/internetmonitor/pom.xml b/services/internetmonitor/pom.xml index 12cdfe08c790..8eb180859ea7 100644 --- a/services/internetmonitor/pom.xml +++ b/services/internetmonitor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT internetmonitor AWS Java SDK :: Services :: Internet Monitor diff --git a/services/internetmonitor/src/main/resources/codegen-resources/service-2.json b/services/internetmonitor/src/main/resources/codegen-resources/service-2.json index e9b36550b4d4..a9c688187e2e 100644 --- a/services/internetmonitor/src/main/resources/codegen-resources/service-2.json +++ b/services/internetmonitor/src/main/resources/codegen-resources/service-2.json @@ -843,6 +843,10 @@ "InternetHealth":{ "shape":"InternetHealth", "documentation":"

    The calculated health at a specific location.

    " + }, + "Ipv4Prefixes":{ + "shape":"Ipv4PrefixList", + "documentation":"

    The IPv4 prefixes at the client location that was impacted by the health event.

    " } }, "documentation":"

    Information about a location impacted by a health event in Amazon CloudWatch Internet Monitor.

    Geographic regions are hierarchically categorized into country, subdivision, metro and city geographic granularities. The geographic region is identified based on the IP address used at the client locations.

    " @@ -897,6 +901,10 @@ }, "documentation":"

    Publish internet measurements to an Amazon S3 bucket in addition to CloudWatch Logs.

    " }, + "Ipv4PrefixList":{ + "type":"list", + "member":{"shape":"String"} + }, "LimitExceededException":{ "type":"structure", "members":{ @@ -1036,7 +1044,7 @@ }, "MinTrafficImpact":{ "shape":"Percentage", - "documentation":"

    The minimum percentage of overall traffic for an application that must be impacted by an issue before Internet Monitor creates an event when a threshold is crossed for a local health score.

    If you don't set a minimum traffic impact threshold, the default value is 0.01%.

    " + "documentation":"

    The minimum percentage of overall traffic for an application that must be impacted by an issue before Internet Monitor creates an event when a threshold is crossed for a local health score.

    If you don't set a minimum traffic impact threshold, the default value is 0.1%.

    " } }, "documentation":"

    A complex type with the configuration information that determines the threshold and other conditions for when Internet Monitor creates a health event for a local performance or availability issue, when scores cross a threshold for one or more city-networks.

    Defines the percentages, for performance scores or availability scores, that are the local thresholds for when Amazon CloudWatch Internet Monitor creates a health event. Also defines whether a local threshold is enabled or disabled, and the minimum percentage of overall traffic that must be impacted by an issue before Internet Monitor creates an event when a threshold is crossed for a local health score.

    If you don't set a local health event threshold, the default value is 60%.

    For more information, see Change health event thresholds in the Internet Monitor section of the CloudWatch User Guide.

    " @@ -1356,7 +1364,7 @@ }, "QueryType":{ "shape":"QueryType", - "documentation":"

    The type of query to run. The following are the three types of queries that you can run using the Internet Monitor query interface:

    • MEASUREMENTS: TBD definition

    • TOP_LOCATIONS: TBD definition

    • TOP_LOCATION_DETAILS: TBD definition

    For lists of the fields returned with each query type and more information about how each type of query is performed, see Using the Amazon CloudWatch Internet Monitor query interface in the Amazon CloudWatch Internet Monitor User Guide.

    " + "documentation":"

    The type of query to run. The following are the three types of queries that you can run using the Internet Monitor query interface:

    • MEASUREMENTS: Provides availability score, performance score, total traffic, and round-trip times, at 5 minute intervals.

    • TOP_LOCATIONS: Provides availability score, performance score, total traffic, and time to first byte (TTFB) information, for the top location and ASN combinations that you're monitoring, by traffic volume.

    • TOP_LOCATION_DETAILS: Provides TTFB for Amazon CloudFront, your current configuration, and the best performing EC2 configuration, at 1 hour intervals.

    For lists of the fields returned with each query type and more information about how each type of query is performed, see Using the Amazon CloudWatch Internet Monitor query interface in the Amazon CloudWatch Internet Monitor User Guide.

    " }, "FilterParameters":{ "shape":"FilterParameters", diff --git a/services/iot/pom.xml b/services/iot/pom.xml index cbe5dddfe692..9c8767cca174 100644 --- a/services/iot/pom.xml +++ b/services/iot/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT iot AWS Java SDK :: Services :: AWS IoT diff --git a/services/iot/src/main/resources/codegen-resources/customization.config b/services/iot/src/main/resources/codegen-resources/customization.config index ac445a5bb430..346279451f9f 100644 --- a/services/iot/src/main/resources/codegen-resources/customization.config +++ b/services/iot/src/main/resources/codegen-resources/customization.config @@ -55,5 +55,6 @@ "union": true } }, - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/iot/src/main/resources/codegen-resources/service-2.json b/services/iot/src/main/resources/codegen-resources/service-2.json index 030dc586dc0c..9a6d1ee3b810 100644 --- a/services/iot/src/main/resources/codegen-resources/service-2.json +++ b/services/iot/src/main/resources/codegen-resources/service-2.json @@ -1714,7 +1714,7 @@ {"shape":"UnauthorizedException"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Returns a unique endpoint specific to the Amazon Web Services account making the call.

    Requires permission to access the DescribeEndpoint action.

    " + "documentation":"

    Returns or creates a unique endpoint specific to the Amazon Web Services account making the call.

    The first time DescribeEndpoint is called, an endpoint is created. All subsequent calls to DescribeEndpoint return the same endpoint.

    Requires permission to access the DescribeEndpoint action.

    " }, "DescribeEventConfigurations":{ "name":"DescribeEventConfigurations", @@ -2382,7 +2382,7 @@ {"shape":"InternalFailureException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    Gets a registration code used to register a CA certificate with IoT.

    Requires permission to access the GetRegistrationCode action.

    " + "documentation":"

    Gets a registration code used to register a CA certificate with IoT.

    IoT will create a registration code as part of this API call if the registration code doesn't exist or has been deleted. If you already have a registration code, this API call will return the same registration code.

    Requires permission to access the GetRegistrationCode action.

    " }, "GetStatistics":{ "name":"GetStatistics", @@ -7011,6 +7011,10 @@ "tlsConfig":{ "shape":"TlsConfig", "documentation":"

    An object that specifies the TLS configuration for a domain.

    " + }, + "serverCertificateConfig":{ + "shape":"ServerCertificateConfig", + "documentation":"

    The server certificate configuration.

    " } } }, @@ -9486,6 +9490,10 @@ "tlsConfig":{ "shape":"TlsConfig", "documentation":"

    An object that specifies the TLS configuration for a domain.

    " + }, + "serverCertificateConfig":{ + "shape":"ServerCertificateConfig", + "documentation":"

    The server certificate configuration.

    " } } }, @@ -10910,6 +10918,7 @@ }, "documentation":"

    Parameters used when defining a mitigation action that enable Amazon Web Services IoT Core logging.

    " }, + "EnableOCSPCheck":{"type":"boolean"}, "EnableTopicRuleRequest":{ "type":"structure", "required":["ruleName"], @@ -17621,6 +17630,16 @@ "max":1, "min":0 }, + "ServerCertificateConfig":{ + "type":"structure", + "members":{ + "enableOCSPCheck":{ + "shape":"EnableOCSPCheck", + "documentation":"

    A Boolean value that indicates whether Online Certificate Status Protocol (OCSP) server certificate check is enabled or not.

    For more information, see Configuring OCSP server-certificate stapling in domain configuration from Amazon Web Services IoT Core Developer Guide.

    " + } + }, + "documentation":"

    The server certificate configuration.

    " + }, "ServerCertificateStatus":{ "type":"string", "enum":[ @@ -19819,6 +19838,10 @@ "tlsConfig":{ "shape":"TlsConfig", "documentation":"

    An object that specifies the TLS configuration for a domain.

    " + }, + "serverCertificateConfig":{ + "shape":"ServerCertificateConfig", + "documentation":"

    The server certificate configuration.

    " } } }, diff --git a/services/iot1clickdevices/pom.xml b/services/iot1clickdevices/pom.xml index 91c57d13c565..cb081ffea31d 100644 --- a/services/iot1clickdevices/pom.xml +++ b/services/iot1clickdevices/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT iot1clickdevices AWS Java SDK :: Services :: IoT 1Click Devices Service diff --git a/services/iot1clickdevices/src/main/resources/codegen-resources/customization.config b/services/iot1clickdevices/src/main/resources/codegen-resources/customization.config index b947f5dbc959..920374af8241 100644 --- a/services/iot1clickdevices/src/main/resources/codegen-resources/customization.config +++ b/services/iot1clickdevices/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,6 @@ { - "excludedSimpleMethods" : [ + "excludedSimpleMethods": [ "listDevices" - ] + ], + "useSraAuth": true } diff --git a/services/iot1clickprojects/pom.xml b/services/iot1clickprojects/pom.xml index 0ee591ab5b1c..f05f8712ff3d 100644 --- a/services/iot1clickprojects/pom.xml +++ b/services/iot1clickprojects/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT iot1clickprojects AWS Java SDK :: Services :: IoT 1Click Projects diff --git a/services/iotanalytics/pom.xml b/services/iotanalytics/pom.xml index 410a9a883474..d0cb784bb81c 100644 --- a/services/iotanalytics/pom.xml +++ b/services/iotanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT iotanalytics AWS Java SDK :: Services :: IoTAnalytics diff --git a/services/iotanalytics/src/main/resources/codegen-resources/customization.config b/services/iotanalytics/src/main/resources/codegen-resources/customization.config index ba5ea64efd7f..b2a63c8d17a4 100644 --- a/services/iotanalytics/src/main/resources/codegen-resources/customization.config +++ b/services/iotanalytics/src/main/resources/codegen-resources/customization.config @@ -1,16 +1,17 @@ { 
- "verifiedSimpleMethods": [ - "listChannels", - "listDatasets", - "listDatastores", - "listPipelines" - ], - "excludedSimpleMethods": [ - "describeLoggingOptions" - ], - "shapeModifiers": { - "DatastoreStorage": { - "union": true - } - } + "verifiedSimpleMethods": [ + "listChannels", + "listDatasets", + "listDatastores", + "listPipelines" + ], + "excludedSimpleMethods": [ + "describeLoggingOptions" + ], + "shapeModifiers": { + "DatastoreStorage": { + "union": true + } + }, + "useSraAuth": true } diff --git a/services/iotdataplane/pom.xml b/services/iotdataplane/pom.xml index bbda0a5df0c6..c631e17abf99 100644 --- a/services/iotdataplane/pom.xml +++ b/services/iotdataplane/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT iotdataplane AWS Java SDK :: Services :: AWS IoT Data Plane diff --git a/services/iotdataplane/src/main/resources/codegen-resources/customization.config b/services/iotdataplane/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/iotdataplane/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/iotdeviceadvisor/pom.xml b/services/iotdeviceadvisor/pom.xml index 6ef581ed7c84..2625949ab829 100644 --- a/services/iotdeviceadvisor/pom.xml +++ b/services/iotdeviceadvisor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT iotdeviceadvisor AWS Java SDK :: Services :: Iot Device Advisor diff --git a/services/iotdeviceadvisor/src/main/resources/codegen-resources/customization.config b/services/iotdeviceadvisor/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/iotdeviceadvisor/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/iotevents/pom.xml 
b/services/iotevents/pom.xml index 07af50820b1a..62859b66058d 100644 --- a/services/iotevents/pom.xml +++ b/services/iotevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT iotevents AWS Java SDK :: Services :: IoT Events diff --git a/services/iotevents/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/iotevents/src/main/resources/codegen-resources/endpoint-rule-set.json index 678f9935ed15..62cd33544a10 100644 --- a/services/iotevents/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/iotevents/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", 
"rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/services/iotevents/src/main/resources/codegen-resources/service-2.json b/services/iotevents/src/main/resources/codegen-resources/service-2.json index 419c705db8c3..ef139def34e4 100644 --- a/services/iotevents/src/main/resources/codegen-resources/service-2.json +++ b/services/iotevents/src/main/resources/codegen-resources/service-2.json @@ -590,7 +590,7 @@ "AlarmModelArn":{"type":"string"}, "AlarmModelDescription":{ "type":"string", - "max":128 + "max":1024 }, "AlarmModelName":{ "type":"string", @@ -1356,7 +1356,7 @@ }, "DetectorModelDescription":{ "type":"string", - "max":128 + "max":1024 }, "DetectorModelName":{ "type":"string", @@ -1738,7 +1738,7 @@ }, "InputDescription":{ "type":"string", - "max":128 + "max":1024 }, "InputIdentifier":{ "type":"structure", diff --git a/services/ioteventsdata/pom.xml b/services/ioteventsdata/pom.xml index 016d71b2c81b..f75e584883d0 100644 --- a/services/ioteventsdata/pom.xml +++ b/services/ioteventsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ioteventsdata AWS Java SDK :: Services :: IoT Events Data diff --git a/services/ioteventsdata/src/main/resources/codegen-resources/customization.config b/services/ioteventsdata/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/ioteventsdata/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/iotfleethub/pom.xml b/services/iotfleethub/pom.xml index 6131e64a1b85..186ca8482a1e 
100644 --- a/services/iotfleethub/pom.xml +++ b/services/iotfleethub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT iotfleethub AWS Java SDK :: Services :: Io T Fleet Hub diff --git a/services/iotfleethub/src/main/resources/codegen-resources/customization.config b/services/iotfleethub/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/iotfleethub/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/iotfleetwise/pom.xml b/services/iotfleetwise/pom.xml index 24e712e1cd9a..ab54a1809db7 100644 --- a/services/iotfleetwise/pom.xml +++ b/services/iotfleetwise/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT iotfleetwise AWS Java SDK :: Services :: Io T Fleet Wise diff --git a/services/iotfleetwise/src/main/resources/codegen-resources/customization.config b/services/iotfleetwise/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/iotfleetwise/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/iotjobsdataplane/pom.xml b/services/iotjobsdataplane/pom.xml index a9dda422cdd7..8af5f4ff90cd 100644 --- a/services/iotjobsdataplane/pom.xml +++ b/services/iotjobsdataplane/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT iotjobsdataplane AWS Java SDK :: Services :: IoT Jobs Data Plane diff --git a/services/iotroborunner/pom.xml b/services/iotroborunner/pom.xml index 091e00f1c01a..8b31f450fe73 100644 --- a/services/iotroborunner/pom.xml +++ b/services/iotroborunner/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT iotroborunner AWS Java SDK :: Services :: IoT Robo Runner diff --git 
a/services/iotroborunner/src/main/resources/codegen-resources/customization.config b/services/iotroborunner/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/iotroborunner/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/iotsecuretunneling/pom.xml b/services/iotsecuretunneling/pom.xml index fbb3b9d961b9..9110879ea019 100644 --- a/services/iotsecuretunneling/pom.xml +++ b/services/iotsecuretunneling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT iotsecuretunneling AWS Java SDK :: Services :: IoTSecureTunneling diff --git a/services/iotsecuretunneling/src/main/resources/codegen-resources/customization.config b/services/iotsecuretunneling/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/iotsecuretunneling/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/iotsitewise/pom.xml b/services/iotsitewise/pom.xml index c2ea4ddabb4a..b1d70e9f7054 100644 --- a/services/iotsitewise/pom.xml +++ b/services/iotsitewise/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT iotsitewise AWS Java SDK :: Services :: Io T Site Wise diff --git a/services/iotthingsgraph/pom.xml b/services/iotthingsgraph/pom.xml index d0e66597c468..f73b9489ee77 100644 --- a/services/iotthingsgraph/pom.xml +++ b/services/iotthingsgraph/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT iotthingsgraph AWS Java SDK :: Services :: IoTThingsGraph diff --git a/services/iotthingsgraph/src/main/resources/codegen-resources/customization.config b/services/iotthingsgraph/src/main/resources/codegen-resources/customization.config new file mode 100644 index 
000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/iotthingsgraph/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/iottwinmaker/pom.xml b/services/iottwinmaker/pom.xml index bedb333710dc..79fdd7d9ba87 100644 --- a/services/iottwinmaker/pom.xml +++ b/services/iottwinmaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT iottwinmaker AWS Java SDK :: Services :: Io T Twin Maker diff --git a/services/iottwinmaker/src/main/resources/codegen-resources/customization.config b/services/iottwinmaker/src/main/resources/codegen-resources/customization.config index 0e729acd0371..47a49338406e 100644 --- a/services/iottwinmaker/src/main/resources/codegen-resources/customization.config +++ b/services/iottwinmaker/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "generateEndpointClientTests": true + "generateEndpointClientTests": true, + "useSraAuth": true } diff --git a/services/iotwireless/pom.xml b/services/iotwireless/pom.xml index bad2a3a6325a..d71b29749cc7 100644 --- a/services/iotwireless/pom.xml +++ b/services/iotwireless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT iotwireless AWS Java SDK :: Services :: IoT Wireless diff --git a/services/iotwireless/src/main/resources/codegen-resources/customization.config b/services/iotwireless/src/main/resources/codegen-resources/customization.config index 21b15d9542cb..29c4244e1e2d 100644 --- a/services/iotwireless/src/main/resources/codegen-resources/customization.config +++ b/services/iotwireless/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "underscoresInNameBehavior": "ALLOW" + "underscoresInNameBehavior": "ALLOW", + "useSraAuth": true } diff --git a/services/ivs/pom.xml b/services/ivs/pom.xml index 373ed309c2de..45a7a7827346 100644 --- a/services/ivs/pom.xml +++ b/services/ivs/pom.xml @@ -21,7 
+21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ivs AWS Java SDK :: Services :: Ivs diff --git a/services/ivs/src/main/resources/codegen-resources/service-2.json b/services/ivs/src/main/resources/codegen-resources/service-2.json index c1a7604dda70..8787c389406a 100644 --- a/services/ivs/src/main/resources/codegen-resources/service-2.json +++ b/services/ivs/src/main/resources/codegen-resources/service-2.json @@ -798,7 +798,7 @@ }, "latencyMode":{ "shape":"ChannelLatencyMode", - "documentation":"

    Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers. Default: LOW. (Note: In the Amazon IVS console, LOW and NORMAL correspond to Ultra-low and Standard, respectively.)

    " + "documentation":"

    Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers. Default: LOW.

    " }, "name":{ "shape":"ChannelName", @@ -904,7 +904,7 @@ }, "latencyMode":{ "shape":"ChannelLatencyMode", - "documentation":"

    Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers. Default: LOW. (Note: In the Amazon IVS console, LOW and NORMAL correspond to Ultra-low and Standard, respectively.)

    " + "documentation":"

    Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers. Default: LOW.

    " }, "name":{ "shape":"ChannelName", @@ -974,7 +974,7 @@ }, "latencyMode":{ "shape":"ChannelLatencyMode", - "documentation":"

    Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers. (Note: In the Amazon IVS console, LOW and NORMAL correspond to Ultra-low and Standard, respectively.) Default: LOW.

    " + "documentation":"

    Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers. Default: LOW.

    " }, "name":{ "shape":"ChannelName", @@ -2524,7 +2524,7 @@ }, "latencyMode":{ "shape":"ChannelLatencyMode", - "documentation":"

    Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers. (Note: In the Amazon IVS console, LOW and NORMAL correspond to Ultra-low and Standard, respectively.)

    " + "documentation":"

    Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers.

    " }, "name":{ "shape":"ChannelName", diff --git a/services/ivschat/pom.xml b/services/ivschat/pom.xml index 1303baaa94c3..092b9d0ab44a 100644 --- a/services/ivschat/pom.xml +++ b/services/ivschat/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ivschat AWS Java SDK :: Services :: Ivschat diff --git a/services/ivsrealtime/pom.xml b/services/ivsrealtime/pom.xml index 9ad3ba6901ac..05f96ea69af4 100644 --- a/services/ivsrealtime/pom.xml +++ b/services/ivsrealtime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ivsrealtime AWS Java SDK :: Services :: IVS Real Time diff --git a/services/kafka/pom.xml b/services/kafka/pom.xml index e7b359991d5d..25a765412463 100644 --- a/services/kafka/pom.xml +++ b/services/kafka/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT kafka AWS Java SDK :: Services :: Kafka diff --git a/services/kafkaconnect/pom.xml b/services/kafkaconnect/pom.xml index 7f3b7c24e37b..eef1932f66e6 100644 --- a/services/kafkaconnect/pom.xml +++ b/services/kafkaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT kafkaconnect AWS Java SDK :: Services :: Kafka Connect diff --git a/services/kendra/pom.xml b/services/kendra/pom.xml index e6e4b5f53a1d..3d403ee4a7ff 100644 --- a/services/kendra/pom.xml +++ b/services/kendra/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT kendra AWS Java SDK :: Services :: Kendra diff --git a/services/kendraranking/pom.xml b/services/kendraranking/pom.xml index 83a4d4189b21..dd81dc8fc503 100644 --- a/services/kendraranking/pom.xml +++ b/services/kendraranking/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT kendraranking AWS Java SDK :: Services :: Kendra Ranking diff --git 
a/services/kendraranking/src/main/resources/codegen-resources/customization.config b/services/kendraranking/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/kendraranking/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/keyspaces/pom.xml b/services/keyspaces/pom.xml index bf48b471bd4c..5c6dcefaf6b3 100644 --- a/services/keyspaces/pom.xml +++ b/services/keyspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT keyspaces AWS Java SDK :: Services :: Keyspaces diff --git a/services/keyspaces/src/main/resources/codegen-resources/service-2.json b/services/keyspaces/src/main/resources/codegen-resources/service-2.json index d5d1903d7b03..087dfd7c3ce7 100644 --- a/services/keyspaces/src/main/resources/codegen-resources/service-2.json +++ b/services/keyspaces/src/main/resources/codegen-resources/service-2.json @@ -133,7 +133,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    Returns auto scaling related settings of the specified table in JSON format. If the table is a multi-Region table, the Amazon Web Services Region specific auto scaling settings of the table are included.

    Amazon Keyspaces auto scaling helps you provision throughput capacity for variable workloads efficiently by increasing and decreasing your table's read and write capacity automatically in response to application traffic. For more information, see Managing throughput capacity automatically with Amazon Keyspaces auto scaling in the Amazon Keyspaces Developer Guide.

    " + "documentation":"

    Returns auto scaling related settings of the specified table in JSON format. If the table is a multi-Region table, the Amazon Web Services Region specific auto scaling settings of the table are included.

    Amazon Keyspaces auto scaling helps you provision throughput capacity for variable workloads efficiently by increasing and decreasing your table's read and write capacity automatically in response to application traffic. For more information, see Managing throughput capacity automatically with Amazon Keyspaces auto scaling in the Amazon Keyspaces Developer Guide.

    GetTableAutoScalingSettings can't be used as an action in an IAM policy.

    To define permissions for GetTableAutoScalingSettings, you must allow the following two actions in the IAM policy statement's Action element:

    • application-autoscaling:DescribeScalableTargets

    • application-autoscaling:DescribeScalingPolicies

    " }, "ListKeyspaces":{ "name":"ListKeyspaces", @@ -306,7 +306,7 @@ "documentation":"

    Amazon Keyspaces supports the target tracking auto scaling policy. With this policy, Amazon Keyspaces auto scaling ensures that the table's ratio of consumed to provisioned capacity stays at or near the target value that you specify. You define the target value as a percentage between 20 and 90.

    " } }, - "documentation":"

    The optional auto scaling settings for a table with provisioned throughput capacity.

    To turn on auto scaling for a table in throughputMode:PROVISIONED, you must specify the following parameters.

    Configure the minimum and maximum units for write and read capacity. The auto scaling policy ensures that capacity never goes below the minimum or above the maximum range.

    • minimumUnits: The minimum level of throughput the table should always be ready to support. The value must be between 1 and the max throughput per second quota for your account (40,000 by default).

    • maximumUnits: The maximum level of throughput the table should always be ready to support. The value must be between 1 and the max throughput per second quota for your account (40,000 by default).

    • scalingPolicy: Amazon Keyspaces supports the target tracking scaling policy. The auto scaling target is the provisioned read and write capacity of the table.

      • targetTrackingScalingPolicyConfiguration: To define the target tracking policy, you must define the target value.

        • targetValue: The target utilization rate of the table. Amazon Keyspaces auto scaling ensures that the ratio of consumed capacity to provisioned capacity stays at or near this value. You define targetValue as a percentage. A double between 20 and 90. (Required)

        • disableScaleIn: A boolean that specifies if scale-in is disabled or enabled for the table. This parameter is disabled by default. To turn on scale-in, set the boolean value to FALSE. This means that capacity for a table can be automatically scaled down on your behalf. (Optional)

        • scaleInCooldown: A cooldown period in seconds between scaling activities that lets the table stabilize before another scale in activity starts. If no value is provided, the default is 0. (Optional)

        • scaleOutCooldown: A cooldown period in seconds between scaling activities that lets the table stabilize before another scale out activity starts. If no value is provided, the default is 0. (Optional)

    For more information, see Managing throughput capacity automatically with Amazon Keyspaces auto scaling in the Amazon Keyspaces Developer Guide.

    " + "documentation":"

    The optional auto scaling settings for a table with provisioned throughput capacity.

    To turn on auto scaling for a table in throughputMode:PROVISIONED, you must specify the following parameters.

    Configure the minimum and maximum capacity units. The auto scaling policy ensures that capacity never goes below the minimum or above the maximum range.

    • minimumUnits: The minimum level of throughput the table should always be ready to support. The value must be between 1 and the max throughput per second quota for your account (40,000 by default).

    • maximumUnits: The maximum level of throughput the table should always be ready to support. The value must be between 1 and the max throughput per second quota for your account (40,000 by default).

    • scalingPolicy: Amazon Keyspaces supports the target tracking scaling policy. The auto scaling target is the provisioned capacity of the table.

      • targetTrackingScalingPolicyConfiguration: To define the target tracking policy, you must define the target value.

        • targetValue: The target utilization rate of the table. Amazon Keyspaces auto scaling ensures that the ratio of consumed capacity to provisioned capacity stays at or near this value. You define targetValue as a percentage. A double between 20 and 90. (Required)

        • disableScaleIn: A boolean that specifies if scale-in is disabled or enabled for the table. This parameter is disabled by default. To turn on scale-in, set the boolean value to FALSE. This means that capacity for a table can be automatically scaled down on your behalf. (Optional)

        • scaleInCooldown: A cooldown period in seconds between scaling activities that lets the table stabilize before another scale in activity starts. If no value is provided, the default is 0. (Optional)

        • scaleOutCooldown: A cooldown period in seconds between scaling activities that lets the table stabilize before another scale out activity starts. If no value is provided, the default is 0. (Optional)

    For more information, see Managing throughput capacity automatically with Amazon Keyspaces auto scaling in the Amazon Keyspaces Developer Guide.

    " }, "AutoScalingSpecification":{ "type":"structure", @@ -320,7 +320,7 @@ "documentation":"

    The auto scaling settings for the table's read capacity.

    " } }, - "documentation":"

    The optional auto scaling settings for read and write capacity of a table in provisioned capacity mode.

    " + "documentation":"

    The optional auto scaling capacity settings for a table in provisioned capacity mode.

    " }, "BooleanObject":{"type":"boolean"}, "CapacitySpecification":{ diff --git a/services/kinesis/pom.xml b/services/kinesis/pom.xml index 3619241eac06..5299e0fe7e1a 100644 --- a/services/kinesis/pom.xml +++ b/services/kinesis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT kinesis AWS Java SDK :: Services :: Amazon Kinesis diff --git a/services/kinesisanalytics/pom.xml b/services/kinesisanalytics/pom.xml index b2fefc8c6d2f..012e319805de 100644 --- a/services/kinesisanalytics/pom.xml +++ b/services/kinesisanalytics/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT kinesisanalytics AWS Java SDK :: Services :: Amazon Kinesis Analytics diff --git a/services/kinesisanalyticsv2/pom.xml b/services/kinesisanalyticsv2/pom.xml index dac4063c1f63..c4c8f41f8561 100644 --- a/services/kinesisanalyticsv2/pom.xml +++ b/services/kinesisanalyticsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT kinesisanalyticsv2 AWS Java SDK :: Services :: Kinesis Analytics V2 diff --git a/services/kinesisanalyticsv2/src/main/resources/codegen-resources/customization.config b/services/kinesisanalyticsv2/src/main/resources/codegen-resources/customization.config index e5484b499613..55339d099194 100644 --- a/services/kinesisanalyticsv2/src/main/resources/codegen-resources/customization.config +++ b/services/kinesisanalyticsv2/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,6 @@ { - "verifiedSimpleMethods" : [ + "verifiedSimpleMethods": [ "listApplications" - ] + ], + "useSraAuth": true } diff --git a/services/kinesisvideo/pom.xml b/services/kinesisvideo/pom.xml index 135f00ac76e3..a8df0db1ab41 100644 --- a/services/kinesisvideo/pom.xml +++ b/services/kinesisvideo/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 kinesisvideo diff --git 
a/services/kinesisvideo/src/main/resources/codegen-resources/customization.config b/services/kinesisvideo/src/main/resources/codegen-resources/customization.config index e0972ef57fee..3b1171a52063 100644 --- a/services/kinesisvideo/src/main/resources/codegen-resources/customization.config +++ b/services/kinesisvideo/src/main/resources/codegen-resources/customization.config @@ -1,9 +1,10 @@ { - "verifiedSimpleMethods": [ - "listStreams" - ], - "excludedSimpleMethods": [ - "listTagsForStream", - "describeStream" - ] + "verifiedSimpleMethods": [ + "listStreams" + ], + "excludedSimpleMethods": [ + "listTagsForStream", + "describeStream" + ], + "useSraAuth": true } diff --git a/services/kinesisvideo/src/main/resources/codegen-resources/service-2.json b/services/kinesisvideo/src/main/resources/codegen-resources/service-2.json index aa6afa6a3d54..597f5c6d956e 100644 --- a/services/kinesisvideo/src/main/resources/codegen-resources/service-2.json +++ b/services/kinesisvideo/src/main/resources/codegen-resources/service-2.json @@ -1630,7 +1630,7 @@ }, "NextToken":{ "type":"string", - "max":512, + "max":1024, "min":0, "pattern":"[a-zA-Z0-9+/=]*" }, diff --git a/services/kinesisvideoarchivedmedia/pom.xml b/services/kinesisvideoarchivedmedia/pom.xml index 2dd497100a2b..cdc4e190a314 100644 --- a/services/kinesisvideoarchivedmedia/pom.xml +++ b/services/kinesisvideoarchivedmedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT kinesisvideoarchivedmedia AWS Java SDK :: Services :: Kinesis Video Archived Media diff --git a/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/customization.config b/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/customization.config index 232ca942ec4a..184f750769fa 100644 --- a/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/customization.config +++ 
b/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,6 @@ { - "excludedSimpleMethods" : [ + "excludedSimpleMethods": [ "getHLSStreamingSessionURL" - ] + ], + "useSraAuth": true } diff --git a/services/kinesisvideomedia/pom.xml b/services/kinesisvideomedia/pom.xml index 3016cacdd9d8..1a12b1a05448 100644 --- a/services/kinesisvideomedia/pom.xml +++ b/services/kinesisvideomedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT kinesisvideomedia AWS Java SDK :: Services :: Kinesis Video Media diff --git a/services/kinesisvideosignaling/pom.xml b/services/kinesisvideosignaling/pom.xml index bc4f7a78cf16..ca2a1a175b88 100644 --- a/services/kinesisvideosignaling/pom.xml +++ b/services/kinesisvideosignaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT kinesisvideosignaling AWS Java SDK :: Services :: Kinesis Video Signaling diff --git a/services/kinesisvideosignaling/src/main/resources/codegen-resources/customization.config b/services/kinesisvideosignaling/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/kinesisvideosignaling/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/kinesisvideowebrtcstorage/pom.xml b/services/kinesisvideowebrtcstorage/pom.xml index 38a1c182b676..62b8fa9a9e2e 100644 --- a/services/kinesisvideowebrtcstorage/pom.xml +++ b/services/kinesisvideowebrtcstorage/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT kinesisvideowebrtcstorage AWS Java SDK :: Services :: Kinesis Video Web RTC Storage diff --git a/services/kms/pom.xml b/services/kms/pom.xml index 26cea3de3b00..8d8cf19c1beb 100644 --- a/services/kms/pom.xml +++ b/services/kms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 
2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT kms AWS Java SDK :: Services :: AWS KMS diff --git a/services/kms/src/main/resources/codegen-resources/customization.config b/services/kms/src/main/resources/codegen-resources/customization.config index 08a4b4db7d13..3ebde6fbb1de 100644 --- a/services/kms/src/main/resources/codegen-resources/customization.config +++ b/services/kms/src/main/resources/codegen-resources/customization.config @@ -1,10 +1,11 @@ { - "verifiedSimpleMethods": [ - "createKey", - "generateRandom", - "retireGrant", - "describeCustomKeyStores", - "listAliases", - "listKeys" - ] + "verifiedSimpleMethods": [ + "createKey", + "generateRandom", + "retireGrant", + "describeCustomKeyStores", + "listAliases", + "listKeys" + ], + "useSraAuth": true } diff --git a/services/lakeformation/pom.xml b/services/lakeformation/pom.xml index 35508cd39c99..1aee3d8e05bb 100644 --- a/services/lakeformation/pom.xml +++ b/services/lakeformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT lakeformation AWS Java SDK :: Services :: LakeFormation diff --git a/services/lakeformation/src/main/resources/codegen-resources/customization.config b/services/lakeformation/src/main/resources/codegen-resources/customization.config index 0e729acd0371..47a49338406e 100644 --- a/services/lakeformation/src/main/resources/codegen-resources/customization.config +++ b/services/lakeformation/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "generateEndpointClientTests": true + "generateEndpointClientTests": true, + "useSraAuth": true } diff --git a/services/lambda/pom.xml b/services/lambda/pom.xml index 4602bb44c05d..24ef2ee4e685 100644 --- a/services/lambda/pom.xml +++ b/services/lambda/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT lambda AWS Java SDK :: Services :: AWS Lambda diff --git a/services/lambda/src/main/resources/codegen-resources/service-2.json 
b/services/lambda/src/main/resources/codegen-resources/service-2.json index 0e37c74942cd..abbac4f91e67 100644 --- a/services/lambda/src/main/resources/codegen-resources/service-2.json +++ b/services/lambda/src/main/resources/codegen-resources/service-2.json @@ -637,7 +637,7 @@ {"shape":"InvalidRuntimeException"}, {"shape":"ResourceConflictException"} ], - "documentation":"

    For asynchronous function invocation, use Invoke.

    Invokes a function asynchronously.

    ", + "documentation":"

    For asynchronous function invocation, use Invoke.

    Invokes a function asynchronously.

    If you do use the InvokeAsync action, note that it doesn't support the use of X-Ray active tracing. Trace ID is not propagated to the function, even if X-Ray active tracing is turned on.

    ", "deprecated":true }, "InvokeWithResponseStream":{ @@ -1742,7 +1742,7 @@ "members":{ "EventSourceArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the event source.

    • Amazon Kinesis – The ARN of the data stream or a stream consumer.

    • Amazon DynamoDB Streams – The ARN of the stream.

    • Amazon Simple Queue Service – The ARN of the queue.

    • Amazon Managed Streaming for Apache Kafka – The ARN of the cluster.

    • Amazon MQ – The ARN of the broker.

    • Amazon DocumentDB – The ARN of the DocumentDB change stream.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the event source.

    • Amazon Kinesis – The ARN of the data stream or a stream consumer.

    • Amazon DynamoDB Streams – The ARN of the stream.

    • Amazon Simple Queue Service – The ARN of the queue.

    • Amazon Managed Streaming for Apache Kafka – The ARN of the cluster or the ARN of the VPC connection (for cross-account event source mappings).

    • Amazon MQ – The ARN of the broker.

    • Amazon DocumentDB – The ARN of the DocumentDB change stream.

    " }, "FunctionName":{ "shape":"FunctionName", @@ -1778,7 +1778,7 @@ }, "DestinationConfig":{ "shape":"DestinationConfig", - "documentation":"

    (Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard Amazon SNS topic destination for discarded records.

    " + "documentation":"

    (Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.

    " }, "MaximumRecordAgeInSeconds":{ "shape":"MaximumRecordAgeInSeconds", @@ -1928,7 +1928,7 @@ }, "EphemeralStorage":{ "shape":"EphemeralStorage", - "documentation":"

    The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB.

    " + "documentation":"

    The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).

    " }, "SnapStart":{ "shape":"SnapStart", @@ -2405,7 +2405,7 @@ "documentation":"

    The size of the function's /tmp directory.

    " } }, - "documentation":"

    The size of the function's /tmp directory in MB. The default value is 512, but it can be any whole number between 512 and 10,240 MB.

    " + "documentation":"

    The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).

    " }, "EphemeralStorageSize":{ "type":"integer", @@ -2469,7 +2469,7 @@ }, "DestinationConfig":{ "shape":"DestinationConfig", - "documentation":"

    (Kinesis and DynamoDB Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded records.

    " + "documentation":"

    (Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Apache Kafka event sources only) A configuration object that specifies the destination of an event after Lambda processes it.

    " }, "Topics":{ "shape":"Topics", @@ -2792,7 +2792,7 @@ }, "EphemeralStorage":{ "shape":"EphemeralStorage", - "documentation":"

    The size of the function’s /tmp directory in MB. The default value is 512, but it can be any whole number between 512 and 10,240 MB.

    " + "documentation":"

    The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).

    " }, "SnapStart":{ "shape":"SnapStartResponse", @@ -3574,7 +3574,7 @@ }, "ClientContext":{ "shape":"String", - "documentation":"

    Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object.

    ", + "documentation":"

    Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object. Lambda passes the ClientContext object to your function for synchronous invocations only.

    ", "location":"header", "locationName":"X-Amz-Client-Context" }, @@ -4113,7 +4113,7 @@ "members":{ "EventSourceArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the event source.

    • Amazon Kinesis – The ARN of the data stream or a stream consumer.

    • Amazon DynamoDB Streams – The ARN of the stream.

    • Amazon Simple Queue Service – The ARN of the queue.

    • Amazon Managed Streaming for Apache Kafka – The ARN of the cluster.

    • Amazon MQ – The ARN of the broker.

    • Amazon DocumentDB – The ARN of the DocumentDB change stream.

    ", + "documentation":"

    The Amazon Resource Name (ARN) of the event source.

    • Amazon Kinesis – The ARN of the data stream or a stream consumer.

    • Amazon DynamoDB Streams – The ARN of the stream.

    • Amazon Simple Queue Service – The ARN of the queue.

    • Amazon Managed Streaming for Apache Kafka – The ARN of the cluster or the ARN of the VPC connection (for cross-account event source mappings).

    • Amazon MQ – The ARN of the broker.

    • Amazon DocumentDB – The ARN of the DocumentDB change stream.

    ", "location":"querystring", "locationName":"EventSourceArn" }, @@ -4311,7 +4311,7 @@ "members":{ "CompatibleRuntime":{ "shape":"Runtime", - "documentation":"

    A runtime identifier. For example, go1.x.

    The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

    ", + "documentation":"

    A runtime identifier. For example, java21.

    The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

    ", "location":"querystring", "locationName":"CompatibleRuntime" }, @@ -4359,7 +4359,7 @@ "members":{ "CompatibleRuntime":{ "shape":"Runtime", - "documentation":"

    A runtime identifier. For example, go1.x.

    The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

    ", + "documentation":"

    A runtime identifier. For example, java21.

    The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

    ", "location":"querystring", "locationName":"CompatibleRuntime" }, @@ -4525,11 +4525,11 @@ }, "ApplicationLogLevel":{ "shape":"ApplicationLogLevel", - "documentation":"

    Set this property to filter the application logs for your function that Lambda sends to CloudWatch. Lambda only sends application logs at the selected level and lower.

    " + "documentation":"

    Set this property to filter the application logs for your function that Lambda sends to CloudWatch. Lambda only sends application logs at the selected level of detail and lower, where TRACE is the highest level and FATAL is the lowest.

    " }, "SystemLogLevel":{ "shape":"SystemLogLevel", - "documentation":"

    Set this property to filter the system logs for your function that Lambda sends to CloudWatch. Lambda only sends system logs at the selected level and lower.

    " + "documentation":"

    Set this property to filter the system logs for your function that Lambda sends to CloudWatch. Lambda only sends system logs at the selected level of detail and lower, where DEBUG is the highest level and WARN is the lowest.

    " }, "LogGroup":{ "shape":"LogGroup", @@ -4639,7 +4639,7 @@ "members":{ "Destination":{ "shape":"DestinationArn", - "documentation":"

    The Amazon Resource Name (ARN) of the destination resource.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the destination resource.

    To retain records of asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.

    To retain records of failed invocations from Kinesis and DynamoDB event sources, you can configure an Amazon SNS topic or Amazon SQS queue as the destination.

    To retain records of failed invocations from self-managed Kafka or Amazon MSK, you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.

    " } }, "documentation":"

    A destination for events that failed processing.

    " @@ -5275,6 +5275,7 @@ "dotnetcore2.1", "dotnetcore3.1", "dotnet6", + "dotnet8", "nodejs4.3-edge", "go1.x", "ruby2.5", @@ -5830,7 +5831,7 @@ }, "DestinationConfig":{ "shape":"DestinationConfig", - "documentation":"

    (Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard Amazon SNS topic destination for discarded records.

    " + "documentation":"

    (Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.

    " }, "MaximumRecordAgeInSeconds":{ "shape":"MaximumRecordAgeInSeconds", @@ -5990,7 +5991,7 @@ }, "EphemeralStorage":{ "shape":"EphemeralStorage", - "documentation":"

    The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB.

    " + "documentation":"

    The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).

    " }, "SnapStart":{ "shape":"SnapStart", diff --git a/services/launchwizard/pom.xml b/services/launchwizard/pom.xml index a933177820e2..86a6c2668f7e 100644 --- a/services/launchwizard/pom.xml +++ b/services/launchwizard/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT launchwizard AWS Java SDK :: Services :: Launch Wizard diff --git a/services/lexmodelbuilding/pom.xml b/services/lexmodelbuilding/pom.xml index df985c91c8fd..eb34a677ce80 100644 --- a/services/lexmodelbuilding/pom.xml +++ b/services/lexmodelbuilding/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT lexmodelbuilding AWS Java SDK :: Services :: Amazon Lex Model Building diff --git a/services/lexmodelsv2/pom.xml b/services/lexmodelsv2/pom.xml index 972f4ddc712e..b9017e0268ab 100644 --- a/services/lexmodelsv2/pom.xml +++ b/services/lexmodelsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT lexmodelsv2 AWS Java SDK :: Services :: Lex Models V2 diff --git a/services/lexruntime/pom.xml b/services/lexruntime/pom.xml index 78caf0d46036..be1074f9c68f 100644 --- a/services/lexruntime/pom.xml +++ b/services/lexruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT lexruntime AWS Java SDK :: Services :: Amazon Lex Runtime diff --git a/services/lexruntimev2/pom.xml b/services/lexruntimev2/pom.xml index d703dd04093f..990099608c19 100644 --- a/services/lexruntimev2/pom.xml +++ b/services/lexruntimev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT lexruntimev2 AWS Java SDK :: Services :: Lex Runtime V2 diff --git a/services/licensemanager/pom.xml b/services/licensemanager/pom.xml index 2a0721e8a147..ad72cc431c31 100644 --- a/services/licensemanager/pom.xml +++ b/services/licensemanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 
licensemanager AWS Java SDK :: Services :: License Manager diff --git a/services/licensemanager/src/main/resources/codegen-resources/customization.config b/services/licensemanager/src/main/resources/codegen-resources/customization.config index cb1971ffe74b..63b74335ecc1 100644 --- a/services/licensemanager/src/main/resources/codegen-resources/customization.config +++ b/services/licensemanager/src/main/resources/codegen-resources/customization.config @@ -1,11 +1,12 @@ { - "verifiedSimpleMethods" : [ + "verifiedSimpleMethods": [ "getServiceSettings", "listLicenseConfigurations" ], - "excludedSimpleMethods" : [ + "excludedSimpleMethods": [ "getServiceSettings", "listLicenseConfigurations", "listResourceInventory" - ] + ], + "useSraAuth": true } diff --git a/services/licensemanagerlinuxsubscriptions/pom.xml b/services/licensemanagerlinuxsubscriptions/pom.xml index d3fab406fbed..afef929a1c84 100644 --- a/services/licensemanagerlinuxsubscriptions/pom.xml +++ b/services/licensemanagerlinuxsubscriptions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT licensemanagerlinuxsubscriptions AWS Java SDK :: Services :: License Manager Linux Subscriptions diff --git a/services/licensemanagerlinuxsubscriptions/src/main/resources/codegen-resources/customization.config b/services/licensemanagerlinuxsubscriptions/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/licensemanagerlinuxsubscriptions/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/licensemanagerusersubscriptions/pom.xml b/services/licensemanagerusersubscriptions/pom.xml index b9f00e8c1739..3ec63a55b073 100644 --- a/services/licensemanagerusersubscriptions/pom.xml +++ b/services/licensemanagerusersubscriptions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 
licensemanagerusersubscriptions AWS Java SDK :: Services :: License Manager User Subscriptions diff --git a/services/licensemanagerusersubscriptions/src/main/resources/codegen-resources/customization.config b/services/licensemanagerusersubscriptions/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/licensemanagerusersubscriptions/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/lightsail/pom.xml b/services/lightsail/pom.xml index 08d10d0224df..919a7e98efee 100644 --- a/services/lightsail/pom.xml +++ b/services/lightsail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT lightsail AWS Java SDK :: Services :: Amazon Lightsail diff --git a/services/lightsail/src/main/resources/codegen-resources/customization.config b/services/lightsail/src/main/resources/codegen-resources/customization.config index fbd4a2999851..c8b6f27fd0e0 100644 --- a/services/lightsail/src/main/resources/codegen-resources/customization.config +++ b/services/lightsail/src/main/resources/codegen-resources/customization.config @@ -1,27 +1,28 @@ { - "verifiedSimpleMethods": [ - "downloadDefaultKeyPair", - "isVpcPeered", - "peerVpc", - "unpeerVpc", - "getActiveNames", - "getBlueprints", - "getBundles", - "getCloudFormationStackRecords", - "getDiskSnapshots", - "getDisks", - "getDomains", - "getExportSnapshotRecords", - "getInstanceSnapshots", - "getInstances", - "getKeyPairs", - "getLoadBalancers", - "getOperations", - "getRegions", - "getRelationalDatabaseBlueprints", - "getRelationalDatabaseBundles", - "getRelationalDatabaseSnapshots", - "getRelationalDatabases", - "getStaticIps" - ] + "verifiedSimpleMethods": [ + "downloadDefaultKeyPair", + "isVpcPeered", + "peerVpc", + "unpeerVpc", + "getActiveNames", + "getBlueprints", + "getBundles", + "getCloudFormationStackRecords", + "getDiskSnapshots", 
+ "getDisks", + "getDomains", + "getExportSnapshotRecords", + "getInstanceSnapshots", + "getInstances", + "getKeyPairs", + "getLoadBalancers", + "getOperations", + "getRegions", + "getRelationalDatabaseBlueprints", + "getRelationalDatabaseBundles", + "getRelationalDatabaseSnapshots", + "getRelationalDatabases", + "getStaticIps" + ], + "useSraAuth": true } diff --git a/services/lightsail/src/main/resources/codegen-resources/service-2.json b/services/lightsail/src/main/resources/codegen-resources/service-2.json index ea9d9a766f8a..5750921dd44c 100644 --- a/services/lightsail/src/main/resources/codegen-resources/service-2.json +++ b/services/lightsail/src/main/resources/codegen-resources/service-2.json @@ -12356,6 +12356,10 @@ "caCertificateIdentifier":{ "shape":"string", "documentation":"

    Indicates the certificate that needs to be associated with the database.

    " + }, + "relationalDatabaseBlueprintId":{ + "shape":"string", + "documentation":"

    This parameter is used to update the major version of the database. Enter the blueprintId for the major version that you want to update to.

    Use the GetRelationalDatabaseBlueprints action to get a list of available blueprint IDs.

    " } } }, diff --git a/services/location/pom.xml b/services/location/pom.xml index d3798746ea16..9c428f235fcf 100644 --- a/services/location/pom.xml +++ b/services/location/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT location AWS Java SDK :: Services :: Location diff --git a/services/location/src/main/resources/codegen-resources/customization.config b/services/location/src/main/resources/codegen-resources/customization.config index 0e729acd0371..47a49338406e 100644 --- a/services/location/src/main/resources/codegen-resources/customization.config +++ b/services/location/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "generateEndpointClientTests": true + "generateEndpointClientTests": true, + "useSraAuth": true } diff --git a/services/lookoutequipment/pom.xml b/services/lookoutequipment/pom.xml index 8c0a4a2857ee..88a808d5f5b3 100644 --- a/services/lookoutequipment/pom.xml +++ b/services/lookoutequipment/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT lookoutequipment AWS Java SDK :: Services :: Lookout Equipment diff --git a/services/lookoutequipment/src/main/resources/codegen-resources/customization.config b/services/lookoutequipment/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/lookoutequipment/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-rule-set.json index 00e0ece38711..e1ad53b092fb 100644 --- a/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", 
"rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/services/lookoutequipment/src/main/resources/codegen-resources/service-2.json b/services/lookoutequipment/src/main/resources/codegen-resources/service-2.json index 185149e6b3ff..396daf6fc2fe 100644 --- a/services/lookoutequipment/src/main/resources/codegen-resources/service-2.json +++ 
b/services/lookoutequipment/src/main/resources/codegen-resources/service-2.json @@ -1076,6 +1076,10 @@ "Status":{ "shape":"InferenceSchedulerStatus", "documentation":"

    Indicates the status of the CreateInferenceScheduler operation.

    " + }, + "ModelQuality":{ + "shape":"ModelQuality", + "documentation":"

    Provides a quality assessment for a model that uses labels. If Lookout for Equipment determines that the model quality is poor based on training metrics, the value is POOR_QUALITY_DETECTED. Otherwise, the value is QUALITY_THRESHOLD_MET.

    If the model is unlabeled, the model quality can't be assessed and the value of ModelQuality is CANNOT_DETERMINE_QUALITY. In this situation, you can get a model quality assessment by adding labels to the input dataset and retraining the model.

    For information about using labels with your models, see Understanding labeling.

    For information about improving the quality of a model, see Best practices with Amazon Lookout for Equipment.

    " } } }, @@ -1236,6 +1240,10 @@ "OffCondition":{ "shape":"OffCondition", "documentation":"

    Indicates that the asset associated with this sensor has been shut off. As long as this condition is met, Lookout for Equipment will not use data from this asset for training, evaluation, or inference.

    " + }, + "ModelDiagnosticsOutputConfiguration":{ + "shape":"ModelDiagnosticsOutputConfiguration", + "documentation":"

    The Amazon S3 location where you want Amazon Lookout for Equipment to save the pointwise model diagnostics. You must also specify the RoleArn request parameter.

    " } } }, @@ -1401,7 +1409,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:dataset\\/.+" + "pattern":"arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:dataset\\/[0-9a-zA-Z_-]{1,200}\\/.+" }, "DatasetIdentifier":{ "type":"string", @@ -1658,7 +1666,7 @@ }, "IngestedFilesSummary":{ "shape":"IngestedFilesSummary", - "documentation":"

    IngestedFilesSummary associated with the given dataset for the latest successful associated ingestion job id.

    " + "documentation":"

    IngestedFilesSummary associated with the given dataset for the latest successful associated ingestion job id.

    " }, "RoleArn":{ "shape":"IamRoleArn", @@ -2021,6 +2029,14 @@ "RetrainingSchedulerStatus":{ "shape":"RetrainingSchedulerStatus", "documentation":"

    Indicates the status of the retraining scheduler.

    " + }, + "ModelDiagnosticsOutputConfiguration":{ + "shape":"ModelDiagnosticsOutputConfiguration", + "documentation":"

    Configuration information for the model's pointwise model diagnostics.

    " + }, + "ModelQuality":{ + "shape":"ModelQuality", + "documentation":"

    Provides a quality assessment for a model that uses labels. If Lookout for Equipment determines that the model quality is poor based on training metrics, the value is POOR_QUALITY_DETECTED. Otherwise, the value is QUALITY_THRESHOLD_MET.

    If the model is unlabeled, the model quality can't be assessed and the value of ModelQuality is CANNOT_DETERMINE_QUALITY. In this situation, you can get a model quality assessment by adding labels to the input dataset and retraining the model.

    For information about using labels with your models, see Understanding labeling.

    For information about improving the quality of a model, see Best practices with Amazon Lookout for Equipment.

    " } } }, @@ -2165,6 +2181,18 @@ "AutoPromotionResultReason":{ "shape":"AutoPromotionResultReason", "documentation":"

    Indicates the reason for the AutoPromotionResult. For example, a model might not be promoted if its performance was worse than the active version, if there was an error during training, or if the retraining scheduler was using MANUAL promote mode. The model will be promoted in MANAGED promote mode if the performance is better than the previous model.

    " + }, + "ModelDiagnosticsOutputConfiguration":{ + "shape":"ModelDiagnosticsOutputConfiguration", + "documentation":"

    The Amazon S3 location where Amazon Lookout for Equipment saves the pointwise model diagnostics for the model version.

    " + }, + "ModelDiagnosticsResultsObject":{ + "shape":"S3Object", + "documentation":"

    The Amazon S3 output prefix for where Lookout for Equipment saves the pointwise model diagnostics for the model version.

    " + }, + "ModelQuality":{ + "shape":"ModelQuality", + "documentation":"

    Provides a quality assessment for a model that uses labels. If Lookout for Equipment determines that the model quality is poor based on training metrics, the value is POOR_QUALITY_DETECTED. Otherwise, the value is QUALITY_THRESHOLD_MET.

    If the model is unlabeled, the model quality can't be assessed and the value of ModelQuality is CANNOT_DETERMINE_QUALITY. In this situation, you can get a model quality assessment by adding labels to the input dataset and retraining the model.

    For information about using labels with your models, see Understanding labeling.

    For information about improving the quality of a model, see Best practices with Amazon Lookout for Equipment.

    " } } }, @@ -2766,7 +2794,7 @@ }, "SensorsWithShortDateRange":{ "shape":"SensorsWithShortDateRange", - "documentation":"

    Parameter that describes the total number of sensors that have a short date range of less than 90 days of data overall.

    " + "documentation":"

    Parameter that describes the total number of sensors that have a short date range of less than 14 days of data overall.

    " } }, "documentation":"

    Entity that comprises aggregated information on sensors having insufficient data.

    " @@ -3109,7 +3137,7 @@ }, "InferenceExecutionSummaries":{ "shape":"InferenceExecutionSummaries", - "documentation":"

    Provides an array of information about the individual inference executions returned from the ListInferenceExecutions operation, including model used, inference scheduler, data configuration, and so on.

    " + "documentation":"

    Provides an array of information about the individual inference executions returned from the ListInferenceExecutions operation, including model used, inference scheduler, data configuration, and so on.

    If you don't supply the InferenceSchedulerName request parameter, or if you supply the name of an inference scheduler that doesn't exist, ListInferenceExecutions returns an empty array in InferenceExecutionSummaries.

    " } } }, @@ -3187,7 +3215,7 @@ "members":{ "LabelGroupName":{ "shape":"LabelGroupName", - "documentation":"

    Retruns the name of the label group.

    " + "documentation":"

    Returns the name of the label group.

    " }, "IntervalStartTime":{ "shape":"Timestamp", @@ -3224,7 +3252,7 @@ }, "LabelSummaries":{ "shape":"LabelSummaries", - "documentation":"

    A summary of the items in the label group.

    " + "documentation":"

    A summary of the items in the label group.

    If you don't supply the LabelGroupName request parameter, or if you supply the name of a label group that doesn't exist, ListLabels returns an empty array in LabelSummaries.

    " } } }, @@ -3279,7 +3307,7 @@ }, "ModelVersionSummaries":{ "shape":"ModelVersionSummaries", - "documentation":"

    Provides information on the specified model version, including the created time, model and dataset ARNs, and status.

    " + "documentation":"

    Provides information on the specified model version, including the created time, model and dataset ARNs, and status.

    If you don't supply the ModelName request parameter, or if you supply the name of a model that doesn't exist, ListModelVersions returns an empty array in ModelVersionSummaries.

    " } } }, @@ -3458,6 +3486,36 @@ "min":20, "pattern":"arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:model\\/.+" }, + "ModelDiagnosticsOutputConfiguration":{ + "type":"structure", + "required":["S3OutputConfiguration"], + "members":{ + "S3OutputConfiguration":{ + "shape":"ModelDiagnosticsS3OutputConfiguration", + "documentation":"

    The Amazon S3 location for the pointwise model diagnostics.

    " + }, + "KmsKeyId":{ + "shape":"NameOrArn", + "documentation":"

    The Amazon Web Services Key Management Service (KMS) key identifier to encrypt the pointwise model diagnostics files.

    " + } + }, + "documentation":"

    Output configuration information for the pointwise model diagnostics for an Amazon Lookout for Equipment model.

    " + }, + "ModelDiagnosticsS3OutputConfiguration":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"S3Bucket", + "documentation":"

    The name of the Amazon S3 bucket where the pointwise model diagnostics are located. You must be the owner of the Amazon S3 bucket.

    " + }, + "Prefix":{ + "shape":"S3Prefix", + "documentation":"

    The Amazon S3 prefix for the location of the pointwise model diagnostics. The prefix specifies the folder and evaluation result file name. (bucket).

    When you call CreateModel or UpdateModel, specify the path within the bucket that you want Lookout for Equipment to save the model to. During training, Lookout for Equipment creates the model evaluation model as a compressed JSON file with the name model_diagnostics_results.json.gz.

    When you call DescribeModel or DescribeModelVersion, prefix contains the file path and filename of the model evaluation file.

    " + } + }, + "documentation":"

    The Amazon S3 location for the pointwise model diagnostics for an Amazon Lookout for Equipment model.

    " + }, "ModelMetrics":{ "type":"string", "max":50000, @@ -3476,6 +3534,14 @@ "MANUAL" ] }, + "ModelQuality":{ + "type":"string", + "enum":[ + "QUALITY_THRESHOLD_MET", + "CANNOT_DETERMINE_QUALITY", + "POOR_QUALITY_DETECTED" + ] + }, "ModelStatus":{ "type":"string", "enum":[ @@ -3543,6 +3609,11 @@ "RetrainingSchedulerStatus":{ "shape":"RetrainingSchedulerStatus", "documentation":"

    Indicates the status of the retraining scheduler.

    " + }, + "ModelDiagnosticsOutputConfiguration":{"shape":"ModelDiagnosticsOutputConfiguration"}, + "ModelQuality":{ + "shape":"ModelQuality", + "documentation":"

    Provides a quality assessment for a model that uses labels. If Lookout for Equipment determines that the model quality is poor based on training metrics, the value is POOR_QUALITY_DETECTED. Otherwise, the value is QUALITY_THRESHOLD_MET.

    If the model is unlabeled, the model quality can't be assessed and the value of ModelQuality is CANNOT_DETERMINE_QUALITY. In this situation, you can get a model quality assessment by adding labels to the input dataset and retraining the model.

    For information about using labels with your models, see Understanding labeling.

    For information about improving the quality of a model, see Best practices with Amazon Lookout for Equipment.

    " } }, "documentation":"

    Provides information about the specified machine learning model, including dataset and model names and ARNs, as well as status.

    " @@ -3555,7 +3626,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"^arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:model\\/.+\\/.+\\/model-version\\/[0-9]{1,}$" + "pattern":"^arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:model\\/[0-9a-zA-Z_-]{1,200}\\/.+\\/model-version\\/[0-9]{1,}$" }, "ModelVersionSourceType":{ "type":"string", @@ -3609,6 +3680,10 @@ "SourceType":{ "shape":"ModelVersionSourceType", "documentation":"

    Indicates how this model version was generated.

    " + }, + "ModelQuality":{ + "shape":"ModelQuality", + "documentation":"

    Provides a quality assessment for a model that uses labels. If Lookout for Equipment determines that the model quality is poor based on training metrics, the value is POOR_QUALITY_DETECTED. Otherwise, the value is QUALITY_THRESHOLD_MET.

    If the model is unlabeled, the model quality can't be assessed and the value of ModelQuality is CANNOT_DETERMINE_QUALITY. In this situation, you can get a model quality assessment by adding labels to the input dataset and retraining the model.

    For information about improving the quality of a model, see Best practices with Amazon Lookout for Equipment.

    " } }, "documentation":"

    Contains information about the specific model version.

    " @@ -3888,7 +3963,7 @@ "members":{ "AffectedSensorCount":{ "shape":"Integer", - "documentation":"

    Indicates the number of sensors that have less than 90 days of data.

    " + "documentation":"

    Indicates the number of sensors that have less than 14 days of data.

    " } }, "documentation":"

    Entity that comprises information on sensors that have shorter date range.

    " @@ -4303,6 +4378,10 @@ "RoleArn":{ "shape":"IamRoleArn", "documentation":"

    The ARN of the model to update.

    " + }, + "ModelDiagnosticsOutputConfiguration":{ + "shape":"ModelDiagnosticsOutputConfiguration", + "documentation":"

    The Amazon S3 location where you want Amazon Lookout for Equipment to save the pointwise model diagnostics for the model. You must also specify the RoleArn request parameter.

    " } } }, diff --git a/services/lookoutmetrics/pom.xml b/services/lookoutmetrics/pom.xml index 29733846b946..f0f52674e7af 100644 --- a/services/lookoutmetrics/pom.xml +++ b/services/lookoutmetrics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT lookoutmetrics AWS Java SDK :: Services :: Lookout Metrics diff --git a/services/lookoutvision/pom.xml b/services/lookoutvision/pom.xml index 02120be3f004..3ba0897507c1 100644 --- a/services/lookoutvision/pom.xml +++ b/services/lookoutvision/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT lookoutvision AWS Java SDK :: Services :: Lookout Vision diff --git a/services/m2/pom.xml b/services/m2/pom.xml index bc919946ad73..5a55d1301122 100644 --- a/services/m2/pom.xml +++ b/services/m2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT m2 AWS Java SDK :: Services :: M2 diff --git a/services/machinelearning/pom.xml b/services/machinelearning/pom.xml index ff4ce853b932..ad4461ce4e3a 100644 --- a/services/machinelearning/pom.xml +++ b/services/machinelearning/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT machinelearning AWS Java SDK :: Services :: Amazon Machine Learning diff --git a/services/macie2/pom.xml b/services/macie2/pom.xml index 0d2d7a1e970f..67c49832b579 100644 --- a/services/macie2/pom.xml +++ b/services/macie2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT macie2 AWS Java SDK :: Services :: Macie2 diff --git a/services/macie2/src/main/resources/codegen-resources/customization.config b/services/macie2/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/macie2/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git 
a/services/managedblockchain/pom.xml b/services/managedblockchain/pom.xml index b069a72c01bd..303fd733a637 100644 --- a/services/managedblockchain/pom.xml +++ b/services/managedblockchain/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT managedblockchain AWS Java SDK :: Services :: ManagedBlockchain diff --git a/services/managedblockchainquery/pom.xml b/services/managedblockchainquery/pom.xml index 2ac0d20e4879..155ad7b7ee2b 100644 --- a/services/managedblockchainquery/pom.xml +++ b/services/managedblockchainquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT managedblockchainquery AWS Java SDK :: Services :: Managed Blockchain Query diff --git a/services/marketplaceagreement/pom.xml b/services/marketplaceagreement/pom.xml index e606a7dc21f1..24e09c4d4ab9 100644 --- a/services/marketplaceagreement/pom.xml +++ b/services/marketplaceagreement/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT marketplaceagreement AWS Java SDK :: Services :: Marketplace Agreement diff --git a/services/marketplaceagreement/src/main/resources/codegen-resources/customization.config b/services/marketplaceagreement/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/marketplaceagreement/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/marketplacecatalog/pom.xml b/services/marketplacecatalog/pom.xml index aa71c57dc243..c83bdf404bc4 100644 --- a/services/marketplacecatalog/pom.xml +++ b/services/marketplacecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT marketplacecatalog AWS Java SDK :: Services :: Marketplace Catalog diff --git a/services/marketplacecatalog/src/main/resources/codegen-resources/service-2.json 
b/services/marketplacecatalog/src/main/resources/codegen-resources/service-2.json index 7e346fbcd431..b34ecb89f1d0 100644 --- a/services/marketplacecatalog/src/main/resources/codegen-resources/service-2.json +++ b/services/marketplacecatalog/src/main/resources/codegen-resources/service-2.json @@ -27,7 +27,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

    Returns metadata and content for multiple entities.

    " + "documentation":"

    Returns metadata and content for multiple entities. This is the Batch version of the DescribeEntity API and uses the same IAM permission action as DescribeEntity API.

    " }, "CancelChangeSet":{ "name":"CancelChangeSet", @@ -296,7 +296,7 @@ "documentation":"

    The visibility of the AMI product.

    " } }, - "documentation":"

    Object containing all the filter fields for AMI products. Client can add a maximum of 8 filters in a single ListEntities request.

    " + "documentation":"

    Object containing all the filter fields for AMI products. Client can add only one wildcard filter and a maximum of 8 filters in a single ListEntities request.

    " }, "AmiProductLastModifiedDateFilter":{ "type":"structure", @@ -685,7 +685,7 @@ "documentation":"

    The visibility of the container product.

    " } }, - "documentation":"

    Object containing all the filter fields for container products. Client can add a maximum of 8 filters in a single ListEntities request.

    " + "documentation":"

    Object containing all the filter fields for container products. Client can add only one wildcard filter and a maximum of 8 filters in a single ListEntities request.

    " }, "ContainerProductLastModifiedDateFilter":{ "type":"structure", @@ -841,7 +841,7 @@ "documentation":"

    The last date on which the data product was modified.

    " } }, - "documentation":"

    Object containing all the filter fields for data products. Client can add a maximum of 8 filters in a single ListEntities request.

    " + "documentation":"

    Object containing all the filter fields for data products. Client can add only one wildcard filter and a maximum of 8 filters in a single ListEntities request.

    " }, "DataProductLastModifiedDateFilter":{ "type":"structure", @@ -1015,6 +1015,10 @@ "shape":"ChangeSetName", "documentation":"

    The optional name provided in the StartChangeSet request. If you do not provide a name, one is set by default.

    " }, + "Intent":{ + "shape":"Intent", + "documentation":"

    The optional intent provided in the StartChangeSet request. If you do not provide an intent, APPLY is set by default.

    " + }, "StartTime":{ "shape":"DateTimeISO8601", "documentation":"

    The date and time, in ISO 8601 format (2018-02-27T13:45:22Z), the request started.

    " @@ -1400,6 +1404,13 @@ "min":1, "pattern":"^[\\w\\-@]+$" }, + "Intent":{ + "type":"string", + "enum":[ + "VALIDATE", + "APPLY" + ] + }, "InternalServiceException":{ "type":"structure", "members":{ @@ -1674,7 +1685,7 @@ "documentation":"

    Allows filtering on the LastModifiedDate of an offer.

    " } }, - "documentation":"

    A filter for offers entity.

    " + "documentation":"

    Object containing all the filter fields for offers entity. Client can add only one wildcard filter and a maximum of 8 filters in a single ListEntities request.

    " }, "OfferLastModifiedDateFilter":{ "type":"structure", @@ -2069,7 +2080,7 @@ "documentation":"

    Allows filtering on the LastModifiedDate of a ResaleAuthorization.

    " } }, - "documentation":"

    A filter for ResaleAuthorization entity.

    " + "documentation":"

    Object containing all the filter fields for resale authorization entity. Client can add only one wildcard filter and a maximum of 8 filters in a single ListEntities request.

    " }, "ResaleAuthorizationLastModifiedDateFilter":{ "type":"structure", @@ -2541,7 +2552,7 @@ "documentation":"

    The last date on which the SaaS product was modified.

    " } }, - "documentation":"

    Object containing all the filter fields for SaaS products. Client can add a maximum of 8 filters in a single ListEntities request.

    " + "documentation":"

    Object containing all the filter fields for SaaS products. Client can add only one wildcard filter and a maximum of 8 filters in a single ListEntities request.

    " }, "SaaSProductLastModifiedDateFilter":{ "type":"structure", @@ -2719,6 +2730,10 @@ "ChangeSetTags":{ "shape":"TagList", "documentation":"

    A list of objects specifying each key name and value for the ChangeSetTags property.

    " + }, + "Intent":{ + "shape":"Intent", + "documentation":"

    The intent related to the request. The default is APPLY. To test your request before applying changes to your entities, use VALIDATE. This feature is currently available for adding versions to single-AMI products. For more information, see Add a new version.

    " } } }, diff --git a/services/marketplacecommerceanalytics/pom.xml b/services/marketplacecommerceanalytics/pom.xml index 11ac004d0fc1..1fe8109bbf8a 100644 --- a/services/marketplacecommerceanalytics/pom.xml +++ b/services/marketplacecommerceanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT marketplacecommerceanalytics AWS Java SDK :: Services :: AWS Marketplace Commerce Analytics diff --git a/services/marketplacecommerceanalytics/src/main/resources/codegen-resources/customization.config b/services/marketplacecommerceanalytics/src/main/resources/codegen-resources/customization.config index 987b3b240013..06d04ab92414 100644 --- a/services/marketplacecommerceanalytics/src/main/resources/codegen-resources/customization.config +++ b/services/marketplacecommerceanalytics/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,6 @@ { - "renameShapes" : { - "MarketplaceCommerceAnalyticsException" : "MarketplaceCommerceAnalyticsServiceException" - } + "renameShapes": { + "MarketplaceCommerceAnalyticsException": "MarketplaceCommerceAnalyticsServiceException" + }, + "useSraAuth": true } diff --git a/services/marketplacedeployment/pom.xml b/services/marketplacedeployment/pom.xml index 67e7149c201c..5690ebadb109 100644 --- a/services/marketplacedeployment/pom.xml +++ b/services/marketplacedeployment/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT marketplacedeployment AWS Java SDK :: Services :: Marketplace Deployment diff --git a/services/marketplacedeployment/src/main/resources/codegen-resources/customization.config b/services/marketplacedeployment/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/marketplacedeployment/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git 
a/services/marketplaceentitlement/pom.xml b/services/marketplaceentitlement/pom.xml index 553a0c1e0dc6..d948acc4a90c 100644 --- a/services/marketplaceentitlement/pom.xml +++ b/services/marketplaceentitlement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT marketplaceentitlement AWS Java SDK :: Services :: AWS Marketplace Entitlement diff --git a/services/marketplacemetering/pom.xml b/services/marketplacemetering/pom.xml index e09b1c2d0e0f..409eaca25b70 100644 --- a/services/marketplacemetering/pom.xml +++ b/services/marketplacemetering/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT marketplacemetering AWS Java SDK :: Services :: AWS Marketplace Metering Service diff --git a/services/marketplacemetering/src/main/resources/codegen-resources/customization.config b/services/marketplacemetering/src/main/resources/codegen-resources/customization.config index fb50d0a7a6eb..49a9cfc3d510 100644 --- a/services/marketplacemetering/src/main/resources/codegen-resources/customization.config +++ b/services/marketplacemetering/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/mediaconnect/pom.xml b/services/mediaconnect/pom.xml index f9f5cb7363b3..da348070553d 100644 --- a/services/mediaconnect/pom.xml +++ b/services/mediaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT mediaconnect AWS Java SDK :: Services :: MediaConnect diff --git a/services/mediaconnect/src/main/resources/codegen-resources/customization.config b/services/mediaconnect/src/main/resources/codegen-resources/customization.config index 1a1907a47709..536539ba6155 100644 --- a/services/mediaconnect/src/main/resources/codegen-resources/customization.config +++ 
b/services/mediaconnect/src/main/resources/codegen-resources/customization.config @@ -1,6 +1,7 @@ { - "verifiedSimpleMethods" : [ + "verifiedSimpleMethods": [ "listEntitlements", "listFlows" - ] + ], + "useSraAuth": true } diff --git a/services/mediaconvert/pom.xml b/services/mediaconvert/pom.xml index e086022a51ef..14e75c5d91f7 100644 --- a/services/mediaconvert/pom.xml +++ b/services/mediaconvert/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 mediaconvert diff --git a/services/mediaconvert/src/main/resources/codegen-resources/customization.config b/services/mediaconvert/src/main/resources/codegen-resources/customization.config index e5d2b586984e..0d85d066f3c5 100644 --- a/services/mediaconvert/src/main/resources/codegen-resources/customization.config +++ b/services/mediaconvert/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,6 @@ { - "excludedSimpleMethods" : ["*"] + "excludedSimpleMethods": [ + "*" + ], + "useSraAuth": true } diff --git a/services/medialive/pom.xml b/services/medialive/pom.xml index c80188c2e4a1..91fdb0d5ccc0 100644 --- a/services/medialive/pom.xml +++ b/services/medialive/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 medialive diff --git a/services/medialive/src/main/resources/codegen-resources/customization.config b/services/medialive/src/main/resources/codegen-resources/customization.config index 2b6d3476b2dd..e12d733fb0a5 100644 --- a/services/medialive/src/main/resources/codegen-resources/customization.config +++ b/services/medialive/src/main/resources/codegen-resources/customization.config @@ -1,14 +1,15 @@ { - "verifiedSimpleMethods": [ - "listChannels", - "listInputSecurityGroups", - "listInputs", - "listOfferings", - "listReservations" - ], - "excludedSimpleMethods": [ - "createChannel", - "createInput", - "createInputSecurityGroup" - ] + "verifiedSimpleMethods": [ + "listChannels", + 
"listInputSecurityGroups", + "listInputs", + "listOfferings", + "listReservations" + ], + "excludedSimpleMethods": [ + "createChannel", + "createInput", + "createInputSecurityGroup" + ], + "useSraAuth": true } diff --git a/services/medialive/src/main/resources/codegen-resources/service-2.json b/services/medialive/src/main/resources/codegen-resources/service-2.json index c5d8b4c8502e..2d5301abd8c2 100644 --- a/services/medialive/src/main/resources/codegen-resources/service-2.json +++ b/services/medialive/src/main/resources/codegen-resources/service-2.json @@ -3015,6 +3015,56 @@ } ], "documentation": "Update reservation." + }, + "RestartChannelPipelines": { + "name": "RestartChannelPipelines", + "http": { + "method": "POST", + "requestUri": "/prod/channels/{channelId}/restartChannelPipelines", + "responseCode": 200 + }, + "input": { + "shape": "RestartChannelPipelinesRequest" + }, + "output": { + "shape": "RestartChannelPipelinesResponse", + "documentation": "MediaLive successfully initiated the restart request." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "The service can't process your request because of a problem in the request. Verify that the syntax is correct." + }, + { + "shape": "InternalServerErrorException", + "documentation": "Internal Service Error" + }, + { + "shape": "ForbiddenException", + "documentation": "You don't have permissions for this action with the credentials that you sent." + }, + { + "shape": "BadGatewayException", + "documentation": "Bad Gateway Error" + }, + { + "shape": "NotFoundException", + "documentation": "The channel or pipeline you specified doesn't exist." + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway Timeout" + }, + { + "shape": "TooManyRequestsException", + "documentation": "Too many requests have been sent in too short of a time. The service limits the rate at which it will accept requests." 
+ }, + { + "shape": "ConflictException", + "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + } + ], + "documentation": "Restart pipelines in one channel that is currently running." } }, "shapes": { @@ -4524,7 +4574,7 @@ "Accessibility": { "shape": "AccessibilityType", "locationName": "accessibility", - "documentation": "Indicates whether the caption track implements accessibility features such as written descriptions of spoken dialog, music, and sounds." + "documentation": "Indicates whether the caption track implements accessibility features such as written descriptions of spoken dialog, music, and sounds. This signaling is added to HLS output group and MediaPackage output group." }, "CaptionSelectorName": { "shape": "__string", @@ -4734,7 +4784,7 @@ }, "CdiInputResolution": { "type": "string", - "documentation": "Maximum CDI input resolution; SD is 480i and 576i up to 30 frames-per-second (fps), HD is 720p up to 60 fps / 1080i up to 30 fps, FHD is 1080p up to 60 fps, UHD is 2160p up to 60 fps\n", + "documentation": "Maximum CDI input resolution; SD is 480i and 576i up to 30 frames-per-second (fps), HD is 720p up to 60 fps / 1080i up to 30 fps, FHD is 1080p up to 60 fps, UHD is 2160p up to 60 fps", "enum": [ "SD", "HD", @@ -4774,7 +4824,7 @@ "Destinations": { "shape": "__listOfOutputDestination", "locationName": "destinations", - "documentation": "A list of destinations of the channel. For UDP outputs, there is one\ndestination per output. For other types (HLS, for example), there is\none destination per packager.\n" + "documentation": "A list of destinations of the channel. For UDP outputs, there is one\ndestination per output. For other types (HLS, for example), there is\none destination per packager." 
}, "EgressEndpoints": { "shape": "__listOfChannelEgressEndpoint", @@ -4919,7 +4969,7 @@ "Destinations": { "shape": "__listOfOutputDestination", "locationName": "destinations", - "documentation": "A list of destinations of the channel. For UDP outputs, there is one\ndestination per output. For other types (HLS, for example), there is\none destination per packager.\n" + "documentation": "A list of destinations of the channel. For UDP outputs, there is one\ndestination per output. For other types (HLS, for example), there is\none destination per packager." }, "EgressEndpoints": { "shape": "__listOfChannelEgressEndpoint", @@ -5119,7 +5169,7 @@ "RequestId": { "shape": "__string", "locationName": "requestId", - "documentation": "Unique request ID to be specified. This is needed to prevent retries from\ncreating multiple resources.\n", + "documentation": "Unique request ID to be specified. This is needed to prevent retries from\ncreating multiple resources.", "idempotencyToken": true }, "Reserved": { @@ -5195,7 +5245,7 @@ "RequestId": { "shape": "__string", "locationName": "requestId", - "documentation": "Unique request ID to be specified. This is needed to prevent retries from\ncreating multiple resources.\n", + "documentation": "Unique request ID to be specified. This is needed to prevent retries from\ncreating multiple resources.", "idempotencyToken": true }, "Reserved": { @@ -5263,7 +5313,7 @@ "MediaConnectFlows": { "shape": "__listOfMediaConnectFlowRequest", "locationName": "mediaConnectFlows", - "documentation": "A list of the MediaConnect Flows that you want to use in this input. You can specify as few as one\nFlow and presently, as many as two. The only requirement is when you have more than one is that each Flow is in a\nseparate Availability Zone as this ensures your EML input is redundant to AZ issues.\n" + "documentation": "A list of the MediaConnect Flows that you want to use in this input. You can specify as few as one\nFlow and presently, as many as two. 
The only requirement is when you have more than one is that each Flow is in a\nseparate Availability Zone as this ensures your EML input is redundant to AZ issues." }, "Name": { "shape": "__string", @@ -5273,7 +5323,7 @@ "RequestId": { "shape": "__string", "locationName": "requestId", - "documentation": "Unique identifier of the request to ensure the request is handled\nexactly once in case of retries.\n", + "documentation": "Unique identifier of the request to ensure the request is handled\nexactly once in case of retries.", "idempotencyToken": true }, "RoleArn": { @@ -5284,7 +5334,7 @@ "Sources": { "shape": "__listOfInputSourceRequest", "locationName": "sources", - "documentation": "The source URLs for a PULL-type input. Every PULL type input needs\nexactly two source URLs for redundancy.\nOnly specify sources for PULL type Inputs. Leave Destinations empty.\n" + "documentation": "The source URLs for a PULL-type input. Every PULL type input needs\nexactly two source URLs for redundancy.\nOnly specify sources for PULL type Inputs. Leave Destinations empty." }, "Tags": { "shape": "Tags", @@ -5323,7 +5373,7 @@ "MediaConnectFlows": { "shape": "__listOfMediaConnectFlowRequest", "locationName": "mediaConnectFlows", - "documentation": "A list of the MediaConnect Flows that you want to use in this input. You can specify as few as one\nFlow and presently, as many as two. The only requirement is when you have more than one is that each Flow is in a\nseparate Availability Zone as this ensures your EML input is redundant to AZ issues.\n" + "documentation": "A list of the MediaConnect Flows that you want to use in this input. You can specify as few as one\nFlow and presently, as many as two. The only requirement is when you have more than one is that each Flow is in a\nseparate Availability Zone as this ensures your EML input is redundant to AZ issues." 
}, "Name": { "shape": "__string", @@ -5333,7 +5383,7 @@ "RequestId": { "shape": "__string", "locationName": "requestId", - "documentation": "Unique identifier of the request to ensure the request is handled\nexactly once in case of retries.\n", + "documentation": "Unique identifier of the request to ensure the request is handled\nexactly once in case of retries.", "idempotencyToken": true }, "RoleArn": { @@ -5344,7 +5394,7 @@ "Sources": { "shape": "__listOfInputSourceRequest", "locationName": "sources", - "documentation": "The source URLs for a PULL-type input. Every PULL type input needs\nexactly two source URLs for redundancy.\nOnly specify sources for PULL type Inputs. Leave Destinations empty.\n" + "documentation": "The source URLs for a PULL-type input. Every PULL type input needs\nexactly two source URLs for redundancy.\nOnly specify sources for PULL type Inputs. Leave Destinations empty." }, "Tags": { "shape": "Tags", @@ -5439,7 +5489,7 @@ "RequestId": { "shape": "__string", "locationName": "requestId", - "documentation": "Unique request ID. This prevents retries from creating multiple\nresources.\n", + "documentation": "Unique request ID. This prevents retries from creating multiple\nresources.", "idempotencyToken": true }, "Tags": { @@ -5472,7 +5522,7 @@ "RequestId": { "shape": "__string", "locationName": "requestId", - "documentation": "Unique request ID. This prevents retries from creating multiple\nresources.\n", + "documentation": "Unique request ID. This prevents retries from creating multiple\nresources.", "idempotencyToken": true } }, @@ -5505,7 +5555,7 @@ "RequestId": { "shape": "__string", "locationName": "requestId", - "documentation": "Unique request ID. This prevents retries from creating multiple\nresources.\n", + "documentation": "Unique request ID. 
This prevents retries from creating multiple\nresources.", "idempotencyToken": true } }, @@ -5560,7 +5610,7 @@ "RequestId": { "shape": "__string", "locationName": "requestId", - "documentation": "Unique request ID. This prevents retries from creating multiple\nresources.\n", + "documentation": "Unique request ID. This prevents retries from creating multiple\nresources.", "idempotencyToken": true }, "Tags": { @@ -5605,7 +5655,7 @@ "RequestId": { "shape": "__string", "locationName": "requestId", - "documentation": "Unique identifier of the request to ensure the request is handled\nexactly once in case of retries.\n", + "documentation": "Unique identifier of the request to ensure the request is handled\nexactly once in case of retries.", "idempotencyToken": true }, "Tags": { @@ -5628,7 +5678,7 @@ "RequestId": { "shape": "__string", "locationName": "requestId", - "documentation": "Unique identifier of the request to ensure the request is handled\nexactly once in case of retries.\n", + "documentation": "Unique identifier of the request to ensure the request is handled\nexactly once in case of retries.", "idempotencyToken": true }, "Tags": { @@ -5716,7 +5766,7 @@ "Destinations": { "shape": "__listOfOutputDestination", "locationName": "destinations", - "documentation": "A list of destinations of the channel. For UDP outputs, there is one\ndestination per output. For other types (HLS, for example), there is\none destination per packager.\n" + "documentation": "A list of destinations of the channel. For UDP outputs, there is one\ndestination per output. For other types (HLS, for example), there is\none destination per packager." }, "EgressEndpoints": { "shape": "__listOfChannelEgressEndpoint", @@ -6175,7 +6225,7 @@ "Destinations": { "shape": "__listOfOutputDestination", "locationName": "destinations", - "documentation": "A list of destinations of the channel. For UDP outputs, there is one\ndestination per output. 
For other types (HLS, for example), there is\none destination per packager.\n" + "documentation": "A list of destinations of the channel. For UDP outputs, there is one\ndestination per output. For other types (HLS, for example), there is\none destination per packager." }, "EgressEndpoints": { "shape": "__listOfChannelEgressEndpoint", @@ -6448,7 +6498,7 @@ "InputClass": { "shape": "InputClass", "locationName": "inputClass", - "documentation": "STANDARD - MediaLive expects two sources to be connected to this input. If the channel is also STANDARD, both sources will be ingested. If the channel is SINGLE_PIPELINE, only the first source will be ingested; the second source will always be ignored, even if the first source fails.\nSINGLE_PIPELINE - You can connect only one source to this input. If the ChannelClass is also SINGLE_PIPELINE, this value is valid. If the ChannelClass is STANDARD, this value is not valid because the channel requires two sources in the input.\n" + "documentation": "STANDARD - MediaLive expects two sources to be connected to this input. If the channel is also STANDARD, both sources will be ingested. If the channel is SINGLE_PIPELINE, only the first source will be ingested; the second source will always be ignored, even if the first source fails.\nSINGLE_PIPELINE - You can connect only one source to this input. If the ChannelClass is also SINGLE_PIPELINE, this value is valid. If the ChannelClass is STANDARD, this value is not valid because the channel requires two sources in the input." }, "InputDevices": { "shape": "__listOfInputDeviceSettings", @@ -6463,7 +6513,7 @@ "InputSourceType": { "shape": "InputSourceType", "locationName": "inputSourceType", - "documentation": "Certain pull input sources can be dynamic, meaning that they can have their URL's dynamically changes\nduring input switch actions. 
Presently, this functionality only works with MP4_FILE and TS_FILE inputs.\n" + "documentation": "Certain pull input sources can be dynamic, meaning that they can have their URL's dynamically changes\nduring input switch actions. Presently, this functionality only works with MP4_FILE and TS_FILE inputs." }, "MediaConnectFlows": { "shape": "__listOfMediaConnectFlow", @@ -9738,7 +9788,7 @@ "InputClass": { "shape": "InputClass", "locationName": "inputClass", - "documentation": "STANDARD - MediaLive expects two sources to be connected to this input. If the channel is also STANDARD, both sources will be ingested. If the channel is SINGLE_PIPELINE, only the first source will be ingested; the second source will always be ignored, even if the first source fails.\nSINGLE_PIPELINE - You can connect only one source to this input. If the ChannelClass is also SINGLE_PIPELINE, this value is valid. If the ChannelClass is STANDARD, this value is not valid because the channel requires two sources in the input.\n" + "documentation": "STANDARD - MediaLive expects two sources to be connected to this input. If the channel is also STANDARD, both sources will be ingested. If the channel is SINGLE_PIPELINE, only the first source will be ingested; the second source will always be ignored, even if the first source fails.\nSINGLE_PIPELINE - You can connect only one source to this input. If the ChannelClass is also SINGLE_PIPELINE, this value is valid. If the ChannelClass is STANDARD, this value is not valid because the channel requires two sources in the input." }, "InputDevices": { "shape": "__listOfInputDeviceSettings", @@ -9753,7 +9803,7 @@ "InputSourceType": { "shape": "InputSourceType", "locationName": "inputSourceType", - "documentation": "Certain pull input sources can be dynamic, meaning that they can have their URL's dynamically changes\nduring input switch actions. 
Presently, this functionality only works with MP4_FILE and TS_FILE inputs.\n" + "documentation": "Certain pull input sources can be dynamic, meaning that they can have their URL's dynamically changes\nduring input switch actions. Presently, this functionality only works with MP4_FILE and TS_FILE inputs." }, "MediaConnectFlows": { "shape": "__listOfMediaConnectFlow", @@ -9905,7 +9955,7 @@ "Ip": { "shape": "__string", "locationName": "ip", - "documentation": "The system-generated static IP address of endpoint.\nIt remains fixed for the lifetime of the input.\n" + "documentation": "The system-generated static IP address of endpoint.\nIt remains fixed for the lifetime of the input." }, "Port": { "shape": "__string", @@ -9915,7 +9965,7 @@ "Url": { "shape": "__string", "locationName": "url", - "documentation": "This represents the endpoint that the customer stream will be\npushed to.\n" + "documentation": "This represents the endpoint that the customer stream will be\npushed to." }, "Vpc": { "shape": "InputDestinationVpc", @@ -9930,7 +9980,7 @@ "StreamName": { "shape": "__string", "locationName": "streamName", - "documentation": "A unique name for the location the RTMP stream is being pushed\nto.\n" + "documentation": "A unique name for the location the RTMP stream is being pushed\nto." } }, "documentation": "Endpoint settings for a PUSH type input." @@ -9941,12 +9991,12 @@ "AvailabilityZone": { "shape": "__string", "locationName": "availabilityZone", - "documentation": "The availability zone of the Input destination.\n" + "documentation": "The availability zone of the Input destination." }, "NetworkInterfaceId": { "shape": "__string", "locationName": "networkInterfaceId", - "documentation": "The network interface ID of the Input destination in the VPC.\n" + "documentation": "The network interface ID of the Input destination in the VPC." } }, "documentation": "The properties for a VPC type input destination." 
@@ -10606,7 +10656,7 @@ }, "InputPreference": { "type": "string", - "documentation": "Input preference when deciding which input to make active when a previously failed input has recovered.\nIf \\\"EQUAL_INPUT_PREFERENCE\\\", then the active input will stay active as long as it is healthy.\nIf \\\"PRIMARY_INPUT_PREFERRED\\\", then always switch back to the primary input when it is healthy.\n", + "documentation": "Input preference when deciding which input to make active when a previously failed input has recovered.\nIf \\\"EQUAL_INPUT_PREFERENCE\\\", then the active input will stay active as long as it is healthy.\nIf \\\"PRIMARY_INPUT_PREFERRED\\\", then always switch back to the primary input when it is healthy.", "enum": [ "EQUAL_INPUT_PREFERENCE", "PRIMARY_INPUT_PREFERRED" @@ -10635,7 +10685,7 @@ }, "InputResolution": { "type": "string", - "documentation": "Input resolution based on lines of vertical resolution in the input; SD is less than 720 lines, HD is 720 to 1080 lines, UHD is greater than 1080 lines\n", + "documentation": "Input resolution based on lines of vertical resolution in the input; SD is less than 720 lines, HD is 720 to 1080 lines, UHD is greater than 1080 lines", "enum": [ "SD", "HD", @@ -10776,7 +10826,7 @@ "Url": { "shape": "__string", "locationName": "url", - "documentation": "This represents the customer's source URL where stream is\npulled from.\n" + "documentation": "This represents the customer's source URL where stream is\npulled from." }, "Username": { "shape": "__string", @@ -10805,7 +10855,7 @@ "Url": { "shape": "__string", "locationName": "url", - "documentation": "This represents the customer's source URL where stream is\npulled from.\n" + "documentation": "This represents the customer's source URL where stream is\npulled from." }, "Username": { "shape": "__string", @@ -10817,7 +10867,7 @@ }, "InputSourceType": { "type": "string", - "documentation": "There are two types of input sources, static and dynamic. 
If an input source is dynamic you can\nchange the source url of the input dynamically using an input switch action. Currently, two input types\nsupport a dynamic url at this time, MP4_FILE and TS_FILE. By default all input sources are static.\n", + "documentation": "There are two types of input sources, static and dynamic. If an input source is dynamic you can\nchange the source url of the input dynamically using an input switch action. Currently, two input types\nsupport a dynamic url at this time, MP4_FILE and TS_FILE. By default all input sources are static.", "enum": [ "STATIC", "DYNAMIC" @@ -10909,15 +10959,15 @@ "SecurityGroupIds": { "shape": "__listOf__string", "locationName": "securityGroupIds", - "documentation": "A list of up to 5 EC2 VPC security group IDs to attach to the Input VPC network interfaces.\nRequires subnetIds. If none are specified then the VPC default security group will be used.\n" + "documentation": "A list of up to 5 EC2 VPC security group IDs to attach to the Input VPC network interfaces.\nRequires subnetIds. If none are specified then the VPC default security group will be used." }, "SubnetIds": { "shape": "__listOf__string", "locationName": "subnetIds", - "documentation": "A list of 2 VPC subnet IDs from the same VPC.\nSubnet IDs must be mapped to two unique availability zones (AZ).\n" + "documentation": "A list of 2 VPC subnet IDs from the same VPC.\nSubnet IDs must be mapped to two unique availability zones (AZ)." 
} }, - "documentation": "Settings for a private VPC Input.\nWhen this property is specified, the input destination addresses will be created in a VPC rather than with public Internet addresses.\nThis property requires setting the roleArn property on Input creation.\nNot compatible with the inputSecurityGroups property.\n", + "documentation": "Settings for a private VPC Input.\nWhen this property is specified, the input destination addresses will be created in a VPC rather than with public Internet addresses.\nThis property requires setting the roleArn property on Input creation.\nNot compatible with the inputSecurityGroups property.", "required": [ "SubnetIds" ] @@ -11360,13 +11410,13 @@ "shape": "__string", "location": "querystring", "locationName": "channelClass", - "documentation": "Filter by channel class, 'STANDARD' or 'SINGLE_PIPELINE'\n" + "documentation": "Filter by channel class, 'STANDARD' or 'SINGLE_PIPELINE'" }, "ChannelConfiguration": { "shape": "__string", "location": "querystring", "locationName": "channelConfiguration", - "documentation": "Filter to offerings that match the configuration of an existing channel, e.g. '2345678' (a channel ID)\n" + "documentation": "Filter to offerings that match the configuration of an existing channel, e.g. 
'2345678' (a channel ID)" }, "Codec": { "shape": "__string", @@ -11389,7 +11439,7 @@ "shape": "__string", "location": "querystring", "locationName": "maximumBitrate", - "documentation": "Filter by bitrate, 'MAX_10_MBPS', 'MAX_20_MBPS', or 'MAX_50_MBPS'\n" + "documentation": "Filter by bitrate, 'MAX_10_MBPS', 'MAX_20_MBPS', or 'MAX_50_MBPS'" }, "MaximumFramerate": { "shape": "__string", @@ -11418,13 +11468,13 @@ "shape": "__string", "location": "querystring", "locationName": "specialFeature", - "documentation": "Filter by special feature, 'ADVANCED_AUDIO' or 'AUDIO_NORMALIZATION'\n" + "documentation": "Filter by special feature, 'ADVANCED_AUDIO' or 'AUDIO_NORMALIZATION'" }, "VideoQuality": { "shape": "__string", "location": "querystring", "locationName": "videoQuality", - "documentation": "Filter by video quality, 'STANDARD', 'ENHANCED', or 'PREMIUM'\n" + "documentation": "Filter by video quality, 'STANDARD', 'ENHANCED', or 'PREMIUM'" } }, "documentation": "Placeholder documentation for ListOfferingsRequest" @@ -11468,7 +11518,7 @@ "shape": "__string", "location": "querystring", "locationName": "channelClass", - "documentation": "Filter by channel class, 'STANDARD' or 'SINGLE_PIPELINE'\n" + "documentation": "Filter by channel class, 'STANDARD' or 'SINGLE_PIPELINE'" }, "Codec": { "shape": "__string", @@ -11485,7 +11535,7 @@ "shape": "__string", "location": "querystring", "locationName": "maximumBitrate", - "documentation": "Filter by bitrate, 'MAX_10_MBPS', 'MAX_20_MBPS', or 'MAX_50_MBPS'\n" + "documentation": "Filter by bitrate, 'MAX_10_MBPS', 'MAX_20_MBPS', or 'MAX_50_MBPS'" }, "MaximumFramerate": { "shape": "__string", @@ -11514,13 +11564,13 @@ "shape": "__string", "location": "querystring", "locationName": "specialFeature", - "documentation": "Filter by special feature, 'ADVANCED_AUDIO' or 'AUDIO_NORMALIZATION'\n" + "documentation": "Filter by special feature, 'ADVANCED_AUDIO' or 'AUDIO_NORMALIZATION'" }, "VideoQuality": { "shape": "__string", "location": 
"querystring", "locationName": "videoQuality", - "documentation": "Filter by video quality, 'STANDARD', 'ENHANCED', or 'PREMIUM'\n" + "documentation": "Filter by video quality, 'STANDARD', 'ENHANCED', or 'PREMIUM'" } }, "documentation": "Placeholder documentation for ListReservationsRequest" @@ -13639,7 +13689,7 @@ }, "PreferredChannelPipeline": { "type": "string", - "documentation": "Indicates which pipeline is preferred by the multiplex for program ingest.\nIf set to \\\"PIPELINE_0\\\" or \\\"PIPELINE_1\\\" and an unhealthy ingest causes the multiplex to switch to the non-preferred pipeline,\nit will switch back once that ingest is healthy again. If set to \\\"CURRENTLY_ACTIVE\\\",\nit will not switch back to the other pipeline based on it recovering to a healthy state,\nit will only switch if the active pipeline becomes unhealthy.\n", + "documentation": "Indicates which pipeline is preferred by the multiplex for program ingest.\nIf set to \\\"PIPELINE_0\\\" or \\\"PIPELINE_1\\\" and an unhealthy ingest causes the multiplex to switch to the non-preferred pipeline,\nit will switch back once that ingest is healthy again. If set to \\\"CURRENTLY_ACTIVE\\\",\nit will not switch back to the other pipeline based on it recovering to a healthy state,\nit will only switch if the active pipeline becomes unhealthy.", "enum": [ "CURRENTLY_ACTIVE", "PIPELINE_0", @@ -14017,7 +14067,7 @@ }, "ReservationResolution": { "type": "string", - "documentation": "Resolution based on lines of vertical resolution; SD is less than 720 lines, HD is 720 to 1080 lines, FHD is 1080 lines, UHD is greater than 1080 lines\n", + "documentation": "Resolution based on lines of vertical resolution; SD is less than 720 lines, HD is 720 to 1080 lines, FHD is 1080 lines, UHD is greater than 1080 lines", "enum": [ "SD", "HD", @@ -14919,7 +14969,7 @@ "Destinations": { "shape": "__listOfOutputDestination", "locationName": "destinations", - "documentation": "A list of destinations of the channel. 
For UDP outputs, there is one\ndestination per output. For other types (HLS, for example), there is\none destination per packager.\n" + "documentation": "A list of destinations of the channel. For UDP outputs, there is one\ndestination per output. For other types (HLS, for example), there is\none destination per packager." }, "EgressEndpoints": { "shape": "__listOfChannelEgressEndpoint", @@ -15335,7 +15385,7 @@ "Destinations": { "shape": "__listOfOutputDestination", "locationName": "destinations", - "documentation": "A list of destinations of the channel. For UDP outputs, there is one\ndestination per output. For other types (HLS, for example), there is\none destination per packager.\n" + "documentation": "A list of destinations of the channel. For UDP outputs, there is one\ndestination per output. For other types (HLS, for example), there is\none destination per packager." }, "EgressEndpoints": { "shape": "__listOfChannelEgressEndpoint", @@ -16223,7 +16273,7 @@ "MediaConnectFlows": { "shape": "__listOfMediaConnectFlowRequest", "locationName": "mediaConnectFlows", - "documentation": "A list of the MediaConnect Flow ARNs that you want to use as the source of the input. You can specify as few as one\nFlow and presently, as many as two. The only requirement is when you have more than one is that each Flow is in a\nseparate Availability Zone as this ensures your EML input is redundant to AZ issues.\n" + "documentation": "A list of the MediaConnect Flow ARNs that you want to use as the source of the input. You can specify as few as one\nFlow and presently, as many as two. The only requirement is when you have more than one is that each Flow is in a\nseparate Availability Zone as this ensures your EML input is redundant to AZ issues." }, "Name": { "shape": "__string", @@ -16238,7 +16288,7 @@ "Sources": { "shape": "__listOfInputSourceRequest", "locationName": "sources", - "documentation": "The source URLs for a PULL-type input. 
Every PULL type input needs\nexactly two source URLs for redundancy.\nOnly specify sources for PULL type Inputs. Leave Destinations empty.\n" + "documentation": "The source URLs for a PULL-type input. Every PULL type input needs\nexactly two source URLs for redundancy.\nOnly specify sources for PULL type Inputs. Leave Destinations empty." } }, "documentation": "Placeholder documentation for UpdateInput" @@ -16417,7 +16467,7 @@ "MediaConnectFlows": { "shape": "__listOfMediaConnectFlowRequest", "locationName": "mediaConnectFlows", - "documentation": "A list of the MediaConnect Flow ARNs that you want to use as the source of the input. You can specify as few as one\nFlow and presently, as many as two. The only requirement is when you have more than one is that each Flow is in a\nseparate Availability Zone as this ensures your EML input is redundant to AZ issues.\n" + "documentation": "A list of the MediaConnect Flow ARNs that you want to use as the source of the input. You can specify as few as one\nFlow and presently, as many as two. The only requirement is when you have more than one is that each Flow is in a\nseparate Availability Zone as this ensures your EML input is redundant to AZ issues." }, "Name": { "shape": "__string", @@ -16432,7 +16482,7 @@ "Sources": { "shape": "__listOfInputSourceRequest", "locationName": "sources", - "documentation": "The source URLs for a PULL-type input. Every PULL type input needs\nexactly two source URLs for redundancy.\nOnly specify sources for PULL type Inputs. Leave Destinations empty.\n" + "documentation": "The source URLs for a PULL-type input. Every PULL type input needs\nexactly two source URLs for redundancy.\nOnly specify sources for PULL type Inputs. Leave Destinations empty." 
} }, "documentation": "A request to update an input.", @@ -16901,20 +16951,20 @@ "PublicAddressAllocationIds": { "shape": "__listOf__string", "locationName": "publicAddressAllocationIds", - "documentation": "List of public address allocation ids to associate with ENIs that will be created in Output VPC.\nMust specify one for SINGLE_PIPELINE, two for STANDARD channels\n" + "documentation": "List of public address allocation ids to associate with ENIs that will be created in Output VPC.\nMust specify one for SINGLE_PIPELINE, two for STANDARD channels" }, "SecurityGroupIds": { "shape": "__listOf__string", "locationName": "securityGroupIds", - "documentation": "A list of up to 5 EC2 VPC security group IDs to attach to the Output VPC network interfaces.\nIf none are specified then the VPC default security group will be used\n" + "documentation": "A list of up to 5 EC2 VPC security group IDs to attach to the Output VPC network interfaces.\nIf none are specified then the VPC default security group will be used" }, "SubnetIds": { "shape": "__listOf__string", "locationName": "subnetIds", - "documentation": "A list of VPC subnet IDs from the same VPC.\nIf STANDARD channel, subnet IDs must be mapped to two unique availability zones (AZ).\n" + "documentation": "A list of VPC subnet IDs from the same VPC.\nIf STANDARD channel, subnet IDs must be mapped to two unique availability zones (AZ)." 
} }, - "documentation": "The properties for a private VPC Output\nWhen this property is specified, the output egress addresses will be created in a user specified VPC\n", + "documentation": "The properties for a private VPC Output\nWhen this property is specified, the output egress addresses will be created in a user specified VPC", "required": [ "SubnetIds" ] @@ -16925,25 +16975,25 @@ "AvailabilityZones": { "shape": "__listOf__string", "locationName": "availabilityZones", - "documentation": "The Availability Zones where the vpc subnets are located.\nThe first Availability Zone applies to the first subnet in the list of subnets.\nThe second Availability Zone applies to the second subnet.\n" + "documentation": "The Availability Zones where the vpc subnets are located.\nThe first Availability Zone applies to the first subnet in the list of subnets.\nThe second Availability Zone applies to the second subnet." }, "NetworkInterfaceIds": { "shape": "__listOf__string", "locationName": "networkInterfaceIds", - "documentation": "A list of Elastic Network Interfaces created by MediaLive in the customer's VPC\n" + "documentation": "A list of Elastic Network Interfaces created by MediaLive in the customer's VPC" }, "SecurityGroupIds": { "shape": "__listOf__string", "locationName": "securityGroupIds", - "documentation": "A list of up EC2 VPC security group IDs attached to the Output VPC network interfaces.\n" + "documentation": "A list of up EC2 VPC security group IDs attached to the Output VPC network interfaces." }, "SubnetIds": { "shape": "__listOf__string", "locationName": "subnetIds", - "documentation": "A list of VPC subnet IDs from the same VPC.\nIf STANDARD channel, subnet IDs must be mapped to two unique availability zones (AZ).\n" + "documentation": "A list of VPC subnet IDs from the same VPC.\nIf STANDARD channel, subnet IDs must be mapped to two unique availability zones (AZ)." 
} }, - "documentation": "The properties for a private VPC Output\n" + "documentation": "The properties for a private VPC Output" }, "WavCodingMode": { "type": "string", @@ -17944,6 +17994,140 @@ "shape": "InputDeviceUhdAudioChannelPairConfig" }, "documentation": "Placeholder documentation for __listOfInputDeviceUhdAudioChannelPairConfig" + }, + "ChannelPipelineIdToRestart": { + "type": "string", + "documentation": "Property of RestartChannelPipelinesRequest", + "enum": [ + "PIPELINE_0", + "PIPELINE_1" + ] + }, + "RestartChannelPipelinesRequest": { + "type": "structure", + "members": { + "ChannelId": { + "shape": "__string", + "location": "uri", + "locationName": "channelId", + "documentation": "ID of channel" + }, + "PipelineIds": { + "shape": "__listOfChannelPipelineIdToRestart", + "locationName": "pipelineIds", + "documentation": "An array of pipelines to restart in this channel. Format PIPELINE_0 or PIPELINE_1." + } + }, + "documentation": "Pipelines to restart.", + "required": [ + "ChannelId" + ] + }, + "RestartChannelPipelinesResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "The unique arn of the channel." + }, + "CdiInputSpecification": { + "shape": "CdiInputSpecification", + "locationName": "cdiInputSpecification", + "documentation": "Specification of CDI inputs for this channel" + }, + "ChannelClass": { + "shape": "ChannelClass", + "locationName": "channelClass", + "documentation": "The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline." + }, + "Destinations": { + "shape": "__listOfOutputDestination", + "locationName": "destinations", + "documentation": "A list of destinations of the channel. For UDP outputs, there is one\ndestination per output. For other types (HLS, for example), there is\none destination per packager." 
+ }, + "EgressEndpoints": { + "shape": "__listOfChannelEgressEndpoint", + "locationName": "egressEndpoints", + "documentation": "The endpoints where outgoing connections initiate from" + }, + "EncoderSettings": { + "shape": "EncoderSettings", + "locationName": "encoderSettings" + }, + "Id": { + "shape": "__string", + "locationName": "id", + "documentation": "The unique id of the channel." + }, + "InputAttachments": { + "shape": "__listOfInputAttachment", + "locationName": "inputAttachments", + "documentation": "List of input attachments for channel." + }, + "InputSpecification": { + "shape": "InputSpecification", + "locationName": "inputSpecification", + "documentation": "Specification of network and file inputs for this channel" + }, + "LogLevel": { + "shape": "LogLevel", + "locationName": "logLevel", + "documentation": "The log level being written to CloudWatch Logs." + }, + "Maintenance": { + "shape": "MaintenanceStatus", + "locationName": "maintenance", + "documentation": "Maintenance settings for this channel." + }, + "MaintenanceStatus": { + "shape": "__string", + "locationName": "maintenanceStatus", + "documentation": "The time in milliseconds by when the PVRE restart must occur." + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "The name of the channel. (user-mutable)" + }, + "PipelineDetails": { + "shape": "__listOfPipelineDetail", + "locationName": "pipelineDetails", + "documentation": "Runtime details for the pipelines of a running channel." + }, + "PipelinesRunningCount": { + "shape": "__integer", + "locationName": "pipelinesRunningCount", + "documentation": "The number of currently healthy pipelines." + }, + "RoleArn": { + "shape": "__string", + "locationName": "roleArn", + "documentation": "The Amazon Resource Name (ARN) of the role assumed when running the Channel." 
+ }, + "State": { + "shape": "ChannelState", + "locationName": "state" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "A collection of key-value pairs." + }, + "Vpc": { + "shape": "VpcOutputSettingsDescription", + "locationName": "vpc", + "documentation": "Settings for VPC output" + } + }, + "documentation": "Placeholder documentation for RestartChannelPipelinesResponse" + }, + "__listOfChannelPipelineIdToRestart": { + "type": "list", + "member": { + "shape": "ChannelPipelineIdToRestart" + }, + "documentation": "Placeholder documentation for __listOfChannelPipelineIdToRestart" } }, "documentation": "API for AWS Elemental MediaLive" diff --git a/services/mediapackage/pom.xml b/services/mediapackage/pom.xml index 53f267fcfc00..4d7504877224 100644 --- a/services/mediapackage/pom.xml +++ b/services/mediapackage/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 mediapackage diff --git a/services/mediapackage/src/main/resources/codegen-resources/customization.config b/services/mediapackage/src/main/resources/codegen-resources/customization.config index ee587bd61c35..e6ec2246b9fc 100644 --- a/services/mediapackage/src/main/resources/codegen-resources/customization.config +++ b/services/mediapackage/src/main/resources/codegen-resources/customization.config @@ -10,5 +10,6 @@ // Do not keep adding to this list. Require the service team to name enums like they're naming their shapes. 
"__AdTriggersElement": "AdTriggersElement", "__PeriodTriggersElement": "PeriodTriggersElement" - } + }, + "useSraAuth": true } diff --git a/services/mediapackagev2/pom.xml b/services/mediapackagev2/pom.xml index f956abc8894c..dc824270d0f5 100644 --- a/services/mediapackagev2/pom.xml +++ b/services/mediapackagev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT mediapackagev2 AWS Java SDK :: Services :: Media Package V2 diff --git a/services/mediapackagevod/pom.xml b/services/mediapackagevod/pom.xml index d43070b2e7e6..a694af0560e0 100644 --- a/services/mediapackagevod/pom.xml +++ b/services/mediapackagevod/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT mediapackagevod AWS Java SDK :: Services :: MediaPackage Vod diff --git a/services/mediastore/pom.xml b/services/mediastore/pom.xml index 8663e17f0d6d..19585862fc67 100644 --- a/services/mediastore/pom.xml +++ b/services/mediastore/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 mediastore diff --git a/services/mediastoredata/pom.xml b/services/mediastoredata/pom.xml index d6870f78f355..280c729f9571 100644 --- a/services/mediastoredata/pom.xml +++ b/services/mediastoredata/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 mediastoredata diff --git a/services/mediatailor/pom.xml b/services/mediatailor/pom.xml index 21d0f8237055..4edc5659665c 100644 --- a/services/mediatailor/pom.xml +++ b/services/mediatailor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT mediatailor AWS Java SDK :: Services :: MediaTailor diff --git a/services/mediatailor/src/main/resources/codegen-resources/service-2.json b/services/mediatailor/src/main/resources/codegen-resources/service-2.json index 6c2ce5019787..188556baa41f 100644 --- 
a/services/mediatailor/src/main/resources/codegen-resources/service-2.json +++ b/services/mediatailor/src/main/resources/codegen-resources/service-2.json @@ -555,6 +555,7 @@ }, "AdBreak":{ "type":"structure", + "required":["OffsetMillis"], "members":{ "AdBreakMetadata":{ "shape":"AdBreakMetadataList", diff --git a/services/medicalimaging/pom.xml b/services/medicalimaging/pom.xml index 092904578522..93e818b7e3d4 100644 --- a/services/medicalimaging/pom.xml +++ b/services/medicalimaging/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT medicalimaging AWS Java SDK :: Services :: Medical Imaging diff --git a/services/memorydb/pom.xml b/services/memorydb/pom.xml index f66b0eff724b..09b0eacbed37 100644 --- a/services/memorydb/pom.xml +++ b/services/memorydb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT memorydb AWS Java SDK :: Services :: Memory DB diff --git a/services/memorydb/src/main/resources/codegen-resources/customization.config b/services/memorydb/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/memorydb/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/mgn/pom.xml b/services/mgn/pom.xml index e57e32dbe097..5745ec66b258 100644 --- a/services/mgn/pom.xml +++ b/services/mgn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT mgn AWS Java SDK :: Services :: Mgn diff --git a/services/migrationhub/pom.xml b/services/migrationhub/pom.xml index ae2868ec1683..a981209a9a35 100644 --- a/services/migrationhub/pom.xml +++ b/services/migrationhub/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 migrationhub diff --git a/services/migrationhub/src/main/resources/codegen-resources/customization.config 
b/services/migrationhub/src/main/resources/codegen-resources/customization.config index e5d2b586984e..0d85d066f3c5 100644 --- a/services/migrationhub/src/main/resources/codegen-resources/customization.config +++ b/services/migrationhub/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,6 @@ { - "excludedSimpleMethods" : ["*"] + "excludedSimpleMethods": [ + "*" + ], + "useSraAuth": true } diff --git a/services/migrationhubconfig/pom.xml b/services/migrationhubconfig/pom.xml index 317a7a42d289..1dd8c110b0da 100644 --- a/services/migrationhubconfig/pom.xml +++ b/services/migrationhubconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT migrationhubconfig AWS Java SDK :: Services :: MigrationHub Config diff --git a/services/migrationhuborchestrator/pom.xml b/services/migrationhuborchestrator/pom.xml index fd832b7a4836..2916a9315a9b 100644 --- a/services/migrationhuborchestrator/pom.xml +++ b/services/migrationhuborchestrator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT migrationhuborchestrator AWS Java SDK :: Services :: Migration Hub Orchestrator diff --git a/services/migrationhubrefactorspaces/pom.xml b/services/migrationhubrefactorspaces/pom.xml index 832711a9ce92..0d1f11f86182 100644 --- a/services/migrationhubrefactorspaces/pom.xml +++ b/services/migrationhubrefactorspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT migrationhubrefactorspaces AWS Java SDK :: Services :: Migration Hub Refactor Spaces diff --git a/services/migrationhubstrategy/pom.xml b/services/migrationhubstrategy/pom.xml index 10e468e65153..bdd11fbf3a4e 100644 --- a/services/migrationhubstrategy/pom.xml +++ b/services/migrationhubstrategy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT migrationhubstrategy AWS Java SDK :: Services :: Migration Hub Strategy diff --git 
a/services/mobile/pom.xml b/services/mobile/pom.xml index 0502ec2a6bf2..3e32acbb015d 100644 --- a/services/mobile/pom.xml +++ b/services/mobile/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 mobile diff --git a/services/mq/pom.xml b/services/mq/pom.xml index 10b5970f4b55..3fd476325b93 100644 --- a/services/mq/pom.xml +++ b/services/mq/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 mq diff --git a/services/mturk/pom.xml b/services/mturk/pom.xml index bfd8b6a4acc2..fa8e91cb352f 100644 --- a/services/mturk/pom.xml +++ b/services/mturk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT mturk AWS Java SDK :: Services :: Amazon Mechanical Turk Requester diff --git a/services/mwaa/pom.xml b/services/mwaa/pom.xml index 1a18774ef112..364d3e7f4965 100644 --- a/services/mwaa/pom.xml +++ b/services/mwaa/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT mwaa AWS Java SDK :: Services :: MWAA diff --git a/services/mwaa/src/main/resources/codegen-resources/customization.config b/services/mwaa/src/main/resources/codegen-resources/customization.config index 0e729acd0371..47a49338406e 100644 --- a/services/mwaa/src/main/resources/codegen-resources/customization.config +++ b/services/mwaa/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "generateEndpointClientTests": true + "generateEndpointClientTests": true, + "useSraAuth": true } diff --git a/services/neptune/pom.xml b/services/neptune/pom.xml index 22405ebdee3c..67670084bc14 100644 --- a/services/neptune/pom.xml +++ b/services/neptune/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT neptune AWS Java SDK :: Services :: Neptune diff --git a/services/neptune/src/main/resources/codegen-resources/customization.config 
b/services/neptune/src/main/resources/codegen-resources/customization.config index 71ed6d26cabc..d4adcefb73e5 100644 --- a/services/neptune/src/main/resources/codegen-resources/customization.config +++ b/services/neptune/src/main/resources/codegen-resources/customization.config @@ -39,5 +39,6 @@ "interceptors": [ "software.amazon.awssdk.services.neptune.internal.CopyDbClusterSnapshotPresignInterceptor", "software.amazon.awssdk.services.neptune.internal.CreateDbClusterPresignInterceptor" - ] + ], + "useSraAuth": true } diff --git a/services/neptunedata/pom.xml b/services/neptunedata/pom.xml index 09c26bd6fcc4..7b8ffb351df3 100644 --- a/services/neptunedata/pom.xml +++ b/services/neptunedata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT neptunedata AWS Java SDK :: Services :: Neptunedata diff --git a/services/neptunedata/src/main/resources/codegen-resources/customization.config b/services/neptunedata/src/main/resources/codegen-resources/customization.config index 1afec6c42d9f..a271f651f8c6 100644 --- a/services/neptunedata/src/main/resources/codegen-resources/customization.config +++ b/services/neptunedata/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "customErrorCodeFieldName": "code" + "customErrorCodeFieldName": "code", + "useSraAuth": true } diff --git a/services/neptunegraph/pom.xml b/services/neptunegraph/pom.xml index 5d7688f7c0f9..a5f9ad44f7ff 100644 --- a/services/neptunegraph/pom.xml +++ b/services/neptunegraph/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT neptunegraph AWS Java SDK :: Services :: Neptune Graph diff --git a/services/neptunegraph/src/main/resources/codegen-resources/customization.config b/services/neptunegraph/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ 
b/services/neptunegraph/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/neptunegraph/src/main/resources/codegen-resources/service-2.json b/services/neptunegraph/src/main/resources/codegen-resources/service-2.json index 97110e7f3c5e..35740ac3219f 100644 --- a/services/neptunegraph/src/main/resources/codegen-resources/service-2.json +++ b/services/neptunegraph/src/main/resources/codegen-resources/service-2.json @@ -225,7 +225,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Execute an openCypher query. Currently, the SDK does not support parameterized queries. If you want to make a parameterized query call, you can use an HTTP request.

    Non-parametrized queries are not considered for plan caching. You can force plan caching with planCache=enabled. The plan cache will be reused only for the same exact query. Slight variations in the query will not be able to reuse the query plan cache.

    ", + "documentation":"

    Execute an openCypher query. Currently, the SDK does not support parameterized queries. If you want to make a parameterized query call, you can use an HTTP request.

    When invoking this operation in a Neptune Analytics cluster, the IAM user or role making the request must have a policy attached that allows one of the following IAM actions in that cluster, depending on the query:

    • neptune-graph:ReadDataViaQuery

    • neptune-graph:WriteDataViaQuery

    • neptune-graph:DeleteDataViaQuery

    Non-parametrized queries are not considered for plan caching. You can force plan caching with planCache=enabled. The plan cache will be reused only for the same exact query. Slight variations in the query will not be able to reuse the query plan cache.

    ", "endpoint":{ "hostPrefix":"{graphIdentifier}." }, @@ -353,7 +353,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    Retrieves the status of a specified query.

    ", + "documentation":"

    Retrieves the status of a specified query.

    When invoking this operation in a Neptune Analytics cluster, the IAM user or role making the request must have the neptune-graph:GetQueryStatus IAM action attached.

    ", "endpoint":{ "hostPrefix":"{graphIdentifier}." }, @@ -727,7 +727,7 @@ }, "publicConnectivity":{ "shape":"Boolean", - "documentation":"

    Specifies whether or not the graph can be reachable over the internet. All access to graphs IAM authenticated. (true to enable, or false to disable.

    " + "documentation":"

    Specifies whether or not the graph can be reachable over the internet. All access to graphs is IAM authenticated. (true to enable, or false to disable.

    " }, "kmsKeyIdentifier":{ "shape":"KmsKeyArn", @@ -793,7 +793,7 @@ }, "publicConnectivity":{ "shape":"Boolean", - "documentation":"

    Specifies whether or not the graph can be reachable over the internet. All access to graphs IAM authenticated.

    " + "documentation":"

    Specifies whether or not the graph can be reachable over the internet. All access to graphs is IAM authenticated.

    " }, "vectorSearchConfiguration":{ "shape":"VectorSearchConfiguration", @@ -898,7 +898,7 @@ }, "publicConnectivity":{ "shape":"Boolean", - "documentation":"

    Specifies whether or not the graph can be reachable over the internet. All access to graphs IAM authenticated. (true to enable, or false to disable).

    " + "documentation":"

    Specifies whether or not the graph can be reachable over the internet. All access to graphs is IAM authenticated. (true to enable, or false to disable).

    " }, "kmsKeyIdentifier":{ "shape":"KmsKeyArn", @@ -1220,6 +1220,17 @@ } } }, + "Document":{ + "type":"structure", + "members":{ + }, + "document":true + }, + "DocumentValuedMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"Document"} + }, "EdgeLabels":{ "type":"list", "member":{"shape":"String"} @@ -1270,6 +1281,10 @@ "shape":"QueryLanguage", "documentation":"

    The query language the query is written in. Currently only openCypher is supported.

    " }, + "parameters":{ + "shape":"DocumentValuedMap", + "documentation":"

    The data parameters the query can use in JSON format. For example: {\"name\": \"john\", \"age\": 20}. (optional)

    " + }, "planCache":{ "shape":"PlanCacheType", "documentation":"

    Query plan cache is a feature that saves the query plan and reuses it on successive executions of the same query. This reduces query latency, and works for both READ and UPDATE queries. The plan cache is an LRU cache with a 5 minute TTL and a capacity of 1000.

    " @@ -2505,7 +2520,7 @@ }, "publicConnectivity":{ "shape":"Boolean", - "documentation":"

    Specifies whether or not the graph can be reachable over the internet. All access to graphs IAM authenticated. (true to enable, or false to disable).

    " + "documentation":"

    Specifies whether or not the graph can be reachable over the internet. All access to graphs is IAM authenticated. (true to enable, or false to disable).

    " } } }, @@ -2795,7 +2810,7 @@ }, "publicConnectivity":{ "shape":"Boolean", - "documentation":"

    Specifies whether or not the graph can be reachable over the internet. All access to graphs IAM authenticated. (true to enable, or false to disable.

    " + "documentation":"

    Specifies whether or not the graph can be reachable over the internet. All access to graphs is IAM authenticated. (true to enable, or false to disable.

    " }, "provisionedMemory":{ "shape":"ProvisionedMemory", diff --git a/services/networkfirewall/pom.xml b/services/networkfirewall/pom.xml index aa87ddde86db..63998e21bbdd 100644 --- a/services/networkfirewall/pom.xml +++ b/services/networkfirewall/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT networkfirewall AWS Java SDK :: Services :: Network Firewall diff --git a/services/networkmanager/pom.xml b/services/networkmanager/pom.xml index dab324135f65..91b5f295a019 100644 --- a/services/networkmanager/pom.xml +++ b/services/networkmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT networkmanager AWS Java SDK :: Services :: NetworkManager diff --git a/services/networkmanager/src/main/resources/codegen-resources/customization.config b/services/networkmanager/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/networkmanager/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/networkmonitor/pom.xml b/services/networkmonitor/pom.xml index 4e65734039cc..5a18c7dce3fc 100644 --- a/services/networkmonitor/pom.xml +++ b/services/networkmonitor/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT networkmonitor AWS Java SDK :: Services :: Network Monitor diff --git a/services/nimble/pom.xml b/services/nimble/pom.xml index 411fd6e81d4c..8f2a34e209aa 100644 --- a/services/nimble/pom.xml +++ b/services/nimble/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT nimble AWS Java SDK :: Services :: Nimble diff --git a/services/oam/pom.xml b/services/oam/pom.xml index 4e76d7468507..165c2f7dbfe8 100644 --- a/services/oam/pom.xml +++ b/services/oam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 
2.24.10-SNAPSHOT oam AWS Java SDK :: Services :: OAM diff --git a/services/oam/src/main/resources/codegen-resources/customization.config b/services/oam/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/oam/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/omics/pom.xml b/services/omics/pom.xml index 2642224fceee..7c51e468b248 100644 --- a/services/omics/pom.xml +++ b/services/omics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT omics AWS Java SDK :: Services :: Omics diff --git a/services/omics/src/main/resources/codegen-resources/customization.config b/services/omics/src/main/resources/codegen-resources/customization.config index 0e729acd0371..47a49338406e 100644 --- a/services/omics/src/main/resources/codegen-resources/customization.config +++ b/services/omics/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "generateEndpointClientTests": true + "generateEndpointClientTests": true, + "useSraAuth": true } diff --git a/services/opensearch/pom.xml b/services/opensearch/pom.xml index f63cf71ef985..0437085ac3c0 100644 --- a/services/opensearch/pom.xml +++ b/services/opensearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT opensearch AWS Java SDK :: Services :: Open Search diff --git a/services/opensearch/src/main/resources/codegen-resources/customization.config b/services/opensearch/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/opensearch/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/opensearch/src/main/resources/codegen-resources/service-2.json 
b/services/opensearch/src/main/resources/codegen-resources/service-2.json index 91e2e8add013..240da0c77905 100644 --- a/services/opensearch/src/main/resources/codegen-resources/service-2.json +++ b/services/opensearch/src/main/resources/codegen-resources/service-2.json @@ -1983,7 +1983,7 @@ "members":{ "Enabled":{ "shape":"Boolean", - "documentation":"

    Whether to enable or disable cold storage on the domain.

    " + "documentation":"

    Whether to enable or disable cold storage on the domain. You must enable UltraWarm storage to enable cold storage.

    " } }, "documentation":"

    Container for the parameters required to enable cold storage for an OpenSearch Service domain. For more information, see Cold storage for Amazon OpenSearch Service.

    " @@ -2546,7 +2546,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

    " + "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

    " } }, "documentation":"

    The result of a DescribeDomainAutoTunes request.

    " @@ -2812,7 +2812,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

    " + "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

    " } }, "documentation":"

    Contains a list of connections matching the filter criteria.

    " @@ -2882,7 +2882,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

    " + "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

    " } }, "documentation":"

    Contains a list of connections matching the filter criteria.

    " @@ -2952,7 +2952,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

    " + "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

    " } }, "documentation":"

    Container for the response returned by the DescribePackages operation.

    " @@ -2986,7 +2986,7 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

    " + "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

    " }, "ReservedInstanceOfferings":{ "shape":"ReservedInstanceOfferingList", @@ -3024,7 +3024,7 @@ "members":{ "NextToken":{ "shape":"String", - "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

    " + "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

    " }, "ReservedInstances":{ "shape":"ReservedInstanceList", @@ -3979,7 +3979,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

    " + "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

    " } }, "documentation":"

    Container for response returned by GetPackageVersionHistory operation.

    " @@ -4018,7 +4018,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

    " + "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

    " } }, "documentation":"

    Container for the response returned by the GetUpgradeHistory operation.

    " @@ -4373,7 +4373,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

    " + "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

    " } }, "documentation":"

    The result of a ListDomainMaintenances request that contains information about the requested actions.

    " @@ -4434,7 +4434,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

    " + "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

    " } }, "documentation":"

    Container for the response parameters to the ListDomainsForPackage operation.

    " @@ -4490,7 +4490,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

    " + "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

    " } } }, @@ -4528,7 +4528,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

    " + "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

    " } }, "documentation":"

    Container for the response parameters to the ListPackagesForDomain operation.

    " @@ -4566,7 +4566,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

    " + "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

    " } } }, @@ -4620,7 +4620,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

    " + "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

    " } }, "documentation":"

    Container for the parameters for response received from the ListVersions operation.

    " @@ -4656,7 +4656,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

    " + "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

    " } } }, @@ -4691,7 +4691,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

    " + "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

    " } } }, @@ -4719,7 +4719,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

    " + "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

    " } } }, @@ -4862,7 +4862,7 @@ }, "NextToken":{ "type":"string", - "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

    " + "documentation":"

    When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

    " }, "NodeId":{ "type":"string", @@ -5006,6 +5006,14 @@ "t3.large.search", "t3.xlarge.search", "t3.2xlarge.search", + "or1.medium.search", + "or1.large.search", + "or1.xlarge.search", + "or1.2xlarge.search", + "or1.4xlarge.search", + "or1.8xlarge.search", + "or1.12xlarge.search", + "or1.16xlarge.search", "ultrawarm1.medium.search", "ultrawarm1.large.search", "ultrawarm1.xlarge.search", @@ -6866,5 +6874,5 @@ ] } }, - "documentation":"

    Use the Amazon OpenSearch Service configuration API to create, configure, and manage OpenSearch Service domains.

    For sample code that uses the configuration API, see the Amazon OpenSearch Service Developer Guide . The guide also contains sample code for sending signed HTTP requests to the OpenSearch APIs. The endpoint for configuration service requests is Region specific: es.region.amazonaws.com. For example, es.us-east-1.amazonaws.com. For a current list of supported Regions and endpoints, see Amazon Web Services service endpoints.

    " + "documentation":"

    Use the Amazon OpenSearch Service configuration API to create, configure, and manage OpenSearch Service domains. The endpoint for configuration service requests is Region specific: es.region.amazonaws.com. For example, es.us-east-1.amazonaws.com. For a current list of supported Regions and endpoints, see Amazon Web Services service endpoints.

    " } diff --git a/services/opensearchserverless/pom.xml b/services/opensearchserverless/pom.xml index bf2bc69950d0..64d4e8593878 100644 --- a/services/opensearchserverless/pom.xml +++ b/services/opensearchserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT opensearchserverless AWS Java SDK :: Services :: Open Search Serverless diff --git a/services/opsworks/pom.xml b/services/opsworks/pom.xml index 727c60f7ce15..cdc27910db3c 100644 --- a/services/opsworks/pom.xml +++ b/services/opsworks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT opsworks AWS Java SDK :: Services :: AWS OpsWorks diff --git a/services/opsworkscm/pom.xml b/services/opsworkscm/pom.xml index 347904251425..3378e819024c 100644 --- a/services/opsworkscm/pom.xml +++ b/services/opsworkscm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT opsworkscm AWS Java SDK :: Services :: AWS OpsWorks for Chef Automate diff --git a/services/organizations/pom.xml b/services/organizations/pom.xml index d3b3c279c2a6..b82c58a1f517 100644 --- a/services/organizations/pom.xml +++ b/services/organizations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT organizations AWS Java SDK :: Services :: AWS Organizations diff --git a/services/organizations/src/main/resources/codegen-resources/customization.config b/services/organizations/src/main/resources/codegen-resources/customization.config index b4fe8b6eb29e..f2a10e299aa2 100644 --- a/services/organizations/src/main/resources/codegen-resources/customization.config +++ b/services/organizations/src/main/resources/codegen-resources/customization.config @@ -1,15 +1,16 @@ { - "verifiedSimpleMethods": [ - "createOrganization", - "deleteOrganization", - "enableAllFeatures", - "leaveOrganization", - "describeOrganization", - "listAccounts", - "listCreateAccountStatus", - 
"listHandshakesForOrganization", - "listHandshakesForAccount", - "listRoots", - "listAWSServiceAccessForOrganization" - ] + "verifiedSimpleMethods": [ + "createOrganization", + "deleteOrganization", + "enableAllFeatures", + "leaveOrganization", + "describeOrganization", + "listAccounts", + "listCreateAccountStatus", + "listHandshakesForOrganization", + "listHandshakesForAccount", + "listRoots", + "listAWSServiceAccessForOrganization" + ], + "useSraAuth": true } diff --git a/services/osis/pom.xml b/services/osis/pom.xml index 53cb0b41f41e..aa9e0efcc06b 100644 --- a/services/osis/pom.xml +++ b/services/osis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT osis AWS Java SDK :: Services :: OSIS diff --git a/services/osis/src/main/resources/codegen-resources/customization.config b/services/osis/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/osis/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/outposts/pom.xml b/services/outposts/pom.xml index d4cff8c31375..443a42a40eae 100644 --- a/services/outposts/pom.xml +++ b/services/outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT outposts AWS Java SDK :: Services :: Outposts diff --git a/services/outposts/src/main/resources/codegen-resources/customization.config b/services/outposts/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/outposts/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/panorama/pom.xml b/services/panorama/pom.xml index 41be6420e78b..bebc983d32fc 100644 --- a/services/panorama/pom.xml +++ b/services/panorama/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT 
+ 2.24.10-SNAPSHOT panorama AWS Java SDK :: Services :: Panorama diff --git a/services/paymentcryptography/pom.xml b/services/paymentcryptography/pom.xml index 89739d46d44e..1538539bd21e 100644 --- a/services/paymentcryptography/pom.xml +++ b/services/paymentcryptography/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT paymentcryptography AWS Java SDK :: Services :: Payment Cryptography diff --git a/services/paymentcryptography/src/main/resources/codegen-resources/customization.config b/services/paymentcryptography/src/main/resources/codegen-resources/customization.config index fb50d0a7a6eb..49a9cfc3d510 100644 --- a/services/paymentcryptography/src/main/resources/codegen-resources/customization.config +++ b/services/paymentcryptography/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/paymentcryptographydata/pom.xml b/services/paymentcryptographydata/pom.xml index 55ae9564f833..b80b61474520 100644 --- a/services/paymentcryptographydata/pom.xml +++ b/services/paymentcryptographydata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT paymentcryptographydata AWS Java SDK :: Services :: Payment Cryptography Data diff --git a/services/paymentcryptographydata/src/main/resources/codegen-resources/customization.config b/services/paymentcryptographydata/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/paymentcryptographydata/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/pcaconnectorad/pom.xml b/services/pcaconnectorad/pom.xml index 2118b8dca70d..97cc778145f8 100644 --- a/services/pcaconnectorad/pom.xml +++ b/services/pcaconnectorad/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk 
services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT pcaconnectorad AWS Java SDK :: Services :: Pca Connector Ad diff --git a/services/pcaconnectorad/src/main/resources/codegen-resources/customization.config b/services/pcaconnectorad/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/pcaconnectorad/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/personalize/pom.xml b/services/personalize/pom.xml index bf6d79b05640..ebbc629df8c5 100644 --- a/services/personalize/pom.xml +++ b/services/personalize/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT personalize AWS Java SDK :: Services :: Personalize diff --git a/services/personalize/src/main/resources/codegen-resources/customization.config b/services/personalize/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/personalize/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/personalizeevents/pom.xml b/services/personalizeevents/pom.xml index 0da816852307..5106a6bd084d 100644 --- a/services/personalizeevents/pom.xml +++ b/services/personalizeevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT personalizeevents AWS Java SDK :: Services :: Personalize Events diff --git a/services/personalizeruntime/pom.xml b/services/personalizeruntime/pom.xml index 755348f609e1..d3aa59c502ab 100644 --- a/services/personalizeruntime/pom.xml +++ b/services/personalizeruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT personalizeruntime AWS Java SDK :: Services :: Personalize Runtime diff --git a/services/pi/pom.xml b/services/pi/pom.xml index 89cab63a9460..ba65f8e45c2c 100644 
--- a/services/pi/pom.xml +++ b/services/pi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT pi AWS Java SDK :: Services :: PI diff --git a/services/pinpoint/pom.xml b/services/pinpoint/pom.xml index a725b5270880..8a49e33c3d42 100644 --- a/services/pinpoint/pom.xml +++ b/services/pinpoint/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT pinpoint AWS Java SDK :: Services :: Amazon Pinpoint diff --git a/services/pinpoint/src/main/resources/codegen-resources/customization.config b/services/pinpoint/src/main/resources/codegen-resources/customization.config index 4d02e0dff44c..207ab79bd3ba 100644 --- a/services/pinpoint/src/main/resources/codegen-resources/customization.config +++ b/services/pinpoint/src/main/resources/codegen-resources/customization.config @@ -1,8 +1,10 @@ { - "excludedSimpleMethods" : ["*"], - "renameShapes": { - // Do not keep adding to this list. Require the service team to name enums like they're naming their shapes. 
- "__EndpointTypesElement": "EndpointTypesElement" - }, - "underscoresInNameBehavior": "ALLOW" + "excludedSimpleMethods": [ + "*" + ], + "renameShapes": { + "__EndpointTypesElement": "EndpointTypesElement" + }, + "underscoresInNameBehavior": "ALLOW", + "useSraAuth": true } diff --git a/services/pinpointemail/pom.xml b/services/pinpointemail/pom.xml index 0607f13635ce..ea9b8071e202 100644 --- a/services/pinpointemail/pom.xml +++ b/services/pinpointemail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT pinpointemail AWS Java SDK :: Services :: Pinpoint Email diff --git a/services/pinpointemail/src/main/resources/codegen-resources/customization.config b/services/pinpointemail/src/main/resources/codegen-resources/customization.config index 6bef416549e5..1ec2187f17f6 100644 --- a/services/pinpointemail/src/main/resources/codegen-resources/customization.config +++ b/services/pinpointemail/src/main/resources/codegen-resources/customization.config @@ -1,13 +1,14 @@ { - "verifiedSimpleMethods": [ - "getAccount", - "getDeliverabilityDashboardOptions" - ], - "excludedSimpleMethods": [ - "getDedicatedIps", - "listConfigurationSets", - "listDedicatedIpPools", - "listDeliverabilityTestReports", - "listEmailIdentities" - ] + "verifiedSimpleMethods": [ + "getAccount", + "getDeliverabilityDashboardOptions" + ], + "excludedSimpleMethods": [ + "getDedicatedIps", + "listConfigurationSets", + "listDedicatedIpPools", + "listDeliverabilityTestReports", + "listEmailIdentities" + ], + "useSraAuth": true } diff --git a/services/pinpointsmsvoice/pom.xml b/services/pinpointsmsvoice/pom.xml index 8050a63a5610..271dd0b2b87a 100644 --- a/services/pinpointsmsvoice/pom.xml +++ b/services/pinpointsmsvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT pinpointsmsvoice AWS Java SDK :: Services :: Pinpoint SMS Voice diff --git 
a/services/pinpointsmsvoice/src/main/resources/codegen-resources/customization.config b/services/pinpointsmsvoice/src/main/resources/codegen-resources/customization.config index 7bd34e376145..d5a857d46d17 100644 --- a/services/pinpointsmsvoice/src/main/resources/codegen-resources/customization.config +++ b/services/pinpointsmsvoice/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,6 @@ { - "excludedSimpleMethods" : [ + "excludedSimpleMethods": [ "listConfigurationSets" - ] + ], + "useSraAuth": true } diff --git a/services/pinpointsmsvoicev2/pom.xml b/services/pinpointsmsvoicev2/pom.xml index 00cf15a46777..d2f0caeabb58 100644 --- a/services/pinpointsmsvoicev2/pom.xml +++ b/services/pinpointsmsvoicev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT pinpointsmsvoicev2 AWS Java SDK :: Services :: Pinpoint SMS Voice V2 diff --git a/services/pipes/pom.xml b/services/pipes/pom.xml index 76a47abe5200..029ff33e873c 100644 --- a/services/pipes/pom.xml +++ b/services/pipes/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT pipes AWS Java SDK :: Services :: Pipes diff --git a/services/polly/pom.xml b/services/polly/pom.xml index 88774ef47737..eb4db105dff1 100644 --- a/services/polly/pom.xml +++ b/services/polly/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT polly AWS Java SDK :: Services :: Amazon Polly diff --git a/services/polly/src/main/resources/codegen-resources/service-2.json b/services/polly/src/main/resources/codegen-resources/service-2.json index d178237a2e4f..47a5455d2e23 100644 --- a/services/polly/src/main/resources/codegen-resources/service-2.json +++ b/services/polly/src/main/resources/codegen-resources/service-2.json @@ -1112,7 +1112,8 @@ "Isabelle", "Zayd", "Danielle", - "Gregory" + "Gregory", + "Burcu" ] }, "VoiceList":{ diff --git a/services/pom.xml b/services/pom.xml index be66336a3004..b41b1f5640fe 
100644 --- a/services/pom.xml +++ b/services/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT services AWS Java SDK :: Services @@ -397,6 +397,8 @@ neptunegraph networkmonitor supplychain + artifact + chatbot The AWS Java SDK services https://aws.amazon.com/sdkforjava diff --git a/services/pricing/pom.xml b/services/pricing/pom.xml index ad380fcef890..754f33af4294 100644 --- a/services/pricing/pom.xml +++ b/services/pricing/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 pricing diff --git a/services/pricing/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/pricing/src/main/resources/codegen-resources/endpoint-rule-set.json index a87f285e6f48..309ef25b5844 100644 --- a/services/pricing/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/pricing/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": 
"endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -326,9 +324,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/services/pricing/src/main/resources/codegen-resources/service-2.json b/services/pricing/src/main/resources/codegen-resources/service-2.json index 61fa4423d60c..4af40a45f39f 100644 --- a/services/pricing/src/main/resources/codegen-resources/service-2.json +++ b/services/pricing/src/main/resources/codegen-resources/service-2.json @@ -27,6 +27,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"NotFoundException"}, {"shape":"InternalErrorException"}, + {"shape":"ThrottlingException"}, {"shape":"ExpiredNextTokenException"} ], "documentation":"

    Returns the metadata for one service or a list of the metadata for all services. Use this without a service code to get the service codes for all services. Use it with a service code, such as AmazonEC2, to get information specific to that service, such as the attribute names available for that service. For example, some of the attribute names available for EC2 are volumeType, maxIopsVolume, operation, locationType, and instanceCapacity10xlarge.

    " @@ -44,6 +45,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"NotFoundException"}, {"shape":"InternalErrorException"}, + {"shape":"ThrottlingException"}, {"shape":"ExpiredNextTokenException"} ], "documentation":"

    Returns a list of attribute values. Attributes are similar to the details in a Price List API offer file. For a list of available attributes, see Offer File Definitions in the Billing and Cost Management User Guide.

    " @@ -60,7 +62,8 @@ {"shape":"InvalidParameterException"}, {"shape":"NotFoundException"}, {"shape":"AccessDeniedException"}, - {"shape":"InternalErrorException"} + {"shape":"InternalErrorException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10).

    This returns the URL that you can retrieve your Price List file from. This URL is based on the PriceListArn and FileFormat that you retrieve from the ListPriceLists response.

    " }, @@ -77,6 +80,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"NotFoundException"}, {"shape":"InternalErrorException"}, + {"shape":"ThrottlingException"}, {"shape":"ExpiredNextTokenException"} ], "documentation":"

    Returns a list of all products that match the filter criteria.

    " @@ -95,6 +99,7 @@ {"shape":"NotFoundException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalErrorException"}, + {"shape":"ThrottlingException"}, {"shape":"ExpiredNextTokenException"} ], "documentation":"

    This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10).

    This returns a list of Price List references that the requester if authorized to view, given a ServiceCode, CurrencyCode, and an EffectiveDate. Use without a RegionCode filter to list Price List references from all available Amazon Web Services Regions. Use with a RegionCode filter to get the Price List reference that's specific to a specific Amazon Web Services Region. You can use the PriceListArn from the response to get your preferred Price List files through the GetPriceListFileUrl API.

    " @@ -338,7 +343,8 @@ }, "documentation":"

    An error on the server occurred during the processing of your request. Try again later.

    ", "exception":true, - "fault":true + "fault":true, + "retryable":{"throttling":false} }, "InvalidNextTokenException":{ "type":"structure", @@ -443,7 +449,7 @@ "type":"string", "max":2048, "min":18, - "pattern":"arn:[A-Za-z0-9][-.A-Za-z0-9]{0,62}:pricing:::price-list/[A-Za-z0-9_/.-]{1,1023}" + "pattern":"arn:[A-Za-z0-9][-.A-Za-z0-9]{0,62}:pricing:::price-list/[A-Za-z0-9+_/.-]{1,1023}" }, "PriceListJsonItems":{ "type":"list", @@ -487,6 +493,15 @@ }, "String":{"type":"string"}, "SynthesizedJsonPriceListJsonItem":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"errorMessage"} + }, + "documentation":"

    You've made too many requests exceeding service quotas.

    ", + "exception":true, + "retryable":{"throttling":true} + }, "errorMessage":{"type":"string"} }, "documentation":"

    The Amazon Web Services Price List API is a centralized and convenient way to programmatically query Amazon Web Services for services, products, and pricing information. The Amazon Web Services Price List uses standardized product attributes such as Location, Storage Class, and Operating System, and provides prices at the SKU level. You can use the Amazon Web Services Price List to do the following:

    • Build cost control and scenario planning tools

    • Reconcile billing data

    • Forecast future spend for budgeting purposes

    • Provide cost benefit analysis that compare your internal workloads with Amazon Web Services

    Use GetServices without a service code to retrieve the service codes for all Amazon Web Services, then GetServices with a service code to retrieve the attribute names for that service. After you have the service code and attribute names, you can use GetAttributeValues to see what values are available for an attribute. With the service code and an attribute name and value, you can use GetProducts to find specific products that you're interested in, such as an AmazonEC2 instance, with a Provisioned IOPS volumeType.

    For more information, see Using the Amazon Web Services Price List API in the Billing User Guide.

    " diff --git a/services/privatenetworks/pom.xml b/services/privatenetworks/pom.xml index 599d4c257be3..1e45455fa343 100644 --- a/services/privatenetworks/pom.xml +++ b/services/privatenetworks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT privatenetworks AWS Java SDK :: Services :: Private Networks diff --git a/services/privatenetworks/src/main/resources/codegen-resources/customization.config b/services/privatenetworks/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/privatenetworks/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/proton/pom.xml b/services/proton/pom.xml index c9a444c9b9b0..3e59a986c727 100644 --- a/services/proton/pom.xml +++ b/services/proton/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT proton AWS Java SDK :: Services :: Proton diff --git a/services/proton/src/main/resources/codegen-resources/customization.config b/services/proton/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/proton/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/qbusiness/pom.xml b/services/qbusiness/pom.xml index c60deba6d4f7..b2375127f538 100644 --- a/services/qbusiness/pom.xml +++ b/services/qbusiness/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT qbusiness AWS Java SDK :: Services :: Q Business diff --git a/services/qbusiness/src/main/resources/codegen-resources/customization.config b/services/qbusiness/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ 
b/services/qbusiness/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/qbusiness/src/main/resources/codegen-resources/service-2.json b/services/qbusiness/src/main/resources/codegen-resources/service-2.json index 87fb91b593ad..84bd023d0924 100644 --- a/services/qbusiness/src/main/resources/codegen-resources/service-2.json +++ b/services/qbusiness/src/main/resources/codegen-resources/service-2.json @@ -131,7 +131,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

    Creates an Amazon Q index.

    To determine if index creation has completed, check the Status field returned from a call to DescribeIndex. The Status field is set to ACTIVE when the index is ready to use.

    Once the index is active, you can index your documents using the BatchPutDocument API or the CreateDataSource API.

    " + "documentation":"

    Creates an Amazon Q index.

    To determine if index creation has completed, check the Status field returned from a call to DescribeIndex. The Status field is set to ACTIVE when the index is ready to use.

    Once the index is active, you can index your documents using the BatchPutDocument API or the CreateDataSource API.

    " }, "CreatePlugin":{ "name":"CreatePlugin", @@ -1455,7 +1455,7 @@ "documentation":"

    Performs a logical OR operation on all supplied filters.

    " } }, - "documentation":"

    Enables filtering of Amazon Q web experience responses based on document attributes or metadata fields.

    " + "documentation":"

    Enables filtering of responses based on document attributes or metadata fields.

    " }, "AttributeFilters":{ "type":"list", @@ -1623,6 +1623,12 @@ "type":"boolean", "box":true }, + "BoostingDurationInSeconds":{ + "type":"long", + "box":true, + "max":999999999, + "min":0 + }, "ChatSyncInput":{ "type":"structure", "required":[ @@ -2351,6 +2357,21 @@ "type":"list", "member":{"shape":"DataSource"} }, + "DateAttributeBoostingConfiguration":{ + "type":"structure", + "required":["boostingLevel"], + "members":{ + "boostingDurationInSeconds":{ + "shape":"BoostingDurationInSeconds", + "documentation":"

    Specifies the duration, in seconds, of a boost applies to a DATE type document attribute.

    " + }, + "boostingLevel":{ + "shape":"DocumentAttributeBoostingLevel", + "documentation":"

    Specifies how much a document attribute is boosted.

    " + } + }, + "documentation":"

    Provides information on boosting DATE type document attributes.

    For more information on how boosting document attributes work in Amazon Q, see Boosting using document attributes.

    " + }, "DeleteApplicationRequest":{ "type":"structure", "required":["applicationId"], @@ -2694,6 +2715,45 @@ }, "documentation":"

    A document attribute or metadata field.

    " }, + "DocumentAttributeBoostingConfiguration":{ + "type":"structure", + "members":{ + "dateConfiguration":{ + "shape":"DateAttributeBoostingConfiguration", + "documentation":"

    Provides information on boosting DATE type document attributes.

    " + }, + "numberConfiguration":{ + "shape":"NumberAttributeBoostingConfiguration", + "documentation":"

    Provides information on boosting NUMBER type document attributes.

    " + }, + "stringConfiguration":{ + "shape":"StringAttributeBoostingConfiguration", + "documentation":"

    Provides information on boosting STRING type document attributes.

    " + }, + "stringListConfiguration":{ + "shape":"StringListAttributeBoostingConfiguration", + "documentation":"

    Provides information on boosting STRING_LIST type document attributes.

    " + } + }, + "documentation":"

    Provides information on boosting supported Amazon Q document attribute types. When an end user chat query matches document attributes that have been boosted, Amazon Q prioritizes generating responses from content that matches the boosted document attributes.

    For STRING and STRING_LIST type document attributes to be used for boosting on the console and the API, they must be enabled for search using the DocumentAttributeConfiguration object of the UpdateIndex API. If you haven't enabled searching on these attributes, you can't boost attributes of these data types on either the console or the API.

    For more information on how boosting document attributes work in Amazon Q, see Boosting using document attributes.

    ", + "union":true + }, + "DocumentAttributeBoostingLevel":{ + "type":"string", + "enum":[ + "NONE", + "LOW", + "MEDIUM", + "HIGH", + "VERY_HIGH" + ] + }, + "DocumentAttributeBoostingOverrideMap":{ + "type":"map", + "key":{"shape":"DocumentAttributeKey"}, + "value":{"shape":"DocumentAttributeBoostingConfiguration"}, + "min":1 + }, "DocumentAttributeCondition":{ "type":"structure", "required":[ @@ -2711,7 +2771,7 @@ }, "value":{"shape":"DocumentAttributeValue"} }, - "documentation":"

    The condition used for the target document attribute or metadata field when ingesting documents into Amazon Q. You use this with DocumentAttributeTarget to apply the condition.

    For example, you can create the 'Department' target field and have it prefill department names associated with the documents based on information in the 'Source_URI' field. Set the condition that if the 'Source_URI' field contains 'financial' in its URI value, then prefill the target field 'Department' with the target value 'Finance' for the document.

    Amazon Q can't create a target field if it has not already been created as an index field. After you create your index field, you can create a document metadata field using DocumentAttributeTarget. Amazon Q then will map your newly created metadata field to your index field.

    " + "documentation":"

    The condition used for the target document attribute or metadata field when ingesting documents into Amazon Q. You use this with DocumentAttributeTarget to apply the condition.

    For example, you can create the 'Department' target field and have it prefill department names associated with the documents based on information in the 'Source_URI' field. Set the condition that if the 'Source_URI' field contains 'financial' in its URI value, then prefill the target field 'Department' with the target value 'Finance' for the document.

    Amazon Q can't create a target field if it has not already been created as an index field. After you create your index field, you can create a document metadata field using DocumentAttributeTarget. Amazon Q then will map your newly created metadata field to your index field.

    " }, "DocumentAttributeConfiguration":{ "type":"structure", @@ -2761,7 +2821,7 @@ }, "value":{"shape":"DocumentAttributeValue"} }, - "documentation":"

    The target document attribute or metadata field you want to alter when ingesting documents into Amazon Q.

    For example, you can delete all customer identification numbers associated with the documents, stored in the document metadata field called 'Customer_ID' by setting the target key as 'Customer_ID' and the deletion flag to TRUE. This removes all customer ID values in the field 'Customer_ID'. This would scrub personally identifiable information from each document's metadata.

    Amazon Q can't create a target field if it has not already been created as an index field. After you create your index field, you can create a document metadata field using DocumentAttributeTarget . Amazon Q will then map your newly created document attribute to your index field.

    You can also use this with DocumentAttributeCondition .

    " + "documentation":"

    The target document attribute or metadata field you want to alter when ingesting documents into Amazon Q.

    For example, you can delete all customer identification numbers associated with the documents, stored in the document metadata field called 'Customer_ID' by setting the target key as 'Customer_ID' and the deletion flag to TRUE. This removes all customer ID values in the field 'Customer_ID'. This would scrub personally identifiable information from each document's metadata.

    Amazon Q can't create a target field if it has not already been created as an index field. After you create your index field, you can create a document metadata field using DocumentAttributeTarget . Amazon Q will then map your newly created document attribute to your index field.

    You can also use this with DocumentAttributeCondition .

    " }, "DocumentAttributeValue":{ "type":"structure", @@ -3639,7 +3699,7 @@ "documentation":"

    Stores the original, raw documents or the structured, parsed documents before and after altering them. For more information, see Data contracts for Lambda functions.

    " } }, - "documentation":"

    Provides the configuration information for invoking a Lambda function in Lambda to alter document metadata and content when ingesting documents into Amazon Q.

    You can configure your Lambda function using PreExtractionHookConfiguration if you want to apply advanced alterations on the original or raw documents.

    If you want to apply advanced alterations on the Amazon Q structured documents, you must configure your Lambda function using PostExtractionHookConfiguration.

    You can only invoke one Lambda function. However, this function can invoke other functions it requires.

    For more information, see Custom document enrichment.

    " + "documentation":"

    Provides the configuration information for invoking a Lambda function in Lambda to alter document metadata and content when ingesting documents into Amazon Q.

    You can configure your Lambda function using PreExtractionHookConfiguration if you want to apply advanced alterations on the original or raw documents.

    If you want to apply advanced alterations on the Amazon Q structured documents, you must configure your Lambda function using PostExtractionHookConfiguration.

    You can only invoke one Lambda function. However, this function can invoke other functions it requires.

    For more information, see Custom document enrichment.

    " }, "Index":{ "type":"structure", @@ -3744,7 +3804,7 @@ }, "target":{"shape":"DocumentAttributeTarget"} }, - "documentation":"

    Provides the configuration information for applying basic logic to alter document metadata and content when ingesting documents into Amazon Q.

    To apply advanced logic, to go beyond what you can do with basic logic, see HookConfiguration .

    For more information, see Custom document enrichment.

    " + "documentation":"

    Provides the configuration information for applying basic logic to alter document metadata and content when ingesting documents into Amazon Q.

    To apply advanced logic, to go beyond what you can do with basic logic, see HookConfiguration .

    For more information, see Custom document enrichment.

    " }, "InlineDocumentEnrichmentConfigurations":{ "type":"list", @@ -4567,7 +4627,11 @@ "FACTUALLY_CORRECT", "COMPLETE", "RELEVANT_SOURCES", - "HELPFUL" + "HELPFUL", + "NOT_BASED_ON_DOCUMENTS", + "NOT_COMPLETE", + "NOT_CONCISE", + "OTHER" ] }, "Messages":{ @@ -4582,6 +4646,10 @@ "type":"structure", "required":["indexId"], "members":{ + "boostingOverride":{ + "shape":"DocumentAttributeBoostingOverrideMap", + "documentation":"

    Overrides the default boosts applied by Amazon Q to supported document attribute data types.

    " + }, "indexId":{ "shape":"IndexId", "documentation":"

    The identifier for the Amazon Q index.

    " @@ -4594,6 +4662,28 @@ "max":800, "min":1 }, + "NumberAttributeBoostingConfiguration":{ + "type":"structure", + "required":["boostingLevel"], + "members":{ + "boostingLevel":{ + "shape":"DocumentAttributeBoostingLevel", + "documentation":"

    Specifies the duration, in seconds, of a boost applies to a NUMBER type document attribute.

    " + }, + "boostingType":{ + "shape":"NumberAttributeBoostingType", + "documentation":"

    Specifies how much a document attribute is boosted.

    " + } + }, + "documentation":"

    Provides information on boosting NUMBER type document attributes.

    For more information on how boosting document attributes work in Amazon Q, see Boosting using document attributes.

    " + }, + "NumberAttributeBoostingType":{ + "type":"string", + "enum":[ + "PRIORITIZE_LARGER_VALUES", + "PRIORITIZE_SMALLER_VALUES" + ] + }, "OAuth2ClientCredentialConfiguration":{ "type":"structure", "required":[ @@ -4985,7 +5075,7 @@ }, "ruleType":{ "shape":"RuleType", - "documentation":"

    The type fo rule.

    " + "documentation":"

    The type of rule.

    " } }, "documentation":"

    Guardrail rules for an Amazon Q application. Amazon Q supports only one rule at a time.

    " @@ -5244,6 +5334,48 @@ "max":2048, "min":1 }, + "StringAttributeBoostingConfiguration":{ + "type":"structure", + "required":["boostingLevel"], + "members":{ + "attributeValueBoosting":{ + "shape":"StringAttributeValueBoosting", + "documentation":"

    Specifies specific values of a STRING type document attribute being boosted.

    " + }, + "boostingLevel":{ + "shape":"DocumentAttributeBoostingLevel", + "documentation":"

    Specifies how much a document attribute is boosted.

    " + } + }, + "documentation":"

    Provides information on boosting STRING type document attributes.

    For STRING and STRING_LIST type document attributes to be used for boosting on the console and the API, they must be enabled for search using the DocumentAttributeConfiguration object of the UpdateIndex API. If you haven't enabled searching on these attributes, you can't boost attributes of these data types on either the console or the API.

    For more information on how boosting document attributes work in Amazon Q, see Boosting using document attributes.

    " + }, + "StringAttributeValueBoosting":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"StringAttributeValueBoostingLevel"}, + "max":10, + "min":1 + }, + "StringAttributeValueBoostingLevel":{ + "type":"string", + "enum":[ + "LOW", + "MEDIUM", + "HIGH", + "VERY_HIGH" + ] + }, + "StringListAttributeBoostingConfiguration":{ + "type":"structure", + "required":["boostingLevel"], + "members":{ + "boostingLevel":{ + "shape":"DocumentAttributeBoostingLevel", + "documentation":"

    Specifies how much a document attribute is boosted.

    " + } + }, + "documentation":"

    Provides information on boosting STRING_LIST type document attributes.

    For STRING and STRING_LIST type document attributes to be used for boosting on the console and the API, they must be enabled for search using the DocumentAttributeConfiguration object of the UpdateIndex API. If you haven't enabled searching on these attributes, you can't boost attributes of these data types on either the console or the API.

    For more information on how boosting document attributes work in Amazon Q, see Boosting using document attributes.

    " + }, "SubnetId":{ "type":"string", "max":200, @@ -5396,7 +5528,7 @@ "members":{ "description":{ "shape":"TopicDescription", - "documentation":"

    A description for your topic control configuration. Use this outline how the large language model (LLM) should use this topic control configuration.

    " + "documentation":"

    A description for your topic control configuration. Use this to outline how the large language model (LLM) should use this topic control configuration.

    " }, "exampleChatMessages":{ "shape":"ExampleChatMessages", @@ -5995,5 +6127,5 @@ "member":{"shape":"WebExperience"} } }, - "documentation":"

    " + "documentation":"

    Amazon Q is in preview release and is subject to change.

    This is the Amazon Q (for business use) API Reference. Amazon Q is a fully managed, generative-AI powered enterprise chat assistant that you can deploy within your organization. Amazon Q enhances employee productivity by supporting key tasks such as question-answering, knowledge discovery, writing email messages, summarizing text, drafting document outlines, and brainstorming ideas. Users ask questions of Amazon Q and get answers that are presented in a conversational manner. For an introduction to the service, see the Amazon Q (for business use) Developer Guide .

    For an overview of the Amazon Q APIs, see Overview of Amazon Q API operations.

    For information about the IAM access control permissions you need to use this API, see IAM roles for Amazon Q in the Amazon Q (for business use) Developer Guide.

    You can use the following AWS SDKs to access Amazon Q APIs:

    The following resources provide additional information about using the Amazon Q API:

    " } diff --git a/services/qconnect/pom.xml b/services/qconnect/pom.xml index be1907b535a7..ce30fc9cc724 100644 --- a/services/qconnect/pom.xml +++ b/services/qconnect/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT qconnect AWS Java SDK :: Services :: Q Connect diff --git a/services/qconnect/src/main/resources/codegen-resources/customization.config b/services/qconnect/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/qconnect/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/qldb/pom.xml b/services/qldb/pom.xml index a2392bdc6400..d920c1380d4b 100644 --- a/services/qldb/pom.xml +++ b/services/qldb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT qldb AWS Java SDK :: Services :: QLDB diff --git a/services/qldb/src/main/resources/codegen-resources/customization.config b/services/qldb/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/qldb/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/qldbsession/pom.xml b/services/qldbsession/pom.xml index cdc8e5b689b5..c5956d197a6a 100644 --- a/services/qldbsession/pom.xml +++ b/services/qldbsession/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT qldbsession AWS Java SDK :: Services :: QLDB Session diff --git a/services/qldbsession/src/main/resources/codegen-resources/customization.config b/services/qldbsession/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/qldbsession/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} 
diff --git a/services/quicksight/pom.xml b/services/quicksight/pom.xml index 0a167ddc26ba..8ace7b44e8f2 100644 --- a/services/quicksight/pom.xml +++ b/services/quicksight/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT quicksight AWS Java SDK :: Services :: QuickSight diff --git a/services/quicksight/src/main/resources/codegen-resources/customization.config b/services/quicksight/src/main/resources/codegen-resources/customization.config index cadacf8d10b0..31757703fe90 100644 --- a/services/quicksight/src/main/resources/codegen-resources/customization.config +++ b/services/quicksight/src/main/resources/codegen-resources/customization.config @@ -144,5 +144,6 @@ "DataSourceParameters": { "union": true } - } + }, + "useSraAuth": true } diff --git a/services/quicksight/src/main/resources/codegen-resources/service-2.json b/services/quicksight/src/main/resources/codegen-resources/service-2.json index 6da7d18fbced..c9f17d960de8 100644 --- a/services/quicksight/src/main/resources/codegen-resources/service-2.json +++ b/services/quicksight/src/main/resources/codegen-resources/service-2.json @@ -5437,6 +5437,10 @@ "ContributionAnalysisDefaults":{ "shape":"ContributionAnalysisDefaultList", "documentation":"

    The contribution analysis (anomaly configuration) setup of the visual.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration of a BarChartVisual.

    " @@ -5710,6 +5714,10 @@ "VisualPalette":{ "shape":"VisualPalette", "documentation":"

    The palette (chart color) display setup of the visual.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration of a BoxPlotVisual.

    " @@ -6688,6 +6696,10 @@ "VisualPalette":{ "shape":"VisualPalette", "documentation":"

    The palette (chart color) display setup of the visual.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration of a ComboChartVisual.

    " @@ -7037,6 +7049,16 @@ "COLLECTIVE" ] }, + "ContextMenuOption":{ + "type":"structure", + "members":{ + "AvailabilityStatus":{ + "shape":"DashboardBehavior", + "documentation":"

    The availability status of the context menu options. If the value of this property is set to ENABLED, dashboard readers can interact with the context menu.

    " + } + }, + "documentation":"

    The context menu options for a visual's interactions.

    " + }, "ContributionAnalysisDefault":{ "type":"structure", "required":[ @@ -8743,6 +8765,10 @@ "ImageScaling":{ "shape":"CustomContentImageScalingConfiguration", "documentation":"

    The sizing options for the size of the custom content visual. This structure is required when the ContentType of the visual is 'IMAGE'.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration of a CustomContentVisual.

    " @@ -14937,6 +14963,10 @@ "MapStyleOptions":{ "shape":"GeospatialMapStyleOptions", "documentation":"

    The map style options of the filled map visual.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration for a FilledMapVisual.

    " @@ -16033,6 +16063,10 @@ "VisualPalette":{ "shape":"VisualPalette", "documentation":"

    The visual palette configuration of a FunnelChartVisual.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration of a FunnelChartVisual.

    " @@ -16207,6 +16241,10 @@ "VisualPalette":{ "shape":"VisualPalette", "documentation":"

    The visual palette configuration of a GaugeChartVisual.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration of a GaugeChartVisual.

    " @@ -16568,7 +16606,11 @@ "shape":"GeospatialPointStyleOptions", "documentation":"

    The point style options of the geospatial map.

    " }, - "VisualPalette":{"shape":"VisualPalette"} + "VisualPalette":{"shape":"VisualPalette"}, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " + } }, "documentation":"

    The configuration of a GeospatialMapVisual.

    " }, @@ -17171,6 +17213,10 @@ "Tooltip":{ "shape":"TooltipOptions", "documentation":"

    The tooltip display setup of the visual.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration of a heat map.

    " @@ -17335,6 +17381,10 @@ "VisualPalette":{ "shape":"VisualPalette", "documentation":"

    The visual palette configuration of a histogram.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration for a HistogramVisual.

    " @@ -17747,6 +17797,10 @@ "CustomNarrative":{ "shape":"CustomNarrativeOptions", "documentation":"

    The custom narrative of the insight visual.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration of an insight visual.

    " @@ -18146,6 +18200,10 @@ "KPIOptions":{ "shape":"KPIOptions", "documentation":"

    The options that determine the presentation of a KPI visual.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration of a KPI visual.

    " @@ -18565,6 +18623,10 @@ "VisualPalette":{ "shape":"VisualPalette", "documentation":"

    The visual palette configuration of a line chart.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration of a line chart.

    " @@ -22143,6 +22205,10 @@ "ContributionAnalysisDefaults":{ "shape":"ContributionAnalysisDefaultList", "documentation":"

    The contribution analysis (anomaly configuration) setup of the visual.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration of a pie chart.

    " @@ -22353,6 +22419,10 @@ "PaginatedReportOptions":{ "shape":"PivotTablePaginatedReportOptions", "documentation":"

    The paginated report options for a pivot table visual.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration for a PivotTableVisual.

    " @@ -23095,6 +23165,10 @@ "AxesRangeScale":{ "shape":"RadarChartAxesRangeScale", "documentation":"

    The axis behavior options of a radar chart.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration of a RadarChartVisual.

    " @@ -24375,6 +24449,10 @@ "DataLabels":{ "shape":"DataLabelOptions", "documentation":"

    The data label configuration of a sankey diagram.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration of a sankey diagram.

    " @@ -24502,6 +24580,10 @@ "VisualPalette":{ "shape":"VisualPalette", "documentation":"

    The palette (chart color) display setup of the visual.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration of a scatter plot.

    " @@ -26838,6 +26920,10 @@ "TableInlineVisualizations":{ "shape":"TableInlineVisualizationList", "documentation":"

    A collection of inline visualizations to display within a chart.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration for a TableVisual.

    " @@ -29052,6 +29138,10 @@ "Tooltip":{ "shape":"TooltipOptions", "documentation":"

    The tooltip display setup of the visual.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration of a tree map.

    " @@ -30171,11 +30261,11 @@ }, "GrantPermissions":{ "shape":"ResourcePermissionList", - "documentation":"

    The permissions that you want to grant on a resource.

    " + "documentation":"

    The permissions that you want to grant on a resource. Namespace ARNs are not supported Principal values for folder permissions.

    " }, "RevokePermissions":{ "shape":"ResourcePermissionList", - "documentation":"

    The permissions that you want to revoke from a resource.

    " + "documentation":"

    The permissions that you want to revoke from a resource. Namespace ARNs are not supported Principal values for folder permissions.

    " } } }, @@ -31752,6 +31842,20 @@ "DATA_POINT_MENU" ] }, + "VisualInteractionOptions":{ + "type":"structure", + "members":{ + "VisualMenuOption":{ + "shape":"VisualMenuOption", + "documentation":"

    The on-visual menu options for a visual.

    " + }, + "ContextMenuOption":{ + "shape":"ContextMenuOption", + "documentation":"

    The context menu options for a visual.

    " + } + }, + "documentation":"

    The general visual interactions setup for visual publish options

    " + }, "VisualList":{ "type":"list", "member":{"shape":"Visual"}, @@ -31842,6 +31946,16 @@ }, "documentation":"

    The field well configuration of a waterfall visual.

    " }, + "WaterfallChartColorConfiguration":{ + "type":"structure", + "members":{ + "GroupColorConfiguration":{ + "shape":"WaterfallChartGroupColorConfiguration", + "documentation":"

    The color configuration for individual groups within a waterfall visual.

    " + } + }, + "documentation":"

    The color configuration of a waterfall visual.

    " + }, "WaterfallChartConfiguration":{ "type":"structure", "members":{ @@ -31884,6 +31998,14 @@ "VisualPalette":{ "shape":"VisualPalette", "documentation":"

    The visual palette configuration of a waterfall visual.

    " + }, + "ColorConfiguration":{ + "shape":"WaterfallChartColorConfiguration", + "documentation":"

    The color configuration of a waterfall visual.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration for a waterfall visual.

    " @@ -31898,6 +32020,24 @@ }, "documentation":"

    The field well configuration of a waterfall visual.

    " }, + "WaterfallChartGroupColorConfiguration":{ + "type":"structure", + "members":{ + "PositiveBarColor":{ + "shape":"HexColor", + "documentation":"

    Defines the color for the positive bars of a waterfall chart.

    " + }, + "NegativeBarColor":{ + "shape":"HexColor", + "documentation":"

    Defines the color for the negative bars of a waterfall chart.

    " + }, + "TotalBarColor":{ + "shape":"HexColor", + "documentation":"

    Defines the color for the total bars of a waterfall chart.

    " + } + }, + "documentation":"

    The color configuration for individual groups within a waterfall visual.

    " + }, "WaterfallChartOptions":{ "type":"structure", "members":{ @@ -32033,6 +32173,10 @@ "WordCloudOptions":{ "shape":"WordCloudOptions", "documentation":"

    The options for a word cloud visual.

    " + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

    The general visual interactions setup for a visual.

    " } }, "documentation":"

    The configuration of a word cloud visual.

    " diff --git a/services/ram/pom.xml b/services/ram/pom.xml index 64cb2be02a83..4e71e7d4d00e 100644 --- a/services/ram/pom.xml +++ b/services/ram/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ram AWS Java SDK :: Services :: RAM diff --git a/services/rbin/pom.xml b/services/rbin/pom.xml index 720f01a8c565..843008893995 100644 --- a/services/rbin/pom.xml +++ b/services/rbin/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT rbin AWS Java SDK :: Services :: Rbin diff --git a/services/rbin/src/main/resources/codegen-resources/customization.config b/services/rbin/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/rbin/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/rds/pom.xml b/services/rds/pom.xml index cfee089f7221..6c00edf0e8e7 100644 --- a/services/rds/pom.xml +++ b/services/rds/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT rds AWS Java SDK :: Services :: Amazon RDS diff --git a/services/rds/src/main/resources/codegen-resources/service-2.json b/services/rds/src/main/resources/codegen-resources/service-2.json index 761053bdeafa..05dc9e7ff0e2 100644 --- a/services/rds/src/main/resources/codegen-resources/service-2.json +++ b/services/rds/src/main/resources/codegen-resources/service-2.json @@ -4107,7 +4107,7 @@ }, "DBClusterIdentifier":{ "shape":"String", - "documentation":"

    The identifier for this DB cluster. This parameter is stored as a lowercase string.

    Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

    Constraints:

    • Must contain from 1 to 63 letters, numbers, or hyphens.

    • First character must be a letter.

    • Can't end with a hyphen or contain two consecutive hyphens.

    Example: my-cluster1

    " + "documentation":"

    The identifier for this DB cluster. This parameter is stored as a lowercase string.

    Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

    Constraints:

    • Must contain from 1 to 63 (for Aurora DB clusters) or 1 to 52 (for Multi-AZ DB clusters) letters, numbers, or hyphens.

    • First character must be a letter.

    • Can't end with a hyphen or contain two consecutive hyphens.

    Example: my-cluster1

    " }, "DBClusterParameterGroupName":{ "shape":"String", @@ -4311,7 +4311,7 @@ }, "DBParameterGroupFamily":{ "shape":"String", - "documentation":"

    The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.

    Aurora MySQL

    Example: aurora-mysql5.7, aurora-mysql8.0

    Aurora PostgreSQL

    Example: aurora-postgresql14

    RDS for MySQL

    Example: mysql8.0

    RDS for PostgreSQL

    Example: postgres12

    To list all of the available parameter group families for a DB engine, use the following command:

    aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine <engine>

    For example, to list all of the available parameter group families for the Aurora PostgreSQL DB engine, use the following command:

    aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine aurora-postgresql

    The output contains duplicates.

    The following are the valid DB engine values:

    • aurora-mysql

    • aurora-postgresql

    • mysql

    • postgres

    " + "documentation":"

    The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.

    Aurora MySQL

    Example: aurora-mysql5.7, aurora-mysql8.0

    Aurora PostgreSQL

    Example: aurora-postgresql14

    RDS for MySQL

    Example: mysql8.0

    RDS for PostgreSQL

    Example: postgres13

    To list all of the available parameter group families for a DB engine, use the following command:

    aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine <engine>

    For example, to list all of the available parameter group families for the Aurora PostgreSQL DB engine, use the following command:

    aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine aurora-postgresql

    The output contains duplicates.

    The following are the valid DB engine values:

    • aurora-mysql

    • aurora-postgresql

    • mysql

    • postgres

    " }, "Description":{ "shape":"String", @@ -10939,7 +10939,7 @@ "documentation":"

    One or more filter values. Filter values are case-sensitive.

    " } }, - "documentation":"

    A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as IDs. The filters supported by a describe operation are documented with the describe operation.

    Currently, wildcards are not supported in filters.

    The following actions can be filtered:

    • DescribeDBClusterBacktracks

    • DescribeDBClusterEndpoints

    • DescribeDBClusters

    • DescribeDBInstances

    • DescribeDBRecommendations

    • DescribePendingMaintenanceActions

    " + "documentation":"

    A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as IDs. The filters supported by a describe operation are documented with the describe operation.

    Currently, wildcards are not supported in filters.

    The following actions can be filtered:

    • DescribeDBClusterBacktracks

    • DescribeDBClusterEndpoints

    • DescribeDBClusters

    • DescribeDBInstances

    • DescribeDBRecommendations

    • DescribeDBShardGroups

    • DescribePendingMaintenanceActions

    " }, "FilterList":{ "type":"list", diff --git a/services/rdsdata/pom.xml b/services/rdsdata/pom.xml index eaaa68bbb9c3..8c01c6e980ed 100644 --- a/services/rdsdata/pom.xml +++ b/services/rdsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT rdsdata AWS Java SDK :: Services :: RDS Data diff --git a/services/rdsdata/src/main/resources/codegen-resources/customization.config b/services/rdsdata/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/rdsdata/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/redshift/pom.xml b/services/redshift/pom.xml index 70f452e4d56f..060df868fc9f 100644 --- a/services/redshift/pom.xml +++ b/services/redshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT redshift AWS Java SDK :: Services :: Amazon Redshift diff --git a/services/redshiftdata/pom.xml b/services/redshiftdata/pom.xml index 4588b90110c5..61ef4f441ee7 100644 --- a/services/redshiftdata/pom.xml +++ b/services/redshiftdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT redshiftdata AWS Java SDK :: Services :: Redshift Data diff --git a/services/redshiftserverless/pom.xml b/services/redshiftserverless/pom.xml index 119afd1ebc20..ee72b7e08b17 100644 --- a/services/redshiftserverless/pom.xml +++ b/services/redshiftserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT redshiftserverless AWS Java SDK :: Services :: Redshift Serverless diff --git a/services/rekognition/pom.xml b/services/rekognition/pom.xml index c6ff569f0dc0..86733adaac5b 100644 --- a/services/rekognition/pom.xml +++ b/services/rekognition/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT rekognition AWS Java SDK 
:: Services :: Amazon Rekognition diff --git a/services/repostspace/pom.xml b/services/repostspace/pom.xml index 3efe22763c7a..54736b6e69d8 100644 --- a/services/repostspace/pom.xml +++ b/services/repostspace/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT repostspace AWS Java SDK :: Services :: Repostspace diff --git a/services/repostspace/src/main/resources/codegen-resources/customization.config b/services/repostspace/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/repostspace/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/resiliencehub/pom.xml b/services/resiliencehub/pom.xml index 493269fc5bd1..3f06a29cf162 100644 --- a/services/resiliencehub/pom.xml +++ b/services/resiliencehub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT resiliencehub AWS Java SDK :: Services :: Resiliencehub diff --git a/services/resiliencehub/src/main/resources/codegen-resources/customization.config b/services/resiliencehub/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/resiliencehub/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/resourceexplorer2/pom.xml b/services/resourceexplorer2/pom.xml index a62934be129c..8aaa1c470e5b 100644 --- a/services/resourceexplorer2/pom.xml +++ b/services/resourceexplorer2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT resourceexplorer2 AWS Java SDK :: Services :: Resource Explorer 2 diff --git a/services/resourceexplorer2/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/resourceexplorer2/src/main/resources/codegen-resources/endpoint-rule-set.json index 
003af7baa245..4d806fc8104b 100644 --- a/services/resourceexplorer2/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/resourceexplorer2/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -7,6 +7,13 @@ "documentation": "The AWS region used to dispatch the request.", "type": "String" }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, "UseFIPS": { "builtIn": "AWS::UseFIPS", "required": true, @@ -49,6 +56,21 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported", "type": "error" }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, { "conditions": [], "endpoint": { @@ -93,16 +115,19 @@ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } ], @@ -112,61 +137,51 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] - } - ], - "rules": [ + }, { - "conditions": [ + "fn": "booleanEquals", + "argv": [ + true, { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] + "ref": "PartitionResult" }, - true + "supportsDualStack" ] } - ], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://resource-explorer-2-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - 
"headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, + ] + } + ], + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://resource-explorer-2-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" }, { "conditions": [], - "endpoint": { - "url": "https://resource-explorer-2.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ], "type": "tree" @@ -223,6 +238,58 @@ ], "type": "tree" }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://resource-explorer-2.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, { "conditions": [], "endpoint": { diff --git a/services/resourceexplorer2/src/main/resources/codegen-resources/endpoint-tests.json b/services/resourceexplorer2/src/main/resources/codegen-resources/endpoint-tests.json index 6bc38817f06c..de57bc625a41 100644 --- a/services/resourceexplorer2/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/resourceexplorer2/src/main/resources/codegen-resources/endpoint-tests.json @@ -9,7 +9,21 @@ }, "params": { "Region": "us-east-1", - "UseFIPS": true + "UseFIPS": true, + 
"UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://resource-explorer-2-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { @@ -21,7 +35,21 @@ }, "params": { "Region": "us-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://resource-explorer-2.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -33,7 +61,21 @@ }, "params": { "Region": "cn-north-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://resource-explorer-2-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { @@ -45,7 +87,21 @@ }, "params": { "Region": "cn-north-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://resource-explorer-2.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -57,7 +113,21 @@ }, "params": { "Region": "us-gov-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://resource-explorer-2-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { @@ -69,7 
+139,117 @@ }, "params": { "Region": "us-gov-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://resource-explorer-2.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://resource-explorer-2-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://resource-explorer-2.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + 
"endpoint": { + "url": "https://resource-explorer-2-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://resource-explorer-2.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -82,6 +262,7 @@ "params": { "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -94,6 +275,7 @@ }, "params": { "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -105,6 +287,19 @@ "params": { "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } }, diff --git a/services/resourcegroups/pom.xml b/services/resourcegroups/pom.xml index a8566e5dafcf..588165badee9 100644 --- a/services/resourcegroups/pom.xml +++ b/services/resourcegroups/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 resourcegroups diff --git a/services/resourcegroups/src/main/resources/codegen-resources/customization.config b/services/resourcegroups/src/main/resources/codegen-resources/customization.config index 70cabad6601d..10c25cb6a067 
100644 --- a/services/resourcegroups/src/main/resources/codegen-resources/customization.config +++ b/services/resourcegroups/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,6 @@ { - "verifiedSimpleMethods" : [ + "verifiedSimpleMethods": [ "listGroups" - ] + ], + "useSraAuth": true } diff --git a/services/resourcegroupstaggingapi/pom.xml b/services/resourcegroupstaggingapi/pom.xml index c9db66ccb92c..debc573ebcdf 100644 --- a/services/resourcegroupstaggingapi/pom.xml +++ b/services/resourcegroupstaggingapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT resourcegroupstaggingapi AWS Java SDK :: Services :: AWS Resource Groups Tagging API diff --git a/services/robomaker/pom.xml b/services/robomaker/pom.xml index 3cc5e0d52b85..e3042eb7812f 100644 --- a/services/robomaker/pom.xml +++ b/services/robomaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT robomaker AWS Java SDK :: Services :: RoboMaker diff --git a/services/rolesanywhere/pom.xml b/services/rolesanywhere/pom.xml index 63aa061b9493..cd5ec607d262 100644 --- a/services/rolesanywhere/pom.xml +++ b/services/rolesanywhere/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT rolesanywhere AWS Java SDK :: Services :: Roles Anywhere diff --git a/services/route53/pom.xml b/services/route53/pom.xml index f1e2a00b7d31..41fde7d17d70 100644 --- a/services/route53/pom.xml +++ b/services/route53/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT route53 AWS Java SDK :: Services :: Amazon Route53 diff --git a/services/route53domains/pom.xml b/services/route53domains/pom.xml index c8d38cd431ca..439f7c4cdc6d 100644 --- a/services/route53domains/pom.xml +++ b/services/route53domains/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT route53domains AWS Java SDK :: Services :: Amazon 
Route53 Domains diff --git a/services/route53domains/src/main/resources/codegen-resources/customization.config b/services/route53domains/src/main/resources/codegen-resources/customization.config index cbc750ecf2cd..e71a69a95da0 100644 --- a/services/route53domains/src/main/resources/codegen-resources/customization.config +++ b/services/route53domains/src/main/resources/codegen-resources/customization.config @@ -1,11 +1,12 @@ { - "verifiedSimpleMethods": [ - "resendContactReachabilityEmail", - "listDomains", - "listOperations" - ], - "excludedSimpleMethods": [ - "viewBilling", - "getContactReachabilityStatus" - ] + "verifiedSimpleMethods": [ + "resendContactReachabilityEmail", + "listDomains", + "listOperations" + ], + "excludedSimpleMethods": [ + "viewBilling", + "getContactReachabilityStatus" + ], + "useSraAuth": true } diff --git a/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json index 3f5f87833147..30bf70666762 100644 --- a/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], 
- "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/services/route53domains/src/main/resources/codegen-resources/service-2.json b/services/route53domains/src/main/resources/codegen-resources/service-2.json index db2da8ebd49d..42162f1b414f 100644 --- a/services/route53domains/src/main/resources/codegen-resources/service-2.json +++ b/services/route53domains/src/main/resources/codegen-resources/service-2.json @@ -432,7 +432,7 @@ {"shape":"DomainLimitExceeded"}, {"shape":"OperationLimitExceeded"} ], - "documentation":"

    Transfers a domain from another registrar to Amazon Route 53.

    For more information about transferring domains, see the following topics:

    If the registrar for your domain is also the DNS service provider for the domain, we highly recommend that you transfer your DNS service to Route 53 or to another DNS service provider before you transfer your registration. Some registrars provide free DNS service when you purchase a domain registration. When you transfer the registration, the previous registrar will not renew your domain registration and could end your DNS service at any time.

    If the registrar for your domain is also the DNS service provider for the domain and you don't transfer DNS service to another provider, your website, email, and the web applications associated with the domain might become unavailable.

    If the transfer is successful, this method returns an operation ID that you can use to track the progress and completion of the action. If the transfer doesn't complete successfully, the domain registrant will be notified by email.

    " + "documentation":"

    Transfers a domain from another registrar to Amazon Route 53.

    For more information about transferring domains, see the following topics:

    During the transfer of any country code top-level domains (ccTLDs) to Route 53, except for .cc and .tv, updates to the owner contact are ignored and the owner contact data from the registry is used. You can update the owner contact after the transfer is complete. For more information, see UpdateDomainContact.

    If the registrar for your domain is also the DNS service provider for the domain, we highly recommend that you transfer your DNS service to Route 53 or to another DNS service provider before you transfer your registration. Some registrars provide free DNS service when you purchase a domain registration. When you transfer the registration, the previous registrar will not renew your domain registration and could end your DNS service at any time.

    If the registrar for your domain is also the DNS service provider for the domain and you don't transfer DNS service to another provider, your website, email, and the web applications associated with the domain might become unavailable.

    If the transfer is successful, this method returns an operation ID that you can use to track the progress and completion of the action. If the transfer doesn't complete successfully, the domain registrant will be notified by email.

    " }, "TransferDomainToAnotherAwsAccount":{ "name":"TransferDomainToAnotherAwsAccount", @@ -668,7 +668,7 @@ "members":{ "Availability":{ "shape":"DomainAvailability", - "documentation":"

    Whether the domain name is available for registering.

    You can register only domains designated as AVAILABLE.

    Valid values:

    AVAILABLE

    The domain name is available.

    AVAILABLE_RESERVED

    The domain name is reserved under specific conditions.

    AVAILABLE_PREORDER

    The domain name is available and can be preordered.

    DONT_KNOW

    The TLD registry didn't reply with a definitive answer about whether the domain name is available. Route 53 can return this response for a variety of reasons, for example, the registry is performing maintenance. Try again later.

    PENDING

    The TLD registry didn't return a response in the expected amount of time. When the response is delayed, it usually takes just a few extra seconds. You can resubmit the request immediately.

    RESERVED

    The domain name has been reserved for another person or organization.

    UNAVAILABLE

    The domain name is not available.

    UNAVAILABLE_PREMIUM

    The domain name is not available.

    UNAVAILABLE_RESTRICTED

    The domain name is forbidden.

    " + "documentation":"

    Whether the domain name is available for registering.

    You can register only domains designated as AVAILABLE.

    Valid values:

    AVAILABLE

    The domain name is available.

    AVAILABLE_RESERVED

    The domain name is reserved under specific conditions.

    AVAILABLE_PREORDER

    The domain name is available and can be preordered.

    DONT_KNOW

    The TLD registry didn't reply with a definitive answer about whether the domain name is available. Route 53 can return this response for a variety of reasons, for example, the registry is performing maintenance. Try again later.

    INVALID_NAME_FOR_TLD

    The TLD isn't valid. For example, it can contain characters that aren't allowed.

    PENDING

    The TLD registry didn't return a response in the expected amount of time. When the response is delayed, it usually takes just a few extra seconds. You can resubmit the request immediately.

    RESERVED

    The domain name has been reserved for another person or organization.

    UNAVAILABLE

    The domain name is not available.

    UNAVAILABLE_PREMIUM

    The domain name is not available.

    UNAVAILABLE_RESTRICTED

    The domain name is forbidden.

    " } }, "documentation":"

    The CheckDomainAvailability response includes the following elements.

    " @@ -1259,7 +1259,9 @@ "UNAVAILABLE_PREMIUM", "UNAVAILABLE_RESTRICTED", "RESERVED", - "DONT_KNOW" + "DONT_KNOW", + "INVALID_NAME_FOR_TLD", + "PENDING" ] }, "DomainLimitExceeded":{ @@ -1592,19 +1594,19 @@ }, "AdminPrivacy":{ "shape":"Boolean", - "documentation":"

    Specifies whether contact information is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If the value is false, WHOIS queries return the information that you entered for the admin contact.

    " + "documentation":"

    Specifies whether contact information is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If the value is false, WHOIS queries return the information that you entered for the admin contact.

    " }, "RegistrantPrivacy":{ "shape":"Boolean", - "documentation":"

    Specifies whether contact information is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If the value is false, WHOIS queries return the information that you entered for the registrant contact (domain owner).

    " + "documentation":"

    Specifies whether contact information is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If the value is false, WHOIS queries return the information that you entered for the registrant contact (domain owner).

    " }, "TechPrivacy":{ "shape":"Boolean", - "documentation":"

    Specifies whether contact information is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If the value is false, WHOIS queries return the information that you entered for the technical contact.

    " + "documentation":"

    Specifies whether contact information is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If the value is false, WHOIS queries return the information that you entered for the technical contact.

    " }, "RegistrarName":{ "shape":"RegistrarName", - "documentation":"

    Name of the registrar of the domain as identified in the registry. Domains with a .com, .net, or .org TLD are registered by Amazon Registrar. All other domains are registered by our registrar associate, Gandi. The value for domains that are registered by Gandi is \"GANDI SAS\".

    " + "documentation":"

    Name of the registrar of the domain as identified in the registry.

    " }, "WhoIsServer":{ "shape":"RegistrarWhoIsServer", @@ -1653,6 +1655,14 @@ "DnssecKeys":{ "shape":"DnssecKeyList", "documentation":"

    A complex type that contains information about the DNSSEC configuration.

    " + }, + "BillingContact":{ + "shape":"ContactDetail", + "documentation":"

    Provides details about the domain billing contact.

    " + }, + "BillingPrivacy":{ + "shape":"Boolean", + "documentation":"

    Specifies whether contact information is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If the value is false, WHOIS queries return the information that you entered for the billing contact.

    " } }, "documentation":"

    The GetDomainDetail response includes the following elements.

    " @@ -2035,13 +2045,15 @@ "RENEW_DOMAIN", "PUSH_DOMAIN", "INTERNAL_TRANSFER_OUT_DOMAIN", - "INTERNAL_TRANSFER_IN_DOMAIN" + "INTERNAL_TRANSFER_IN_DOMAIN", + "RELEASE_TO_GANDI", + "TRANSFER_ON_RENEW" ] }, "OperationTypeList":{ "type":"list", "member":{"shape":"OperationType"}, - "max":18 + "max":20 }, "Operator":{ "type":"string", @@ -2147,15 +2159,23 @@ }, "PrivacyProtectAdminContact":{ "shape":"Boolean", - "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the admin contact.

    You must specify the same privacy setting for the administrative, registrant, and technical contacts.

    Default: true

    " + "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the admin contact.

    You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

    Default: true

    " }, "PrivacyProtectRegistrantContact":{ "shape":"Boolean", - "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the registrant contact (the domain owner).

    You must specify the same privacy setting for the administrative, registrant, and technical contacts.

    Default: true

    " + "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the registrant contact (the domain owner).

    You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

    Default: true

    " }, "PrivacyProtectTechContact":{ "shape":"Boolean", - "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the technical contact.

    You must specify the same privacy setting for the administrative, registrant, and technical contacts.

    Default: true

    " + "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the technical contact.

    You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

    Default: true

    " + }, + "BillingContact":{ + "shape":"ContactDetail", + "documentation":"

    Provides detailed contact information. For information about the values that you specify for each element, see ContactDetail.

    " + }, + "PrivacyProtectBillingContact":{ + "shape":"Boolean", + "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the billing contact.

    You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

    " } }, "documentation":"

    The RegisterDomain request includes the following elements.

    " @@ -2427,11 +2447,19 @@ }, "PrivacyProtectRegistrantContact":{ "shape":"Boolean", - "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the registrant contact (domain owner).

    You must specify the same privacy setting for the administrative, registrant, and technical contacts.

    Default: true

    " + "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the registrant contact (domain owner).

    You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

    Default: true

    " }, "PrivacyProtectTechContact":{ "shape":"Boolean", - "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the technical contact.

    You must specify the same privacy setting for the administrative, registrant, and technical contacts.

    Default: true

    " + "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the technical contact.

    You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

    Default: true

    " + }, + "BillingContact":{ + "shape":"ContactDetail", + "documentation":"

    Provides detailed contact information.

    " + }, + "PrivacyProtectBillingContact":{ + "shape":"Boolean", + "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the billing contact.

    You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

    " } }, "documentation":"

    The TransferDomain request includes the following elements.

    " @@ -2480,7 +2508,7 @@ }, "Transferable":{ "type":"string", - "documentation":"

    Whether the domain name can be transferred to Route 53.

    You can transfer only domains that have a value of TRANSFERABLE or Transferable.

    Valid values:

    TRANSFERABLE

    The domain name can be transferred to Route 53.

    UNTRANSFERRABLE

    The domain name can't be transferred to Route 53.

    DONT_KNOW

    Reserved for future use.

    DOMAIN_IN_OWN_ACCOUNT

    The domain already exists in the current Amazon Web Services account.

    DOMAIN_IN_ANOTHER_ACCOUNT

    the domain exists in another Amazon Web Services account.

    PREMIUM_DOMAIN

    Premium domain transfer is not supported.

    ", + "documentation":"

    Whether the domain name can be transferred to Route 53.

    You can transfer only domains that have a value of TRANSFERABLE or Transferable.

    Valid values:

    TRANSFERABLE

    The domain name can be transferred to Route 53.

    UNTRANSFERRABLE

    The domain name can't be transferred to Route 53.

    DONT_KNOW

    Reserved for future use.

    DOMAIN_IN_OWN_ACCOUNT

    The domain already exists in the current Amazon Web Services account.

    DOMAIN_IN_ANOTHER_ACCOUNT

    The domain exists in another Amazon Web Services account.

    PREMIUM_DOMAIN

    Premium domain transfer is not supported.

    ", "enum":[ "TRANSFERABLE", "UNTRANSFERABLE", @@ -2511,15 +2539,19 @@ }, "AdminPrivacy":{ "shape":"Boolean", - "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the admin contact.

    You must specify the same privacy setting for the administrative, registrant, and technical contacts.

    " + "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the admin contact.

    You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

    " }, "RegistrantPrivacy":{ "shape":"Boolean", - "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the registrant contact (domain owner).

    You must specify the same privacy setting for the administrative, registrant, and technical contacts.

    " + "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the registrant contact (domain owner).

    You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

    " }, "TechPrivacy":{ "shape":"Boolean", - "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the technical contact.

    You must specify the same privacy setting for the administrative, registrant, and technical contacts.

    " + "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the technical contact.

    You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

    " + }, + "BillingPrivacy":{ + "shape":"Boolean", + "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the billing contact.

    You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

    " } }, "documentation":"

    The UpdateDomainContactPrivacy request includes the following elements.

    " @@ -2557,6 +2589,10 @@ "Consent":{ "shape":"Consent", "documentation":"

    Customer's consent for the owner change request. Required if the domain is not free (consent price is more than $0.00).

    " + }, + "BillingContact":{ + "shape":"ContactDetail", + "documentation":"

    Provides detailed contact information.

    " } }, "documentation":"

    The UpdateDomainContact request includes the following elements.

    " diff --git a/services/route53recoverycluster/pom.xml b/services/route53recoverycluster/pom.xml index 0e92912f813a..cccffe946bc0 100644 --- a/services/route53recoverycluster/pom.xml +++ b/services/route53recoverycluster/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT route53recoverycluster AWS Java SDK :: Services :: Route53 Recovery Cluster diff --git a/services/route53recoverycluster/src/main/resources/codegen-resources/customization.config b/services/route53recoverycluster/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/route53recoverycluster/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/route53recoverycontrolconfig/pom.xml b/services/route53recoverycontrolconfig/pom.xml index 7562ef79fc7a..927ade605779 100644 --- a/services/route53recoverycontrolconfig/pom.xml +++ b/services/route53recoverycontrolconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT route53recoverycontrolconfig AWS Java SDK :: Services :: Route53 Recovery Control Config diff --git a/services/route53recoveryreadiness/pom.xml b/services/route53recoveryreadiness/pom.xml index a11756616d07..bd0a0c8a25c4 100644 --- a/services/route53recoveryreadiness/pom.xml +++ b/services/route53recoveryreadiness/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT route53recoveryreadiness AWS Java SDK :: Services :: Route53 Recovery Readiness diff --git a/services/route53recoveryreadiness/src/main/resources/codegen-resources/customization.config b/services/route53recoveryreadiness/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/route53recoveryreadiness/src/main/resources/codegen-resources/customization.config @@ 
-0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/route53resolver/pom.xml b/services/route53resolver/pom.xml index a95149e4a87c..e5fa6a8e7f54 100644 --- a/services/route53resolver/pom.xml +++ b/services/route53resolver/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT route53resolver AWS Java SDK :: Services :: Route53Resolver diff --git a/services/rum/pom.xml b/services/rum/pom.xml index 1e3a0a535289..cc24cd6da592 100644 --- a/services/rum/pom.xml +++ b/services/rum/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT rum AWS Java SDK :: Services :: RUM diff --git a/services/rum/src/main/resources/codegen-resources/customization.config b/services/rum/src/main/resources/codegen-resources/customization.config index 0e729acd0371..47a49338406e 100644 --- a/services/rum/src/main/resources/codegen-resources/customization.config +++ b/services/rum/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - "generateEndpointClientTests": true + "generateEndpointClientTests": true, + "useSraAuth": true } diff --git a/services/s3/pom.xml b/services/s3/pom.xml index d6abab3275e7..a32758eda9d2 100644 --- a/services/s3/pom.xml +++ b/services/s3/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT s3 AWS Java SDK :: Services :: Amazon S3 @@ -204,6 +204,11 @@ org.mockito mockito-junit-jupiter + + org.mockito + mockito-inline + test + net.bytebuddy byte-buddy diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/checksum/AsyncHttpChecksumIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/checksum/AsyncHttpChecksumIntegrationTest.java index d88d54610387..ab0d8fac1dad 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/checksum/AsyncHttpChecksumIntegrationTest.java +++ 
b/services/s3/src/it/java/software/amazon/awssdk/services/s3/checksum/AsyncHttpChecksumIntegrationTest.java @@ -36,17 +36,17 @@ import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; -import software.amazon.awssdk.auth.signer.S3SignerExecutionAttribute; import software.amazon.awssdk.authcrt.signer.internal.DefaultAwsCrtS3V4aSigner; +import software.amazon.awssdk.core.SdkPlugin; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.checksums.Algorithm; import software.amazon.awssdk.core.checksums.ChecksumValidation; -import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.internal.async.FileAsyncRequestBody; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.S3Configuration; import software.amazon.awssdk.services.s3.S3IntegrationTestBase; +import software.amazon.awssdk.services.s3.internal.plugins.S3OverrideAuthSchemePropertiesPlugin; import software.amazon.awssdk.services.s3.model.ChecksumAlgorithm; import software.amazon.awssdk.services.s3.model.ChecksumMode; import software.amazon.awssdk.services.s3.model.GetObjectRequest; @@ -222,12 +222,10 @@ void asyncHttpsValidUnsignedTrailer_TwoRequests_withDifferentChunkSize_OfFileAsy @Disabled("Http Async Signing is not supported for S3") void asyncValidSignedTrailerChecksumCalculatedBySdkClient() { - ExecutionAttributes executionAttributes = ExecutionAttributes.builder() - .put(S3SignerExecutionAttribute.ENABLE_PAYLOAD_SIGNING, - true).build(); + SdkPlugin enablePayloadSigningPlugin = S3OverrideAuthSchemePropertiesPlugin.enablePayloadSigningPlugin(); s3HttpAsync.putObject(PutObjectRequest.builder() .bucket(BUCKET) - .overrideConfiguration(o -> o.executionAttributes(executionAttributes)) + .overrideConfiguration(o -> 
o.addPlugin(enablePayloadSigningPlugin)) .key(KEY) .build(), AsyncRequestBody.fromString("Hello world")).join(); String response = s3HttpAsync.getObject(GetObjectRequest.builder().bucket(BUCKET) diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/crossregion/S3CrossRegionCrtIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/crossregion/S3CrossRegionCrtIntegrationTest.java index 9749b4920539..e6a47bc948d6 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/crossregion/S3CrossRegionCrtIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/crossregion/S3CrossRegionCrtIntegrationTest.java @@ -46,7 +46,7 @@ static void clearClass() { @BeforeEach public void initialize() { crossRegionS3Client = S3AsyncClient.crtBuilder() - .region(CROSS_REGION) + .region(Region.AWS_GLOBAL) .crossRegionAccessEnabled(true) .build(); } diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/multipart/S3ClientMultiPartCopyIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/multipart/S3ClientMultiPartCopyIntegrationTest.java index fc4f31b76b1a..4d942d942e7f 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/multipart/S3ClientMultiPartCopyIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/multipart/S3ClientMultiPartCopyIntegrationTest.java @@ -24,6 +24,8 @@ import java.nio.ByteBuffer; import java.security.SecureRandom; import java.util.Base64; +import java.util.List; +import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; @@ -37,10 +39,16 @@ import software.amazon.awssdk.core.ClientType; import software.amazon.awssdk.core.ResponseBytes; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.interceptor.Context; +import 
software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.core.sync.ResponseTransformer; +import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.S3IntegrationTestBase; import software.amazon.awssdk.services.s3.internal.crt.S3CrtAsyncClient; +import software.amazon.awssdk.services.s3.internal.multipart.MultipartS3AsyncClient; +import software.amazon.awssdk.services.s3.model.ChecksumAlgorithm; import software.amazon.awssdk.services.s3.model.CopyObjectResponse; import software.amazon.awssdk.services.s3.model.GetObjectResponse; import software.amazon.awssdk.services.s3.model.MetadataDirective; @@ -49,6 +57,7 @@ @Timeout(value = 3, unit = TimeUnit.MINUTES) public class S3ClientMultiPartCopyIntegrationTest extends S3IntegrationTestBase { private static final String BUCKET = temporaryBucketName(S3ClientMultiPartCopyIntegrationTest.class); + private static final CapturingInterceptor CAPTURING_INTERCEPTOR = new CapturingInterceptor(); private static final String ORIGINAL_OBJ = "test_file.dat"; private static final String COPIED_OBJ = "test_file_copy.dat"; private static final String ORIGINAL_OBJ_SPECIAL_CHARACTER = "original-special-chars-@$%"; @@ -70,7 +79,8 @@ public static void setUp() throws Exception { .region(DEFAULT_REGION) .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) .overrideConfiguration(o -> o.addExecutionInterceptor( - new UserAgentVerifyingExecutionInterceptor("NettyNio", ClientType.ASYNC))) + new UserAgentVerifyingExecutionInterceptor("NettyNio", ClientType.ASYNC)) + .addExecutionInterceptor(CAPTURING_INTERCEPTOR)) .multipartEnabled(true) .build(); } @@ -115,7 +125,7 @@ void copy_specialCharacters_hasSameContent(S3AsyncClient s3AsyncClient) { @ParameterizedTest(autoCloseArguments = false) @MethodSource("s3AsyncClient") - void 
copy_ssecServerSideEncryption_shouldSucceed(S3AsyncClient s3AsyncClient) { + void copy_withSSECAndChecksum_shouldSucceed(S3AsyncClient s3AsyncClient) { byte[] originalContent = randomBytes(OBJ_SIZE); byte[] secretKey = generateSecretKey(); String b64Key = Base64.getEncoder().encodeToString(secretKey); @@ -132,6 +142,8 @@ void copy_ssecServerSideEncryption_shouldSucceed(S3AsyncClient s3AsyncClient) { .sseCustomerKeyMD5(b64KeyMd5), AsyncRequestBody.fromBytes(originalContent)).join(); + CAPTURING_INTERCEPTOR.reset(); + CompletableFuture future = s3AsyncClient.copyObject(c -> c .sourceBucket(BUCKET) .sourceKey(ORIGINAL_OBJ) @@ -143,11 +155,13 @@ void copy_ssecServerSideEncryption_shouldSucceed(S3AsyncClient s3AsyncClient) { .copySourceSSECustomerKey(b64Key) .copySourceSSECustomerKeyMD5(b64KeyMd5) .destinationBucket(BUCKET) - .destinationKey(COPIED_OBJ)); + .destinationKey(COPIED_OBJ) + .checksumAlgorithm(ChecksumAlgorithm.CRC32)); CopyObjectResponse copyObjectResponse = future.join(); assertThat(copyObjectResponse.responseMetadata().requestId()).isNotNull(); assertThat(copyObjectResponse.sdkHttpResponse()).isNotNull(); + verifyCopyContainsCrc32Header(s3AsyncClient); } private static byte[] generateSecretKey() { @@ -180,6 +194,12 @@ private void copyObject(String original, String destination, S3AsyncClient s3Asy assertThat(copyObjectResponse.sdkHttpResponse()).isNotNull(); } + private void verifyCopyContainsCrc32Header(S3AsyncClient s3AsyncClient) { + if (s3AsyncClient instanceof MultipartS3AsyncClient) { + assertThat(CAPTURING_INTERCEPTOR.checksumHeader).isEqualTo("CRC32"); + } + } + private void validateCopiedObject(byte[] originalContent, String originalKey) { ResponseBytes copiedObject = s3.getObject(r -> r.bucket(BUCKET) .key(originalKey), @@ -192,4 +212,24 @@ public static byte[] randomBytes(long size) { ThreadLocalRandom.current().nextBytes(bytes); return bytes; } + + private static final class CapturingInterceptor implements ExecutionInterceptor { + private 
String checksumHeader; + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + SdkHttpRequest sdkHttpRequest = context.httpRequest(); + Map> headers = sdkHttpRequest.headers(); + String checksumHeaderName = "x-amz-checksum-algorithm"; + if (headers.containsKey(checksumHeaderName)) { + List checksumHeaderVals = headers.get(checksumHeaderName); + assertThat(checksumHeaderVals).hasSize(1); + checksumHeader = checksumHeaderVals.get(0); + } + } + + public void reset() { + checksumHeader = null; + } + } } diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/multipart/S3MultipartClientPutObjectIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/multipart/S3MultipartClientPutObjectIntegrationTest.java index fa31b5453e5e..3e6811f69b3c 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/multipart/S3MultipartClientPutObjectIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/multipart/S3MultipartClientPutObjectIntegrationTest.java @@ -17,6 +17,7 @@ import static java.util.concurrent.TimeUnit.SECONDS; import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.services.s3.model.ServerSideEncryption.AES256; import static software.amazon.awssdk.testutils.service.S3BucketUtils.temporaryBucketName; import java.io.ByteArrayInputStream; @@ -24,23 +25,36 @@ import java.nio.ByteBuffer; import java.nio.charset.Charset; import java.nio.file.Files; +import java.security.MessageDigest; +import java.security.SecureRandom; +import java.util.Base64; +import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.UUID; +import javax.crypto.KeyGenerator; import org.apache.commons.lang3.RandomStringUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; 
import org.junit.jupiter.api.Timeout; import org.reactivestreams.Subscriber; import software.amazon.awssdk.core.ClientType; import software.amazon.awssdk.core.ResponseInputStream; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.core.internal.async.FileAsyncRequestBody; import software.amazon.awssdk.core.sync.ResponseTransformer; +import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.S3IntegrationTestBase; +import software.amazon.awssdk.services.s3.model.ChecksumAlgorithm; import software.amazon.awssdk.services.s3.model.GetObjectResponse; import software.amazon.awssdk.services.s3.utils.ChecksumUtils; +import software.amazon.awssdk.utils.Md5Utils; @Timeout(value = 30, unit = SECONDS) public class S3MultipartClientPutObjectIntegrationTest extends S3IntegrationTestBase { @@ -48,7 +62,8 @@ public class S3MultipartClientPutObjectIntegrationTest extends S3IntegrationTest private static final String TEST_BUCKET = temporaryBucketName(S3MultipartClientPutObjectIntegrationTest.class); private static final String TEST_KEY = "testfile.dat"; private static final int OBJ_SIZE = 19 * 1024 * 1024; - + private static final CapturingInterceptor CAPTURING_INTERCEPTOR = new CapturingInterceptor(); + private static final byte[] CONTENT = RandomStringUtils.randomAscii(OBJ_SIZE).getBytes(Charset.defaultCharset()); private static File testFile; private static S3AsyncClient mpuS3Client; @@ -56,17 +71,14 @@ public class S3MultipartClientPutObjectIntegrationTest extends S3IntegrationTest public static void setup() throws Exception { S3IntegrationTestBase.setUp(); S3IntegrationTestBase.createBucket(TEST_BUCKET); - byte[] CONTENT = - 
RandomStringUtils.randomAscii(OBJ_SIZE).getBytes(Charset.defaultCharset()); - testFile = File.createTempFile("SplittingPublisherTest", UUID.randomUUID().toString()); Files.write(testFile.toPath(), CONTENT); mpuS3Client = S3AsyncClient .builder() .region(DEFAULT_REGION) .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) - .overrideConfiguration(o -> o.addExecutionInterceptor( - new UserAgentVerifyingExecutionInterceptor("NettyNio", ClientType.ASYNC))) + .overrideConfiguration(o -> o.addExecutionInterceptor(new UserAgentVerifyingExecutionInterceptor("NettyNio", ClientType.ASYNC)) + .addExecutionInterceptor(CAPTURING_INTERCEPTOR)) .multipartEnabled(true) .build(); } @@ -78,11 +90,18 @@ public static void teardown() throws Exception { deleteBucketAndAllContents(TEST_BUCKET); } + @BeforeEach + public void reset() { + CAPTURING_INTERCEPTOR.reset(); + } + @Test void putObject_fileRequestBody_objectSentCorrectly() throws Exception { AsyncRequestBody body = AsyncRequestBody.fromFile(testFile.toPath()); mpuS3Client.putObject(r -> r.bucket(TEST_BUCKET).key(TEST_KEY), body).join(); + assertThat(CAPTURING_INTERCEPTOR.checksumHeader).isEqualTo("CRC32"); + ResponseInputStream objContent = S3IntegrationTestBase.s3.getObject(r -> r.bucket(TEST_BUCKET).key(TEST_KEY), ResponseTransformer.toInputStream()); @@ -98,6 +117,8 @@ void putObject_byteAsyncRequestBody_objectSentCorrectly() throws Exception { AsyncRequestBody body = AsyncRequestBody.fromBytes(bytes); mpuS3Client.putObject(r -> r.bucket(TEST_BUCKET).key(TEST_KEY), body).join(); + assertThat(CAPTURING_INTERCEPTOR.checksumHeader).isEqualTo("CRC32"); + ResponseInputStream objContent = S3IntegrationTestBase.s3.getObject(r -> r.bucket(TEST_BUCKET).key(TEST_KEY), ResponseTransformer.toInputStream()); @@ -124,6 +145,8 @@ public void subscribe(Subscriber s) { } }).get(30, SECONDS); + assertThat(CAPTURING_INTERCEPTOR.checksumHeader).isEqualTo("CRC32"); + ResponseInputStream objContent = S3IntegrationTestBase.s3.getObject(r -> 
r.bucket(TEST_BUCKET).key(TEST_KEY), ResponseTransformer.toInputStream()); @@ -133,4 +156,102 @@ public void subscribe(Subscriber s) { assertThat(ChecksumUtils.computeCheckSum(objContent)).isEqualTo(expectedSum); } + @Test + void putObject_withSSECAndChecksum_objectSentCorrectly() throws Exception { + byte[] secretKey = generateSecretKey(); + String b64Key = Base64.getEncoder().encodeToString(secretKey); + String b64KeyMd5 = Md5Utils.md5AsBase64(secretKey); + + AsyncRequestBody body = AsyncRequestBody.fromFile(testFile.toPath()); + mpuS3Client.putObject(r -> r.bucket(TEST_BUCKET) + .key(TEST_KEY) + .sseCustomerKey(b64Key) + .sseCustomerAlgorithm(AES256.name()) + .sseCustomerKeyMD5(b64KeyMd5), + body).join(); + + assertThat(CAPTURING_INTERCEPTOR.checksumHeader).isEqualTo("CRC32"); + + ResponseInputStream objContent = + S3IntegrationTestBase.s3.getObject(r -> r.bucket(TEST_BUCKET) + .key(TEST_KEY) + .sseCustomerKey(b64Key) + .sseCustomerAlgorithm(AES256.name()) + .sseCustomerKeyMD5(b64KeyMd5), + ResponseTransformer.toInputStream()); + + assertThat(objContent.response().contentLength()).isEqualTo(testFile.length()); + byte[] expectedSum = ChecksumUtils.computeCheckSum(Files.newInputStream(testFile.toPath())); + assertThat(ChecksumUtils.computeCheckSum(objContent)).isEqualTo(expectedSum); + } + + @Test + void putObject_withUserSpecifiedChecksumValue_objectSentCorrectly() throws Exception { + String sha1Val = calculateSHA1AsString(); + AsyncRequestBody body = AsyncRequestBody.fromFile(testFile.toPath()); + mpuS3Client.putObject(r -> r.bucket(TEST_BUCKET) + .key(TEST_KEY) + .checksumSHA1(sha1Val), + body).join(); + + assertThat(CAPTURING_INTERCEPTOR.headers.get("x-amz-checksum-sha1")).contains(sha1Val); + assertThat(CAPTURING_INTERCEPTOR.checksumHeader).isNull(); + + ResponseInputStream objContent = + S3IntegrationTestBase.s3.getObject(r -> r.bucket(TEST_BUCKET).key(TEST_KEY), + ResponseTransformer.toInputStream()); + + 
assertThat(objContent.response().contentLength()).isEqualTo(testFile.length()); + byte[] expectedSum = ChecksumUtils.computeCheckSum(Files.newInputStream(testFile.toPath())); + assertThat(ChecksumUtils.computeCheckSum(objContent)).isEqualTo(expectedSum); + } + + @Test + void putObject_withUserSpecifiedChecksumTypeOtherThanCrc32_shouldHonorChecksum() { + AsyncRequestBody body = AsyncRequestBody.fromFile(testFile.toPath()); + mpuS3Client.putObject(r -> r.bucket(TEST_BUCKET) + .key(TEST_KEY) + .checksumAlgorithm(ChecksumAlgorithm.SHA1), + body).join(); + + assertThat(CAPTURING_INTERCEPTOR.checksumHeader).isEqualTo("SHA1"); + } + + private static String calculateSHA1AsString() throws Exception { + MessageDigest md = MessageDigest.getInstance("SHA-1"); + md.update(CONTENT); + byte[] checksum = md.digest(); + return Base64.getEncoder().encodeToString(checksum); + } + + private static byte[] generateSecretKey() { + KeyGenerator generator; + try { + generator = KeyGenerator.getInstance("AES"); + generator.init(256, new SecureRandom()); + return generator.generateKey().getEncoded(); + } catch (Exception e) { + return null; + } + } + + private static final class CapturingInterceptor implements ExecutionInterceptor { + String checksumHeader; + Map> headers; + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + SdkHttpRequest sdkHttpRequest = context.httpRequest(); + headers = sdkHttpRequest.headers(); + String checksumHeaderName = "x-amz-sdk-checksum-algorithm"; + if (headers.containsKey(checksumHeaderName)) { + List checksumHeaderVals = headers.get(checksumHeaderName); + assertThat(checksumHeaderVals).hasSize(1); + checksumHeader = checksumHeaderVals.get(0); + } + } + + public void reset() { + checksumHeader = null; + } + } } diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/s3express/S3ExpressIntegrationTest.java 
b/services/s3/src/it/java/software/amazon/awssdk/services/s3/s3express/S3ExpressIntegrationTest.java index b6edf3585691..d49ea54e86be 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/s3express/S3ExpressIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/s3express/S3ExpressIntegrationTest.java @@ -44,7 +44,6 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; -import software.amazon.awssdk.auth.signer.S3SignerExecutionAttribute; import software.amazon.awssdk.core.ResponseBytes; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.async.AsyncResponseTransformer; @@ -62,6 +61,7 @@ import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.internal.plugins.S3OverrideAuthSchemePropertiesPlugin; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload; @@ -226,7 +226,7 @@ public void putObject_payloadSigningEnabledSra_executesSuccessfully() { S3Client s3Client = S3Client.builder() .region(TEST_REGION) .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) - .overrideConfiguration(o -> o.putExecutionAttribute(S3SignerExecutionAttribute.ENABLE_PAYLOAD_SIGNING, true)) + .addPlugin(S3OverrideAuthSchemePropertiesPlugin.enablePayloadSigningPlugin()) .build(); PutObjectRequest request = PutObjectRequest.builder() diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/signer/PayloadSigningIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/signer/PayloadSigningIntegrationTest.java index ca77bc1cc588..dbe9bc2f7486 100644 --- 
a/services/s3/src/it/java/software/amazon/awssdk/services/s3/signer/PayloadSigningIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/signer/PayloadSigningIntegrationTest.java @@ -29,7 +29,6 @@ import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; -import software.amazon.awssdk.auth.signer.S3SignerExecutionAttribute; import software.amazon.awssdk.core.interceptor.Context; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; @@ -39,6 +38,7 @@ import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.S3IntegrationTestBase; +import software.amazon.awssdk.services.s3.internal.plugins.S3OverrideAuthSchemePropertiesPlugin; import software.amazon.awssdk.services.s3.utils.S3TestUtils; /** @@ -99,7 +99,8 @@ public void standardSyncApacheHttpClient_httpCauses_signedPayload() { public void standardSyncApacheHttpClient_manuallyEnabled_signedPayload() { S3Client syncClient = s3ClientBuilder() .overrideConfiguration(o -> o.addExecutionInterceptor(capturingInterceptor) - .addExecutionInterceptor(new PayloadSigningInterceptor())) + .addExecutionInterceptor(new CreateRequestBodyIfNeededInterceptor())) + .addPlugin(S3OverrideAuthSchemePropertiesPlugin.enablePayloadSigningPlugin()) .build(); assertThat(syncClient.putObject(b -> b.bucket(BUCKET).key(KEY), RequestBody.fromBytes("helloworld".getBytes()))).isNotNull(); @@ -132,12 +133,11 @@ public List capturedRequests() { } } - private static class PayloadSigningInterceptor implements ExecutionInterceptor { + private static class CreateRequestBodyIfNeededInterceptor implements ExecutionInterceptor { @Override public Optional modifyHttpContent(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { - executionAttributes.putAttribute(S3SignerExecutionAttribute.ENABLE_PAYLOAD_SIGNING, true); 
if (!context.requestBody().isPresent() && context.httpRequest().method().equals(SdkHttpMethod.POST)) { return Optional.of(RequestBody.fromBytes(new byte[0])); } diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crossregion/endpointprovider/BucketEndpointProvider.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crossregion/endpointprovider/BucketEndpointProvider.java index 84f1e69abf8a..bc8d332cb7ee 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crossregion/endpointprovider/BucketEndpointProvider.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crossregion/endpointprovider/BucketEndpointProvider.java @@ -43,9 +43,20 @@ public static BucketEndpointProvider create(S3EndpointProvider delegateEndPointP @Override public CompletableFuture resolveEndpoint(S3EndpointParams endpointParams) { Region crossRegion = regionSupplier.get(); - return delegateEndPointProvider.resolveEndpoint( - endpointParams.copy(c -> c.region(crossRegion == null ? endpointParams.region() : crossRegion) - .useGlobalEndpoint(false))); + S3EndpointParams.Builder endpointParamsBuilder = endpointParams.toBuilder(); + // Check if cross-region resolution has already occurred. + if (crossRegion != null) { + endpointParamsBuilder.region(crossRegion); + } else { + // For global regions, set the region to "us-east-1" to use regional endpoints. + if (Region.AWS_GLOBAL.equals(endpointParams.region())) { + endpointParamsBuilder.region(Region.US_EAST_1); + } + // Disable the global endpoint as S3 can properly redirect regions in the 'x-amz-bucket-region' header + // only for regional endpoints. 
+ endpointParamsBuilder.useGlobalEndpoint(false); + } + return delegateEndPointProvider.resolveEndpoint(endpointParamsBuilder.build()); } } diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtAsyncHttpClient.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtAsyncHttpClient.java index 14a2eb9a1af0..6dbaa550628f 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtAsyncHttpClient.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtAsyncHttpClient.java @@ -45,7 +45,6 @@ import software.amazon.awssdk.crt.s3.ResumeToken; import software.amazon.awssdk.crt.s3.S3Client; import software.amazon.awssdk.crt.s3.S3ClientOptions; -import software.amazon.awssdk.crt.s3.S3MetaRequest; import software.amazon.awssdk.crt.s3.S3MetaRequestOptions; import software.amazon.awssdk.http.Header; import software.amazon.awssdk.http.SdkHttpExecutionAttributes; @@ -54,7 +53,6 @@ import software.amazon.awssdk.http.async.SdkAsyncHttpClient; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.utils.AttributeMap; -import software.amazon.awssdk.utils.Logger; import software.amazon.awssdk.utils.NumericUtils; import software.amazon.awssdk.utils.http.SdkHttpUtils; @@ -64,7 +62,6 @@ */ @SdkInternalApi public final class S3CrtAsyncHttpClient implements SdkAsyncHttpClient { - private static final Logger log = Logger.loggerFor(S3CrtAsyncHttpClient.class); private final S3Client crtS3Client; @@ -133,10 +130,12 @@ public CompletableFuture execute(AsyncExecuteRequest asyncRequest) { URI uri = asyncRequest.request().getUri(); HttpRequest httpRequest = toCrtRequest(asyncRequest); SdkHttpExecutionAttributes httpExecutionAttributes = asyncRequest.httpExecutionAttributes(); + CompletableFuture s3MetaRequestFuture = new CompletableFuture<>(); S3CrtResponseHandlerAdapter responseHandler = new S3CrtResponseHandlerAdapter(executeFuture, 
asyncRequest.responseHandler(), - httpExecutionAttributes.getAttribute(CRT_PROGRESS_LISTENER)); + httpExecutionAttributes.getAttribute(CRT_PROGRESS_LISTENER), + s3MetaRequestFuture); S3MetaRequestOptions.MetaRequestType requestType = requestType(asyncRequest); @@ -160,16 +159,19 @@ public CompletableFuture execute(AsyncExecuteRequest asyncRequest) { .withRequestFilePath(requestFilePath) .withSigningConfig(signingConfig); - S3MetaRequest s3MetaRequest = crtS3Client.makeMetaRequest(requestOptions); - S3MetaRequestPauseObservable observable = - httpExecutionAttributes.getAttribute(METAREQUEST_PAUSE_OBSERVABLE); + try { + S3MetaRequestWrapper requestWrapper = new S3MetaRequestWrapper(crtS3Client.makeMetaRequest(requestOptions)); + s3MetaRequestFuture.complete(requestWrapper); - responseHandler.metaRequest(s3MetaRequest); + S3MetaRequestPauseObservable observable = + httpExecutionAttributes.getAttribute(METAREQUEST_PAUSE_OBSERVABLE); - if (observable != null) { - observable.subscribe(s3MetaRequest); + if (observable != null) { + observable.subscribe(requestWrapper); + } + } finally { + signingConfig.close(); } - closeResourceCallback(executeFuture, s3MetaRequest, responseHandler, signingConfig); return executeFuture; } @@ -215,23 +217,6 @@ private static S3MetaRequestOptions.MetaRequestType requestType(AsyncExecuteRequ return S3MetaRequestOptions.MetaRequestType.DEFAULT; } - private static void closeResourceCallback(CompletableFuture executeFuture, - S3MetaRequest s3MetaRequest, - S3CrtResponseHandlerAdapter responseHandler, - AwsSigningConfig signingConfig) { - executeFuture.whenComplete((r, t) -> { - if (executeFuture.isCancelled()) { - log.debug(() -> "The request is cancelled, cancelling meta request"); - responseHandler.cancelRequest(); - s3MetaRequest.cancel(); - signingConfig.close(); - } else { - s3MetaRequest.close(); - signingConfig.close(); - } - }); - } - private static HttpRequest toCrtRequest(AsyncExecuteRequest asyncRequest) { SdkHttpRequest sdkRequest = 
asyncRequest.request(); diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtResponseHandlerAdapter.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtResponseHandlerAdapter.java index de04329326a5..c4fa2519527b 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtResponseHandlerAdapter.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtResponseHandlerAdapter.java @@ -25,10 +25,8 @@ import software.amazon.awssdk.crt.CRT; import software.amazon.awssdk.crt.http.HttpHeader; import software.amazon.awssdk.crt.s3.S3FinishedResponseContext; -import software.amazon.awssdk.crt.s3.S3MetaRequest; import software.amazon.awssdk.crt.s3.S3MetaRequestProgress; import software.amazon.awssdk.crt.s3.S3MetaRequestResponseHandler; -import software.amazon.awssdk.http.SdkCancellationException; import software.amazon.awssdk.http.SdkHttpResponse; import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; import software.amazon.awssdk.utils.Logger; @@ -46,7 +44,7 @@ public final class S3CrtResponseHandlerAdapter implements S3MetaRequestResponseH private final SimplePublisher responsePublisher = new SimplePublisher<>(); private final SdkHttpResponse.Builder initialHeadersResponse = SdkHttpResponse.builder(); - private volatile S3MetaRequest metaRequest; + private final CompletableFuture metaRequestFuture; private final PublisherListener progressListener; @@ -54,12 +52,35 @@ public final class S3CrtResponseHandlerAdapter implements S3MetaRequestResponseH public S3CrtResponseHandlerAdapter(CompletableFuture executeFuture, SdkAsyncHttpResponseHandler responseHandler, - PublisherListener progressListener) { + PublisherListener progressListener, + CompletableFuture metaRequestFuture) { this.resultFuture = executeFuture; + this.metaRequestFuture = metaRequestFuture; + + resultFuture.whenComplete((r, t) -> { + S3MetaRequestWrapper 
s3MetaRequest = s3MetaRequest(); + if (s3MetaRequest == null) { + return; + } + + if (t != null) { + s3MetaRequest.cancel(); + } + s3MetaRequest.close(); + }); + this.responseHandler = responseHandler; this.progressListener = progressListener == null ? new NoOpPublisherListener() : progressListener; } + private S3MetaRequestWrapper s3MetaRequest() { + if (!metaRequestFuture.isDone()) { + return null; + } + + return metaRequestFuture.join(); + } + @Override public void onResponseHeaders(int statusCode, HttpHeader[] headers) { // Note, we cannot call responseHandler.onHeaders() here because the response status code and headers may not represent @@ -87,6 +108,13 @@ public int onResponseBody(ByteBuffer bodyBytesIn, long objectRangeStart, long ob return; } + S3MetaRequestWrapper metaRequest = s3MetaRequest(); + if (metaRequest == null) { + // should not happen + failResponseHandlerAndFuture(SdkClientException.create("Unexpected exception occurred: s3metaRequest is not " + + "initialized yet")); + return; + } metaRequest.incrementReadWindow(bytesReceived); }); @@ -115,22 +143,10 @@ private void onSuccessfulResponseComplete() { return; } this.progressListener.subscriberOnComplete(); - completeFutureAndCloseRequest(); + resultFuture.complete(null); }); } - private void completeFutureAndCloseRequest() { - resultFuture.complete(null); - runAndLogError(log.logger(), "Exception thrown in S3MetaRequest#close, ignoring", - () -> metaRequest.close()); - } - - public void cancelRequest() { - SdkCancellationException sdkClientException = - new SdkCancellationException("request is cancelled"); - failResponseHandlerAndFuture(sdkClientException); - } - private void handleError(S3FinishedResponseContext context) { int crtCode = context.getErrorCode(); HttpHeader[] headers = context.getErrorHeaders(); @@ -168,27 +184,21 @@ private void onErrorResponseComplete(byte[] errorPayload) { failResponseHandlerAndFuture(throwable); return null; } - completeFutureAndCloseRequest(); + 
resultFuture.complete(null); return null; }); } private void failResponseHandlerAndFuture(Throwable exception) { - resultFuture.completeExceptionally(exception); runAndLogError(log.logger(), "Exception thrown in SdkAsyncHttpResponseHandler#onError, ignoring", () -> responseHandler.onError(exception)); - runAndLogError(log.logger(), "Exception thrown in S3MetaRequest#close, ignoring", - () -> metaRequest.close()); + resultFuture.completeExceptionally(exception); } private static boolean isErrorResponse(int responseStatus) { return responseStatus != 0; } - public void metaRequest(S3MetaRequest s3MetaRequest) { - metaRequest = s3MetaRequest; - } - @Override public void onProgress(S3MetaRequestProgress progress) { this.progressListener.subscriberOnNext(progress); diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3MetaRequestPauseObservable.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3MetaRequestPauseObservable.java index ce7b78fdd538..5ddb41219d39 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3MetaRequestPauseObservable.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3MetaRequestPauseObservable.java @@ -18,7 +18,6 @@ import java.util.function.Function; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.crt.s3.ResumeToken; -import software.amazon.awssdk.crt.s3.S3MetaRequest; /** * An observable that notifies the observer {@link S3CrtAsyncHttpClient} to pause the request. 
@@ -26,17 +25,17 @@ @SdkInternalApi public class S3MetaRequestPauseObservable { - private final Function pause; - private volatile S3MetaRequest request; + private final Function pause; + private volatile S3MetaRequestWrapper request; public S3MetaRequestPauseObservable() { - this.pause = S3MetaRequest::pause; + this.pause = S3MetaRequestWrapper::pause; } /** - * Subscribe {@link S3MetaRequest} to be potentially paused later. + * Subscribe {@link S3MetaRequestWrapper} to be potentially paused later. */ - public void subscribe(S3MetaRequest request) { + public void subscribe(S3MetaRequestWrapper request) { this.request = request; } diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3MetaRequestWrapper.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3MetaRequestWrapper.java new file mode 100644 index 000000000000..72074e1b47a3 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3MetaRequestWrapper.java @@ -0,0 +1,69 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.crt; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.crt.s3.ResumeToken; +import software.amazon.awssdk.crt.s3.S3MetaRequest; + +/** + * A wrapper class that manages the lifecycle of the underlying {@link S3MetaRequest}. 
This class is needed to ensure we don't + * invoke methods on {@link S3MetaRequest} after it's closed, otherwise CRT will crash. + */ +@SdkInternalApi +public class S3MetaRequestWrapper { + private final S3MetaRequest delegate; + private volatile boolean isClosed; + private final Object lock = new Object(); + + public S3MetaRequestWrapper(S3MetaRequest delegate) { + this.delegate = delegate; + } + + public void close() { + synchronized (lock) { + if (!isClosed) { + isClosed = true; + delegate.close(); + } + } + } + + public void incrementReadWindow(long windowSize) { + synchronized (lock) { + if (!isClosed) { + delegate.incrementReadWindow(windowSize); + } + } + } + + public ResumeToken pause() { + synchronized (lock) { + if (!isClosed) { + return delegate.pause(); + } + } + return null; + } + + public void cancel() { + synchronized (lock) { + if (!isClosed) { + delegate.cancel(); + } + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/ObjectMetadataInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/ObjectMetadataInterceptor.java new file mode 100644 index 000000000000..b5043a990374 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/ObjectMetadataInterceptor.java @@ -0,0 +1,74 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.handlers; + +import java.util.Map; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.utils.StringUtils; + +/** + * Interceptor that trims object metadata keys of any leading or trailing whitespace for {@code PutObject} and {@code + * CreateMultipartUpload}. This behavior is intended to provide the same functionality as in 1.x. + */ +@SdkInternalApi +public final class ObjectMetadataInterceptor implements ExecutionInterceptor { + @Override + public SdkRequest modifyRequest(Context.ModifyRequest context, ExecutionAttributes executionAttributes) { + SdkRequest request = context.request(); + + switch (executionAttributes.getAttribute(SdkExecutionAttribute.OPERATION_NAME)) { + case "PutObject": + return trimMetadataNames((PutObjectRequest) request); + case "CreateMultipartUpload": + return trimMetadataNames((CreateMultipartUploadRequest) request); + default: + return request; + } + } + + private PutObjectRequest trimMetadataNames(PutObjectRequest putObjectRequest) { + if (!putObjectRequest.hasMetadata()) { + return putObjectRequest; + } + + return putObjectRequest.toBuilder() + .metadata(trimKeys(putObjectRequest.metadata())) + .build(); + } + + private CreateMultipartUploadRequest trimMetadataNames(CreateMultipartUploadRequest createMultipartUploadRequest) { + if (!createMultipartUploadRequest.hasMetadata()) { + return createMultipartUploadRequest; + } + + return 
createMultipartUploadRequest.toBuilder() + .metadata(trimKeys(createMultipartUploadRequest.metadata())) + .build(); + } + + private Map trimKeys(Map map) { + return map.entrySet().stream() + .collect(Collectors.toMap(e -> StringUtils.trim(e.getKey()), Map.Entry::getValue)); + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/S3ExpressChecksumInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/S3ExpressChecksumInterceptor.java index 2cd3401f7487..c303ba248fe5 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/S3ExpressChecksumInterceptor.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/S3ExpressChecksumInterceptor.java @@ -21,7 +21,6 @@ import java.util.Map; import java.util.Objects; import java.util.Optional; -import java.util.stream.Stream; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.checksums.DefaultChecksumAlgorithm; import software.amazon.awssdk.checksums.spi.ChecksumAlgorithm; @@ -98,8 +97,10 @@ public SdkRequest modifyRequest(Context.ModifyRequest context, ExecutionAttribut } private boolean requestContainsUserCalculatedChecksum(SdkRequest request) { - return Stream.of("ChecksumCRC32", "ChecksumCRC32C", "ChecksumSHA1", "ChecksumSHA256") - .anyMatch(s -> request.getValueForField(s, String.class).isPresent()); + return request.getValueForField("ChecksumCRC32", String.class).isPresent() + || request.getValueForField("ChecksumCRC32C", String.class).isPresent() + || request.getValueForField("ChecksumSHA1", String.class).isPresent() + || request.getValueForField("ChecksumSHA256", String.class).isPresent(); } private boolean shouldAlwaysAddChecksum(ChecksumSpecs checksumSpecs, SdkRequest request) { diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/CancelledSubscriber.java 
b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/CancelledSubscriber.java new file mode 100644 index 000000000000..a9a010d48983 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/CancelledSubscriber.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.multipart; + +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkInternalApi; + +@SdkInternalApi +public final class CancelledSubscriber implements Subscriber { + + @Override + public void onSubscribe(Subscription subscription) { + if (subscription == null) { + throw new NullPointerException("Null subscription"); + } else { + subscription.cancel(); + } + } + + @Override + public void onNext(T t) { + } + + @Override + public void onError(Throwable error) { + if (error == null) { + throw new NullPointerException("Null error published"); + } + } + + @Override + public void onComplete() { + } +} \ No newline at end of file diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/GenericMultipartHelper.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/GenericMultipartHelper.java index 1d251ad69678..1906408a59b4 100644 --- 
a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/GenericMultipartHelper.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/GenericMultipartHelper.java @@ -15,13 +15,13 @@ package software.amazon.awssdk.services.s3.internal.multipart; +import static software.amazon.awssdk.services.s3.internal.multipart.SdkPojoConversionUtils.toCompleteMultipartUploadRequest; + import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; -import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.function.BiFunction; import java.util.function.Function; import java.util.function.Supplier; -import java.util.stream.IntStream; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.exception.SdkException; @@ -29,8 +29,8 @@ import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; -import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload; import software.amazon.awssdk.services.s3.model.CompletedPart; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.S3Request; import software.amazon.awssdk.services.s3.model.S3Response; import software.amazon.awssdk.utils.Logger; @@ -81,28 +81,13 @@ public int determinePartCount(long contentLength, long partSize) { } public CompletableFuture completeMultipartUpload( - RequestT request, String uploadId, CompletedPart[] parts) { + PutObjectRequest request, String uploadId, CompletedPart[] parts) { log.debug(() -> String.format("Sending completeMultipartUploadRequest, uploadId: %s", uploadId)); - CompleteMultipartUploadRequest completeMultipartUploadRequest = - 
CompleteMultipartUploadRequest.builder() - .bucket(request.getValueForField("Bucket", String.class).get()) - .key(request.getValueForField("Key", String.class).get()) - .uploadId(uploadId) - .multipartUpload(CompletedMultipartUpload.builder() - .parts(parts) - .build()) - .build(); - return s3AsyncClient.completeMultipartUpload(completeMultipartUploadRequest); - } - public CompletableFuture completeMultipartUpload( - RequestT request, String uploadId, AtomicReferenceArray completedParts) { - CompletedPart[] parts = - IntStream.range(0, completedParts.length()) - .mapToObj(completedParts::get) - .toArray(CompletedPart[]::new); - return completeMultipartUpload(request, uploadId, parts); + CompleteMultipartUploadRequest completeMultipartUploadRequest = toCompleteMultipartUploadRequest(request, uploadId, + parts); + return s3AsyncClient.completeMultipartUpload(completeMultipartUploadRequest); } public BiFunction handleExceptionOrResponse( diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/KnownContentLengthAsyncRequestBodySubscriber.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/KnownContentLengthAsyncRequestBodySubscriber.java new file mode 100644 index 000000000000..59be53e13642 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/KnownContentLengthAsyncRequestBodySubscriber.java @@ -0,0 +1,214 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.multipart; + +import java.util.Collection; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Consumer; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.CompletedPart; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.s3.model.UploadPartRequest; +import software.amazon.awssdk.services.s3.multipart.S3ResumeToken; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.NumericUtils; +import software.amazon.awssdk.utils.Pair; + +@SdkInternalApi +public class KnownContentLengthAsyncRequestBodySubscriber implements Subscriber { + + private static final Logger log = Logger.loggerFor(KnownContentLengthAsyncRequestBodySubscriber.class); + + /** + * The number of AsyncRequestBody has been received but yet to be processed + */ + private final AtomicInteger asyncRequestBodyInFlight = new AtomicInteger(0); + private final AtomicBoolean failureActionInitiated = new AtomicBoolean(false); + private final AtomicInteger partNumber = new AtomicInteger(1); + private final MultipartUploadHelper multipartUploadHelper; + private final long partSize; + private final int partCount; + private final int numExistingParts; + private final String uploadId; + 
private final Collection> futures = new ConcurrentLinkedQueue<>(); + private final PutObjectRequest putObjectRequest; + private final CompletableFuture returnFuture; + private final Map completedParts; + private final Map existingParts; + private Subscription subscription; + private volatile boolean isDone; + private volatile boolean isPaused; + private volatile CompletableFuture completeMpuFuture; + + KnownContentLengthAsyncRequestBodySubscriber(MpuRequestContext mpuRequestContext, + CompletableFuture returnFuture, + MultipartUploadHelper multipartUploadHelper) { + this.partSize = mpuRequestContext.partSize(); + this.partCount = determinePartCount(mpuRequestContext.contentLength(), partSize); + this.putObjectRequest = mpuRequestContext.request().left(); + this.returnFuture = returnFuture; + this.uploadId = mpuRequestContext.uploadId(); + this.existingParts = mpuRequestContext.existingParts(); + this.numExistingParts = NumericUtils.saturatedCast(mpuRequestContext.numPartsCompleted()); + this.completedParts = new ConcurrentHashMap<>(); + this.multipartUploadHelper = multipartUploadHelper; + } + + private int determinePartCount(long contentLength, long partSize) { + return (int) Math.ceil(contentLength / (double) partSize); + } + + public S3ResumeToken pause() { + isPaused = true; + + if (completeMpuFuture != null && completeMpuFuture.isDone()) { + return null; + } + + if (completeMpuFuture != null && !completeMpuFuture.isDone()) { + completeMpuFuture.cancel(true); + } + + long numPartsCompleted = 0; + for (CompletableFuture cf : futures) { + if (!cf.isDone()) { + cf.cancel(true); + } else { + numPartsCompleted++; + } + } + + return S3ResumeToken.builder() + .uploadId(uploadId) + .partSize(partSize) + .totalNumParts((long) partCount) + .numPartsCompleted(numPartsCompleted + numExistingParts) + .build(); + } + + @Override + public void onSubscribe(Subscription s) { + if (this.subscription != null) { + log.warn(() -> "The subscriber has already been subscribed. 
Cancelling the incoming subscription"); + subscription.cancel(); + return; + } + this.subscription = s; + s.request(1); + returnFuture.whenComplete((r, t) -> { + if (t != null) { + s.cancel(); + if (shouldFailRequest()) { + multipartUploadHelper.failRequestsElegantly(futures, t, uploadId, returnFuture, putObjectRequest); + } + } + }); + } + + @Override + public void onNext(AsyncRequestBody asyncRequestBody) { + if (isPaused) { + return; + } + + if (existingParts.containsKey(partNumber.get())) { + partNumber.getAndIncrement(); + asyncRequestBody.subscribe(new CancelledSubscriber<>()); + subscription.request(1); + return; + } + + asyncRequestBodyInFlight.incrementAndGet(); + UploadPartRequest uploadRequest = SdkPojoConversionUtils.toUploadPartRequest(putObjectRequest, + partNumber.getAndIncrement(), + uploadId); + + Consumer completedPartConsumer = + completedPart -> completedParts.put(completedPart.partNumber(), completedPart); + multipartUploadHelper.sendIndividualUploadPartRequest(uploadId, completedPartConsumer, futures, + Pair.of(uploadRequest, asyncRequestBody)) + .whenComplete((r, t) -> { + if (t != null) { + if (shouldFailRequest()) { + multipartUploadHelper.failRequestsElegantly(futures, t, uploadId, returnFuture, + putObjectRequest); + } + } else { + completeMultipartUploadIfFinished(asyncRequestBodyInFlight.decrementAndGet()); + } + }); + subscription.request(1); + } + + private boolean shouldFailRequest() { + return failureActionInitiated.compareAndSet(false, true) && !isPaused; + } + + @Override + public void onError(Throwable t) { + log.debug(() -> "Received onError ", t); + if (failureActionInitiated.compareAndSet(false, true)) { + multipartUploadHelper.failRequestsElegantly(futures, t, uploadId, returnFuture, putObjectRequest); + } + } + + @Override + public void onComplete() { + log.debug(() -> "Received onComplete()"); + isDone = true; + if (!isPaused) { + completeMultipartUploadIfFinished(asyncRequestBodyInFlight.get()); + } + } + + private void 
completeMultipartUploadIfFinished(int requestsInFlight) { + if (isDone && requestsInFlight == 0) { + CompletedPart[] parts; + if (existingParts.isEmpty()) { + parts = completedParts.values().toArray(new CompletedPart[0]); + } else if (!completedParts.isEmpty()) { + // List of CompletedParts needs to be in ascending order + parts = mergeCompletedParts(); + } else { + parts = existingParts.values().toArray(new CompletedPart[0]); + } + completeMpuFuture = multipartUploadHelper.completeMultipartUpload(returnFuture, uploadId, parts, + putObjectRequest); + } + } + + private CompletedPart[] mergeCompletedParts() { + CompletedPart[] merged = new CompletedPart[partCount]; + int currPart = 1; + while (currPart < partCount + 1) { + CompletedPart completedPart = existingParts.containsKey(currPart) ? existingParts.get(currPart) : + completedParts.get(currPart); + merged[currPart - 1] = completedPart; + currPart++; + } + return merged; + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MpuRequestContext.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MpuRequestContext.java new file mode 100644 index 000000000000..6c4b978e4183 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MpuRequestContext.java @@ -0,0 +1,145 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.multipart; + +import java.util.Collections; +import java.util.Map; +import java.util.Objects; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.services.s3.model.CompletedPart; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.utils.Pair; + +@SdkInternalApi +public class MpuRequestContext { + + private final Pair request; + private final Long contentLength; + private final Long partSize; + private final Long numPartsCompleted; + private final String uploadId; + private final Map existingParts; + + protected MpuRequestContext(Builder builder) { + this.request = builder.request; + this.contentLength = builder.contentLength; + this.partSize = builder.partSize; + this.uploadId = builder.uploadId; + this.existingParts = builder.existingParts; + this.numPartsCompleted = builder.numPartsCompleted; + } + + public static Builder builder() { + return new Builder(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + MpuRequestContext that = (MpuRequestContext) o; + + return Objects.equals(request, that.request) && Objects.equals(contentLength, that.contentLength) + && Objects.equals(partSize, that.partSize) && Objects.equals(numPartsCompleted, that.numPartsCompleted) + && Objects.equals(uploadId, that.uploadId) && Objects.equals(existingParts, that.existingParts); + } + + @Override + public int hashCode() { + int result = request != null ? request.hashCode() : 0; + result = 31 * result + (uploadId != null ? uploadId.hashCode() : 0); + result = 31 * result + (existingParts != null ? existingParts.hashCode() : 0); + result = 31 * result + (contentLength != null ? contentLength.hashCode() : 0); + result = 31 * result + (partSize != null ? 
partSize.hashCode() : 0); + result = 31 * result + (numPartsCompleted != null ? numPartsCompleted.hashCode() : 0); + return result; + } + + public Pair request() { + return request; + } + + public Long contentLength() { + return contentLength; + } + + public Long partSize() { + return partSize; + } + + public Long numPartsCompleted() { + return numPartsCompleted; + } + + public String uploadId() { + return uploadId; + } + + public Map existingParts() { + return existingParts != null ? Collections.unmodifiableMap(existingParts) : null; + } + + public static final class Builder { + private Pair request; + private Long contentLength; + private Long partSize; + private Long numPartsCompleted; + private String uploadId; + private Map existingParts; + + private Builder() { + } + + public Builder request(Pair request) { + this.request = request; + return this; + } + + public Builder contentLength(Long contentLength) { + this.contentLength = contentLength; + return this; + } + + public Builder partSize(Long partSize) { + this.partSize = partSize; + return this; + } + + public Builder numPartsCompleted(Long numPartsCompleted) { + this.numPartsCompleted = numPartsCompleted; + return this; + } + + public Builder uploadId(String uploadId) { + this.uploadId = uploadId; + return this; + } + + public Builder existingParts(Map existingParts) { + this.existingParts = existingParts; + return this; + } + + public MpuRequestContext build() { + return new MpuRequestContext(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartS3AsyncClient.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartS3AsyncClient.java index 8f2f8cffb51d..99ce7691d951 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartS3AsyncClient.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartS3AsyncClient.java @@ 
-18,15 +18,22 @@ import java.util.concurrent.CompletableFuture; import java.util.function.Function; +import java.util.stream.Stream; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute; import software.amazon.awssdk.core.ApiName; +import software.amazon.awssdk.core.RequestOverrideConfiguration; import software.amazon.awssdk.core.SplittingTransformerConfiguration; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.async.SplitAsyncResponseTransformer; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.services.s3.DelegatingS3AsyncClient; import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.S3Configuration; import software.amazon.awssdk.services.s3.internal.UserAgentUtils; +import software.amazon.awssdk.services.s3.model.ChecksumAlgorithm; +import software.amazon.awssdk.services.s3.model.ChecksumMode; import software.amazon.awssdk.services.s3.model.CopyObjectRequest; import software.amazon.awssdk.services.s3.model.CopyObjectResponse; import software.amazon.awssdk.services.s3.model.GetObjectRequest; @@ -38,8 +45,9 @@ import software.amazon.awssdk.utils.Validate; /** - * An {@link S3AsyncClient} that automatically converts put, copy requests to their respective multipart call. Note: get is not - * yet supported. + * An {@link S3AsyncClient} that automatically converts PUT, COPY requests to their respective multipart call. CRC32 will be + * enabled for the PUT and COPY requests, unless the the checksum is specified or checksum validation is disabled. + * Note: GET is not yet supported. 
* * @see MultipartConfiguration */ @@ -66,9 +74,40 @@ private MultipartS3AsyncClient(S3AsyncClient delegate, MultipartConfiguration mu @Override public CompletableFuture putObject(PutObjectRequest putObjectRequest, AsyncRequestBody requestBody) { + if (shouldEnableCrc32(putObjectRequest)) { + putObjectRequest = putObjectRequest.toBuilder().checksumAlgorithm(ChecksumAlgorithm.CRC32).build(); + } + return mpuHelper.uploadObject(putObjectRequest, requestBody); } + private boolean shouldEnableCrc32(PutObjectRequest putObjectRequest) { + return !checksumSetOnRequest(putObjectRequest) && checksumEnabledPerConfig(putObjectRequest); + } + + private boolean checksumSetOnRequest(PutObjectRequest putObjectRequest) { + if (putObjectRequest.checksumAlgorithm() != null) { + return true; + } + + return Stream.of("ChecksumCRC32", "ChecksumCRC32C", "ChecksumSHA1", "ChecksumSHA256") + .anyMatch(s -> putObjectRequest.getValueForField(s, String.class).isPresent()); + } + + private boolean checksumEnabledPerConfig(PutObjectRequest putObjectRequest) { + ExecutionAttributes executionAttributes = + putObjectRequest.overrideConfiguration().map(RequestOverrideConfiguration::executionAttributes).orElse(null); + + if (executionAttributes == null) { + return true; + } + + S3Configuration serviceConfiguration = + (S3Configuration) executionAttributes.getAttribute(AwsSignerExecutionAttribute.SERVICE_CONFIG); + + return serviceConfiguration == null || serviceConfiguration.checksumValidationEnabled(); + } + @Override public CompletableFuture copyObject(CopyObjectRequest copyObjectRequest) { return copyObjectHelper.copyObject(copyObjectRequest); @@ -77,6 +116,7 @@ public CompletableFuture copyObject(CopyObjectRequest copyOb @Override public CompletableFuture getObject( GetObjectRequest getObjectRequest, AsyncResponseTransformer asyncResponseTransformer) { + getObjectRequest = getObjectRequest.toBuilder().checksumMode(ChecksumMode.ENABLED).build(); SplitAsyncResponseTransformer split = 
asyncResponseTransformer.split(SplittingTransformerConfiguration.builder().bufferSize(apiCallBufferSize).build()); split.publisher().subscribe(new MultipartDownloaderSubscriber((S3AsyncClient) delegate(), getObjectRequest)); diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartUploadHelper.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartUploadHelper.java index 9754d284f5b9..c5bb7fe286cc 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartUploadHelper.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartUploadHelper.java @@ -24,6 +24,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.CompletedPart; import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; @@ -74,20 +75,20 @@ CompletableFuture createMultipartUpload(PutObject return createMultipartUploadFuture; } - void completeMultipartUpload(CompletableFuture returnFuture, + CompletableFuture completeMultipartUpload(CompletableFuture returnFuture, String uploadId, CompletedPart[] completedParts, PutObjectRequest putObjectRequest) { - genericMultipartHelper.completeMultipartUpload(putObjectRequest, - uploadId, - completedParts) - .handle(genericMultipartHelper.handleExceptionOrResponse(putObjectRequest, returnFuture, - uploadId)) - .exceptionally(throwable -> { - genericMultipartHelper.handleException(returnFuture, () -> "Unexpected exception occurred", - throwable); - return null; - }); + CompletableFuture future = + 
genericMultipartHelper.completeMultipartUpload(putObjectRequest, uploadId, completedParts); + + future.handle(genericMultipartHelper.handleExceptionOrResponse(putObjectRequest, returnFuture, uploadId)) + .exceptionally(throwable -> { + genericMultipartHelper.handleException(returnFuture, () -> "Unexpected exception occurred", throwable); + return null; + }); + + return future; } CompletableFuture sendIndividualUploadPartRequest(String uploadId, diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/PausableUpload.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/PausableUpload.java new file mode 100644 index 000000000000..2e0d1885d432 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/PausableUpload.java @@ -0,0 +1,27 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.multipart; + +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.services.s3.multipart.S3ResumeToken; + +@SdkProtectedApi +public interface PausableUpload { + + default S3ResumeToken pause() { + throw new UnsupportedOperationException(); + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/SdkPojoConversionUtils.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/SdkPojoConversionUtils.java index 25fde18cadaf..bff5d389e1e9 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/SdkPojoConversionUtils.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/SdkPojoConversionUtils.java @@ -25,6 +25,7 @@ import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.CompletedPart; import software.amazon.awssdk.services.s3.model.CopyObjectRequest; @@ -33,6 +34,8 @@ import software.amazon.awssdk.services.s3.model.CopyPartResult; import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.HeadObjectRequest; +import software.amazon.awssdk.services.s3.model.ListPartsRequest; +import software.amazon.awssdk.services.s3.model.Part; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest; @@ -56,12 +59,17 @@ private SdkPojoConversionUtils() { public static UploadPartRequest toUploadPartRequest(PutObjectRequest 
putObjectRequest, int partNumber, String uploadId) { UploadPartRequest.Builder builder = UploadPartRequest.builder(); - setSdkFields(builder, putObjectRequest, PUT_OBJECT_REQUEST_TO_UPLOAD_PART_FIELDS_TO_IGNORE); - return builder.uploadId(uploadId).partNumber(partNumber).build(); } + public static CompleteMultipartUploadRequest toCompleteMultipartUploadRequest(PutObjectRequest putObjectRequest, + String uploadId, CompletedPart[] parts) { + CompleteMultipartUploadRequest.Builder builder = CompleteMultipartUploadRequest.builder(); + setSdkFields(builder, putObjectRequest); + return builder.uploadId(uploadId).multipartUpload(c -> c.parts(parts)).build(); + } + public static CreateMultipartUploadRequest toCreateMultipartUploadRequest(PutObjectRequest putObjectRequest) { CreateMultipartUploadRequest.Builder builder = CreateMultipartUploadRequest.builder(); @@ -101,6 +109,18 @@ public static CompletedPart toCompletedPart(UploadPartResponse partResponse, int return builder.partNumber(partNumber).build(); } + public static CompletedPart toCompletedPart(Part part) { + CompletedPart.Builder builder = CompletedPart.builder(); + setSdkFields(builder, part); + return builder.build(); + } + + public static ListPartsRequest toListPartsRequest(String uploadId, PutObjectRequest putObjectRequest) { + ListPartsRequest.Builder builder = ListPartsRequest.builder(); + setSdkFields(builder, putObjectRequest); + return builder.uploadId(uploadId).build(); + } + private static void setSdkFields(SdkPojo targetBuilder, SdkPojo sourceObject) { setSdkFields(targetBuilder, sourceObject, new HashSet<>()); } diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithKnownContentLengthHelper.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithKnownContentLengthHelper.java index 46caefca8d61..9cb1aa62a100 100644 --- 
a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithKnownContentLengthHelper.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithKnownContentLengthHelper.java @@ -15,25 +15,25 @@ package software.amazon.awssdk.services.s3.internal.multipart; +import static software.amazon.awssdk.services.s3.multipart.S3PauseResumeExecutionAttribute.PAUSE_OBSERVABLE; +import static software.amazon.awssdk.services.s3.multipart.S3PauseResumeExecutionAttribute.RESUME_TOKEN; -import java.util.Collection; +import java.util.Map; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReferenceArray; -import java.util.function.Consumer; -import java.util.stream.IntStream; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; +import java.util.concurrent.ConcurrentHashMap; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.SdkPublisher; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.model.CompletedPart; import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.ListPartsRequest; +import software.amazon.awssdk.services.s3.model.Part; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectResponse; -import software.amazon.awssdk.services.s3.model.UploadPartRequest; +import software.amazon.awssdk.services.s3.multipart.S3ResumeToken; +import software.amazon.awssdk.services.s3.paginators.ListPartsPublisher; +import software.amazon.awssdk.utils.CompletableFutureUtils; import 
software.amazon.awssdk.utils.Logger; import software.amazon.awssdk.utils.Pair; @@ -47,7 +47,6 @@ public final class UploadWithKnownContentLengthHelper { private final S3AsyncClient s3AsyncClient; private final long partSizeInBytes; private final GenericMultipartHelper genericMultipartHelper; - private final long maxMemoryUsageInBytes; private final long multipartUploadThresholdInBytes; private final MultipartUploadHelper multipartUploadHelper; @@ -71,7 +70,6 @@ public CompletableFuture uploadObject(PutObjectRequest putObj AsyncRequestBody asyncRequestBody, long contentLength) { CompletableFuture returnFuture = new CompletableFuture<>(); - try { if (contentLength > multipartUploadThresholdInBytes && contentLength > partSizeInBytes) { log.debug(() -> "Starting the upload as multipart upload request"); @@ -80,7 +78,6 @@ public CompletableFuture uploadObject(PutObjectRequest putObj log.debug(() -> "Starting the upload as a single upload part request"); multipartUploadHelper.uploadInOneChunk(putObjectRequest, asyncRequestBody, returnFuture); } - } catch (Throwable throwable) { returnFuture.completeExceptionally(throwable); } @@ -90,7 +87,21 @@ public CompletableFuture uploadObject(PutObjectRequest putObj private void uploadInParts(PutObjectRequest putObjectRequest, long contentLength, AsyncRequestBody asyncRequestBody, CompletableFuture returnFuture) { + S3ResumeToken resumeToken = putObjectRequest.overrideConfiguration() + .map(c -> c.executionAttributes() + .getAttribute(RESUME_TOKEN)).orElse(null); + + if (resumeToken == null) { + initiateNewUpload(putObjectRequest, contentLength, asyncRequestBody, returnFuture); + } else { + ResumeRequestContext resumeRequestContext = new ResumeRequestContext(resumeToken, putObjectRequest, contentLength, + asyncRequestBody, returnFuture); + resumePausedUpload(resumeRequestContext); + } + } + private void initiateNewUpload(PutObjectRequest putObjectRequest, long contentLength, AsyncRequestBody asyncRequestBody, + CompletableFuture 
returnFuture) { CompletableFuture createMultipartUploadFuture = multipartUploadHelper.createMultipartUpload(putObjectRequest, returnFuture); @@ -99,160 +110,133 @@ private void uploadInParts(PutObjectRequest putObjectRequest, long contentLength genericMultipartHelper.handleException(returnFuture, () -> "Failed to initiate multipart upload", throwable); } else { log.debug(() -> "Initiated a new multipart upload, uploadId: " + createMultipartUploadResponse.uploadId()); - doUploadInParts(Pair.of(putObjectRequest, asyncRequestBody), contentLength, returnFuture, - createMultipartUploadResponse.uploadId()); + uploadFromBeginning(Pair.of(putObjectRequest, asyncRequestBody), contentLength, returnFuture, + createMultipartUploadResponse.uploadId()); } }); } - private void doUploadInParts(Pair request, - long contentLength, - CompletableFuture returnFuture, - String uploadId) { + private void uploadFromBeginning(Pair request, long contentLength, + CompletableFuture returnFuture, String uploadId) { - long optimalPartSize = genericMultipartHelper.calculateOptimalPartSizeFor(contentLength, partSizeInBytes); - int partCount = genericMultipartHelper.determinePartCount(contentLength, optimalPartSize); - if (optimalPartSize > partSizeInBytes) { - log.debug(() -> String.format("Configured partSize is %d, but using %d to prevent reaching maximum number of parts " - + "allowed", partSizeInBytes, optimalPartSize)); + long numPartsCompleted = 0; + long partSize = genericMultipartHelper.calculateOptimalPartSizeFor(contentLength, partSizeInBytes); + int partCount = genericMultipartHelper.determinePartCount(contentLength, partSize); + + if (partSize > partSizeInBytes) { + log.debug(() -> String.format("Configured partSize is %d, but using %d to prevent reaching maximum number of " + + "parts allowed", partSizeInBytes, partSize)); } log.debug(() -> String.format("Starting multipart upload with partCount: %d, optimalPartSize: %d", partCount, - optimalPartSize)); + partSize)); + + 
MpuRequestContext mpuRequestContext = MpuRequestContext.builder() + .request(request) + .contentLength(contentLength) + .partSize(partSize) + .uploadId(uploadId) + .existingParts(new ConcurrentHashMap<>()) + .numPartsCompleted(numPartsCompleted) + .build(); + + splitAndSubscribe(mpuRequestContext, returnFuture); + } - MpuRequestContext mpuRequestContext = new MpuRequestContext(request, contentLength, optimalPartSize, uploadId); + private void resumePausedUpload(ResumeRequestContext resumeContext) { + S3ResumeToken resumeToken = resumeContext.resumeToken; + String uploadId = resumeToken.uploadId(); + PutObjectRequest putObjectRequest = resumeContext.putObjectRequest; + Map existingParts = new ConcurrentHashMap<>(); + CompletableFuture listPartsFuture = identifyExistingPartsForResume(uploadId, putObjectRequest, existingParts); - request.right() - .split(b -> b.chunkSizeInBytes(mpuRequestContext.partSize) - .bufferSizeInBytes(maxMemoryUsageInBytes)) - .subscribe(new KnownContentLengthAsyncRequestBodySubscriber(mpuRequestContext, - returnFuture)); - } + int remainingParts = (int) (resumeToken.totalNumParts() - resumeToken.numPartsCompleted()); + log.debug(() -> String.format("Resuming a paused multipart upload, uploadId: %s, completedPartCount: %d, " + + "remainingPartCount: %d, partSize: %d", + uploadId, resumeToken.numPartsCompleted(), remainingParts, resumeToken.partSize())); - private static final class MpuRequestContext { - private final Pair request; - private final long contentLength; - private final long partSize; + CompletableFutureUtils.forwardExceptionTo(resumeContext.returnFuture, listPartsFuture); - private final String uploadId; + listPartsFuture.whenComplete((r, t) -> { + if (t != null) { + genericMultipartHelper.handleException(resumeContext.returnFuture, + () -> "Failed to resume because listParts failed", t); + return; + } - private MpuRequestContext(Pair request, - long contentLength, - long partSize, - String uploadId) { - this.request = request; - 
this.contentLength = contentLength; - this.partSize = partSize; - this.uploadId = uploadId; - } + Pair request = Pair.of(putObjectRequest, resumeContext.asyncRequestBody); + MpuRequestContext mpuRequestContext = MpuRequestContext.builder() + .request(request) + .contentLength(resumeContext.contentLength) + .partSize(resumeToken.partSize()) + .uploadId(uploadId) + .existingParts(existingParts) + .numPartsCompleted(resumeToken.numPartsCompleted()) + .build(); + + splitAndSubscribe(mpuRequestContext, resumeContext.returnFuture); + }); } - private class KnownContentLengthAsyncRequestBodySubscriber implements Subscriber { - - /** - * The number of AsyncRequestBody has been received but yet to be processed - */ - private final AtomicInteger asyncRequestBodyInFlight = new AtomicInteger(0); + private void splitAndSubscribe(MpuRequestContext mpuRequestContext, CompletableFuture returnFuture) { + KnownContentLengthAsyncRequestBodySubscriber subscriber = + new KnownContentLengthAsyncRequestBodySubscriber(mpuRequestContext, returnFuture, multipartUploadHelper); - /** - * Indicates whether CompleteMultipart has been initiated or not. 
- */ - private final AtomicBoolean completedMultipartInitiated = new AtomicBoolean(false); + attachSubscriberToObservable(subscriber, mpuRequestContext.request().left()); - private final AtomicBoolean failureActionInitiated = new AtomicBoolean(false); + mpuRequestContext.request().right() + .split(b -> b.chunkSizeInBytes(mpuRequestContext.partSize()) + .bufferSizeInBytes(maxMemoryUsageInBytes)) + .subscribe(subscriber); + } - private final AtomicInteger partNumber = new AtomicInteger(1); + private CompletableFuture identifyExistingPartsForResume(String uploadId, PutObjectRequest putObjectRequest, + Map existingParts) { + ListPartsRequest request = SdkPojoConversionUtils.toListPartsRequest(uploadId, putObjectRequest); + ListPartsPublisher listPartsPublisher = s3AsyncClient.listPartsPaginator(request); + SdkPublisher partsPublisher = listPartsPublisher.parts(); + return partsPublisher.subscribe(part -> + existingParts.put(part.partNumber(), SdkPojoConversionUtils.toCompletedPart(part))); + } - private final AtomicReferenceArray completedParts; - private final String uploadId; - private final Collection> futures = new ConcurrentLinkedQueue<>(); + private void attachSubscriberToObservable(KnownContentLengthAsyncRequestBodySubscriber subscriber, + PutObjectRequest putObjectRequest) { + // observable will be present if TransferManager is used + putObjectRequest.overrideConfiguration().map(c -> c.executionAttributes().getAttribute(PAUSE_OBSERVABLE)) + .ifPresent(p -> p.setPausableUpload(new DefaultPausableUpload(subscriber))); + } + private static final class ResumeRequestContext { + private final S3ResumeToken resumeToken; private final PutObjectRequest putObjectRequest; + private final long contentLength; + private final AsyncRequestBody asyncRequestBody; private final CompletableFuture returnFuture; - private Subscription subscription; - - private volatile boolean isDone; - KnownContentLengthAsyncRequestBodySubscriber(MpuRequestContext mpuRequestContext, - 
CompletableFuture returnFuture) { - long optimalPartSize = genericMultipartHelper.calculateOptimalPartSizeFor(mpuRequestContext.contentLength, - partSizeInBytes); - int partCount = genericMultipartHelper.determinePartCount(mpuRequestContext.contentLength, optimalPartSize); - this.putObjectRequest = mpuRequestContext.request.left(); + private ResumeRequestContext(S3ResumeToken resumeToken, + PutObjectRequest putObjectRequest, + long contentLength, + AsyncRequestBody asyncRequestBody, + CompletableFuture returnFuture) { + this.resumeToken = resumeToken; + this.putObjectRequest = putObjectRequest; + this.contentLength = contentLength; + this.asyncRequestBody = asyncRequestBody; this.returnFuture = returnFuture; - this.completedParts = new AtomicReferenceArray<>(partCount); - this.uploadId = mpuRequestContext.uploadId; } + } - @Override - public void onSubscribe(Subscription s) { - if (this.subscription != null) { - log.warn(() -> "The subscriber has already been subscribed. Cancelling the incoming subscription"); - subscription.cancel(); - return; - } - this.subscription = s; - s.request(1); - returnFuture.whenComplete((r, t) -> { - if (t != null) { - s.cancel(); - if (failureActionInitiated.compareAndSet(false, true)) { - multipartUploadHelper.failRequestsElegantly(futures, t, uploadId, returnFuture, putObjectRequest); - } - } - }); - } + private static final class DefaultPausableUpload implements PausableUpload { - @Override - public void onNext(AsyncRequestBody asyncRequestBody) { - log.trace(() -> "Received asyncRequestBody " + asyncRequestBody.contentLength()); - asyncRequestBodyInFlight.incrementAndGet(); - UploadPartRequest uploadRequest = - SdkPojoConversionUtils.toUploadPartRequest(putObjectRequest, - partNumber.getAndIncrement(), - uploadId); - - Consumer completedPartConsumer = completedPart -> completedParts.set(completedPart.partNumber() - 1, - completedPart); - multipartUploadHelper.sendIndividualUploadPartRequest(uploadId, completedPartConsumer, 
futures, - Pair.of(uploadRequest, asyncRequestBody)) - .whenComplete((r, t) -> { - if (t != null) { - if (failureActionInitiated.compareAndSet(false, true)) { - multipartUploadHelper.failRequestsElegantly(futures, t, uploadId, returnFuture, - putObjectRequest); - } - } else { - completeMultipartUploadIfFinish(asyncRequestBodyInFlight.decrementAndGet()); - } - }); - subscription.request(1); - } + private KnownContentLengthAsyncRequestBodySubscriber subscriber; - @Override - public void onError(Throwable t) { - log.debug(() -> "Received onError ", t); - if (failureActionInitiated.compareAndSet(false, true)) { - multipartUploadHelper.failRequestsElegantly(futures, t, uploadId, returnFuture, putObjectRequest); - } + private DefaultPausableUpload(KnownContentLengthAsyncRequestBodySubscriber subscriber) { + this.subscriber = subscriber; } @Override - public void onComplete() { - log.debug(() -> "Received onComplete()"); - isDone = true; - completeMultipartUploadIfFinish(asyncRequestBodyInFlight.get()); + public S3ResumeToken pause() { + return subscriber.pause(); } - - private void completeMultipartUploadIfFinish(int requestsInFlight) { - if (isDone && requestsInFlight == 0 && completedMultipartInitiated.compareAndSet(false, true)) { - CompletedPart[] parts = - IntStream.range(0, completedParts.length()) - .mapToObj(completedParts::get) - .toArray(CompletedPart[]::new); - multipartUploadHelper.completeMultipartUpload(returnFuture, uploadId, parts, putObjectRequest); - } - } - } } diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/plugins/S3DisableChunkEncodingAuthSchemeProvider.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/plugins/S3DisableChunkEncodingAuthSchemeProvider.java new file mode 100644 index 000000000000..c02814d5273d --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/plugins/S3DisableChunkEncodingAuthSchemeProvider.java @@ -0,0 +1,67 @@ +/* + * Copyright 
Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.plugins; + +import java.util.ArrayList; +import java.util.List; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.auth.aws.scheme.AwsV4AuthScheme; +import software.amazon.awssdk.http.auth.aws.signer.AwsV4FamilyHttpSigner; +import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; +import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.auth.scheme.S3AuthSchemeParams; +import software.amazon.awssdk.services.s3.auth.scheme.S3AuthSchemeProvider; + +/** + * Internal plugin that sets the signer property {@link AwsV4FamilyHttpSigner#CHUNK_ENCODING_ENABLED} to {@code false}. This + * plugin is invoked by the client builder only if {@link S3Configuration#chunkedEncodingEnabled()} is set to {@code false}. 
+ */ +@SdkInternalApi +public final class S3DisableChunkEncodingAuthSchemeProvider implements S3AuthSchemeProvider { + + private final S3AuthSchemeProvider delegate; + + private S3DisableChunkEncodingAuthSchemeProvider(S3AuthSchemeProvider delegate) { + this.delegate = delegate; + } + + public static S3DisableChunkEncodingAuthSchemeProvider create(S3AuthSchemeProvider delegate) { + return new S3DisableChunkEncodingAuthSchemeProvider(delegate); + } + + @Override + public List resolveAuthScheme(S3AuthSchemeParams authSchemeParams) { + List options = delegate.resolveAuthScheme(authSchemeParams); + List result = options; + + // Disables chunk encoding but only for PutObject or UploadPart operations. + String operation = authSchemeParams.operation(); + if ("PutObject".equals(operation) || "UploadPart".equals(operation)) { + result = new ArrayList<>(options.size()); + for (AuthSchemeOption option : options) { + String schemeId = option.schemeId(); + // We check here that the scheme id is sigV4 or sigV4a or some other in the same family. + // We don't set the overrides for non-sigV4 auth schemes. + if (schemeId.startsWith(AwsV4AuthScheme.SCHEME_ID)) { + result.add(option.toBuilder() + .putSignerProperty(AwsV4FamilyHttpSigner.CHUNK_ENCODING_ENABLED, false) + .build()); + } + } + } + return result; + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/plugins/S3DisableChunkEncodingIfConfiguredPlugin.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/plugins/S3DisableChunkEncodingIfConfiguredPlugin.java new file mode 100644 index 000000000000..a37ccfa1c8d1 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/plugins/S3DisableChunkEncodingIfConfiguredPlugin.java @@ -0,0 +1,82 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.plugins; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkPlugin; +import software.amazon.awssdk.core.SdkServiceClientConfiguration; +import software.amazon.awssdk.core.client.config.SdkClientConfiguration; +import software.amazon.awssdk.core.client.config.SdkClientOption; +import software.amazon.awssdk.http.auth.aws.signer.AwsV4FamilyHttpSigner; +import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.S3ServiceClientConfiguration; +import software.amazon.awssdk.services.s3.auth.scheme.S3AuthSchemeProvider; +import software.amazon.awssdk.utils.Logger; + +/** + * Internal plugin that uses the check if {@link S3Configuration#chunkedEncodingEnabled()} is configured and equals to + * {@code false}, if so, then it installs an instance of {@link S3DisableChunkEncodingAuthSchemeProvider} wrapping the configured + * {@link S3AuthSchemeProvider} that sets {@link AwsV4FamilyHttpSigner#CHUNK_ENCODING_ENABLED} to false. + *

    + * This pre SRA logic was implemented before using an interceptor but now requires wrapping the S3AuthSchemeProvider for it to + * work. + */ +@SdkInternalApi +public final class S3DisableChunkEncodingIfConfiguredPlugin implements SdkPlugin { + + private static final Logger LOG = Logger.loggerFor(S3DisableChunkEncodingIfConfiguredPlugin.class); + + private final boolean isServiceConfigurationPresent; + private final boolean isChunkedEncodingEnabledConfigured; + private final boolean isChunkedEncodingEnabledDisabled; + private final boolean configuresDisableChunkEncoding; + + public S3DisableChunkEncodingIfConfiguredPlugin(SdkClientConfiguration config) { + S3Configuration serviceConfiguration = + (S3Configuration) config.option(SdkClientOption.SERVICE_CONFIGURATION); + + boolean isServiceConfigurationPresent = serviceConfiguration != null; + boolean shouldAddDisableChunkEncoding = false; + boolean isChunkedEncodingEnabledConfigured = false; + boolean isChunkedEncodingEnabledDisabled = false; + boolean configuresDisableChunkEncoding = false; + if (isServiceConfigurationPresent) { + isChunkedEncodingEnabledConfigured = serviceConfiguration.toBuilder().chunkedEncodingEnabled() != null; + isChunkedEncodingEnabledDisabled = !serviceConfiguration.chunkedEncodingEnabled(); + configuresDisableChunkEncoding = isChunkedEncodingEnabledConfigured && isChunkedEncodingEnabledDisabled; + if (configuresDisableChunkEncoding) { + shouldAddDisableChunkEncoding = true; + } + } + this.configuresDisableChunkEncoding = shouldAddDisableChunkEncoding; + this.isChunkedEncodingEnabledConfigured = isChunkedEncodingEnabledConfigured; + this.isChunkedEncodingEnabledDisabled = isChunkedEncodingEnabledDisabled; + this.isServiceConfigurationPresent = isServiceConfigurationPresent; + } + + @Override + public void configureClient(SdkServiceClientConfiguration.Builder config) { + if (configuresDisableChunkEncoding) { + LOG.debug(() -> String.format("chunkedEncodingEnabled was explicitly disabled 
in the configuration, adding " + + "`S3DisableChunkEncodingAuthSchemeProvider` auth provider wrapper.")); + S3ServiceClientConfiguration.Builder s3Config = (S3ServiceClientConfiguration.Builder) config; + + S3AuthSchemeProvider disablingAuthSchemeProvider = + S3DisableChunkEncodingAuthSchemeProvider.create(s3Config.authSchemeProvider()); + s3Config.authSchemeProvider(disablingAuthSchemeProvider); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/plugins/S3OverrideAuthSchemePropertiesPlugin.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/plugins/S3OverrideAuthSchemePropertiesPlugin.java new file mode 100644 index 000000000000..5b5fe096eefe --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/plugins/S3OverrideAuthSchemePropertiesPlugin.java @@ -0,0 +1,221 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.plugins; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.core.SdkPlugin; +import software.amazon.awssdk.core.SdkServiceClientConfiguration; +import software.amazon.awssdk.http.auth.aws.scheme.AwsV4AuthScheme; +import software.amazon.awssdk.http.auth.aws.signer.AwsV4FamilyHttpSigner; +import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; +import software.amazon.awssdk.http.auth.spi.signer.SignerProperty; +import software.amazon.awssdk.identity.spi.IdentityProperty; +import software.amazon.awssdk.services.s3.S3ServiceClientConfiguration; +import software.amazon.awssdk.services.s3.auth.scheme.S3AuthSchemeParams; +import software.amazon.awssdk.services.s3.auth.scheme.S3AuthSchemeProvider; + +/** + * Plugin that allows override of signer and identity properties on the selected auth scheme options. The class offers static + * methods to create plugins for common cases such as enable payload signing by default. 
For instance, if you want + * to unconditionally enable payload signing across the board you can create the S3 client, e.g., + * + * {@snippet + * S3AsyncClient s3 = S3AsyncClient.builder() + * .region(Region.US_WEST_2) + * .credentialsProvider(CREDENTIALS) + * .httpClient(httpClient) + * .addPlugin(S3OverrideAuthSchemePropertiesPlugin.enablePayloadSigningPlugin()) + * .build(); + * } + * + * The plugin can also be used for a particular request, e.g., + * + * {@snippet + * s3Client.putObject(PutObjectRequest.builder() + * .overrideConfiguration(c -> c.addPlugin( + * S3OverrideAuthSchemePropertiesPlugin.enablePayloadSigningPlugin())) + * .checksumAlgorithm(ChecksumAlgorithm.SHA256) + * .bucket("test").key("test").build(), RequestBody.fromBytes("abc".getBytes())); + * } + */ +@SdkProtectedApi +public final class S3OverrideAuthSchemePropertiesPlugin implements SdkPlugin { + private final Map, Object> identityProperties; + private final Map, Object> signerProperties; + private final Set operationConstraints; + + private S3OverrideAuthSchemePropertiesPlugin(Builder builder) { + if (builder.identityProperties.isEmpty()) { + this.identityProperties = Collections.emptyMap(); + } else { + this.identityProperties = Collections.unmodifiableMap(new HashMap<>(builder.identityProperties)); + } + if (builder.signerProperties.isEmpty()) { + this.signerProperties = Collections.emptyMap(); + } else { + this.signerProperties = Collections.unmodifiableMap(new HashMap<>(builder.signerProperties)); + } + if (builder.operationConstraints.isEmpty()) { + this.operationConstraints = Collections.emptySet(); + } else { + this.operationConstraints = Collections.unmodifiableSet(new HashSet<>(builder.operationConstraints)); + } + } + + @Override + public void configureClient(SdkServiceClientConfiguration.Builder config) { + if (identityProperties.isEmpty() && signerProperties.isEmpty()) { + return; + } + S3ServiceClientConfiguration.Builder s3Config = (S3ServiceClientConfiguration.Builder) 
config; + S3AuthSchemeProvider delegate = s3Config.authSchemeProvider(); + s3Config.authSchemeProvider(params -> { + List options = delegate.resolveAuthScheme(params); + List result = new ArrayList<>(options.size()); + for (AuthSchemeOption option : options) { + // We check here that the scheme id is sigV4 or sigV4a or some other in the same family. + // We don't set the overrides for non-sigV4 auth schemes. If the plugin was configured to + // constraint using operations then that's also checked on the call below. + if (addConfiguredProperties(option, params)) { + AuthSchemeOption.Builder builder = option.toBuilder(); + identityProperties.forEach((k, v) -> putIdentityProperty(builder, k, v)); + signerProperties.forEach((k, v) -> putSingerProperty(builder, k, v)); + result.add(builder.build()); + } else { + result.add(option); + } + } + return result; + }); + } + + @SuppressWarnings("unchecked") + private void putIdentityProperty(AuthSchemeOption.Builder builder, IdentityProperty key, Object value) { + // Safe because of Builder#putIdentityProperty + builder.putIdentityProperty((IdentityProperty) key, (T) value); + } + + @SuppressWarnings("unchecked") + private void putSingerProperty(AuthSchemeOption.Builder builder, SignerProperty key, Object value) { + // Safe because of Builder#putSignerProperty + builder.putSignerProperty((SignerProperty) key, (T) value); + } + + private boolean addConfiguredProperties(AuthSchemeOption option, S3AuthSchemeParams params) { + String schemeId = option.schemeId(); + // We check here that the scheme id is sigV4 or sigV4a or some other in the same family. + // We don't set the overrides for non-sigV4 auth schemes. + if (schemeId.startsWith(AwsV4AuthScheme.SCHEME_ID)) { + if (this.operationConstraints.isEmpty() || this.operationConstraints.contains(params.operation())) { + return true; + } + } + return false; + } + + /** + * Creates a new plugin that enables payload signing. This plugin can be used per client or by per-request. 
+ */ + public static SdkPlugin enablePayloadSigningPlugin() { + return builder() + .payloadSigningEnabled(true) + .build(); + } + + /** + * Creates a new plugin that disables the ChunkEncoding signers property for the `UploadPart` and `PutObject` operations. + * This plugin can be used per client or by per-request. + */ + public static SdkPlugin disableChunkEncodingPlugin() { + return builder() + .chunkEncodingEnabled(false) + .addOperationConstraint("UploadPart") + .addOperationConstraint("PutObject") + .build(); + } + + /** + * Creates a new builder to configure the plugin. + */ + public static Builder builder() { + return new Builder(); + } + + public static class Builder { + private final Map, Object> identityProperties = new HashMap<>(); + private final Map, Object> signerProperties = new HashMap<>(); + private final Set operationConstraints = new HashSet<>(); + + /** + * Adds an operation constraint to use the configured properties. + */ + public Builder addOperationConstraint(String operation) { + this.operationConstraints.add(operation); + return this; + } + + /** + * Adds the provided property value as an override. + */ + public Builder putIdentityProperty(IdentityProperty key, T value) { + identityProperties.put(key, value); + return this; + } + + /** + * Adds the provided property value as an override. + */ + public Builder putSignerProperty(SignerProperty key, T value) { + signerProperties.put(key, value); + return this; + } + + /** + * Sets the {@link AwsV4FamilyHttpSigner#NORMALIZE_PATH} signing property to the given value. + */ + public Builder normalizePath(Boolean value) { + return putSignerProperty(AwsV4FamilyHttpSigner.NORMALIZE_PATH, value); + } + + /** + * Sets the {@link AwsV4FamilyHttpSigner#CHUNK_ENCODING_ENABLED} signing property to the given value. 
+ */ + public Builder chunkEncodingEnabled(Boolean value) { + return putSignerProperty(AwsV4FamilyHttpSigner.CHUNK_ENCODING_ENABLED, value); + } + + /** + * Sets the {@link AwsV4FamilyHttpSigner#PAYLOAD_SIGNING_ENABLED} signing property to the given value. + */ + public Builder payloadSigningEnabled(Boolean value) { + return putSignerProperty(AwsV4FamilyHttpSigner.PAYLOAD_SIGNING_ENABLED, value); + } + + /** + * Builds and returns a new plugin. + */ + public S3OverrideAuthSchemePropertiesPlugin build() { + return new S3OverrideAuthSchemePropertiesPlugin(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/s3express/S3ExpressAuthSchemeProvider.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/s3express/S3ExpressAuthSchemeProvider.java index f6fadc7d422f..24314769cb5a 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/s3express/S3ExpressAuthSchemeProvider.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/s3express/S3ExpressAuthSchemeProvider.java @@ -15,8 +15,8 @@ package software.amazon.awssdk.services.s3.internal.s3express; +import java.util.ArrayList; import java.util.List; -import java.util.stream.Collectors; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; import software.amazon.awssdk.identity.spi.IdentityProperty; @@ -42,9 +42,13 @@ public static S3ExpressAuthSchemeProvider create(S3AuthSchemeProvider delegate) @Override public List resolveAuthScheme(S3AuthSchemeParams authSchemeParams) { List options = delegate.resolveAuthScheme(authSchemeParams); - return options.stream() - .map(option -> option.toBuilder().putIdentityProperty(BUCKET, authSchemeParams.bucket()).build()) - .collect(Collectors.toList()); + List result = new ArrayList<>(options.size()); + for (AuthSchemeOption option : options) { + result.add(option.toBuilder() + 
.putIdentityProperty(BUCKET, authSchemeParams.bucket()) + .build()); + } + return result; } public S3AuthSchemeProvider delegate() { diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/PauseObservable.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/PauseObservable.java new file mode 100644 index 000000000000..49886c7beeac --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/PauseObservable.java @@ -0,0 +1,41 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.multipart; + +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.services.s3.internal.multipart.PausableUpload; + +@SdkProtectedApi +public class PauseObservable { + + private volatile PausableUpload pausableUpload; + + public void setPausableUpload(PausableUpload pausableUpload) { + this.pausableUpload = pausableUpload; + } + + public S3ResumeToken pause() { + // single part upload or TM is not used + if (pausableUpload == null) { + return null; + } + return pausableUpload.pause(); + } + + public PausableUpload pausableUpload() { + return pausableUpload; + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/S3PauseResumeExecutionAttribute.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/S3PauseResumeExecutionAttribute.java new file mode 100644 index 000000000000..3aae35725557 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/S3PauseResumeExecutionAttribute.java @@ -0,0 +1,26 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.multipart; + +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.core.interceptor.ExecutionAttribute; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; + +@SdkProtectedApi +public final class S3PauseResumeExecutionAttribute extends SdkExecutionAttribute { + public static final ExecutionAttribute RESUME_TOKEN = new ExecutionAttribute<>("ResumeToken"); + public static final ExecutionAttribute PAUSE_OBSERVABLE = new ExecutionAttribute<>("PauseObservable"); +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/S3ResumeToken.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/S3ResumeToken.java new file mode 100644 index 000000000000..2ec2223fcec5 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/S3ResumeToken.java @@ -0,0 +1,108 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.multipart; + +import java.util.Objects; +import software.amazon.awssdk.annotations.SdkProtectedApi; + +@SdkProtectedApi +public class S3ResumeToken { + + private final String uploadId; + private final Long partSize; + private final Long totalNumParts; + private final Long numPartsCompleted; + + public S3ResumeToken(Builder builder) { + this.uploadId = builder.uploadId; + this.partSize = builder.partSize; + this.totalNumParts = builder.totalNumParts; + this.numPartsCompleted = builder.numPartsCompleted; + } + + public static Builder builder() { + return new Builder(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + S3ResumeToken that = (S3ResumeToken) o; + + return partSize == that.partSize && totalNumParts == that.totalNumParts && numPartsCompleted == that.numPartsCompleted + && Objects.equals(uploadId, that.uploadId); + } + + @Override + public int hashCode() { + return Objects.hashCode(uploadId); + } + + public String uploadId() { + return uploadId; + } + + public Long partSize() { + return partSize; + } + + public Long totalNumParts() { + return totalNumParts; + } + + public Long numPartsCompleted() { + return numPartsCompleted; + } + + public static final class Builder { + private String uploadId; + private Long partSize; + private Long totalNumParts; + private Long numPartsCompleted; + + private Builder() { + } + + public Builder uploadId(String uploadId) { + this.uploadId = uploadId; + return this; + } + + public Builder partSize(Long partSize) { + this.partSize = partSize; + return this; + } + + public Builder totalNumParts(Long totalNumParts) { + this.totalNumParts = totalNumParts; + return this; + } + + public Builder numPartsCompleted(Long numPartsCompleted) { + this.numPartsCompleted = numPartsCompleted; + return this; + } + + public S3ResumeToken build() { + return new S3ResumeToken(this); 
+ } + } +} diff --git a/services/s3/src/main/resources/codegen-resources/customization.config b/services/s3/src/main/resources/codegen-resources/customization.config index 7bd3369c2b88..0f06cc755691 100644 --- a/services/s3/src/main/resources/codegen-resources/customization.config +++ b/services/s3/src/main/resources/codegen-resources/customization.config @@ -240,7 +240,7 @@ "multipartCustomization": { "multipartConfigurationClass": "software.amazon.awssdk.services.s3.multipart.MultipartConfiguration", "multipartConfigMethodDoc": "Configuration for multipart operation of this client.", - "multipartEnableMethodDoc": "Enables automatic conversion of put and copy method to their equivalent multipart operation.", + "multipartEnableMethodDoc": "Enables automatic conversion of PUT and COPY methods to their equivalent multipart operation. CRC32 checksum will be enabled for PUT, unless the checksum is specified or checksum validation is disabled.", "contextParamEnabledKey": "S3AsyncClientDecorator.MULTIPART_ENABLED_KEY", "contextParamConfigKey": "S3AsyncClientDecorator.MULTIPART_CONFIGURATION_KEY" }, @@ -248,8 +248,6 @@ "software.amazon.awssdk.services.s3.internal.handlers.StreamingRequestInterceptor", "software.amazon.awssdk.services.s3.internal.handlers.CreateBucketInterceptor", "software.amazon.awssdk.services.s3.internal.handlers.CreateMultipartUploadRequestInterceptor", - "software.amazon.awssdk.services.s3.internal.handlers.EnableChunkedEncodingInterceptor", - "software.amazon.awssdk.services.s3.internal.handlers.ConfigureSignerInterceptor", "software.amazon.awssdk.services.s3.internal.handlers.DecodeUrlEncodedResponseInterceptor", "software.amazon.awssdk.services.s3.internal.handlers.GetBucketPolicyInterceptor", "software.amazon.awssdk.services.s3.internal.handlers.S3ExpressChecksumInterceptor", @@ -259,9 +257,10 @@ "software.amazon.awssdk.services.s3.internal.handlers.ExceptionTranslationInterceptor", 
"software.amazon.awssdk.services.s3.internal.handlers.GetObjectInterceptor", "software.amazon.awssdk.services.s3.internal.handlers.CopySourceInterceptor", - "software.amazon.awssdk.services.s3.internal.handlers.DisablePayloadSigningInterceptor" + "software.amazon.awssdk.services.s3.internal.handlers.ObjectMetadataInterceptor" ], "internalPlugins": [ + "software.amazon.awssdk.services.s3.internal.plugins.S3DisableChunkEncodingIfConfiguredPlugin(config)", "software.amazon.awssdk.services.s3.internal.s3express.S3ExpressPlugin" ], "requiredTraitValidationEnabled": true, diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/PayloadSigningDisabledTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/PayloadSigningDisabledTest.java index 0ac8d0eec1bb..35091b38875d 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/PayloadSigningDisabledTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/PayloadSigningDisabledTest.java @@ -20,11 +20,10 @@ import org.junit.jupiter.api.Test; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.auth.signer.S3SignerExecutionAttribute; -import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; import software.amazon.awssdk.http.HttpExecuteResponse; import software.amazon.awssdk.http.SdkHttpResponse; import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.internal.plugins.S3OverrideAuthSchemePropertiesPlugin; import software.amazon.awssdk.testutils.service.http.MockAsyncHttpClient; import software.amazon.awssdk.testutils.service.http.MockSyncHttpClient; @@ -33,10 +32,6 @@ */ public class PayloadSigningDisabledTest { private static final AwsCredentialsProvider CREDENTIALS = () -> AwsBasicCredentials.create("akid", "skid"); - private static final ClientOverrideConfiguration 
ENABLE_PAYLOAD_SIGNING_CONFIG = - ClientOverrideConfiguration.builder() - .putExecutionAttribute(S3SignerExecutionAttribute.ENABLE_PAYLOAD_SIGNING, true) - .build(); @Test public void syncPayloadSigningIsDisabled() { @@ -83,7 +78,7 @@ public void syncPayloadSigningCanBeEnabled() { .region(Region.US_WEST_2) .credentialsProvider(CREDENTIALS) .httpClient(httpClient) - .overrideConfiguration(ENABLE_PAYLOAD_SIGNING_CONFIG) + .addPlugin(S3OverrideAuthSchemePropertiesPlugin.enablePayloadSigningPlugin()) .build()) { httpClient.stubNextResponse(HttpExecuteResponse.builder() .response(SdkHttpResponse.builder().statusCode(200).build()) @@ -103,7 +98,7 @@ public void asyncPayloadSigningCanBeEnabled() { .region(Region.US_WEST_2) .credentialsProvider(CREDENTIALS) .httpClient(httpClient) - .overrideConfiguration(ENABLE_PAYLOAD_SIGNING_CONFIG) + .addPlugin(S3OverrideAuthSchemePropertiesPlugin.enablePayloadSigningPlugin()) .build()) { httpClient.stubNextResponse(HttpExecuteResponse.builder() .response(SdkHttpResponse.builder().statusCode(200).build()) diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3SignerTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3SignerTest.java index 0652127a8bb2..32178fd4ec76 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3SignerTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3SignerTest.java @@ -39,7 +39,6 @@ import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; import software.amazon.awssdk.auth.signer.AwsS3V4Signer; -import software.amazon.awssdk.auth.signer.S3SignerExecutionAttribute; import software.amazon.awssdk.core.checksums.Algorithm; import software.amazon.awssdk.core.checksums.ChecksumSpecs; import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; @@ -47,6 +46,7 @@ import software.amazon.awssdk.core.internal.util.Mimetype; import 
software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.internal.plugins.S3OverrideAuthSchemePropertiesPlugin; import software.amazon.awssdk.services.s3.model.ChecksumAlgorithm; import software.amazon.awssdk.services.s3.model.PutObjectRequest; @@ -69,10 +69,13 @@ private String getEndpoint() { } private S3Client getS3Client(boolean chunkedEncoding, boolean payloadSigning, URI endpoint) { + S3OverrideAuthSchemePropertiesPlugin plugin = S3OverrideAuthSchemePropertiesPlugin.builder() + .chunkEncodingEnabled(chunkedEncoding) + .payloadSigningEnabled(payloadSigning) + .build(); return S3Client.builder() + .addPlugin(plugin) .overrideConfiguration(ClientOverrideConfiguration.builder() - .putExecutionAttribute(S3SignerExecutionAttribute.ENABLE_CHUNKED_ENCODING, chunkedEncoding) - .putExecutionAttribute(S3SignerExecutionAttribute.ENABLE_PAYLOAD_SIGNING, payloadSigning) .putAdvancedOption(SdkAdvancedClientOption.SIGNER, AwsS3V4Signer.create()).build()) .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) @@ -150,7 +153,7 @@ public void headerBasedSignedPayload() { stubFor(any(urlMatching(".*")) .willReturn(response())); s3Client.putObject(PutObjectRequest.builder() - .checksumAlgorithm(ChecksumAlgorithm.SHA256) + .checksumAlgorithm(ChecksumAlgorithm.SHA256) .bucket("test").key("test").build(), RequestBody.fromBytes("abc".getBytes())); verify(putRequestedFor(anyUrl()).withHeader(CONTENT_TYPE, equalTo(Mimetype.MIMETYPE_OCTET_STREAM))); verify(putRequestedFor(anyUrl()).withHeader(CONTENT_LENGTH, equalTo("3"))); diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/crt/CrtDownloadErrorTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/crt/CrtDownloadErrorTest.java index df1d717d866e..8819702cb957 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/crt/CrtDownloadErrorTest.java +++ 
b/services/s3/src/test/java/software/amazon/awssdk/services/s3/crt/CrtDownloadErrorTest.java @@ -15,65 +15,74 @@ package software.amazon.awssdk.services.s3.crt; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.head; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; -import com.github.tomakehurst.wiremock.WireMockServer; import com.github.tomakehurst.wiremock.client.WireMock; -import com.github.tomakehurst.wiremock.core.WireMockConfiguration; +import com.github.tomakehurst.wiremock.junit5.WireMockRuntimeInfo; +import com.github.tomakehurst.wiremock.junit5.WireMockTest; import java.net.URI; import java.nio.charset.StandardCharsets; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import software.amazon.awssdk.core.async.AsyncResponseTransformer; +import software.amazon.awssdk.crt.CrtResource; import software.amazon.awssdk.crt.Log; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.model.S3Exception; +@WireMockTest +@Timeout(10) public class CrtDownloadErrorTest { private static final String BUCKET = "my-bucket"; private static final String KEY = "my-key"; - private static final WireMockServer WM = new WireMockServer(WireMockConfiguration.wireMockConfig().dynamicPort()); private S3AsyncClient s3; @BeforeAll - public static void setup() { - WM.start(); - // Execute this statement before constructing the SDK service client. 
- Log.initLoggingToStdout(Log.LogLevel.Trace); + public static void setUpBeforeAll() { + System.setProperty("aws.crt.debugnative", "true"); + Log.initLoggingToStdout(Log.LogLevel.Warn); } - @AfterAll - public static void teardown() { - WM.stop(); + @BeforeEach + public void setup(WireMockRuntimeInfo wiremock) { + s3 = S3AsyncClient.crtBuilder() + .endpointOverride(URI.create("http://localhost:" + wiremock.getHttpPort())) + .forcePathStyle(true) + .region(Region.US_EAST_1) + .build(); + } @AfterEach - public void methodTeardown() { - if (s3 != null) { - s3.close(); - } - s3 = null; + public void tearDown() { + s3.close(); } + @AfterAll + public static void verifyCrtResource() { + CrtResource.waitForNoResources(); + } + + @Test public void getObject_headObjectOk_getObjectThrows_operationThrows() { - s3 = S3AsyncClient.crtBuilder() - .endpointOverride(URI.create("http://localhost:" + WM.port())) - .forcePathStyle(true) - .region(Region.US_EAST_1) - .build(); - String path = String.format("/%s/%s", BUCKET, KEY); - WM.stubFor(WireMock.head(WireMock.urlPathEqualTo(path)) - .willReturn(WireMock.aResponse() - .withStatus(200) - .withHeader("ETag", "etag") - .withHeader("Content-Length", "5"))); + stubFor(head(urlPathEqualTo(path)) + .willReturn(WireMock.aResponse() + .withStatus(200) + .withHeader("ETag", "etag") + .withHeader("Content-Length", "5"))); String errorContent = "" + "\n" @@ -82,39 +91,35 @@ public void getObject_headObjectOk_getObjectThrows_operationThrows() { + " request-id\n" + " host-id\n" + ""; - WM.stubFor(WireMock.get(WireMock.urlPathEqualTo(path)) - .willReturn(WireMock.aResponse() - .withStatus(403) - .withBody(errorContent))); + stubFor(get(urlPathEqualTo(path)) + .willReturn(WireMock.aResponse() + .withStatus(403) + .withBody(errorContent))); assertThatThrownBy(s3.getObject(r -> r.bucket(BUCKET).key(KEY), AsyncResponseTransformer.toBytes())::join) .hasCauseInstanceOf(S3Exception.class) .hasMessageContaining("User does not have permission") 
.hasMessageContaining("Status Code: 403"); + + } @Test public void getObject_headObjectOk_getObjectOk_operationSucceeds() { - s3 = S3AsyncClient.crtBuilder() - .endpointOverride(URI.create("http://localhost:" + WM.port())) - .forcePathStyle(true) - .region(Region.US_EAST_1) - .build(); - String path = String.format("/%s/%s", BUCKET, KEY); byte[] content = "hello".getBytes(StandardCharsets.UTF_8); - WM.stubFor(WireMock.head(WireMock.urlPathEqualTo(path)) - .willReturn(WireMock.aResponse() - .withStatus(200) - .withHeader("ETag", "etag") - .withHeader("Content-Length", Integer.toString(content.length)))); - WM.stubFor(WireMock.get(WireMock.urlPathEqualTo(path)) - .willReturn(WireMock.aResponse() - .withStatus(200) - .withHeader("Content-Type", "text/plain") - .withBody(content))); + stubFor(head(urlPathEqualTo(path)) + .willReturn(WireMock.aResponse() + .withStatus(200) + .withHeader("ETag", "etag") + .withHeader("Content-Length", Integer.toString(content.length)))); + stubFor(get(urlPathEqualTo(path)) + .willReturn(WireMock.aResponse() + .withStatus(200) + .withHeader("Content-Type", "text/plain") + .withBody(content))); String objectContent = s3.getObject(r -> r.bucket(BUCKET).key(KEY), AsyncResponseTransformer.toBytes()) .join() @@ -125,18 +130,11 @@ public void getObject_headObjectOk_getObjectOk_operationSucceeds() { @Test public void getObject_headObjectThrows_operationThrows() { - s3 = S3AsyncClient.crtBuilder() - .endpointOverride(URI.create("http://localhost:" + WM.port())) - .forcePathStyle(true) - .region(Region.US_EAST_1) - .build(); - String path = String.format("/%s/%s", BUCKET, KEY); - - WM.stubFor(WireMock.head(WireMock.urlPathEqualTo(path)) - .willReturn(WireMock.aResponse() - .withStatus(403))); + stubFor(head(urlPathEqualTo(path)) + .willReturn(WireMock.aResponse() + .withStatus(403))); assertThatThrownBy(s3.getObject(r -> r.bucket(BUCKET).key(KEY), AsyncResponseTransformer.toBytes())::join) .hasCauseInstanceOf(S3Exception.class) diff --git 
a/services/s3/src/test/java/software/amazon/awssdk/services/s3/functionaltests/S3ExpressCreateSessionTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/functionaltests/S3ExpressCreateSessionTest.java index a2751cba65f0..815736f9d1e6 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/functionaltests/S3ExpressCreateSessionTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/functionaltests/S3ExpressCreateSessionTest.java @@ -64,10 +64,12 @@ import software.amazon.awssdk.services.s3.model.Protocol; import software.amazon.awssdk.services.s3.model.S3Exception; import software.amazon.awssdk.utils.AttributeMap; +import software.amazon.awssdk.utils.Logger; import software.amazon.awssdk.utils.http.SdkHttpUtils; @WireMockTest(httpsEnabled = true) public class S3ExpressCreateSessionTest extends BaseRuleSetClientTest { + private static final Logger log = Logger.loggerFor(S3ExpressCreateSessionTest.class); private static final Function WM_HTTP_ENDPOINT = wm -> URI.create(wm.getHttpBaseUrl()); private static final Function WM_HTTPS_ENDPOINT = wm -> URI.create(wm.getHttpsBaseUrl()); @@ -329,9 +331,8 @@ private static final class CapturingInterceptor implements ExecutionInterceptor public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { SdkHttpRequest sdkHttpRequest = context.httpRequest(); this.headers = sdkHttpRequest.headers(); - System.out.printf("%s %s%n", sdkHttpRequest.method(), sdkHttpRequest.encodedPath()); - headers.forEach((k, strings) -> System.out.printf("%s, %s%n", k, strings)); - System.out.println(); + log.debug(() -> String.format("%s %s%n", sdkHttpRequest.method(), sdkHttpRequest.encodedPath())); + headers.forEach((k, strings) -> log.debug(() -> String.format("%s, %s%n", k, strings))); } } } diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/functionaltests/S3ExpressTest.java 
b/services/s3/src/test/java/software/amazon/awssdk/services/s3/functionaltests/S3ExpressTest.java index 4373a90df9d8..71f009165590 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/functionaltests/S3ExpressTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/functionaltests/S3ExpressTest.java @@ -70,11 +70,12 @@ import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.UploadPartRequest; import software.amazon.awssdk.utils.AttributeMap; +import software.amazon.awssdk.utils.Logger; import software.amazon.awssdk.utils.http.SdkHttpUtils; @WireMockTest(httpsEnabled = true) public class S3ExpressTest extends BaseRuleSetClientTest { - + private static final Logger log = Logger.loggerFor(S3ExpressTest.class); private static final Function WM_HTTP_ENDPOINT = wm -> URI.create(wm.getHttpBaseUrl()); private static final Function WM_HTTPS_ENDPOINT = wm -> URI.create(wm.getHttpsBaseUrl()); private static final AwsCredentialsProvider CREDENTIALS_PROVIDER = @@ -207,7 +208,7 @@ private void createClientAndCallPutObject(ClientType clientType, Protocol protoc } private void createClientAndCallUploadPart(ClientType clientType, Protocol protocol, S3ExpressSessionAuth s3ExpressSessionAuth, - ChecksumAlgorithm checksumAlgorithm, WireMockRuntimeInfo wm) { + ChecksumAlgorithm checksumAlgorithm, WireMockRuntimeInfo wm) { UploadPartRequest.Builder requestBuilder = UploadPartRequest.builder().bucket(DEFAULT_BUCKET).key(DEFAULT_KEY).partNumber(0).uploadId("test"); if (checksumAlgorithm != ChecksumAlgorithm.UNKNOWN_TO_SDK_VERSION) { @@ -295,7 +296,7 @@ void verifyUploadPartHeaders(ClientType clientType, Protocol protocol, ChecksumA assertThat(headers.get("x-amz-content-sha256")).isNotNull(); if ((protocol == Protocol.HTTPS || clientType == ClientType.ASYNC) && - checksumAlgorithm == ChecksumAlgorithm.UNKNOWN_TO_SDK_VERSION) { + checksumAlgorithm == ChecksumAlgorithm.UNKNOWN_TO_SDK_VERSION) { 
assertThat(headers.get("x-amz-content-sha256").get(0)).isEqualToIgnoringCase("UNSIGNED-PAYLOAD"); } else { assertThat(headers.get("x-amz-decoded-content-length")).isNotNull(); @@ -431,9 +432,8 @@ private static final class CapturingInterceptor implements ExecutionInterceptor public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { SdkHttpRequest sdkHttpRequest = context.httpRequest(); this.headers = sdkHttpRequest.headers(); - System.out.printf("%s %s%n", sdkHttpRequest.method(), sdkHttpRequest.encodedPath()); - headers.forEach((k, strings) -> System.out.printf("%s, %s%n", k, strings)); - System.out.println(); + log.debug(() -> String.format("%s %s%n", sdkHttpRequest.method(), sdkHttpRequest.encodedPath())); + headers.forEach((k, strings) -> log.debug(() -> String.format("%s, %s%n", k, strings))); } } -} +} \ No newline at end of file diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crossregion/S3CrossRegionAsyncClientTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crossregion/S3CrossRegionAsyncClientTest.java index f4139c883bd6..917214f1af67 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crossregion/S3CrossRegionAsyncClientTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crossregion/S3CrossRegionAsyncClientTest.java @@ -429,7 +429,7 @@ void given_US_EAST_1_Client_resolveToRegionalEndpoints_when_crossRegion_is_True( } @ParameterizedTest - @ValueSource(strings = {"us-east-1", "us-east-2", "us-west-1", "aws-global"}) + @ValueSource(strings = {"us-east-1", "us-east-2", "us-west-1"}) void given_AnyRegion_Client_Updates_the_useGlobalEndpointFlag_asFalse(String region) { mockAsyncHttpClient.stubResponses(successHttpResponse()); S3EndpointProvider mockEndpointProvider = Mockito.mock(S3EndpointProvider.class); @@ -450,6 +450,28 @@ void 
given_AnyRegion_Client_Updates_the_useGlobalEndpointFlag_asFalse(String reg }); } + @Test + void given_globalRegion_Client_Updates_region_to_useast1_and_useGlobalEndpointFlag_as_False() { + String region = Region.AWS_GLOBAL.id(); + mockAsyncHttpClient.stubResponses(successHttpResponse()); + S3EndpointProvider mockEndpointProvider = Mockito.mock(S3EndpointProvider.class); + + when(mockEndpointProvider.resolveEndpoint(ArgumentMatchers.any(S3EndpointParams.class))) + .thenReturn(CompletableFuture.completedFuture(Endpoint.builder().url(URI.create("https://bucket.s3.amazonaws.com")).build())); + + S3AsyncClient s3Client = clientBuilder().crossRegionAccessEnabled(true) + .region(Region.of(region)) + .endpointProvider(mockEndpointProvider).build(); + s3Client.getObject(r -> r.bucket(BUCKET).key(KEY), AsyncResponseTransformer.toBytes()).join(); + assertThat(captureInterceptor.endpointProvider).isInstanceOf(BucketEndpointProvider.class); + ArgumentCaptor collectionCaptor = ArgumentCaptor.forClass(S3EndpointParams.class); + verify(mockEndpointProvider, atLeastOnce()).resolveEndpoint(collectionCaptor.capture()); + collectionCaptor.getAllValues().forEach(resolvedParams -> { + assertThat(resolvedParams.region()).isEqualTo(Region.US_EAST_1); + assertThat(resolvedParams.useGlobalEndpoint()).isFalse(); + }); + } + private S3AsyncClientBuilder clientBuilder() { return S3AsyncClient.builder() .httpClient(mockAsyncHttpClient) diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crossregion/S3CrossRegionSyncClientTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crossregion/S3CrossRegionSyncClientTest.java index a17ded1bdb09..6d9d45e5c431 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crossregion/S3CrossRegionSyncClientTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crossregion/S3CrossRegionSyncClientTest.java @@ -256,7 +256,7 @@ void 
given_US_EAST_1_Client_resolveToRegionalEndpoints_when_crossRegion_is_True( } @ParameterizedTest - @ValueSource(strings = {"us-east-1", "us-east-2", "us-west-1", "aws-global"}) + @ValueSource(strings = {"us-east-1", "us-east-2", "us-west-1"}) void given_AnyRegion_Client_Updates_the_useGlobalEndpointFlag_asFalse(String region) { mockSyncHttpClient.stubResponses(successHttpResponse()); S3EndpointProvider mockEndpointProvider = Mockito.mock(S3EndpointProvider.class); @@ -277,6 +277,28 @@ void given_AnyRegion_Client_Updates_the_useGlobalEndpointFlag_asFalse(String reg }); } + @Test + void given_globalRegion_Client_Updates_region_to_useast1_and_useGlobalEndpointFlag_as_False() { + String region = Region.AWS_GLOBAL.id(); + mockSyncHttpClient.stubResponses(successHttpResponse()); + S3EndpointProvider mockEndpointProvider = Mockito.mock(S3EndpointProvider.class); + + when(mockEndpointProvider.resolveEndpoint(ArgumentMatchers.any(S3EndpointParams.class))) + .thenReturn(CompletableFuture.completedFuture(Endpoint.builder().url(URI.create("https://bucket.s3.amazonaws.com")).build())); + + S3Client s3Client = clientBuilder().crossRegionAccessEnabled(true) + .region(Region.of(region)) + .endpointProvider(mockEndpointProvider).build(); + s3Client.getObject(getObjectBuilder().build()); + assertThat(captureInterceptor.endpointProvider).isInstanceOf(BucketEndpointProvider.class); + ArgumentCaptor collectionCaptor = ArgumentCaptor.forClass(S3EndpointParams.class); + verify(mockEndpointProvider, atLeastOnce()).resolveEndpoint(collectionCaptor.capture()); + collectionCaptor.getAllValues().forEach(resolvedParams ->{ + assertThat(resolvedParams.region()).isEqualTo(Region.US_EAST_1); + assertThat(resolvedParams.useGlobalEndpoint()).isFalse(); + }); + } + private static GetObjectRequest.Builder getObjectBuilder() { return GetObjectRequest.builder() .bucket(BUCKET) diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/CrtCredentialProviderAdapterTest.java 
b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/CrtCredentialProviderAdapterTest.java index ecdaeec905a7..d4a8a1e5faf3 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/CrtCredentialProviderAdapterTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/CrtCredentialProviderAdapterTest.java @@ -21,6 +21,7 @@ import java.nio.charset.StandardCharsets; import java.util.concurrent.CompletableFuture; +import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Test; import org.mockito.Mockito; import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; @@ -28,6 +29,7 @@ import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; import software.amazon.awssdk.auth.credentials.HttpCredentialsProvider; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.crt.CrtResource; import software.amazon.awssdk.crt.auth.credentials.Credentials; import software.amazon.awssdk.crt.auth.credentials.CredentialsProvider; import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; @@ -36,6 +38,10 @@ public class CrtCredentialProviderAdapterTest { + @AfterAll + public static void verifyCrtResource() { + CrtResource.waitForNoResources(); + } @Test void crtCredentials_withSession_shouldConvert() { IdentityProvider awsCredentialsProvider = StaticCredentialsProvider diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClientTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClientTest.java index ee44bf18839f..c88cdd24cfc9 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClientTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClientTest.java @@ -19,6 +19,8 @@ import static 
org.assertj.core.api.Assertions.assertThatThrownBy; import java.util.concurrent.atomic.AtomicReference; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -29,6 +31,7 @@ import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.crt.CrtResource; import software.amazon.awssdk.services.s3.DelegatingS3AsyncClient; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.endpoints.S3ClientContextParams; @@ -37,6 +40,11 @@ class DefaultS3CrtAsyncClientTest { + @AfterAll + public static void verifyCrtResource() { + CrtResource.waitForNoResources(); + } + @Test void requestSignerOverrideProvided_shouldThrowException() { try (S3AsyncClient s3AsyncClient = S3CrtAsyncClient.builder().build()) { @@ -96,21 +104,24 @@ void invalidConfig_shouldThrowException(long value) { } @Test - void crtClient_with_crossRegionAccessEnabled_asTrue(){ - S3AsyncClient crossRegionCrtClient = S3AsyncClient.crtBuilder().crossRegionAccessEnabled(true).build(); - assertThat(crossRegionCrtClient).isInstanceOf(DefaultS3CrtAsyncClient.class); - assertThat(((DelegatingS3AsyncClient)crossRegionCrtClient).delegate()).isInstanceOf(S3CrossRegionAsyncClient.class); + void crtClient_with_crossRegionAccessEnabled_asTrue() { + try (S3AsyncClient crossRegionCrtClient = S3AsyncClient.crtBuilder().crossRegionAccessEnabled(true).build()) { + assertThat(crossRegionCrtClient).isInstanceOf(DefaultS3CrtAsyncClient.class); + assertThat(((DelegatingS3AsyncClient)crossRegionCrtClient).delegate()).isInstanceOf(S3CrossRegionAsyncClient.class); + } } @Test - void crtClient_with_crossRegionAccessEnabled_asFalse(){ - S3AsyncClient 
crossRegionDisabledCrtClient = S3AsyncClient.crtBuilder().crossRegionAccessEnabled(false).build(); - assertThat(crossRegionDisabledCrtClient).isInstanceOf(DefaultS3CrtAsyncClient.class); - assertThat(((DelegatingS3AsyncClient)crossRegionDisabledCrtClient).delegate()).isNotInstanceOf(S3CrossRegionAsyncClient.class); - - S3AsyncClient defaultCrtClient = S3AsyncClient.crtBuilder().build(); - assertThat(defaultCrtClient).isInstanceOf(DefaultS3CrtAsyncClient.class); - assertThat(((DelegatingS3AsyncClient)defaultCrtClient).delegate()).isNotInstanceOf(S3CrossRegionAsyncClient.class); + void crtClient_with_crossRegionAccessEnabled_asFalse() { + try (S3AsyncClient crossRegionDisabledCrtClient = S3AsyncClient.crtBuilder().crossRegionAccessEnabled(false).build()) { + assertThat(crossRegionDisabledCrtClient).isInstanceOf(DefaultS3CrtAsyncClient.class); + assertThat(((DelegatingS3AsyncClient) crossRegionDisabledCrtClient).delegate()).isNotInstanceOf(S3CrossRegionAsyncClient.class); + } + + try (S3AsyncClient defaultCrtClient = S3AsyncClient.crtBuilder().build()) { + assertThat(defaultCrtClient).isInstanceOf(DefaultS3CrtAsyncClient.class); + assertThat(((DelegatingS3AsyncClient)defaultCrtClient).delegate()).isNotInstanceOf(S3CrossRegionAsyncClient.class); + } } } diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtClientGetObjectResourceManagementTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtClientGetObjectResourceManagementTest.java new file mode 100644 index 000000000000..fe2b1d5308bd --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtClientGetObjectResourceManagementTest.java @@ -0,0 +1,166 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.crt; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.anyUrl; +import static com.github.tomakehurst.wiremock.client.WireMock.equalTo; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.head; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.verify; + +import com.github.tomakehurst.wiremock.junit5.WireMockRuntimeInfo; +import com.github.tomakehurst.wiremock.junit5.WireMockTest; +import java.io.File; +import java.io.IOException; +import java.net.URI; +import java.util.concurrent.CompletableFuture; +import org.assertj.core.util.Files; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; +import software.amazon.awssdk.crt.CrtResource; +import software.amazon.awssdk.crt.Log; +import software.amazon.awssdk.regions.Region; +import 
software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.testutils.RandomTempFile; +import software.amazon.awssdk.utils.IoUtils; + +/** + * Tests to make sure all CRT resources are cleaned up for get object. + */ +@WireMockTest +@Timeout(10) +public class S3CrtClientGetObjectResourceManagementTest { + + private static final String BUCKET = "Example-Bucket"; + private static final String KEY = "Example-Object"; + private static final long PART_SIZE = 1024 * 1024 * 5L; + private S3AsyncClient s3AsyncClient; + + @BeforeAll + public static void setUpBeforeAll() { + System.setProperty("aws.crt.debugnative", "true"); + Log.initLoggingToStdout(Log.LogLevel.Warn); + } + + @BeforeEach + public void setup(WireMockRuntimeInfo wiremock) { + stubGetObjectCalls(); + s3AsyncClient = S3AsyncClient.crtBuilder() + .region(Region.US_EAST_1) + .endpointOverride(URI.create("http://localhost:" + wiremock.getHttpPort())) + .credentialsProvider( + StaticCredentialsProvider.create(AwsBasicCredentials.create("key", "secret"))) + .minimumPartSizeInBytes(PART_SIZE) + .maxConcurrency(2) + .initialReadBufferSizeInBytes(1024L) + .build(); + } + + @AfterEach + public void tearDown() { + s3AsyncClient.close(); + } + + @AfterAll + public static void verifyCrtResource() { + CrtResource.waitForNoResources(); + } + + @Test + void toBlockingInputStream_abortStream_shouldCloseResources() throws IOException { + ResponseInputStream response = s3AsyncClient.getObject( + r -> r.bucket(BUCKET).key(KEY), AsyncResponseTransformer.toBlockingInputStream()).join(); + response.read(); + response.abort(); + } + + @Test + void toBlockingInputStream_closeStream_shouldCloseResources() throws IOException { + ResponseInputStream response = s3AsyncClient.getObject( + r -> r.bucket(BUCKET).key(KEY), AsyncResponseTransformer.toBlockingInputStream()).join(); + response.read(); + response.close(); + } + + @Test + void 
toFile_cancelRequest_shouldCloseResource() throws IOException { + CompletableFuture future = s3AsyncClient.getObject( + r -> r.bucket(BUCKET).key(KEY), AsyncResponseTransformer.toFile(Files.newTemporaryFile())); + future.cancel(false); + } + + @Test + void toFile_happyCase_shouldCloseResource() throws IOException { + File file = RandomTempFile.randomUncreatedFile(); + CompletableFuture future = s3AsyncClient.getObject( + r -> r.bucket(BUCKET).key(KEY), AsyncResponseTransformer.toFile(file)); + future.join(); + } + + @Test + void toBlockingInputStream_happyCase_shouldCloseResource() throws IOException { + try (ResponseInputStream response = s3AsyncClient.getObject( + r -> r.bucket(BUCKET).key(KEY), AsyncResponseTransformer.toBlockingInputStream()).join()) { + IoUtils.drainInputStream(response); + } + } + + private static void stubGetObjectCalls() { + int numOfParts = 3; + long finalPartSize = 1024 * 1024 * 4; + long totalContentSize = PART_SIZE * (numOfParts - 1) + finalPartSize; + + stubFor(head(anyUrl()).willReturn(aResponse().withStatus(200) + .withHeader("content-length", String.valueOf(totalContentSize)) + .withHeader("etag", "1234"))); + + for (int i = 0; i < numOfParts - 1; i++) { + int partNumberIndex = i + 1; + String contentRange = "bytes " + PART_SIZE * i + "-" + (PART_SIZE * partNumberIndex - 1) + "/" + totalContentSize; + String range = "bytes=" + PART_SIZE * i + "-" + (PART_SIZE * partNumberIndex - 1); + stubFor(get(anyUrl()).withHeader("Range", equalTo(range)).willReturn(aResponse().withStatus(200) + .withHeader("content-length", + String.valueOf(PART_SIZE)) + .withHeader("Content-Range", + contentRange) + .withHeader("etag", "1234") + .withBodyFile("part" + partNumberIndex))); + } + + // final part + String contentRange = "bytes " + PART_SIZE * numOfParts + "-" + (totalContentSize - 1) + "/" + totalContentSize; + String range = "bytes=" + PART_SIZE * (numOfParts - 1) + "-" + (totalContentSize - 1); + stubFor(get(anyUrl()).withHeader("Range", 
equalTo(range)).willReturn(aResponse().withStatus(200) + .withHeader("content-length", String.valueOf(finalPartSize)) + .withHeader("Content-Range", + contentRange) + .withHeader("etag", "1234") + .withBodyFile("part" + (numOfParts - 1)))); + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtClientWiremockTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtClientWiremockTest.java index 2f863bb42c18..2776358ae64e 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtClientWiremockTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtClientWiremockTest.java @@ -29,6 +29,7 @@ import com.github.tomakehurst.wiremock.junit5.WireMockTest; import java.net.URI; import java.util.concurrent.Executor; +import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; @@ -84,6 +85,10 @@ public void setup(WireMockRuntimeInfo wiremock) { @AfterEach public void tearDown() { s3AsyncClient.close(); + } + + @AfterAll + public static void verifyCrtResource() { CrtResource.waitForNoResources(); } diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtResponseHandlerAdapterTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtResponseHandlerAdapterTest.java index dbd86d3be6d8..f20df7c9aa4a 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtResponseHandlerAdapterTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtResponseHandlerAdapterTest.java @@ -53,7 +53,7 @@ public class S3CrtResponseHandlerAdapterTest { private S3FinishedResponseContext context; @Mock - private S3MetaRequest s3MetaRequest; + private S3MetaRequestWrapper s3MetaRequest; private CompletableFuture future; 
@Before @@ -62,8 +62,8 @@ public void setup() { sdkResponseHandler = spy(new TestResponseHandler()); responseHandlerAdapter = new S3CrtResponseHandlerAdapter(future, sdkResponseHandler, - null); - responseHandlerAdapter.metaRequest(s3MetaRequest); + null, + CompletableFuture.completedFuture(s3MetaRequest)); } @Test diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/S3MetaRequestWrapperTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/S3MetaRequestWrapperTest.java new file mode 100644 index 000000000000..68d57ee2ba48 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/S3MetaRequestWrapperTest.java @@ -0,0 +1,70 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.crt; + +import java.util.concurrent.CompletableFuture; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.jupiter.MockitoExtension; +import software.amazon.awssdk.crt.s3.S3MetaRequest; + +@ExtendWith(MockitoExtension.class) +public class S3MetaRequestWrapperTest { + + @Mock + private S3MetaRequest request; + + private S3MetaRequestWrapper wrapper; + + @BeforeEach + void setUp() { + wrapper = new S3MetaRequestWrapper(request); + } + + @Test + void close_concurrentCalls_onlyExecuteOnce() { + CompletableFuture.allOf(CompletableFuture.runAsync(() -> wrapper.close()), + CompletableFuture.runAsync(() -> wrapper.close())).join(); + Mockito.verify(request, Mockito.times(1)).close(); + } + + @Test + void incrementWindow_afterClose_shouldBeNoOp() { + wrapper.close(); + wrapper.incrementReadWindow(10L); + Mockito.verify(request, Mockito.times(1)).close(); + Mockito.verify(request, Mockito.never()).incrementReadWindow(Mockito.anyLong()); + } + + @Test + void pause_afterClose_shouldBeNoOp() { + wrapper.close(); + wrapper.pause(); + Mockito.verify(request, Mockito.times(1)).close(); + Mockito.verify(request, Mockito.never()).pause(); + } + + @Test + void cancel_afterClose_shouldBeNoOp() { + wrapper.close(); + wrapper.cancel(); + Mockito.verify(request, Mockito.times(1)).close(); + Mockito.verify(request, Mockito.never()).cancel(); + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/ObjectMetadataInterceptorTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/ObjectMetadataInterceptorTest.java new file mode 100644 index 000000000000..a29fff164c8e --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/ObjectMetadataInterceptorTest.java @@ 
-0,0 +1,124 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.handlers; + +import static java.util.Arrays.asList; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import org.junit.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; + +public class ObjectMetadataInterceptorTest { + private static final ObjectMetadataInterceptor INTERCEPTOR = new ObjectMetadataInterceptor(); + + + + public static List testCases() { + return asList( + tc(asList("a", "b", "c"), asList("a", "b", "c")), + tc(asList(" a ", "b", "c"), asList("a", "b", "c")), + tc(asList(" a", "\tb", "\tc"), asList("a", "b", "c")), + tc(asList("a\n", "\tb", "\tc\r\n"), asList("a", "b", "c")) + + ); + } + + @ParameterizedTest + 
@MethodSource("testCases") + public void modifyRequest_putObject_metadataKeysAreTrimmed(TestCase tc) { + Map metadata = tc.inputKeys.stream() + .collect(Collectors.toMap(k -> k, k -> "value")); + + Context.ModifyHttpRequest ctx = mock(Context.ModifyHttpRequest.class); + + PutObjectRequest put = PutObjectRequest.builder() + .metadata(metadata) + .build(); + + when(ctx.request()).thenReturn(put); + + ExecutionAttributes attrs = new ExecutionAttributes(); + attrs.putAttribute(SdkExecutionAttribute.OPERATION_NAME, "PutObject"); + + PutObjectRequest modified = (PutObjectRequest) INTERCEPTOR.modifyRequest(ctx, attrs); + + assertThat(modified.metadata().keySet()).containsExactlyElementsOf(tc.expectedKeys); + } + + @ParameterizedTest + @MethodSource("testCases") + public void modifyRequest_creatMultipartUpload_metadataKeysAreTrimmed(TestCase tc) { + Map metadata = tc.inputKeys.stream() + .collect(Collectors.toMap(k -> k, k -> "value")); + + Context.ModifyHttpRequest ctx = mock(Context.ModifyHttpRequest.class); + + CreateMultipartUploadRequest mpu = CreateMultipartUploadRequest.builder() + .metadata(metadata) + .build(); + + when(ctx.request()).thenReturn(mpu); + + ExecutionAttributes attrs = new ExecutionAttributes(); + attrs.putAttribute(SdkExecutionAttribute.OPERATION_NAME, "CreateMultipartUpload"); + + CreateMultipartUploadRequest modified = (CreateMultipartUploadRequest) INTERCEPTOR.modifyRequest(ctx, attrs); + + assertThat(modified.metadata().keySet()).containsExactlyElementsOf(tc.expectedKeys); + } + + @Test + public void modifyRequest_unknownOperation_ignores() { + Context.ModifyHttpRequest ctx = mock(Context.ModifyHttpRequest.class); + + GetObjectRequest get = GetObjectRequest.builder().build(); + + when(ctx.request()).thenReturn(get); + + ExecutionAttributes attrs = new ExecutionAttributes(); + attrs.putAttribute(SdkExecutionAttribute.OPERATION_NAME, "GetObject"); + + SdkRequest sdkRequest = INTERCEPTOR.modifyRequest(ctx, attrs); + + 
assertThat(sdkRequest).isEqualTo(get); + } + + private static TestCase tc(List input, List expected) { + return new TestCase(input, expected); + } + private static class TestCase { + private List inputKeys; + private List expectedKeys; + + public TestCase(List inputKeys, List expectedKeys) { + this.inputKeys = inputKeys; + this.expectedKeys = expectedKeys; + } + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/KnownContentLengthAsyncRequestBodySubscriberTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/KnownContentLengthAsyncRequestBodySubscriberTest.java new file mode 100644 index 000000000000..0ffbab391b3c --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/KnownContentLengthAsyncRequestBodySubscriberTest.java @@ -0,0 +1,142 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.multipart; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.CompletedPart; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.multipart.S3ResumeToken; +import software.amazon.awssdk.testutils.RandomTempFile; +import software.amazon.awssdk.utils.Pair; + +public class KnownContentLengthAsyncRequestBodySubscriberTest { + + // Should contain four parts: [8KB, 8KB, 8KB, 1KB] + private static final long MPU_CONTENT_SIZE = 25 * 1024; + private static final long PART_SIZE = 8 * 1024; + private static final int TOTAL_NUM_PARTS = 4; + private static final String UPLOAD_ID = "1234"; + private static RandomTempFile testFile; + private AsyncRequestBody asyncRequestBody; + private PutObjectRequest putObjectRequest; + private S3AsyncClient s3AsyncClient; + private MultipartUploadHelper multipartUploadHelper; + + @BeforeAll + public static void beforeAll() throws IOException { + testFile = new RandomTempFile("testfile.dat", MPU_CONTENT_SIZE); + } + + @AfterAll + public static void afterAll() { + testFile.delete(); + } + + @BeforeEach + public void beforeEach() { + s3AsyncClient = mock(S3AsyncClient.class); + multipartUploadHelper = mock(MultipartUploadHelper.class); + 
asyncRequestBody = AsyncRequestBody.fromFile(testFile); + putObjectRequest = PutObjectRequest.builder().bucket("bucket").key("key").build(); + } + + @Test + void pause_withOngoingCompleteMpuFuture_shouldReturnTokenAndCancelFuture() { + CompletableFuture completeMpuFuture = new CompletableFuture<>(); + int numExistingParts = 2; + S3ResumeToken resumeToken = configureSubscriberAndPause(numExistingParts, completeMpuFuture); + + verifyResumeToken(resumeToken, numExistingParts); + assertThat(completeMpuFuture).isCancelled(); + } + + @Test + void pause_withCompletedCompleteMpuFuture_shouldReturnNullToken() { + CompletableFuture completeMpuFuture = + CompletableFuture.completedFuture(CompleteMultipartUploadResponse.builder().build()); + int numExistingParts = 2; + S3ResumeToken resumeToken = configureSubscriberAndPause(numExistingParts, completeMpuFuture); + + assertThat(resumeToken).isNull(); + } + + @Test + void pause_withUninitiatedCompleteMpuFuture_shouldReturnToken() { + CompletableFuture completeMpuFuture = null; + int numExistingParts = 2; + S3ResumeToken resumeToken = configureSubscriberAndPause(numExistingParts, completeMpuFuture); + + verifyResumeToken(resumeToken, numExistingParts); + } + + private S3ResumeToken configureSubscriberAndPause(int numExistingParts, + CompletableFuture completeMpuFuture) { + Map existingParts = existingParts(numExistingParts); + KnownContentLengthAsyncRequestBodySubscriber subscriber = subscriber(putObjectRequest, asyncRequestBody, existingParts); + + when(multipartUploadHelper.completeMultipartUpload(any(CompletableFuture.class), any(String.class), + any(CompletedPart[].class), any(PutObjectRequest.class))).thenReturn(completeMpuFuture); + subscriber.onComplete(); + return subscriber.pause(); + } + + private KnownContentLengthAsyncRequestBodySubscriber subscriber(PutObjectRequest putObjectRequest, + AsyncRequestBody asyncRequestBody, + Map existingParts) { + + MpuRequestContext mpuRequestContext = MpuRequestContext.builder() + 
.request(Pair.of(putObjectRequest, asyncRequestBody)) + .contentLength(MPU_CONTENT_SIZE) + .partSize(PART_SIZE) + .uploadId(UPLOAD_ID) + .existingParts(existingParts) + .numPartsCompleted((long) existingParts.size()) + .build(); + + return new KnownContentLengthAsyncRequestBodySubscriber(mpuRequestContext, new CompletableFuture<>(), multipartUploadHelper); + } + + private Map existingParts(int numExistingParts) { + Map existingParts = new ConcurrentHashMap<>(); + for (int i = 1; i <= numExistingParts; i++) { + existingParts.put(i, CompletedPart.builder().partNumber(i).build()); + } + return existingParts; + } + + private void verifyResumeToken(S3ResumeToken s3ResumeToken, int numExistingParts) { + assertThat(s3ResumeToken).isNotNull(); + assertThat(s3ResumeToken.uploadId()).isEqualTo(UPLOAD_ID); + assertThat(s3ResumeToken.partSize()).isEqualTo(PART_SIZE); + assertThat(s3ResumeToken.totalNumParts()).isEqualTo(TOTAL_NUM_PARTS); + assertThat(s3ResumeToken.numPartsCompleted()).isEqualTo(numExistingParts); + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MpuRequestContextTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MpuRequestContextTest.java new file mode 100644 index 000000000000..c858e7e8e9ec --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MpuRequestContextTest.java @@ -0,0 +1,73 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.multipart; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.services.s3.model.CompletedPart; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.utils.Pair; + +public class MpuRequestContextTest { + + private static final Pair REQUEST = Pair.of(PutObjectRequest.builder().build(), AsyncRequestBody.empty()); + private static final long CONTENT_LENGTH = 999; + private static final long PART_SIZE = 111; + private static final long NUM_PARTS_COMPLETED = 3; + private static final String UPLOAD_ID = "55555"; + private static final Map EXISTING_PARTS = new ConcurrentHashMap<>(); + + @Test + public void mpuRequestContext_withValues_buildsCorrectly() { + MpuRequestContext mpuRequestContext = MpuRequestContext.builder() + .request(REQUEST) + .contentLength(CONTENT_LENGTH) + .partSize(PART_SIZE) + .uploadId(UPLOAD_ID) + .existingParts(EXISTING_PARTS) + .numPartsCompleted(NUM_PARTS_COMPLETED) + .build(); + + assertThat(mpuRequestContext.request()).isEqualTo(REQUEST); + assertThat(mpuRequestContext.contentLength()).isEqualTo(CONTENT_LENGTH); + assertThat(mpuRequestContext.partSize()).isEqualTo(PART_SIZE); + assertThat(mpuRequestContext.uploadId()).isEqualTo(UPLOAD_ID); + assertThat(mpuRequestContext.existingParts()).isEqualTo(EXISTING_PARTS); + assertThat(mpuRequestContext.numPartsCompleted()).isEqualTo(NUM_PARTS_COMPLETED); + } + + @Test + public void mpuRequestContext_default_buildsCorrectly() { + MpuRequestContext mpuRequestContext = MpuRequestContext.builder().build(); + + 
assertThat(mpuRequestContext.request()).isNull(); + assertThat(mpuRequestContext.contentLength()).isNull(); + assertThat(mpuRequestContext.partSize()).isNull(); + assertThat(mpuRequestContext.uploadId()).isNull(); + assertThat(mpuRequestContext.existingParts()).isNull(); + assertThat(mpuRequestContext.numPartsCompleted()).isNull(); + } + + @Test + void testEqualsAndHashCodeContract() { + EqualsVerifier.forClass(MpuRequestContext.class); + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MpuTestUtils.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MpuTestUtils.java index 435d5b406189..23fe07ab2743 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MpuTestUtils.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MpuTestUtils.java @@ -19,6 +19,9 @@ import static org.mockito.Mockito.when; import java.util.concurrent.CompletableFuture; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; @@ -26,6 +29,9 @@ import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.HeadObjectRequest; import software.amazon.awssdk.services.s3.model.HeadObjectResponse; +import software.amazon.awssdk.services.s3.model.UploadPartRequest; +import software.amazon.awssdk.services.s3.model.UploadPartResponse; +import software.amazon.awssdk.services.s3.multipart.S3ResumeToken; public final class MpuTestUtils { @@ -62,4 +68,33 @@ public static void stubSuccessfulCompleteMultipartCall(String bucket, String key 
when(s3AsyncClient.completeMultipartUpload(any(CompleteMultipartUploadRequest.class))) .thenReturn(completeMultipartUploadFuture); } + + public static void stubSuccessfulUploadPartCalls(S3AsyncClient s3AsyncClient) { + when(s3AsyncClient.uploadPart(any(UploadPartRequest.class), any(AsyncRequestBody.class))) + .thenAnswer(new Answer>() { + + @Override + public CompletableFuture answer(InvocationOnMock invocationOnMock) { + AsyncRequestBody AsyncRequestBody = invocationOnMock.getArgument(1); + // Draining the request body + AsyncRequestBody.subscribe(b -> {}); + + return CompletableFuture.completedFuture(UploadPartResponse.builder() + .build()); + } + }); + } + + public static S3ResumeToken s3ResumeToken(long numPartsCompleted, long partSize, long contentLength, String uploadId) { + return S3ResumeToken.builder() + .uploadId(uploadId) + .partSize(partSize) + .numPartsCompleted(numPartsCompleted) + .totalNumParts(determinePartCount(contentLength, partSize)) + .build(); + } + + public static long determinePartCount(long contentLength, long partSize) { + return (long) Math.ceil(contentLength / (double) partSize); + } } diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartClientChecksumTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartClientChecksumTest.java new file mode 100644 index 000000000000..351c1a750a60 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartClientChecksumTest.java @@ -0,0 +1,145 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.multipart; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.net.URI; +import java.util.List; +import java.util.Map; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.ChecksumAlgorithm; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.testutils.service.http.MockAsyncHttpClient; + +class MultipartClientChecksumTest { + private MockAsyncHttpClient mockAsyncHttpClient; + private ChecksumCapturingInterceptor checksumCapturingInterceptor; + private S3AsyncClient s3Client; + + @BeforeEach + void init() { + this.mockAsyncHttpClient = new MockAsyncHttpClient(); + this.checksumCapturingInterceptor = new ChecksumCapturingInterceptor(); + s3Client = S3AsyncClient.builder() + .httpClient(mockAsyncHttpClient) + .endpointOverride(URI.create("http://localhost")) + .overrideConfiguration(c -> c.addExecutionInterceptor(checksumCapturingInterceptor)) + .multipartEnabled(true) + .region(Region.US_EAST_1) + .build(); + } + + @AfterEach + void reset() { + this.mockAsyncHttpClient.reset(); + } + + 
@Test + public void putObject_default_shouldAddCrc32() { + HttpExecuteResponse response = HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(200).build()) + .build(); + mockAsyncHttpClient.stubResponses(response); + + PutObjectRequest putObjectRequest = putObjectRequestBuilder().build(); + + s3Client.putObject(putObjectRequest, AsyncRequestBody.fromString("hello world")); + assertThat(checksumCapturingInterceptor.checksumHeader).isEqualTo("CRC32"); + } + + @Test + public void putObject_withNonCrc32ChecksumType_shouldNotAddCrc32() { + HttpExecuteResponse response = HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(200).build()) + .build(); + mockAsyncHttpClient.stubResponses(response); + + PutObjectRequest putObjectRequest = + putObjectRequestBuilder() + .checksumAlgorithm(ChecksumAlgorithm.SHA256) + .build(); + + s3Client.putObject(putObjectRequest, AsyncRequestBody.fromString("hello world")); + assertThat(checksumCapturingInterceptor.checksumHeader).isEqualTo("SHA256"); + } + + @Test + public void putObject_withNonCrc32ChecksumValue_shouldNotAddCrc32() { + HttpExecuteResponse response = HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(200).build()) + .build(); + mockAsyncHttpClient.stubResponses(response); + + PutObjectRequest putObjectRequest = + putObjectRequestBuilder() + .checksumSHA256("checksumVal") + .build(); + + s3Client.putObject(putObjectRequest, AsyncRequestBody.fromString("hello world")); + assertThat(checksumCapturingInterceptor.checksumHeader).isNull(); + assertThat(checksumCapturingInterceptor.headers.get("x-amz-checksum-sha256")).contains("checksumVal"); + } + + @Test + public void putObject_withCrc32Value_shouldNotAddCrc32TypeHeader() { + HttpExecuteResponse response = HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(200).build()) + .build(); + mockAsyncHttpClient.stubResponses(response); + + PutObjectRequest putObjectRequest = + 
putObjectRequestBuilder() + .checksumCRC32("checksumVal") + .build(); + + s3Client.putObject(putObjectRequest, AsyncRequestBody.fromString("hello world")); + assertThat(checksumCapturingInterceptor.checksumHeader).isNull(); + assertThat(checksumCapturingInterceptor.headers.get("x-amz-checksum-crc32")).contains("checksumVal"); + } + + private PutObjectRequest.Builder putObjectRequestBuilder() { + return PutObjectRequest.builder().bucket("bucket").key("key"); + } + + private static final class ChecksumCapturingInterceptor implements ExecutionInterceptor { + String checksumHeader; + Map> headers; + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + SdkHttpRequest sdkHttpRequest = context.httpRequest(); + headers = sdkHttpRequest.headers(); + String checksumHeaderName = "x-amz-sdk-checksum-algorithm"; + if (headers.containsKey(checksumHeaderName)) { + List checksumHeaderVals = headers.get(checksumHeaderName); + assertThat(checksumHeaderVals).hasSize(1); + checksumHeader = checksumHeaderVals.get(0); + } + } + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/SdkPojoConversionUtilsTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/SdkPojoConversionUtilsTest.java index 4d5a333a51dd..0f3ab5b5589f 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/SdkPojoConversionUtilsTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/SdkPojoConversionUtilsTest.java @@ -37,6 +37,7 @@ import software.amazon.awssdk.http.SdkHttpFullResponse; import software.amazon.awssdk.services.s3.internal.multipart.SdkPojoConversionUtils; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; import 
software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.CompletedPart; import software.amazon.awssdk.services.s3.model.CopyObjectRequest; @@ -44,6 +45,7 @@ import software.amazon.awssdk.services.s3.model.CopyPartResult; import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.HeadObjectRequest; +import software.amazon.awssdk.services.s3.model.ListPartsRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.services.s3.model.S3ResponseMetadata; @@ -182,7 +184,6 @@ void toCreateMultipartUploadRequest_putObjectRequest_shouldCopyProperties() { PutObjectRequest randomObject = randomPutObjectRequest(); CreateMultipartUploadRequest convertedObject = SdkPojoConversionUtils.toCreateMultipartUploadRequest(randomObject); Set fieldsToIgnore = new HashSet<>(); - System.out.println(convertedObject); verifyFieldsAreCopied(randomObject, convertedObject, fieldsToIgnore, PutObjectRequest.builder().sdkFields(), CreateMultipartUploadRequest.builder().sdkFields()); @@ -201,6 +202,35 @@ void toCompletedPart_putObject_shouldCopyProperties() { assertThat(convertedCompletedPart.partNumber()).isEqualTo(1); } + @Test + void toCompleteMultipartUploadRequest_putObject_shouldCopyProperties() { + PutObjectRequest randomObject = randomPutObjectRequest(); + CompletedPart[] parts = new CompletedPart[1]; + CompletedPart completedPart = CompletedPart.builder().partNumber(1).build(); + parts[0] = completedPart; + CompleteMultipartUploadRequest convertedObject = + SdkPojoConversionUtils.toCompleteMultipartUploadRequest(randomObject, "uploadId", parts); + + Set fieldsToIgnore = new HashSet<>(); + verifyFieldsAreCopied(randomObject, convertedObject, fieldsToIgnore, + PutObjectRequest.builder().sdkFields(), + 
CompleteMultipartUploadRequest.builder().sdkFields()); + assertThat(convertedObject.uploadId()).isEqualTo("uploadId"); + assertThat(convertedObject.multipartUpload().parts()).contains(completedPart); + } + + @Test + void toListPartsRequest_putObject_shouldCopyProperties() { + PutObjectRequest randomObject = randomPutObjectRequest(); + ListPartsRequest convertedObject = SdkPojoConversionUtils.toListPartsRequest("uploadId", randomObject); + Set fieldsToIgnore = new HashSet<>(); + + verifyFieldsAreCopied(randomObject, convertedObject, fieldsToIgnore, + PutObjectRequest.builder().sdkFields(), + ListPartsRequest.builder().sdkFields()); + assertThat(convertedObject.uploadId()).isEqualTo("uploadId"); + } + private static void verifyFieldsAreCopied(SdkPojo requestConvertedFrom, SdkPojo requestConvertedTo, Set fieldsToIgnore, diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelperTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelperTest.java index c18d4c993114..d17ab358c527 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelperTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelperTest.java @@ -19,11 +19,17 @@ import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.assertj.core.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static software.amazon.awssdk.services.s3.internal.multipart.MpuTestUtils.s3ResumeToken; import static software.amazon.awssdk.services.s3.internal.multipart.MpuTestUtils.stubSuccessfulCompleteMultipartCall; +import static 
software.amazon.awssdk.services.s3.internal.multipart.MpuTestUtils.stubSuccessfulCreateMultipartCall; +import static software.amazon.awssdk.services.s3.internal.multipart.MpuTestUtils.stubSuccessfulUploadPartCalls; +import static software.amazon.awssdk.services.s3.multipart.S3PauseResumeExecutionAttribute.PAUSE_OBSERVABLE; +import static software.amazon.awssdk.services.s3.multipart.S3PauseResumeExecutionAttribute.RESUME_TOKEN; import java.io.IOException; import java.nio.ByteBuffer; @@ -32,13 +38,14 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import java.util.stream.Stream; import org.apache.commons.lang3.RandomStringUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; @@ -50,19 +57,26 @@ import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.SdkPublisher; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.CompletedPart; import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; import 
software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.ListPartsRequest; +import software.amazon.awssdk.services.s3.model.Part; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.services.s3.model.UploadPartRequest; import software.amazon.awssdk.services.s3.model.UploadPartResponse; import software.amazon.awssdk.services.s3.multipart.MultipartConfiguration; +import software.amazon.awssdk.services.s3.multipart.PauseObservable; +import software.amazon.awssdk.services.s3.multipart.S3ResumeToken; +import software.amazon.awssdk.services.s3.paginators.ListPartsPublisher; import software.amazon.awssdk.testutils.RandomTempFile; import software.amazon.awssdk.utils.CompletableFutureUtils; @@ -140,8 +154,8 @@ void uploadObject_unKnownContentLengthDoesNotExceedPartSize_shouldUploadInOneChu void uploadObject_contentLengthExceedThresholdAndPartSize_shouldUseMPU(AsyncRequestBody asyncRequestBody) { PutObjectRequest putObjectRequest = putObjectRequest(null); - MpuTestUtils.stubSuccessfulCreateMultipartCall(UPLOAD_ID, s3AsyncClient); - stubSuccessfulUploadPartCalls(); + stubSuccessfulCreateMultipartCall(UPLOAD_ID, s3AsyncClient); + stubSuccessfulUploadPartCalls(s3AsyncClient); stubSuccessfulCompleteMultipartCall(BUCKET, KEY, s3AsyncClient); uploadHelper.uploadObject(putObjectRequest, asyncRequestBody).join(); @@ -178,7 +192,7 @@ void uploadObject_contentLengthExceedThresholdAndPartSize_shouldUseMPU(AsyncRequ void mpu_onePartFailed_shouldFailOtherPartsAndAbort(AsyncRequestBody asyncRequestBody) { PutObjectRequest putObjectRequest = putObjectRequest(MPU_CONTENT_SIZE); - MpuTestUtils.stubSuccessfulCreateMultipartCall(UPLOAD_ID, s3AsyncClient); + stubSuccessfulCreateMultipartCall(UPLOAD_ID, s3AsyncClient); CompletableFuture ongoingRequest = new CompletableFuture<>(); SdkClientException exception = 
SdkClientException.create("request failed"); @@ -239,12 +253,12 @@ void upload_knownContentLengthCancelResponseFuture_shouldCancelUploadPart() { CompletableFuture createMultipartFuture = new CompletableFuture<>(); - MpuTestUtils.stubSuccessfulCreateMultipartCall(UPLOAD_ID, s3AsyncClient); + stubSuccessfulCreateMultipartCall(UPLOAD_ID, s3AsyncClient); CompletableFuture ongoingRequest = new CompletableFuture<>(); - when(s3AsyncClient.uploadPart(any(UploadPartRequest.class), - any(AsyncRequestBody.class))).thenReturn(ongoingRequest); + when(s3AsyncClient.uploadPart(any(UploadPartRequest.class), + any(AsyncRequestBody.class))).thenReturn(ongoingRequest); CompletableFuture future = uploadHelper.uploadObject(putObjectRequest, AsyncRequestBody.fromFile(testFile)); @@ -267,7 +281,7 @@ void upload_knownContentLengthCancelResponseFuture_shouldCancelUploadPart() { void uploadObject_createMultipartUploadFailed_shouldFail(AsyncRequestBody asyncRequestBody) { PutObjectRequest putObjectRequest = putObjectRequest(null); - SdkClientException exception = SdkClientException.create("CompleteMultipartUpload failed"); + SdkClientException exception = SdkClientException.create("CreateMultipartUpload failed"); CompletableFuture createMultipartUploadFuture = CompletableFutureUtils.failedFuture(exception); @@ -286,8 +300,8 @@ void uploadObject_createMultipartUploadFailed_shouldFail(AsyncRequestBody asyncR void uploadObject_completeMultipartFailed_shouldFailAndAbort(AsyncRequestBody asyncRequestBody) { PutObjectRequest putObjectRequest = putObjectRequest(null); - MpuTestUtils.stubSuccessfulCreateMultipartCall(UPLOAD_ID, s3AsyncClient); - stubSuccessfulUploadPartCalls(); + stubSuccessfulCreateMultipartCall(UPLOAD_ID, s3AsyncClient); + stubSuccessfulUploadPartCalls(s3AsyncClient); SdkClientException exception = SdkClientException.create("CompleteMultipartUpload failed"); @@ -315,8 +329,8 @@ void uploadObject_requestBodyOnError_shouldFailAndAbort(boolean contentLengthKno Long contentLength = 
contentLengthKnown ? MPU_CONTENT_SIZE : null; ErroneousAsyncRequestBody erroneousAsyncRequestBody = new ErroneousAsyncRequestBody(contentLength, exception); - MpuTestUtils.stubSuccessfulCreateMultipartCall(UPLOAD_ID, s3AsyncClient); - stubSuccessfulUploadPartCalls(); + stubSuccessfulCreateMultipartCall(UPLOAD_ID, s3AsyncClient); + stubSuccessfulUploadPartCalls(s3AsyncClient); when(s3AsyncClient.abortMultipartUpload(any(AbortMultipartUploadRequest.class))) .thenReturn(CompletableFuture.completedFuture(AbortMultipartUploadResponse.builder().build())); @@ -327,6 +341,42 @@ void uploadObject_requestBodyOnError_shouldFailAndAbort(boolean contentLengthKno .hasRootCause(exception); } + @ParameterizedTest + @ValueSource(ints = {0, 1, 2, 3, 4}) + void uploadObject_withResumeToken_shouldInvokeListPartsAndSkipExistingParts(int numExistingParts) { + S3ResumeToken resumeToken = s3ResumeToken(numExistingParts, PART_SIZE, MPU_CONTENT_SIZE, "uploadId"); + PutObjectRequest putObjectRequest = putObjectRequestWithResumeToken(MPU_CONTENT_SIZE, resumeToken); + ListPartsRequest request = SdkPojoConversionUtils.toListPartsRequest("uploadId", putObjectRequest); + ListPartsPublisher mockPublisher = mock(ListPartsPublisher.class); + when(s3AsyncClient.listPartsPaginator(request)).thenReturn(mockPublisher); + when(mockPublisher.parts()).thenReturn(new TestPartPublisher(numExistingParts)); + + stubSuccessfulUploadPartCalls(s3AsyncClient); + stubSuccessfulCompleteMultipartCall(BUCKET, KEY, s3AsyncClient); + + uploadHelper.uploadObject(putObjectRequest, AsyncRequestBody.fromFile(testFile)).join(); + + ArgumentCaptor listPartsRequestArgumentCaptor = ArgumentCaptor.forClass(ListPartsRequest.class); + verify(s3AsyncClient).listPartsPaginator(listPartsRequestArgumentCaptor.capture()); + assertThat(putObjectRequest.overrideConfiguration().get().executionAttributes().getAttribute(PAUSE_OBSERVABLE).pausableUpload()).isNotNull(); + + ArgumentCaptor requestArgumentCaptor = 
ArgumentCaptor.forClass(UploadPartRequest.class); + ArgumentCaptor requestBodyArgumentCaptor = ArgumentCaptor.forClass(AsyncRequestBody.class); + int numTotalParts = 4; + int numPartsToSend = numTotalParts - numExistingParts; + verify(s3AsyncClient, times(numPartsToSend)).uploadPart(requestArgumentCaptor.capture(), requestBodyArgumentCaptor.capture()); + + ArgumentCaptor completeMpuArgumentCaptor = ArgumentCaptor.forClass(CompleteMultipartUploadRequest.class); + verify(s3AsyncClient).completeMultipartUpload(completeMpuArgumentCaptor.capture()); + + CompleteMultipartUploadRequest actualRequest = completeMpuArgumentCaptor.getValue(); + assertThat(actualRequest.multipartUpload().parts()).isEqualTo(completedParts(numTotalParts)); + } + + private List completedParts(int totalNumParts) { + return IntStream.range(1, totalNumParts + 1).mapToObj(i -> CompletedPart.builder().partNumber(i).build()).collect(Collectors.toList()); + } + private static PutObjectRequest putObjectRequest(Long contentLength) { return PutObjectRequest.builder() .bucket(BUCKET) @@ -335,37 +385,27 @@ private static PutObjectRequest putObjectRequest(Long contentLength) { .build(); } - private void stubSuccessfulUploadPartCalls() { - when(s3AsyncClient.uploadPart(any(UploadPartRequest.class), any(AsyncRequestBody.class))) - .thenAnswer(new Answer>() { - int numberOfCalls = 0; + private static PutObjectRequest putObjectRequestWithResumeToken(Long contentLength, S3ResumeToken resumeToken) { + return putObjectRequest(contentLength).toBuilder() + .overrideConfiguration( + o -> o.putExecutionAttribute(RESUME_TOKEN, resumeToken) + .putExecutionAttribute(PAUSE_OBSERVABLE, new PauseObservable())) + .build(); - @Override - public CompletableFuture answer(InvocationOnMock invocationOnMock) { - AsyncRequestBody AsyncRequestBody = invocationOnMock.getArgument(1); - // Draining the request body - AsyncRequestBody.subscribe(b -> {}); - - numberOfCalls++; - return 
CompletableFuture.completedFuture(UploadPartResponse.builder() - .checksumCRC32("crc" + numberOfCalls) - .build()); - } - }); } private OngoingStubbing> stubFailedUploadPartCalls(OngoingStubbing> stubbing, Exception exception) { return stubbing.thenAnswer(new Answer>() { - @Override - public CompletableFuture answer(InvocationOnMock invocationOnMock) { - AsyncRequestBody AsyncRequestBody = invocationOnMock.getArgument(1); - // Draining the request body - AsyncRequestBody.subscribe(b -> {}); + @Override + public CompletableFuture answer(InvocationOnMock invocationOnMock) { + AsyncRequestBody AsyncRequestBody = invocationOnMock.getArgument(1); + // Draining the request body + AsyncRequestBody.subscribe(b -> {}); - return CompletableFutureUtils.failedFuture(exception); - } - }); + return CompletableFutureUtils.failedFuture(exception); + } + }); } private static class UnknownContentLengthAsyncRequestBody implements AsyncRequestBody { @@ -425,4 +465,39 @@ public void cancel() { } } -} + + private static class TestPartPublisher implements SdkPublisher { + private int existingParts; + private int currentPart = 1; + + TestPartPublisher(int existingParts) { + this.existingParts = existingParts; + } + + @Override + public void subscribe(Subscriber subscriber) { + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + if (n <= 0) { + subscriber.onError(new IllegalArgumentException("Demand must be positive")); + return; + } + + if (existingParts == 0) { + subscriber.onComplete(); + } + + while(existingParts > 0) { + existingParts--; + subscriber.onNext(Part.builder().partNumber(currentPart++).build()); + } + } + + @Override + public void cancel() {} + }); + } + } + +} \ No newline at end of file diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/plugins/S3SignerPropertiesPluginsTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/plugins/S3SignerPropertiesPluginsTest.java new file 
mode 100644 index 000000000000..89ef600522da --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/plugins/S3SignerPropertiesPluginsTest.java @@ -0,0 +1,418 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.plugins; + +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.function.Consumer; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.SelectedAuthScheme; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.http.auth.aws.scheme.AwsV4AuthScheme; 
+import software.amazon.awssdk.http.auth.aws.signer.AwsV4FamilyHttpSigner; +import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; +import software.amazon.awssdk.http.auth.spi.signer.SignerProperty; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.S3ClientBuilder; +import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.UploadPartRequest; +import software.amazon.awssdk.utils.Validate; + +class S3SignerPropertiesPluginsTest { + private static final String PUT_BODY = "put body"; + private static String DEFAULT_BUCKET = "bucket"; + private static String DEFAULT_KEY = "key"; + private static final AwsCredentialsProvider CREDENTIALS_PROVIDER = + StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid")); + + @ParameterizedTest + @MethodSource("testCases") + public void validateTestCase(TestCase testCase) { + CapturingInterceptor capturingInterceptor = new CapturingInterceptor(); + S3ClientBuilder clientBuilder = getS3ClientBuilder(capturingInterceptor); + testCase.configureClient().accept(clientBuilder); + S3Client client = clientBuilder.build(); + assertThatThrownBy(() -> testCase.useClient().accept(client)) + .hasMessageContaining("boom") + .as(testCase.name()); + + AuthSchemeOption expectedValues = testCase.expectedSignerProperties(); + Map, Object> expectedProperties = signerProperties(expectedValues); + + assertThat(selectSignerProperties(signerProperties(capturingInterceptor.authSchemeOption()), expectedProperties.keySet())) + .isEqualTo(expectedProperties) + .as(testCase.name()); + assertThat(selectSignerProperties(signerProperties(capturingInterceptor.authSchemeOption()), testCase.unsetProperties())) + .isEqualTo(Collections.emptyMap()) + 
.as(testCase.name()); + } + + static Map, Object> signerProperties(AuthSchemeOption option) { + return SignerPropertiesBuilder.from(option).build(); + } + + static Map, Object> selectSignerProperties( + Map, Object> signerProperties, + Collection> keys + ) { + Map, Object> result = new HashMap<>(); + for (SignerProperty key : keys) { + if (signerProperties.containsKey(key)) { + result.put(key, signerProperties.get(key)); + } + } + return result; + } + + public static Collection testCases() { + return Arrays.asList( + // S3DisableChunkEncodingIfConfiguredPlugin, honors + // S#Configuration.enableChunkEncoding(false) + testUploadPartEnablesChunkEncodingByDefault(), + testUploadPartDisablesChunkEncodingWhenConfigured(), + testPutObjectEnablesChunkEncodingByDefault(), + testPutObjectDisablesChunkEncodingWhenConfigured(), + testGetObjectDoesNotSetChunkEncoding(), + testGetObjectDoesNotSetChunkEncodingIfNotConfigured(), + testGetObjectDoesNotSetChunkEncodingIfConfigured(), + // S3OverrideAuthSchemePropertiesPlugin.enablePayloadSigningPlugin() + testUploadPartDisablesPayloadSigningByDefault(), + testUploadPartEnablesPayloadSigningUsingPlugin(), + // S3OverrideAuthSchemePropertiesPlugin.disableChunkEncoding() + testUploadPartDisablesChunkEncodingUsingPlugin(), + testPutObjectDisablesChunkEncodingUsingPlugin() , + testGetObjectDoesNotDisablesChunkEncodingUsingPlugin() + ); + } + + // S3OverrideAuthSchemePropertiesPlugin.enablePayloadSigningPlugin() + private static TestCase testUploadPartDisablesPayloadSigningByDefault() { + return forUploadPart("UploadPartDisablesPayloadSigningByDefault") + .expectedSignerProperties(defaultExpectedAuthSchemeOptionBuilder() + .putSignerProperty(AwsV4FamilyHttpSigner.PAYLOAD_SIGNING_ENABLED, false) + .build()) + .build(); + } + + private static TestCase testUploadPartEnablesPayloadSigningUsingPlugin() { + return forUploadPart("UploadPartEnablesPayloadSigningUsingPlugin") + .configureClient(c -> 
c.addPlugin(S3OverrideAuthSchemePropertiesPlugin.enablePayloadSigningPlugin())) + .expectedSignerProperties(defaultExpectedAuthSchemeOptionBuilder() + .putSignerProperty(AwsV4FamilyHttpSigner.PAYLOAD_SIGNING_ENABLED, true) + .build()) + .build(); + + } + + + // S3OverrideAuthSchemePropertiesPlugin.disableChunkEncoding() + private static TestCase testUploadPartDisablesChunkEncodingUsingPlugin() { + return forUploadPart("UploadPartDisablesChunkEncodingUsingPlugin") + .configureClient(c -> c.addPlugin(S3OverrideAuthSchemePropertiesPlugin.disableChunkEncodingPlugin())) + .expectedSignerProperties(defaultExpectedAuthSchemeOptionBuilder() + .putSignerProperty(AwsV4FamilyHttpSigner.CHUNK_ENCODING_ENABLED, false) + .build()) + .build(); + + } + + static TestCase testPutObjectDisablesChunkEncodingUsingPlugin() { + return forPutObject("PutObjectDisablesChunkEncodingUsingPlugin") + .configureClient(c -> c.addPlugin(S3OverrideAuthSchemePropertiesPlugin.disableChunkEncodingPlugin())) + .expectedSignerProperties(defaultExpectedAuthSchemeOptionBuilder() + .putSignerProperty(AwsV4FamilyHttpSigner.CHUNK_ENCODING_ENABLED, false) + .build()) + .build(); + } + + static TestCase testGetObjectDoesNotDisablesChunkEncodingUsingPlugin() { + return forGetObject("GetObjectDoesNotDisablesChunkEncodingUsingPlugin") + .configureClient(c -> c.addPlugin(S3OverrideAuthSchemePropertiesPlugin.disableChunkEncodingPlugin())) + .expectedSignerProperties(defaultExpectedAuthSchemeOptionBuilder() + .build()) + .addUnsetProperty(AwsV4FamilyHttpSigner.CHUNK_ENCODING_ENABLED) + .build(); + } + + // S3DisableChunkEncodingIfConfiguredPlugin + static TestCase testUploadPartEnablesChunkEncodingByDefault() { + return forUploadPart("UploadPartEnablesChunkEncodingByDefault") + .expectedSignerProperties(defaultExpectedAuthSchemeOptionBuilder() + .putSignerProperty(AwsV4FamilyHttpSigner.CHUNK_ENCODING_ENABLED, true) + .build()) + .build(); + } + + static TestCase testUploadPartDisablesChunkEncodingWhenConfigured() { 
+ return forUploadPart("UploadPartDisablesChunkEncodingWhenConfigured") + .configureClient(c -> c.serviceConfiguration(S3Configuration.builder() + .chunkedEncodingEnabled(false) + .build())) + .expectedSignerProperties(defaultExpectedAuthSchemeOptionBuilder() + .putSignerProperty(AwsV4FamilyHttpSigner.CHUNK_ENCODING_ENABLED, false) + .build()) + .build(); + + } + + static TestCase testPutObjectEnablesChunkEncodingByDefault() { + return forPutObject("PutObjectEnablesChunkEncodingByDefault") + .expectedSignerProperties(defaultExpectedAuthSchemeOptionBuilder() + .putSignerProperty(AwsV4FamilyHttpSigner.CHUNK_ENCODING_ENABLED, true) + .build()) + .build(); + } + + static TestCase testPutObjectDisablesChunkEncodingWhenConfigured() { + return forPutObject("PutObjectDisablesChunkEncodingWhenConfigured") + .configureClient(c -> c.serviceConfiguration(S3Configuration.builder() + .chunkedEncodingEnabled(false) + .build())) + .expectedSignerProperties(defaultExpectedAuthSchemeOptionBuilder() + .putSignerProperty(AwsV4FamilyHttpSigner.CHUNK_ENCODING_ENABLED, false) + .build()) + .build(); + } + + static TestCase testGetObjectDoesNotSetChunkEncoding() { + return forGetObject("GetObjectDoesNotSetChunkEncoding") + .expectedSignerProperties(defaultExpectedAuthSchemeOptionBuilder().build()) + .build(); + } + + static TestCase testGetObjectDoesNotSetChunkEncodingIfNotConfigured() { + return forGetObject("GetObjectDoesNotSetChunkEncodingIfNotConfigured") + .configureClient(c -> c.serviceConfiguration(S3Configuration.builder() + .chunkedEncodingEnabled(true) + .build())) + .expectedSignerProperties(defaultExpectedAuthSchemeOptionBuilder().build()) + .build(); + } + + static TestCase testGetObjectDoesNotSetChunkEncodingIfConfigured() { + return forGetObject("GetObjectDoesNotSetChunkEncodingIfConfigured") + .configureClient(c -> c.serviceConfiguration(S3Configuration.builder() + .chunkedEncodingEnabled(false) + .build())) + 
.expectedSignerProperties(defaultExpectedAuthSchemeOptionBuilder().build()) + .build(); + } + + // End of tests, utils next + static TestCaseBuilder forUploadPart(String name) { + return testCaseBuilder(name) + .useClient(c -> { + UploadPartRequest.Builder requestBuilder = + UploadPartRequest.builder().bucket(DEFAULT_BUCKET).key(DEFAULT_KEY).partNumber(0).uploadId("test"); + c.uploadPart(requestBuilder.build(), RequestBody.fromString(PUT_BODY)); + }); + } + + static TestCaseBuilder forPutObject(String name) { + return testCaseBuilder(name) + .useClient(c -> { + PutObjectRequest.Builder requestBuilder = + PutObjectRequest.builder().bucket(DEFAULT_BUCKET).key(DEFAULT_KEY); + c.putObject(requestBuilder.build(), RequestBody.fromString(PUT_BODY)); + }); + } + + static TestCaseBuilder forGetObject(String name) { + return testCaseBuilder(name) + .useClient(c -> { + GetObjectRequest.Builder requestBuilder = + GetObjectRequest.builder().bucket(DEFAULT_BUCKET).key(DEFAULT_KEY); + c.getObject(requestBuilder.build()); + }); + } + + public static TestCaseBuilder testCaseBuilder(String name) { + return new TestCaseBuilder(name); + } + + static AuthSchemeOption.Builder defaultExpectedAuthSchemeOptionBuilder() { + return AuthSchemeOption.builder() + .schemeId(AwsV4AuthScheme.SCHEME_ID) + .putSignerProperty(AwsV4FamilyHttpSigner.NORMALIZE_PATH, false) + .putSignerProperty(AwsV4FamilyHttpSigner.DOUBLE_URL_ENCODE, false) + .putSignerProperty(AwsV4FamilyHttpSigner.PAYLOAD_SIGNING_ENABLED, false); + } + + static S3ClientBuilder getS3ClientBuilder(CapturingInterceptor capturingInterceptor) { + return S3Client.builder() + .region(Region.US_EAST_1) + .overrideConfiguration(c -> c.addExecutionInterceptor(capturingInterceptor)) + .credentialsProvider(CREDENTIALS_PROVIDER); + } + + public static class TestCaseBuilder { + private final String name; + private Consumer configureClient = c -> { + }; + private Consumer useClient; + private AuthSchemeOption expectedSignerProperties = 
defaultExpectedAuthSchemeOptionBuilder().build(); + private Set> unsetProperties = new HashSet<>(); + + public TestCaseBuilder(String name) { + this.name = name; + } + + public Consumer configureClient() { + return configureClient; + } + + public TestCaseBuilder configureClient(Consumer configureClient) { + this.configureClient = configureClient; + return this; + } + + public Consumer useClient() { + return useClient; + } + + public TestCaseBuilder useClient(Consumer useClient) { + this.useClient = useClient; + return this; + } + + public AuthSchemeOption expectedSignerProperties() { + return expectedSignerProperties; + } + + public TestCaseBuilder expectedSignerProperties(AuthSchemeOption expectedSignerProperties) { + this.expectedSignerProperties = expectedSignerProperties; + return this; + } + + public Set> unsetProperties() { + if (unsetProperties.isEmpty()) { + return Collections.emptySet(); + } + return Collections.unmodifiableSet(new HashSet<>(this.unsetProperties)); + } + + public TestCaseBuilder unsetProperties(Set> unsetProperties) { + this.unsetProperties.clear(); + this.unsetProperties.addAll(unsetProperties); + return this; + } + + public TestCaseBuilder addUnsetProperty(SignerProperty unsetProperty) { + this.unsetProperties.add(unsetProperty); + return this; + } + + public String name() { + return name; + } + + public TestCase build() { + return new TestCase(this); + } + } + + static class TestCase { + private final String name; + private final Consumer configureClient; + private final Consumer useClient; + private final AuthSchemeOption expectedSignerProperties; + private final Set> unsetProperties; + + public TestCase(TestCaseBuilder builder) { + this.name = Validate.paramNotNull(builder.name(), "name"); + this.configureClient = Validate.paramNotNull(builder.configureClient(), "configureClient"); + this.useClient = Validate.paramNotNull(builder.useClient(), "useClient"); + this.expectedSignerProperties = 
Validate.paramNotNull(builder.expectedSignerProperties(), "expectedSignerProperties"); + this.unsetProperties = Validate.paramNotNull(builder.unsetProperties(), "unsetProperties"); + } + + public String name() { + return name; + } + + public Consumer configureClient() { + return configureClient; + } + + public Consumer useClient() { + return useClient; + } + + public AuthSchemeOption expectedSignerProperties() { + return expectedSignerProperties; + } + + public Set> unsetProperties() { + return unsetProperties; + } + } + + static class SignerPropertiesBuilder { + Map, Object> map = new HashMap<>(); + + static SignerPropertiesBuilder from(AuthSchemeOption option) { + SignerPropertiesBuilder builder = + new SignerPropertiesBuilder(); + option.forEachSignerProperty(builder::putSignerProperty); + return builder; + } + + public void putSignerProperty(SignerProperty key, T value) { + map.put(key, value); + } + + public Map, Object> build() { + return map; + } + } + + static class CapturingInterceptor implements ExecutionInterceptor { + private static final RuntimeException RTE = new RuntimeException("boom"); + private SelectedAuthScheme selectedAuthScheme; + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + selectedAuthScheme = executionAttributes.getAttribute(SdkInternalExecutionAttribute.SELECTED_AUTH_SCHEME); + throw RTE; + } + + public AuthSchemeOption authSchemeOption() { + if (selectedAuthScheme == null) { + return null; + } + return selectedAuthScheme.authSchemeOption(); + } + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/multipart/S3ResumeTokenTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/multipart/S3ResumeTokenTest.java new file mode 100644 index 000000000000..d31e3be7f2f2 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/multipart/S3ResumeTokenTest.java @@ -0,0 +1,59 @@ +/* + * Copyright 
Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.multipart; + +import static org.assertj.core.api.Assertions.assertThat; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class S3ResumeTokenTest { + + private static final String UPLOAD_ID = "uploadId"; + private static final long PART_SIZE = 99; + private static final long TOTAL_NUM_PARTS = 20; + private static final long NUM_PARTS_COMPLETED = 2; + + @Test + public void s3ResumeToken_withValues_buildsCorrectly() { + S3ResumeToken token = S3ResumeToken.builder() + .uploadId(UPLOAD_ID) + .partSize(PART_SIZE) + .totalNumParts(TOTAL_NUM_PARTS) + .numPartsCompleted(NUM_PARTS_COMPLETED) + .build(); + + assertThat(token.uploadId()).isEqualTo(UPLOAD_ID); + assertThat(token.partSize()).isEqualTo(PART_SIZE); + assertThat(token.totalNumParts()).isEqualTo(TOTAL_NUM_PARTS); + assertThat(token.numPartsCompleted()).isEqualTo(NUM_PARTS_COMPLETED); + } + + @Test + public void s3ResumeToken_default_buildsCorrectly() { + S3ResumeToken token = S3ResumeToken.builder().build(); + + assertThat(token.uploadId()).isNull(); + assertThat(token.partSize()).isNull(); + assertThat(token.totalNumParts()).isNull(); + assertThat(token.numPartsCompleted()).isNull(); + } + + @Test + void testEqualsAndHashCodeContract() { + EqualsVerifier.forClass(S3ResumeToken.class); + } +} diff --git a/services/s3/src/test/resources/__files/part1 
b/services/s3/src/test/resources/__files/part1 new file mode 100644 index 000000000000..93cadc986855 Binary files /dev/null and b/services/s3/src/test/resources/__files/part1 differ diff --git a/services/s3/src/test/resources/__files/part2 b/services/s3/src/test/resources/__files/part2 new file mode 100644 index 000000000000..ebd8722df565 Binary files /dev/null and b/services/s3/src/test/resources/__files/part2 differ diff --git a/services/s3/src/test/resources/__files/part3 b/services/s3/src/test/resources/__files/part3 new file mode 100644 index 000000000000..065033159e6b Binary files /dev/null and b/services/s3/src/test/resources/__files/part3 differ diff --git a/services/s3control/pom.xml b/services/s3control/pom.xml index c76638ae4d4a..613bb06bd972 100644 --- a/services/s3control/pom.xml +++ b/services/s3control/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT s3control AWS Java SDK :: Services :: Amazon S3 Control diff --git a/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3MrapIntegrationTest.java b/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3MrapIntegrationTest.java index af18e2257d91..3b59ea2c593c 100644 --- a/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3MrapIntegrationTest.java +++ b/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3MrapIntegrationTest.java @@ -22,10 +22,8 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; import java.time.Duration; -import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.Optional; import java.util.stream.Stream; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -33,10 +31,9 @@ import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute; -import 
software.amazon.awssdk.auth.signer.S3SignerExecutionAttribute; import software.amazon.awssdk.auth.signer.internal.SignerConstant; import software.amazon.awssdk.awscore.presigner.PresignedRequest; -import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.core.SdkPlugin; import software.amazon.awssdk.core.interceptor.Context; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; @@ -46,16 +43,14 @@ import software.amazon.awssdk.core.waiters.WaiterAcceptor; import software.amazon.awssdk.http.HttpExecuteRequest; import software.amazon.awssdk.http.HttpExecuteResponse; -import software.amazon.awssdk.http.SdkHttpMethod; import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.http.apache.ApacheHttpClient; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.internal.plugins.S3OverrideAuthSchemePropertiesPlugin; import software.amazon.awssdk.services.s3.model.BucketAlreadyOwnedByYouException; import software.amazon.awssdk.services.s3.model.NoSuchKeyException; -import software.amazon.awssdk.services.s3.model.PutObjectRequest; -import software.amazon.awssdk.services.s3.model.UploadPartRequest; import software.amazon.awssdk.services.s3.presigner.S3Presigner; import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest; import software.amazon.awssdk.services.s3control.model.BucketAlreadyExistsException; @@ -99,7 +94,7 @@ public static void setupFixture() { .build(); s3Client = mrapEnabledS3Client(Collections.singletonList(captureInterceptor)); - s3ClientWithPayloadSigning = mrapEnabledS3Client(Arrays.asList(captureInterceptor, new PayloadSigningInterceptor())); + s3ClientWithPayloadSigning = mrapEnabledS3ClientWithPayloadSigning(captureInterceptor); stsClient = StsClient.builder() 
.credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) @@ -309,6 +304,25 @@ private static S3Client mrapEnabledS3Client(List execution .build(); } + private static S3Client mrapEnabledS3ClientWithPayloadSigning(ExecutionInterceptor executionInterceptor) { + // We can't use here `S3OverrideAuthSchemePropertiesPlugin.enablePayloadSigningPlugin()` since + // it enables payload signing for *all* operations. + SdkPlugin plugin = S3OverrideAuthSchemePropertiesPlugin.builder() + .payloadSigningEnabled(true) + .addOperationConstraint("UploadPart") + .addOperationConstraint("PutObject") + .build(); + return S3Client.builder() + .region(REGION) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .serviceConfiguration(S3Configuration.builder() + .useArnRegionEnabled(true) + .build()) + .overrideConfiguration(o -> o.addExecutionInterceptor(executionInterceptor)) + .addPlugin(plugin) + .build(); + } + private void deleteObjectIfExists(S3Client s31, String bucket1, String key) { System.out.println(bucket1); try { @@ -341,21 +355,4 @@ public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttr this.normalizePath = executionAttributes.getAttribute(AwsSignerExecutionAttribute.SIGNER_NORMALIZE_PATH); } } - - private static class PayloadSigningInterceptor implements ExecutionInterceptor { - - public Optional modifyHttpContent(Context.ModifyHttpRequest context, - ExecutionAttributes executionAttributes) { - SdkRequest sdkRequest = context.request(); - - if (sdkRequest instanceof PutObjectRequest || sdkRequest instanceof UploadPartRequest) { - executionAttributes.putAttribute(S3SignerExecutionAttribute.ENABLE_PAYLOAD_SIGNING, true); - } - if (!context.requestBody().isPresent() && context.httpRequest().method().equals(SdkHttpMethod.POST)) { - return Optional.of(RequestBody.fromBytes(new byte[0])); - } - - return context.requestBody(); - } - } } diff --git 
a/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/PayloadSigningInterceptor.java b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/PayloadSigningInterceptor.java index 5b3cb9f88444..6c0f91ed9f0b 100644 --- a/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/PayloadSigningInterceptor.java +++ b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/PayloadSigningInterceptor.java @@ -17,7 +17,6 @@ import java.util.Optional; import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.auth.signer.S3SignerExecutionAttribute; import software.amazon.awssdk.core.interceptor.Context; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; @@ -25,7 +24,7 @@ import software.amazon.awssdk.http.SdkHttpMethod; /** - * Turns on payload signing and prevents moving query params to body during a POST which S3 doesn't like. + * Prevents moving query params to body during a POST which S3 doesn't like. 
*/ @SdkInternalApi public class PayloadSigningInterceptor implements ExecutionInterceptor { @@ -33,11 +32,10 @@ public class PayloadSigningInterceptor implements ExecutionInterceptor { @Override public Optional modifyHttpContent(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { - executionAttributes.putAttribute(S3SignerExecutionAttribute.ENABLE_PAYLOAD_SIGNING, true); - if (!context.requestBody().isPresent() && context.httpRequest().method() == SdkHttpMethod.POST) { + Optional bodyOptional = context.requestBody(); + if (context.httpRequest().method() == SdkHttpMethod.POST && !bodyOptional.isPresent()) { return Optional.of(RequestBody.fromBytes(new byte[0])); } - - return context.requestBody(); + return bodyOptional; } } diff --git a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/interceptors/PayloadSigningInterceptorTest.java b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/interceptors/PayloadSigningInterceptorTest.java index a7acddb6e4ab..bc5ee8275b77 100644 --- a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/interceptors/PayloadSigningInterceptorTest.java +++ b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/interceptors/PayloadSigningInterceptorTest.java @@ -19,7 +19,6 @@ import java.util.Optional; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import software.amazon.awssdk.auth.signer.S3SignerExecutionAttribute; import software.amazon.awssdk.core.Protocol; import software.amazon.awssdk.core.SdkRequest; import software.amazon.awssdk.core.async.AsyncRequestBody; @@ -53,7 +52,6 @@ public void modifyHttpContent_AddsExecutionAttributeAndPayload() { assertThat(modified.isPresent()).isTrue(); assertThat(modified.get().contentLength()).isEqualTo(0); - assertThat(executionAttributes.getAttribute(S3SignerExecutionAttribute.ENABLE_PAYLOAD_SIGNING)).isTrue(); } @Test 
@@ -65,7 +63,6 @@ public void modifyHttpContent_DoesNotReplaceBody() { assertThat(modified.isPresent()).isTrue(); assertThat(modified.get().contentLength()).isEqualTo(5); - assertThat(executionAttributes.getAttribute(S3SignerExecutionAttribute.ENABLE_PAYLOAD_SIGNING)).isTrue(); } public final class Context implements software.amazon.awssdk.core.interceptor.Context.ModifyHttpRequest { diff --git a/services/s3outposts/pom.xml b/services/s3outposts/pom.xml index b096a611a555..e1f886d45269 100644 --- a/services/s3outposts/pom.xml +++ b/services/s3outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT s3outposts AWS Java SDK :: Services :: S3 Outposts diff --git a/services/s3outposts/src/main/resources/codegen-resources/customization.config b/services/s3outposts/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/s3outposts/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/sagemaker/pom.xml b/services/sagemaker/pom.xml index b1ceaf7522cb..1553e2307e32 100644 --- a/services/sagemaker/pom.xml +++ b/services/sagemaker/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 sagemaker diff --git a/services/sagemaker/src/main/resources/codegen-resources/service-2.json b/services/sagemaker/src/main/resources/codegen-resources/service-2.json index b37de25a53de..2d32a93f54b6 100644 --- a/services/sagemaker/src/main/resources/codegen-resources/service-2.json +++ b/services/sagemaker/src/main/resources/codegen-resources/service-2.json @@ -3476,7 +3476,21 @@ {"shape":"ResourceNotFound"}, {"shape":"ConflictException"} ], - "documentation":"

    Update a SageMaker HyperPod cluster.

    " + "documentation":"

    Updates a SageMaker HyperPod cluster.

    " + }, + "UpdateClusterSoftware":{ + "name":"UpdateClusterSoftware", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateClusterSoftwareRequest"}, + "output":{"shape":"UpdateClusterSoftwareResponse"}, + "errors":[ + {"shape":"ResourceNotFound"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Updates the platform software of a SageMaker HyperPod cluster for security patching. To learn how to use this API, see Update the SageMaker HyperPod platform software of a cluster.

    " }, "UpdateCodeRepository":{ "name":"UpdateCodeRepository", @@ -25853,7 +25867,7 @@ "documentation":"

    The path of the S3 object that contains the model artifacts. For example, s3://bucket-name/keynameprefix/model.tar.gz.

    " } }, - "documentation":"

    Provides information about the location that is configured for storing model artifacts.

    Model artifacts are the output that results from training a model, and typically consist of trained parameters, a model definition that describes how to compute inferences, and other metadata.

    " + "documentation":"

    Provides information about the location that is configured for storing model artifacts.

    Model artifacts are outputs that result from training a model. They typically consist of trained parameters, a model definition that describes how to compute inferences, and other metadata. A SageMaker container stores your trained model artifacts in the /opt/ml/model directory. After training has completed, by default, these artifacts are uploaded to your Amazon S3 bucket as compressed files.

    " }, "ModelBiasAppSpecification":{ "type":"structure", @@ -35782,6 +35796,26 @@ } } }, + "UpdateClusterSoftwareRequest":{ + "type":"structure", + "required":["ClusterName"], + "members":{ + "ClusterName":{ + "shape":"ClusterNameOrArn", + "documentation":"

    Specify the name or the Amazon Resource Name (ARN) of the SageMaker HyperPod cluster you want to update for security patching.

    " + } + } + }, + "UpdateClusterSoftwareResponse":{ + "type":"structure", + "required":["ClusterArn"], + "members":{ + "ClusterArn":{ + "shape":"ClusterArn", + "documentation":"

    The Amazon Resource Name (ARN) of the SageMaker HyperPod cluster being updated for security patching.

    " + } + } + }, "UpdateCodeRepositoryInput":{ "type":"structure", "required":["CodeRepositoryName"], diff --git a/services/sagemakera2iruntime/pom.xml b/services/sagemakera2iruntime/pom.xml index 576af17096a9..6f83cbc281ea 100644 --- a/services/sagemakera2iruntime/pom.xml +++ b/services/sagemakera2iruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT sagemakera2iruntime AWS Java SDK :: Services :: SageMaker A2I Runtime diff --git a/services/sagemakera2iruntime/src/main/resources/codegen-resources/customization.config b/services/sagemakera2iruntime/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/sagemakera2iruntime/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/sagemakeredge/pom.xml b/services/sagemakeredge/pom.xml index a035046440ac..410945014789 100644 --- a/services/sagemakeredge/pom.xml +++ b/services/sagemakeredge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT sagemakeredge AWS Java SDK :: Services :: Sagemaker Edge diff --git a/services/sagemakeredge/src/main/resources/codegen-resources/customization.config b/services/sagemakeredge/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/sagemakeredge/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/sagemakerfeaturestoreruntime/pom.xml b/services/sagemakerfeaturestoreruntime/pom.xml index 8c655b3adafe..d25655222c92 100644 --- a/services/sagemakerfeaturestoreruntime/pom.xml +++ b/services/sagemakerfeaturestoreruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT sagemakerfeaturestoreruntime AWS Java SDK :: Services :: Sage Maker Feature 
Store Runtime diff --git a/services/sagemakergeospatial/pom.xml b/services/sagemakergeospatial/pom.xml index dbc926be128c..e1f3cabb12e2 100644 --- a/services/sagemakergeospatial/pom.xml +++ b/services/sagemakergeospatial/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT sagemakergeospatial AWS Java SDK :: Services :: Sage Maker Geospatial diff --git a/services/sagemakergeospatial/src/main/resources/codegen-resources/customization.config b/services/sagemakergeospatial/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/sagemakergeospatial/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/sagemakermetrics/pom.xml b/services/sagemakermetrics/pom.xml index d61a77d62621..3c9c0dfb0d28 100644 --- a/services/sagemakermetrics/pom.xml +++ b/services/sagemakermetrics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT sagemakermetrics AWS Java SDK :: Services :: Sage Maker Metrics diff --git a/services/sagemakermetrics/src/main/resources/codegen-resources/customization.config b/services/sagemakermetrics/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/sagemakermetrics/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/sagemakerruntime/pom.xml b/services/sagemakerruntime/pom.xml index 11fabbd7623c..bcf3eaaff0da 100644 --- a/services/sagemakerruntime/pom.xml +++ b/services/sagemakerruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT sagemakerruntime AWS Java SDK :: Services :: SageMaker Runtime diff --git a/services/sagemakerruntime/src/main/resources/codegen-resources/customization.config 
b/services/sagemakerruntime/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/sagemakerruntime/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/savingsplans/pom.xml b/services/savingsplans/pom.xml index 6f00351e6a52..06722d77e177 100644 --- a/services/savingsplans/pom.xml +++ b/services/savingsplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT savingsplans AWS Java SDK :: Services :: Savingsplans diff --git a/services/scheduler/pom.xml b/services/scheduler/pom.xml index 05ee003bbe46..f552a829d821 100644 --- a/services/scheduler/pom.xml +++ b/services/scheduler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT scheduler AWS Java SDK :: Services :: Scheduler diff --git a/services/scheduler/src/main/resources/codegen-resources/customization.config b/services/scheduler/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/scheduler/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/schemas/pom.xml b/services/schemas/pom.xml index 347ee594e04e..43cf9880d4f0 100644 --- a/services/schemas/pom.xml +++ b/services/schemas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT schemas AWS Java SDK :: Services :: Schemas diff --git a/services/secretsmanager/pom.xml b/services/secretsmanager/pom.xml index 59547de6f37b..dced87d671f6 100644 --- a/services/secretsmanager/pom.xml +++ b/services/secretsmanager/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT secretsmanager AWS Java SDK :: Services :: AWS Secrets Manager diff --git 
a/services/secretsmanager/src/main/resources/codegen-resources/customization.config b/services/secretsmanager/src/main/resources/codegen-resources/customization.config index 8398ab40cbc7..90925c738888 100644 --- a/services/secretsmanager/src/main/resources/codegen-resources/customization.config +++ b/services/secretsmanager/src/main/resources/codegen-resources/customization.config @@ -1,6 +1,7 @@ { - "verifiedSimpleMethods" : [ + "verifiedSimpleMethods": [ "getRandomPassword", "listSecrets" - ] + ], + "useSraAuth": true } diff --git a/services/secretsmanager/src/main/resources/codegen-resources/service-2.json b/services/secretsmanager/src/main/resources/codegen-resources/service-2.json index b836713b02a2..0f5e987afb04 100644 --- a/services/secretsmanager/src/main/resources/codegen-resources/service-2.json +++ b/services/secretsmanager/src/main/resources/codegen-resources/service-2.json @@ -194,7 +194,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"InternalServiceError"} ], - "documentation":"

    Lists the secrets that are stored by Secrets Manager in the Amazon Web Services account, not including secrets that are marked for deletion. To see secrets marked for deletion, use the Secrets Manager console.

    ListSecrets is eventually consistent, however it might not reflect changes from the last five minutes. To get the latest information for a specific secret, use DescribeSecret.

    To list the versions of a secret, use ListSecretVersionIds.

    To retrieve the values for the secrets, call BatchGetSecretValue or GetSecretValue.

    For information about finding secrets in the console, see Find secrets in Secrets Manager.

    Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

    Required permissions: secretsmanager:ListSecrets. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

    " + "documentation":"

    Lists the secrets that are stored by Secrets Manager in the Amazon Web Services account, not including secrets that are marked for deletion. To see secrets marked for deletion, use the Secrets Manager console.

    All Secrets Manager operations are eventually consistent. ListSecrets might not reflect changes from the last five minutes. You can get more recent information for a specific secret by calling DescribeSecret.

    To list the versions of a secret, use ListSecretVersionIds.

    To retrieve the values for the secrets, call BatchGetSecretValue or GetSecretValue.

    For information about finding secrets in the console, see Find secrets in Secrets Manager.

    Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

    Required permissions: secretsmanager:ListSecrets. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

    " }, "PutResourcePolicy":{ "name":"PutResourcePolicy", @@ -706,7 +706,7 @@ }, "NextRotationDate":{ "shape":"NextRotationDateType", - "documentation":"

    The next rotation is scheduled to occur on or before this date. If the secret isn't configured for rotation or rotation has been disabled, Secrets Manager returns null.

    " + "documentation":"

    The next rotation is scheduled to occur on or before this date. If the secret isn't configured for rotation or rotation has been disabled, Secrets Manager returns null. If rotation fails, Secrets Manager retries the entire rotation process multiple times. If rotation is unsuccessful, this date may be in the past.

    " }, "Tags":{ "shape":"TagListType", diff --git a/services/securityhub/pom.xml b/services/securityhub/pom.xml index a75f81a51314..ecdaa79e856e 100644 --- a/services/securityhub/pom.xml +++ b/services/securityhub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT securityhub AWS Java SDK :: Services :: SecurityHub diff --git a/services/securityhub/src/main/resources/codegen-resources/customization.config b/services/securityhub/src/main/resources/codegen-resources/customization.config index a1cf874638da..57d2b7d2eeab 100644 --- a/services/securityhub/src/main/resources/codegen-resources/customization.config +++ b/services/securityhub/src/main/resources/codegen-resources/customization.config @@ -13,5 +13,6 @@ "getEnabledStandards", "getInsights" ], - "useSraAuth": true + "useSraAuth": true, + "enableGenerateCompiledEndpointRules": true } diff --git a/services/securitylake/pom.xml b/services/securitylake/pom.xml index c178484e6fc7..7effc81c5467 100644 --- a/services/securitylake/pom.xml +++ b/services/securitylake/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT securitylake AWS Java SDK :: Services :: Security Lake diff --git a/services/securitylake/src/main/resources/codegen-resources/customization.config b/services/securitylake/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/securitylake/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/securitylake/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/securitylake/src/main/resources/codegen-resources/endpoint-rule-set.json index 565deb8ab107..f72c5adafeed 100644 --- a/services/securitylake/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/securitylake/src/main/resources/codegen-resources/endpoint-rule-set.json @@ 
-40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -59,7 +58,6 @@ }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -87,13 +85,14 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -106,7 +105,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -120,7 +118,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -143,7 +140,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -178,11 +174,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -193,16 +187,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -216,14 +213,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -232,15 +227,14 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -251,16 +245,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -274,7 +271,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -294,11 +290,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -309,20 +303,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { 
"conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -333,18 +329,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "Invalid Configuration: Missing Region", "type": "error" } - ] + ], + "type": "tree" } ] } \ No newline at end of file diff --git a/services/securitylake/src/main/resources/codegen-resources/service-2.json b/services/securitylake/src/main/resources/codegen-resources/service-2.json index 2aadfa9cc226..165dbb19051e 100644 --- a/services/securitylake/src/main/resources/codegen-resources/service-2.json +++ b/services/securitylake/src/main/resources/codegen-resources/service-2.json @@ -1217,14 +1217,14 @@ "members":{ "regions":{ "shape":"RegionList", - "documentation":"

    Replication enables automatic, asynchronous copying of objects across Amazon S3 buckets. Amazon S3 buckets that are configured for object replication can be owned by the same Amazon Web Services account or by different accounts. You can replicate objects to a single destination bucket or to multiple destination buckets. The destination buckets can be in different Amazon Web Services Regions or within the same Region as the source bucket.

    Set up one or more rollup Regions by providing the Region or Regions that should contribute to the central rollup Region.

    " + "documentation":"

    Specifies one or more centralized rollup Regions. The Amazon Web Services Region specified in the region parameter of the CreateDataLake or UpdateDataLake operations contributes data to the rollup Region or Regions specified in this parameter.

    Replication enables automatic, asynchronous copying of objects across Amazon S3 buckets. S3 buckets that are configured for object replication can be owned by the same Amazon Web Services account or by different accounts. You can replicate objects to a single destination bucket or to multiple destination buckets. The destination buckets can be in different Regions or within the same Region as the source bucket.

    " }, "roleArn":{ "shape":"RoleArn", "documentation":"

    Replication settings for the Amazon S3 buckets. This parameter uses the Identity and Access Management (IAM) role you created that is managed by Security Lake, to ensure the replication setting is correct.

    " } }, - "documentation":"

    Provides replication details of Amazon Security Lake object.

    " + "documentation":"

    Provides replication details for objects stored in the Amazon Security Lake data lake.

    " }, "DataLakeResource":{ "type":"structure", @@ -1654,7 +1654,7 @@ }, "regions":{ "shape":"RegionList", - "documentation":"

    List the Amazon Web Services Regions from which exceptions are retrieved.

    " + "documentation":"

    The Amazon Web Services Regions from which exceptions are retrieved.

    " } } }, @@ -1676,7 +1676,7 @@ "members":{ "regions":{ "shape":"RegionList", - "documentation":"

    The list of regions where Security Lake is enabled.

    ", + "documentation":"

    The list of Regions where Security Lake is enabled.

    ", "location":"querystring", "locationName":"regions" } @@ -1708,7 +1708,7 @@ }, "regions":{ "shape":"RegionList", - "documentation":"

    The list of regions for which log sources are displayed.

    " + "documentation":"

    The list of Regions for which log sources are displayed.

    " }, "sources":{ "shape":"LogSourceResourceList", diff --git a/services/serverlessapplicationrepository/pom.xml b/services/serverlessapplicationrepository/pom.xml index 2ad0c93bea9e..9db436b60a34 100644 --- a/services/serverlessapplicationrepository/pom.xml +++ b/services/serverlessapplicationrepository/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 serverlessapplicationrepository diff --git a/services/servicecatalog/pom.xml b/services/servicecatalog/pom.xml index 038ac379d177..d1cc124d4a2e 100644 --- a/services/servicecatalog/pom.xml +++ b/services/servicecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT servicecatalog AWS Java SDK :: Services :: AWS Service Catalog diff --git a/services/servicecatalog/src/main/resources/codegen-resources/customization.config b/services/servicecatalog/src/main/resources/codegen-resources/customization.config index c0f905687005..a572d9470662 100644 --- a/services/servicecatalog/src/main/resources/codegen-resources/customization.config +++ b/services/servicecatalog/src/main/resources/codegen-resources/customization.config @@ -1,15 +1,16 @@ { - "verifiedSimpleMethods": [ - "listAcceptedPortfolioShares", - "listPortfolios", - "listProvisionedProductPlans", - "listRecordHistory", - "listServiceActions", - "scanProvisionedProducts", - "searchProducts", - "searchProductsAsAdmin", - "listTagOptions", - "searchProvisionedProducts", - "getAWSOrganizationsAccessStatus" - ] + "verifiedSimpleMethods": [ + "listAcceptedPortfolioShares", + "listPortfolios", + "listProvisionedProductPlans", + "listRecordHistory", + "listServiceActions", + "scanProvisionedProducts", + "searchProducts", + "searchProductsAsAdmin", + "listTagOptions", + "searchProvisionedProducts", + "getAWSOrganizationsAccessStatus" + ], + "useSraAuth": true } diff --git a/services/servicecatalogappregistry/pom.xml b/services/servicecatalogappregistry/pom.xml index 
974f363f4650..f3c800352d0b 100644 --- a/services/servicecatalogappregistry/pom.xml +++ b/services/servicecatalogappregistry/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT servicecatalogappregistry AWS Java SDK :: Services :: Service Catalog App Registry diff --git a/services/servicediscovery/pom.xml b/services/servicediscovery/pom.xml index 25142917f492..fd2cd5dd9537 100644 --- a/services/servicediscovery/pom.xml +++ b/services/servicediscovery/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 servicediscovery diff --git a/services/servicequotas/pom.xml b/services/servicequotas/pom.xml index ae06ceaedce0..51b494480fe0 100644 --- a/services/servicequotas/pom.xml +++ b/services/servicequotas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT servicequotas AWS Java SDK :: Services :: Service Quotas diff --git a/services/servicequotas/src/main/resources/codegen-resources/customization.config b/services/servicequotas/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/servicequotas/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/ses/pom.xml b/services/ses/pom.xml index 324e2c2d1211..00accf0ea2e7 100644 --- a/services/ses/pom.xml +++ b/services/ses/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ses AWS Java SDK :: Services :: Amazon SES diff --git a/services/sesv2/pom.xml b/services/sesv2/pom.xml index a9a5c2a57e29..3210b811ca61 100644 --- a/services/sesv2/pom.xml +++ b/services/sesv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT sesv2 AWS Java SDK :: Services :: SESv2 diff --git a/services/sesv2/src/main/resources/codegen-resources/customization.config 
b/services/sesv2/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/sesv2/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/sfn/pom.xml b/services/sfn/pom.xml index adf3b8164a7c..a591b647e63d 100644 --- a/services/sfn/pom.xml +++ b/services/sfn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT sfn AWS Java SDK :: Services :: AWS Step Functions diff --git a/services/shield/pom.xml b/services/shield/pom.xml index 636bc93e537c..d9ee08c59277 100644 --- a/services/shield/pom.xml +++ b/services/shield/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT shield AWS Java SDK :: Services :: AWS Shield diff --git a/services/shield/src/main/resources/codegen-resources/customization.config b/services/shield/src/main/resources/codegen-resources/customization.config index 541a64eac33c..56b326b84715 100644 --- a/services/shield/src/main/resources/codegen-resources/customization.config +++ b/services/shield/src/main/resources/codegen-resources/customization.config @@ -1,20 +1,21 @@ { - "verifiedSimpleMethods": [ - "createSubscription", - "deleteSubscription", - "describeSubscription", - "listProtections", - "disassociateDRTRole", - "listAttacks", - "getSubscriptionState" - ], - "excludedSimpleMethods": [ - "updateEmergencyContactSettings", - "updateSubscription", - "describeDRTAccess", - "describeEmergencyContactSettings" - ], - "deprecatedOperations": [ - "DeleteSubscription" - ] + "verifiedSimpleMethods": [ + "createSubscription", + "deleteSubscription", + "describeSubscription", + "listProtections", + "disassociateDRTRole", + "listAttacks", + "getSubscriptionState" + ], + "excludedSimpleMethods": [ + "updateEmergencyContactSettings", + "updateSubscription", + "describeDRTAccess", + "describeEmergencyContactSettings" + ], + 
"deprecatedOperations": [ + "DeleteSubscription" + ], + "useSraAuth": true } diff --git a/services/signer/pom.xml b/services/signer/pom.xml index fc1d9cfacab6..d4c0837acab3 100644 --- a/services/signer/pom.xml +++ b/services/signer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT signer AWS Java SDK :: Services :: Signer diff --git a/services/signer/src/main/resources/codegen-resources/customization.config b/services/signer/src/main/resources/codegen-resources/customization.config index 2f6eb56c8ecb..73464a93ec02 100644 --- a/services/signer/src/main/resources/codegen-resources/customization.config +++ b/services/signer/src/main/resources/codegen-resources/customization.config @@ -1,7 +1,8 @@ { - "verifiedSimpleMethods" : [ + "verifiedSimpleMethods": [ "listSigningJobs", "listSigningPlatforms", "listSigningProfiles" - ] + ], + "useSraAuth": true } diff --git a/services/simspaceweaver/pom.xml b/services/simspaceweaver/pom.xml index 953912e02907..11bb7f3f04b7 100644 --- a/services/simspaceweaver/pom.xml +++ b/services/simspaceweaver/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT simspaceweaver AWS Java SDK :: Services :: Sim Space Weaver diff --git a/services/simspaceweaver/src/main/resources/codegen-resources/customization.config b/services/simspaceweaver/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/simspaceweaver/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/sms/pom.xml b/services/sms/pom.xml index c3c0cd1bc22d..5d007e0915a4 100644 --- a/services/sms/pom.xml +++ b/services/sms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT sms AWS Java SDK :: Services :: AWS Server Migration diff --git 
a/services/sms/src/main/resources/codegen-resources/customization.config b/services/sms/src/main/resources/codegen-resources/customization.config index 6e997de2f31f..7518fe8a6148 100644 --- a/services/sms/src/main/resources/codegen-resources/customization.config +++ b/services/sms/src/main/resources/codegen-resources/customization.config @@ -1,13 +1,14 @@ { - "verifiedSimpleMethods": [ - "deleteServerCatalog", - "importServerCatalog", - "getAppLaunchConfiguration", - "getAppReplicationConfiguration", - "getApp", - "getConnectors", - "getReplicationJobs", - "getServers", - "listApps" - ] + "verifiedSimpleMethods": [ + "deleteServerCatalog", + "importServerCatalog", + "getAppLaunchConfiguration", + "getAppReplicationConfiguration", + "getApp", + "getConnectors", + "getReplicationJobs", + "getServers", + "listApps" + ], + "useSraAuth": true } diff --git a/services/snowball/pom.xml b/services/snowball/pom.xml index 4ad3e05c6da9..168362afa364 100644 --- a/services/snowball/pom.xml +++ b/services/snowball/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT snowball AWS Java SDK :: Services :: Amazon Snowball diff --git a/services/snowdevicemanagement/pom.xml b/services/snowdevicemanagement/pom.xml index f9f32af15114..1ed0abe62256 100644 --- a/services/snowdevicemanagement/pom.xml +++ b/services/snowdevicemanagement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT snowdevicemanagement AWS Java SDK :: Services :: Snow Device Management diff --git a/services/sns/pom.xml b/services/sns/pom.xml index c4e7316cb656..509e617bb05c 100644 --- a/services/sns/pom.xml +++ b/services/sns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT sns AWS Java SDK :: Services :: Amazon SNS diff --git a/services/sns/src/main/resources/codegen-resources/customization.config b/services/sns/src/main/resources/codegen-resources/customization.config index 
d9c57a07275b..3a40a2c658fc 100644 --- a/services/sns/src/main/resources/codegen-resources/customization.config +++ b/services/sns/src/main/resources/codegen-resources/customization.config @@ -1,23 +1,24 @@ { - "verifiedSimpleMethods": [ - "getSMSAttributes", - "listPhoneNumbersOptedOut", - "listPlatformApplications", - "listSubscriptions", - "listTopics" - ], - "shapeModifiers": { - "AddPermissionInput": { - "modify": [ - { - "AWSAccountId": { - "emitPropertyName": "AWSAccountIds" - }, - "ActionName": { - "emitPropertyName": "ActionNames" - } + "verifiedSimpleMethods": [ + "getSMSAttributes", + "listPhoneNumbersOptedOut", + "listPlatformApplications", + "listSubscriptions", + "listTopics" + ], + "shapeModifiers": { + "AddPermissionInput": { + "modify": [ + { + "AWSAccountId": { + "emitPropertyName": "AWSAccountIds" + }, + "ActionName": { + "emitPropertyName": "ActionNames" + } + } + ] } - ] - } - } + }, + "useSraAuth": true } diff --git a/services/sns/src/main/resources/codegen-resources/service-2.json b/services/sns/src/main/resources/codegen-resources/service-2.json index 00c166bd6c15..c97124fa7656 100644 --- a/services/sns/src/main/resources/codegen-resources/service-2.json +++ b/services/sns/src/main/resources/codegen-resources/service-2.json @@ -84,7 +84,7 @@ {"shape":"InternalErrorException"}, {"shape":"AuthorizationErrorException"} ], - "documentation":"

    Creates a platform application object for one of the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile apps may register. You must specify PlatformPrincipal and PlatformCredential attributes when using the CreatePlatformApplication action.

    PlatformPrincipal and PlatformCredential are received from the notification service.

    • For ADM, PlatformPrincipal is client id and PlatformCredential is client secret.

    • For Baidu, PlatformPrincipal is API key and PlatformCredential is secret key.

    • For APNS and APNS_SANDBOX using certificate credentials, PlatformPrincipal is SSL certificate and PlatformCredential is private key.

    • For APNS and APNS_SANDBOX using token credentials, PlatformPrincipal is signing key ID and PlatformCredential is signing key.

    • For GCM (Firebase Cloud Messaging), there is no PlatformPrincipal and the PlatformCredential is API key.

    • For MPNS, PlatformPrincipal is TLS certificate and PlatformCredential is private key.

    • For WNS, PlatformPrincipal is Package Security Identifier and PlatformCredential is secret key.

    You can use the returned PlatformApplicationArn as an attribute for the CreatePlatformEndpoint action.

    " + "documentation":"

    Creates a platform application object for one of the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile apps may register. You must specify PlatformPrincipal and PlatformCredential attributes when using the CreatePlatformApplication action.

    PlatformPrincipal and PlatformCredential are received from the notification service.

    • For ADM, PlatformPrincipal is client id and PlatformCredential is client secret.

    • For Baidu, PlatformPrincipal is API key and PlatformCredential is secret key.

    • For APNS and APNS_SANDBOX using certificate credentials, PlatformPrincipal is SSL certificate and PlatformCredential is private key.

    • For APNS and APNS_SANDBOX using token credentials, PlatformPrincipal is signing key ID and PlatformCredential is signing key.

    • For GCM (Firebase Cloud Messaging) using key credentials, there is no PlatformPrincipal. The PlatformCredential is API key.

    • For GCM (Firebase Cloud Messaging) using token credentials, there is no PlatformPrincipal. The PlatformCredential is a JSON formatted private key file. When using the Amazon Web Services CLI, the file must be in string format and special characters must be ignored. To format the file correctly, Amazon SNS recommends using the following command: SERVICE_JSON=`jq @json <<< cat service.json`.

    • For MPNS, PlatformPrincipal is TLS certificate and PlatformCredential is private key.

    • For WNS, PlatformPrincipal is Package Security Identifier and PlatformCredential is secret key.

    You can use the returned PlatformApplicationArn as an attribute for the CreatePlatformEndpoint action.

    " }, "CreatePlatformEndpoint":{ "name":"CreatePlatformEndpoint", @@ -1027,7 +1027,7 @@ }, "Attributes":{ "shape":"MapStringToString", - "documentation":"

    For a list of attributes, see SetPlatformApplicationAttributes.

    " + "documentation":"

    For a list of attributes, see SetPlatformApplicationAttributes .

    " } }, "documentation":"

    Input for CreatePlatformApplication action.

    " @@ -1037,7 +1037,7 @@ "members":{ "PlatformApplicationArn":{ "shape":"String", - "documentation":"

    PlatformApplicationArn is returned.

    " + "documentation":"

    PlatformApplicationArn is returned.

    " } }, "documentation":"

    Response from CreatePlatformApplication action.

    " @@ -1051,7 +1051,7 @@ "members":{ "PlatformApplicationArn":{ "shape":"String", - "documentation":"

    PlatformApplicationArn returned from CreatePlatformApplication is used to create a an endpoint.

    " + "documentation":"

    PlatformApplicationArn returned from CreatePlatformApplication is used to create a an endpoint.

    " }, "Token":{ "shape":"String", @@ -1063,7 +1063,7 @@ }, "Attributes":{ "shape":"MapStringToString", - "documentation":"

    For a list of attributes, see SetEndpointAttributes.

    " + "documentation":"

    For a list of attributes, see SetEndpointAttributes .

    " } }, "documentation":"

    Input for CreatePlatformEndpoint action.

    " @@ -1130,10 +1130,10 @@ "members":{ "EndpointArn":{ "shape":"String", - "documentation":"

    EndpointArn of endpoint to delete.

    " + "documentation":"

    EndpointArn of endpoint to delete.

    " } }, - "documentation":"

    Input for DeleteEndpoint action.

    " + "documentation":"

    Input for DeleteEndpoint action.

    " }, "DeletePlatformApplicationInput":{ "type":"structure", @@ -1141,10 +1141,10 @@ "members":{ "PlatformApplicationArn":{ "shape":"String", - "documentation":"

    PlatformApplicationArn of platform application object to delete.

    " + "documentation":"

    PlatformApplicationArn of platform application object to delete.

    " } }, - "documentation":"

    Input for DeletePlatformApplication action.

    " + "documentation":"

    Input for DeletePlatformApplication action.

    " }, "DeleteSMSSandboxPhoneNumberInput":{ "type":"structure", @@ -1252,10 +1252,10 @@ "members":{ "EndpointArn":{ "shape":"String", - "documentation":"

    EndpointArn for GetEndpointAttributes input.

    " + "documentation":"

    EndpointArn for GetEndpointAttributes input.

    " } }, - "documentation":"

    Input for GetEndpointAttributes action.

    " + "documentation":"

    Input for GetEndpointAttributes action.

    " }, "GetEndpointAttributesResponse":{ "type":"structure", @@ -1265,7 +1265,7 @@ "documentation":"

    Attributes include the following:

    • CustomUserData – arbitrary user data to associate with the endpoint. Amazon SNS does not use this data. The data must be in UTF-8 format and less than 2KB.

    • Enabled – flag that enables/disables delivery to the endpoint. Amazon SNS will set this to false when a notification service indicates to Amazon SNS that the endpoint is invalid. Users can set it back to true, typically after updating Token.

    • Token – device token, also referred to as a registration id, for an app and mobile device. This is returned from the notification service when an app and mobile device are registered with the notification service.

      The device token for the iOS platform is returned in lowercase.

    " } }, - "documentation":"

    Response from GetEndpointAttributes of the EndpointArn.

    " + "documentation":"

    Response from GetEndpointAttributes of the EndpointArn.

    " }, "GetPlatformApplicationAttributesInput":{ "type":"structure", @@ -1273,20 +1273,20 @@ "members":{ "PlatformApplicationArn":{ "shape":"String", - "documentation":"

    PlatformApplicationArn for GetPlatformApplicationAttributesInput.

    " + "documentation":"

    PlatformApplicationArn for GetPlatformApplicationAttributesInput.

    " } }, - "documentation":"

    Input for GetPlatformApplicationAttributes action.

    " + "documentation":"

    Input for GetPlatformApplicationAttributes action.

    " }, "GetPlatformApplicationAttributesResponse":{ "type":"structure", "members":{ "Attributes":{ "shape":"MapStringToString", - "documentation":"

    Attributes include the following:

    • AppleCertificateExpiryDate – The expiry date of the SSL certificate used to configure certificate-based authentication.

    • ApplePlatformTeamID – The Apple developer account ID used to configure token-based authentication.

    • ApplePlatformBundleID – The app identifier used to configure token-based authentication.

    • EventEndpointCreated – Topic ARN to which EndpointCreated event notifications should be sent.

    • EventEndpointDeleted – Topic ARN to which EndpointDeleted event notifications should be sent.

    • EventEndpointUpdated – Topic ARN to which EndpointUpdate event notifications should be sent.

    • EventDeliveryFailure – Topic ARN to which DeliveryFailure event notifications should be sent upon Direct Publish delivery failure (permanent) to one of the application's endpoints.

    " + "documentation":"

    Attributes include the following:

    • AppleCertificateExpiryDate – The expiry date of the SSL certificate used to configure certificate-based authentication.

    • ApplePlatformTeamID – The Apple developer account ID used to configure token-based authentication.

    • ApplePlatformBundleID – The app identifier used to configure token-based authentication.

    • AuthenticationMethod – Returns the credential type used when sending push notifications from application to APNS/APNS_Sandbox, or application to GCM.

      • APNS – Returns the token or certificate.

      • GCM – Returns the token or key.

    • EventEndpointCreated – Topic ARN to which EndpointCreated event notifications should be sent.

    • EventEndpointDeleted – Topic ARN to which EndpointDeleted event notifications should be sent.

    • EventEndpointUpdated – Topic ARN to which EndpointUpdate event notifications should be sent.

    • EventDeliveryFailure – Topic ARN to which DeliveryFailure event notifications should be sent upon Direct Publish delivery failure (permanent) to one of the application's endpoints.

    " } }, - "documentation":"

    Response for GetPlatformApplicationAttributes action.

    " + "documentation":"

    Response for GetPlatformApplicationAttributes action.

    " }, "GetSMSAttributesInput":{ "type":"structure", @@ -1555,28 +1555,28 @@ "members":{ "PlatformApplicationArn":{ "shape":"String", - "documentation":"

    PlatformApplicationArn for ListEndpointsByPlatformApplicationInput action.

    " + "documentation":"

    PlatformApplicationArn for ListEndpointsByPlatformApplicationInput action.

    " }, "NextToken":{ "shape":"String", - "documentation":"

    NextToken string is used when calling ListEndpointsByPlatformApplication action to retrieve additional records that are available after the first page results.

    " + "documentation":"

    NextToken string is used when calling ListEndpointsByPlatformApplication action to retrieve additional records that are available after the first page results.

    " } }, - "documentation":"

    Input for ListEndpointsByPlatformApplication action.

    " + "documentation":"

    Input for ListEndpointsByPlatformApplication action.

    " }, "ListEndpointsByPlatformApplicationResponse":{ "type":"structure", "members":{ "Endpoints":{ "shape":"ListOfEndpoints", - "documentation":"

    Endpoints returned for ListEndpointsByPlatformApplication action.

    " + "documentation":"

    Endpoints returned for ListEndpointsByPlatformApplication action.

    " }, "NextToken":{ "shape":"String", - "documentation":"

    NextToken string is returned when calling ListEndpointsByPlatformApplication action if additional records are available after the first page results.

    " + "documentation":"

    NextToken string is returned when calling ListEndpointsByPlatformApplication action if additional records are available after the first page results.

    " } }, - "documentation":"

    Response for ListEndpointsByPlatformApplication action.

    " + "documentation":"

    Response for ListEndpointsByPlatformApplication action.

    " }, "ListOfEndpoints":{ "type":"list", @@ -1641,24 +1641,24 @@ "members":{ "NextToken":{ "shape":"String", - "documentation":"

    NextToken string is used when calling ListPlatformApplications action to retrieve additional records that are available after the first page results.

    " + "documentation":"

    NextToken string is used when calling ListPlatformApplications action to retrieve additional records that are available after the first page results.

    " } }, - "documentation":"

    Input for ListPlatformApplications action.

    " + "documentation":"

    Input for ListPlatformApplications action.

    " }, "ListPlatformApplicationsResponse":{ "type":"structure", "members":{ "PlatformApplications":{ "shape":"ListOfPlatformApplications", - "documentation":"

    Platform applications returned when calling ListPlatformApplications action.

    " + "documentation":"

    Platform applications returned when calling ListPlatformApplications action.

    " }, "NextToken":{ "shape":"String", - "documentation":"

    NextToken string is returned when calling ListPlatformApplications action if additional records are available after the first page results.

    " + "documentation":"

    NextToken string is returned when calling ListPlatformApplications action if additional records are available after the first page results.

    " } }, - "documentation":"

    Response for ListPlatformApplications action.

    " + "documentation":"

    Response for ListPlatformApplications action.

    " }, "ListSMSSandboxPhoneNumbersInput":{ "type":"structure", @@ -1895,7 +1895,10 @@ }, "exception":true }, - "PhoneNumber":{"type":"string"}, + "PhoneNumber":{ + "type":"string", + "sensitive":true + }, "PhoneNumberInformation":{ "type":"structure", "members":{ @@ -1904,7 +1907,7 @@ "documentation":"

    The date and time when the phone number was created.

    " }, "PhoneNumber":{ - "shape":"String", + "shape":"PhoneNumber", "documentation":"

    The phone number.

    " }, "Status":{ @@ -1938,7 +1941,8 @@ "PhoneNumberString":{ "type":"string", "max":20, - "pattern":"^(\\+[0-9]{8,}|[0-9]{0,9})$" + "pattern":"^(\\+[0-9]{8,}|[0-9]{0,9})$", + "sensitive":true }, "PlatformApplication":{ "type":"structure", @@ -2077,7 +2081,7 @@ "documentation":"

    If you don't specify a value for the TargetArn parameter, you must specify a value for the PhoneNumber or TopicArn parameters.

    " }, "PhoneNumber":{ - "shape":"String", + "shape":"PhoneNumber", "documentation":"

    The phone number to which you want to deliver an SMS message. Use E.164 format.

    If you don't specify a value for the PhoneNumber parameter, you must specify a value for the TargetArn or TopicArn parameters.

    " }, "Message":{ @@ -2226,14 +2230,14 @@ "members":{ "EndpointArn":{ "shape":"String", - "documentation":"

    EndpointArn used for SetEndpointAttributes action.

    " + "documentation":"

    EndpointArn used for SetEndpointAttributes action.

    " }, "Attributes":{ "shape":"MapStringToString", "documentation":"

    A map of the endpoint attributes. Attributes in this map include the following:

    • CustomUserData – arbitrary user data to associate with the endpoint. Amazon SNS does not use this data. The data must be in UTF-8 format and less than 2KB.

    • Enabled – flag that enables/disables delivery to the endpoint. Amazon SNS will set this to false when a notification service indicates to Amazon SNS that the endpoint is invalid. Users can set it back to true, typically after updating Token.

    • Token – device token, also referred to as a registration id, for an app and mobile device. This is returned from the notification service when an app and mobile device are registered with the notification service.

    " } }, - "documentation":"

    Input for SetEndpointAttributes action.

    " + "documentation":"

    Input for SetEndpointAttributes action.

    " }, "SetPlatformApplicationAttributesInput":{ "type":"structure", @@ -2244,14 +2248,14 @@ "members":{ "PlatformApplicationArn":{ "shape":"String", - "documentation":"

    PlatformApplicationArn for SetPlatformApplicationAttributes action.

    " + "documentation":"

    PlatformApplicationArn for SetPlatformApplicationAttributes action.

    " }, "Attributes":{ "shape":"MapStringToString", - "documentation":"

    A map of the platform application attributes. Attributes in this map include the following:

    • PlatformCredential – The credential received from the notification service.

      • For ADM, PlatformCredentialis client secret.

      • For Apple Services using certificate credentials, PlatformCredential is private key.

      • For Apple Services using token credentials, PlatformCredential is signing key.

      • For GCM (Firebase Cloud Messaging), PlatformCredential is API key.

    • PlatformPrincipal – The principal received from the notification service.

      • For ADM, PlatformPrincipalis client id.

      • For Apple Services using certificate credentials, PlatformPrincipal is SSL certificate.

      • For Apple Services using token credentials, PlatformPrincipal is signing key ID.

      • For GCM (Firebase Cloud Messaging), there is no PlatformPrincipal.

    • EventEndpointCreated – Topic ARN to which EndpointCreated event notifications are sent.

    • EventEndpointDeleted – Topic ARN to which EndpointDeleted event notifications are sent.

    • EventEndpointUpdated – Topic ARN to which EndpointUpdate event notifications are sent.

    • EventDeliveryFailure – Topic ARN to which DeliveryFailure event notifications are sent upon Direct Publish delivery failure (permanent) to one of the application's endpoints.

    • SuccessFeedbackRoleArn – IAM role ARN used to give Amazon SNS write access to use CloudWatch Logs on your behalf.

    • FailureFeedbackRoleArn – IAM role ARN used to give Amazon SNS write access to use CloudWatch Logs on your behalf.

    • SuccessFeedbackSampleRate – Sample rate percentage (0-100) of successfully delivered messages.

    The following attributes only apply to APNs token-based authentication:

    • ApplePlatformTeamID – The identifier that's assigned to your Apple developer account team.

    • ApplePlatformBundleID – The bundle identifier that's assigned to your iOS app.

    " + "documentation":"

    A map of the platform application attributes. Attributes in this map include the following:

    • PlatformCredential – The credential received from the notification service.

      • For ADM, PlatformCredentialis client secret.

      • For Apple Services using certificate credentials, PlatformCredential is private key.

      • For Apple Services using token credentials, PlatformCredential is signing key.

      • For GCM (Firebase Cloud Messaging) using key credentials, there is no PlatformPrincipal. The PlatformCredential is API key.

      • For GCM (Firebase Cloud Messaging) using token credentials, there is no PlatformPrincipal. The PlatformCredential is a JSON formatted private key file. When using the Amazon Web Services CLI, the file must be in string format and special characters must be ignored. To format the file correctly, Amazon SNS recommends using the following command: SERVICE_JSON=`jq @json <<< cat service.json`.

    • PlatformPrincipal – The principal received from the notification service.

      • For ADM, PlatformPrincipalis client id.

      • For Apple Services using certificate credentials, PlatformPrincipal is SSL certificate.

      • For Apple Services using token credentials, PlatformPrincipal is signing key ID.

      • For GCM (Firebase Cloud Messaging), there is no PlatformPrincipal.

    • EventEndpointCreated – Topic ARN to which EndpointCreated event notifications are sent.

    • EventEndpointDeleted – Topic ARN to which EndpointDeleted event notifications are sent.

    • EventEndpointUpdated – Topic ARN to which EndpointUpdate event notifications are sent.

    • EventDeliveryFailure – Topic ARN to which DeliveryFailure event notifications are sent upon Direct Publish delivery failure (permanent) to one of the application's endpoints.

    • SuccessFeedbackRoleArn – IAM role ARN used to give Amazon SNS write access to use CloudWatch Logs on your behalf.

    • FailureFeedbackRoleArn – IAM role ARN used to give Amazon SNS write access to use CloudWatch Logs on your behalf.

    • SuccessFeedbackSampleRate – Sample rate percentage (0-100) of successfully delivered messages.

    The following attributes only apply to APNs token-based authentication:

    • ApplePlatformTeamID – The identifier that's assigned to your Apple developer account team.

    • ApplePlatformBundleID – The bundle identifier that's assigned to your iOS app.

    " } }, - "documentation":"

    Input for SetPlatformApplicationAttributes action.

    " + "documentation":"

    Input for SetPlatformApplicationAttributes action.

    " }, "SetSMSAttributesInput":{ "type":"structure", diff --git a/services/sqs/pom.xml b/services/sqs/pom.xml index 15b2d8a6acff..209967f8be76 100644 --- a/services/sqs/pom.xml +++ b/services/sqs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT sqs AWS Java SDK :: Services :: Amazon SQS diff --git a/services/ssm/pom.xml b/services/ssm/pom.xml index 51b313b2d7d2..971db0fb3aca 100644 --- a/services/ssm/pom.xml +++ b/services/ssm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ssm AWS Java SDK :: Services :: AWS Simple Systems Management (SSM) diff --git a/services/ssm/src/main/resources/codegen-resources/customization.config b/services/ssm/src/main/resources/codegen-resources/customization.config index 0f8e54aef312..83092d77b938 100644 --- a/services/ssm/src/main/resources/codegen-resources/customization.config +++ b/services/ssm/src/main/resources/codegen-resources/customization.config @@ -1,29 +1,30 @@ { - "verifiedSimpleMethods": [ - "describeActivations", - "describeAutomationExecutions", - "describeAvailablePatches", - "describeInstanceInformation", - "describeInventoryDeletions", - "describeMaintenanceWindows", - "describeParameters", - "describePatchBaselines", - "describePatchGroups", - "getDefaultPatchBaseline", - "getInventory", - "getInventorySchema", - "listAssociations", - "listCommandInvocations", - "listCommands", - "listComplianceSummaries", - "listDocuments", - "listResourceComplianceSummaries", - "listResourceDataSync" - ], - "excludedSimpleMethods": [ - "deleteAssociation", - "describeAssociation", - "listComplianceItems", - "describeMaintenanceWindowSchedule" - ] + "verifiedSimpleMethods": [ + "describeActivations", + "describeAutomationExecutions", + "describeAvailablePatches", + "describeInstanceInformation", + "describeInventoryDeletions", + "describeMaintenanceWindows", + "describeParameters", + "describePatchBaselines", + "describePatchGroups", + 
"getDefaultPatchBaseline", + "getInventory", + "getInventorySchema", + "listAssociations", + "listCommandInvocations", + "listCommands", + "listComplianceSummaries", + "listDocuments", + "listResourceComplianceSummaries", + "listResourceDataSync" + ], + "excludedSimpleMethods": [ + "deleteAssociation", + "describeAssociation", + "listComplianceItems", + "describeMaintenanceWindowSchedule" + ], + "useSraAuth": true } diff --git a/services/ssm/src/main/resources/codegen-resources/service-2.json b/services/ssm/src/main/resources/codegen-resources/service-2.json index 8faebf1afe7e..dd9be8febd00 100644 --- a/services/ssm/src/main/resources/codegen-resources/service-2.json +++ b/services/ssm/src/main/resources/codegen-resources/service-2.json @@ -414,9 +414,12 @@ "errors":[ {"shape":"InternalServerError"}, {"shape":"ResourcePolicyInvalidParameterException"}, - {"shape":"ResourcePolicyConflictException"} + {"shape":"ResourcePolicyConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"MalformedResourcePolicyDocumentException"}, + {"shape":"ResourcePolicyNotFoundException"} ], - "documentation":"

    Deletes a Systems Manager resource policy. A resource policy helps you to define the IAM entity (for example, an Amazon Web Services account) that can manage your Systems Manager resources. Currently, OpsItemGroup is the only resource that supports Systems Manager resource policies. The resource policy for OpsItemGroup enables Amazon Web Services accounts to view and interact with OpsCenter operational work items (OpsItems).

    " + "documentation":"

    Deletes a Systems Manager resource policy. A resource policy helps you to define the IAM entity (for example, an Amazon Web Services account) that can manage your Systems Manager resources. The following resources support Systems Manager resource policies.

    • OpsItemGroup - The resource policy for OpsItemGroup enables Amazon Web Services accounts to view and interact with OpsCenter operational work items (OpsItems).

    • Parameter - The resource policy is used to share a parameter with other accounts using Resource Access Manager (RAM). For more information about cross-account sharing of parameters, see Working with shared parameters in the Amazon Web Services Systems Manager User Guide.

    " }, "DeregisterManagedInstance":{ "name":"DeregisterManagedInstance", @@ -876,7 +879,7 @@ {"shape":"InvalidFilterValue"}, {"shape":"InvalidNextToken"} ], - "documentation":"

    Get information about a parameter.

    Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

    If you change the KMS key alias for the KMS key used to encrypt a parameter, then you must also update the key alias the parameter uses to reference KMS. Otherwise, DescribeParameters retrieves whatever the original key alias was referencing.

    " + "documentation":"

    Lists the parameters in your Amazon Web Services account or the parameters shared with you when you enable the Shared option.

    Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

    If you change the KMS key alias for the KMS key used to encrypt a parameter, then you must also update the key alias the parameter uses to reference KMS. Otherwise, DescribeParameters retrieves whatever the original key alias was referencing.

    " }, "DescribePatchBaselines":{ "name":"DescribePatchBaselines", @@ -1320,7 +1323,8 @@ "output":{"shape":"GetResourcePoliciesResponse"}, "errors":[ {"shape":"InternalServerError"}, - {"shape":"ResourcePolicyInvalidParameterException"} + {"shape":"ResourcePolicyInvalidParameterException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Returns an array of the Policy object.

    " }, @@ -1701,9 +1705,12 @@ {"shape":"InternalServerError"}, {"shape":"ResourcePolicyInvalidParameterException"}, {"shape":"ResourcePolicyLimitExceededException"}, - {"shape":"ResourcePolicyConflictException"} + {"shape":"ResourcePolicyConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"MalformedResourcePolicyDocumentException"}, + {"shape":"ResourcePolicyNotFoundException"} ], - "documentation":"

    Creates or updates a Systems Manager resource policy. A resource policy helps you to define the IAM entity (for example, an Amazon Web Services account) that can manage your Systems Manager resources. Currently, OpsItemGroup is the only resource that supports Systems Manager resource policies. The resource policy for OpsItemGroup enables Amazon Web Services accounts to view and interact with OpsCenter operational work items (OpsItems).

    " + "documentation":"

    Creates or updates a Systems Manager resource policy. A resource policy helps you to define the IAM entity (for example, an Amazon Web Services account) that can manage your Systems Manager resources. The following resources support Systems Manager resource policies.

    • OpsItemGroup - The resource policy for OpsItemGroup enables Amazon Web Services accounts to view and interact with OpsCenter operational work items (OpsItems).

    • Parameter - The resource policy is used to share a parameter with other accounts using Resource Access Manager (RAM).

      To share a parameter, it must be in the advanced parameter tier. For information about parameter tiers, see Managing parameter tiers. For information about changing an existing standard parameter to an advanced parameter, see Changing a standard parameter to an advanced parameter.

      To share a SecureString parameter, it must be encrypted with a customer managed key, and you must share the key separately through Key Management Service. Amazon Web Services managed keys cannot be shared. Parameters encrypted with the default Amazon Web Services managed key can be updated to use a customer managed key instead. For KMS key definitions, see KMS concepts in the Key Management Service Developer Guide.

      While you can share a parameter using the Systems Manager PutResourcePolicy operation, we recommend using Resource Access Manager (RAM) instead. This is because using PutResourcePolicy requires the extra step of promoting the parameter to a standard RAM Resource Share using the RAM PromoteResourceShareCreatedFromPolicy API operation. Otherwise, the parameter won't be returned by the Systems Manager DescribeParameters API operation using the --shared option.

      For more information, see Sharing a parameter in the Amazon Web Services Systems Manager User Guide

    " }, "RegisterDefaultPatchBaseline":{ "name":"RegisterDefaultPatchBaseline", @@ -5217,7 +5224,7 @@ "members":{ "Name":{ "shape":"PSParameterName", - "documentation":"

    The name of the parameter to delete.

    " + "documentation":"

    The name of the parameter to delete.

    You can't enter the Amazon Resource Name (ARN) for a parameter, only the parameter name itself.

    " } } }, @@ -5232,7 +5239,7 @@ "members":{ "Names":{ "shape":"ParameterNameList", - "documentation":"

    The names of the parameters to delete. After deleting a parameter, wait for at least 30 seconds to create a parameter with the same name.

    " + "documentation":"

    The names of the parameters to delete. After deleting a parameter, wait for at least 30 seconds to create a parameter with the same name.

    You can't enter the Amazon Resource Name (ARN) for a parameter, only the parameter name itself.

    " } } }, @@ -6379,6 +6386,11 @@ "NextToken":{ "shape":"NextToken", "documentation":"

    The token for the next set of items to return. (You received this token from a previous call.)

    " + }, + "Shared":{ + "shape":"Boolean", + "documentation":"

    Lists parameters that are shared with you.

    By default when using this option, the command returns parameters that have been shared using a standard Resource Access Manager Resource Share. In order for a parameter that was shared using the PutResourcePolicy command to be returned, the associated RAM Resource Share Created From Policy must have been promoted to a standard Resource Share using the RAM PromoteResourceShareCreatedFromPolicy API operation.

    For more information about sharing parameters, see Working with shared parameters in the Amazon Web Services Systems Manager User Guide.

    ", + "box":true } } }, @@ -8258,7 +8270,7 @@ "members":{ "Name":{ "shape":"PSParameterName", - "documentation":"

    The name of the parameter for which you want to review history.

    " + "documentation":"

    The name or Amazon Resource Name (ARN) of the parameter for which you want to review history. For parameters shared with you from another account, you must use the full ARN.

    " }, "WithDecryption":{ "shape":"Boolean", @@ -8295,7 +8307,7 @@ "members":{ "Name":{ "shape":"PSParameterName", - "documentation":"

    The name of the parameter you want to query.

    To query by parameter label, use \"Name\": \"name:label\". To query by parameter version, use \"Name\": \"name:version\".

    " + "documentation":"

    The name or Amazon Resource Name (ARN) of the parameter that you want to query. For parameters shared with you from another account, you must use the full ARN.

    To query by parameter label, use \"Name\": \"name:label\". To query by parameter version, use \"Name\": \"name:version\".

    For more information about shared parameters, see Working with shared parameters in the Amazon Web Services Systems Manager User Guide.

    " }, "WithDecryption":{ "shape":"Boolean", @@ -8370,7 +8382,7 @@ "members":{ "Names":{ "shape":"ParameterNameList", - "documentation":"

    Names of the parameters for which you want to query information.

    To query by parameter label, use \"Name\": \"name:label\". To query by parameter version, use \"Name\": \"name:version\".

    " + "documentation":"

    The names or Amazon Resource Names (ARNs) of the parameters that you want to query. For parameters shared with you from another account, you must use the full ARNs.

    To query by parameter label, use \"Name\": \"name:label\". To query by parameter version, use \"Name\": \"name:version\".

    For more information about shared parameters, see Working with shared parameters in the Amazon Web Services Systems Manager User Guide.

    " }, "WithDecryption":{ "shape":"Boolean", @@ -9973,7 +9985,7 @@ "members":{ "Name":{ "shape":"PSParameterName", - "documentation":"

    The parameter name on which you want to attach one or more labels.

    " + "documentation":"

    The parameter name on which you want to attach one or more labels.

    You can't enter the Amazon Resource Name (ARN) for a parameter, only the parameter name itself.

    " }, "ParameterVersion":{ "shape":"PSParameterVersion", @@ -11302,6 +11314,14 @@ "type":"list", "member":{"shape":"MaintenanceWindowIdentityForTarget"} }, + "MalformedResourcePolicyDocumentException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

    The specified policy document is malformed or invalid, or excessive PutResourcePolicy or DeleteResourcePolicy calls have been made.

    ", + "exception":true + }, "ManagedInstanceId":{ "type":"string", "max":124, @@ -12693,6 +12713,10 @@ "shape":"PSParameterName", "documentation":"

    The parameter name.

    " }, + "ARN":{ + "shape":"String", + "documentation":"

    The (ARN) of the last user to update the parameter.

    " + }, "Type":{ "shape":"ParameterType", "documentation":"

    The type of parameter. Valid parameter types include the following: String, StringList, and SecureString.

    " @@ -12734,7 +12758,7 @@ "documentation":"

    The data type of the parameter, such as text or aws:ec2:image. The default is text.

    " } }, - "documentation":"

    Metadata includes information like the ARN of the last user and the date/time the parameter was last used.

    " + "documentation":"

    Metadata includes information like the Amazon Resource Name (ARN) of the last user to update the parameter and the date and time the parameter was last used.

    " }, "ParameterMetadataList":{ "type":"list", @@ -13530,7 +13554,7 @@ }, "Policy":{ "type":"string", - "pattern":"\\S+" + "pattern":"^(?!\\s*$).+" }, "PolicyHash":{"type":"string"}, "PolicyId":{"type":"string"}, @@ -13643,7 +13667,7 @@ "members":{ "Name":{ "shape":"PSParameterName", - "documentation":"

    The fully qualified name of the parameter that you want to add to the system. The fully qualified name includes the complete hierarchy of the parameter path and name. For parameters in a hierarchy, you must include a leading forward slash character (/) when you create or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13

    Naming Constraints:

    • Parameter names are case sensitive.

    • A parameter name must be unique within an Amazon Web Services Region

    • A parameter name can't be prefixed with \"aws\" or \"ssm\" (case-insensitive).

    • Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-

      In addition, the slash character ( / ) is used to delineate hierarchies in parameter names. For example: /Dev/Production/East/Project-ABC/MyParameter

    • A parameter name can't include spaces.

    • Parameter hierarchies are limited to a maximum depth of fifteen levels.

    For additional information about valid values for parameter names, see Creating Systems Manager parameters in the Amazon Web Services Systems Manager User Guide.

    The maximum length constraint of 2048 characters listed below includes 1037 characters reserved for internal use by Systems Manager. The maximum length for a parameter name that you create is 1011 characters. This includes the characters in the ARN that precede the name you specify, such as arn:aws:ssm:us-east-2:111122223333:parameter/.

    " + "documentation":"

    The fully qualified name of the parameter that you want to add to the system.

    You can't enter the Amazon Resource Name (ARN) for a parameter, only the parameter name itself.

    The fully qualified name includes the complete hierarchy of the parameter path and name. For parameters in a hierarchy, you must include a leading forward slash character (/) when you create or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13

    Naming Constraints:

    • Parameter names are case sensitive.

    • A parameter name must be unique within an Amazon Web Services Region

    • A parameter name can't be prefixed with \"aws\" or \"ssm\" (case-insensitive).

    • Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-

      In addition, the slash character ( / ) is used to delineate hierarchies in parameter names. For example: /Dev/Production/East/Project-ABC/MyParameter

    • A parameter name can't include spaces.

    • Parameter hierarchies are limited to a maximum depth of fifteen levels.

    For additional information about valid values for parameter names, see Creating Systems Manager parameters in the Amazon Web Services Systems Manager User Guide.

    The maximum length constraint of 2048 characters listed below includes 1037 characters reserved for internal use by Systems Manager. The maximum length for a parameter name that you create is 1011 characters. This includes the characters in the ARN that precede the name you specify, such as arn:aws:ssm:us-east-2:111122223333:parameter/.

    " }, "Description":{ "shape":"ParameterDescription", @@ -14434,6 +14458,14 @@ "documentation":"

    Error returned when the caller has exceeded the default resource quotas. For example, too many maintenance windows or patch baselines have been created.

    For information about resource quotas in Systems Manager, see Systems Manager service quotas in the Amazon Web Services General Reference.

    ", "exception":true }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

    The specified parameter to be shared could not be found.

    ", + "exception":true + }, "ResourcePolicyConflictException":{ "type":"structure", "members":{ @@ -14466,6 +14498,14 @@ "max":50, "min":1 }, + "ResourcePolicyNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

    No policies with the specified policy ID and hash could be found.

    ", + "exception":true + }, "ResourcePolicyParameterNamesList":{ "type":"list", "member":{"shape":"String"} @@ -15765,7 +15805,7 @@ "members":{ "Name":{ "shape":"PSParameterName", - "documentation":"

    The name of the parameter from which you want to delete one or more labels.

    " + "documentation":"

    The name of the parameter from which you want to delete one or more labels.

    You can't enter the Amazon Resource Name (ARN) for a parameter, only the parameter name itself.

    " }, "ParameterVersion":{ "shape":"PSParameterVersion", diff --git a/services/ssmcontacts/pom.xml b/services/ssmcontacts/pom.xml index 47f933b031df..d49dd9a1398a 100644 --- a/services/ssmcontacts/pom.xml +++ b/services/ssmcontacts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ssmcontacts AWS Java SDK :: Services :: SSM Contacts diff --git a/services/ssmcontacts/src/main/resources/codegen-resources/customization.config b/services/ssmcontacts/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/ssmcontacts/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/ssmincidents/pom.xml b/services/ssmincidents/pom.xml index 755c84404301..27269c13f12c 100644 --- a/services/ssmincidents/pom.xml +++ b/services/ssmincidents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ssmincidents AWS Java SDK :: Services :: SSM Incidents diff --git a/services/ssmincidents/src/main/resources/codegen-resources/customization.config b/services/ssmincidents/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/ssmincidents/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/ssmsap/pom.xml b/services/ssmsap/pom.xml index aa663e10b0d9..f28eb1617dad 100644 --- a/services/ssmsap/pom.xml +++ b/services/ssmsap/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ssmsap AWS Java SDK :: Services :: Ssm Sap diff --git a/services/sso/pom.xml b/services/sso/pom.xml index e9937b365026..9efde5b313cb 100644 --- a/services/sso/pom.xml +++ b/services/sso/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT sso AWS 
Java SDK :: Services :: SSO diff --git a/services/ssoadmin/pom.xml b/services/ssoadmin/pom.xml index 81df7c2d5895..1abaa3829d9a 100644 --- a/services/ssoadmin/pom.xml +++ b/services/ssoadmin/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ssoadmin AWS Java SDK :: Services :: SSO Admin diff --git a/services/ssooidc/pom.xml b/services/ssooidc/pom.xml index 898d9d655f63..d30a840affe5 100644 --- a/services/ssooidc/pom.xml +++ b/services/ssooidc/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ssooidc AWS Java SDK :: Services :: SSO OIDC diff --git a/services/storagegateway/pom.xml b/services/storagegateway/pom.xml index b15331ae21fa..e934d381364b 100644 --- a/services/storagegateway/pom.xml +++ b/services/storagegateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT storagegateway AWS Java SDK :: Services :: AWS Storage Gateway diff --git a/services/storagegateway/src/main/resources/codegen-resources/customization.config b/services/storagegateway/src/main/resources/codegen-resources/customization.config index b2f06f5894a2..932970f1d521 100644 --- a/services/storagegateway/src/main/resources/codegen-resources/customization.config +++ b/services/storagegateway/src/main/resources/codegen-resources/customization.config @@ -1,21 +1,22 @@ { - "verifiedSimpleMethods": [ - "describeTapeArchives", - "listFileShares", - "listGateways", - "listTapes", - "listVolumes" - ], - "shapeModifiers": { - "InternalServerError": { - "exclude": [ - "error" - ] + "verifiedSimpleMethods": [ + "describeTapeArchives", + "listFileShares", + "listGateways", + "listTapes", + "listVolumes" + ], + "shapeModifiers": { + "InternalServerError": { + "exclude": [ + "error" + ] + }, + "InvalidGatewayRequestException": { + "exclude": [ + "error" + ] + } }, - "InvalidGatewayRequestException": { - "exclude": [ - "error" - ] - } - } + "useSraAuth": true } diff --git 
a/services/sts/pom.xml b/services/sts/pom.xml index 95c069958fd5..83469d6a41e4 100644 --- a/services/sts/pom.xml +++ b/services/sts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT sts AWS Java SDK :: Services :: AWS STS diff --git a/services/supplychain/pom.xml b/services/supplychain/pom.xml index 854dca0d18b4..3a2fb1d46c44 100644 --- a/services/supplychain/pom.xml +++ b/services/supplychain/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT supplychain AWS Java SDK :: Services :: Supply Chain diff --git a/services/support/pom.xml b/services/support/pom.xml index 494228a5177b..fb681cf7935d 100644 --- a/services/support/pom.xml +++ b/services/support/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT support AWS Java SDK :: Services :: AWS Support diff --git a/services/supportapp/pom.xml b/services/supportapp/pom.xml index 60c17853a83a..99864c06ae80 100644 --- a/services/supportapp/pom.xml +++ b/services/supportapp/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT supportapp AWS Java SDK :: Services :: Support App diff --git a/services/supportapp/src/main/resources/codegen-resources/customization.config b/services/supportapp/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/supportapp/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/swf/pom.xml b/services/swf/pom.xml index a23a735e6b07..0ea1e288d4b9 100644 --- a/services/swf/pom.xml +++ b/services/swf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT swf AWS Java SDK :: Services :: Amazon SWF diff --git a/services/synthetics/pom.xml b/services/synthetics/pom.xml index 4f02c0c5b832..b6bc30f3e9e5 100644 --- 
a/services/synthetics/pom.xml +++ b/services/synthetics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT synthetics AWS Java SDK :: Services :: Synthetics diff --git a/services/synthetics/src/main/resources/codegen-resources/customization.config b/services/synthetics/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/synthetics/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/textract/pom.xml b/services/textract/pom.xml index 54c924b4395a..271ea4f7acb8 100644 --- a/services/textract/pom.xml +++ b/services/textract/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT textract AWS Java SDK :: Services :: Textract diff --git a/services/timestreamquery/pom.xml b/services/timestreamquery/pom.xml index e983016c33c7..66867b22462a 100644 --- a/services/timestreamquery/pom.xml +++ b/services/timestreamquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT timestreamquery AWS Java SDK :: Services :: Timestream Query diff --git a/services/timestreamwrite/pom.xml b/services/timestreamwrite/pom.xml index 86c17155afe8..ae2038d7a200 100644 --- a/services/timestreamwrite/pom.xml +++ b/services/timestreamwrite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT timestreamwrite AWS Java SDK :: Services :: Timestream Write diff --git a/services/timestreamwrite/src/main/resources/codegen-resources/customization.config b/services/timestreamwrite/src/main/resources/codegen-resources/customization.config index 8175efc28858..d9a3bc76cacf 100644 --- a/services/timestreamwrite/src/main/resources/codegen-resources/customization.config +++ b/services/timestreamwrite/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,4 @@ { - 
"allowEndpointOverrideForEndpointDiscoveryRequiredOperations": true -} \ No newline at end of file + "allowEndpointOverrideForEndpointDiscoveryRequiredOperations": true, + "useSraAuth": true +} diff --git a/services/tnb/pom.xml b/services/tnb/pom.xml index 27b627117001..79c06a96ea1b 100644 --- a/services/tnb/pom.xml +++ b/services/tnb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT tnb AWS Java SDK :: Services :: Tnb diff --git a/services/transcribe/pom.xml b/services/transcribe/pom.xml index 091a8c0a1c76..c18f863bbd15 100644 --- a/services/transcribe/pom.xml +++ b/services/transcribe/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT transcribe AWS Java SDK :: Services :: Transcribe diff --git a/services/transcribestreaming/pom.xml b/services/transcribestreaming/pom.xml index 60f3167634b9..b027467e792e 100644 --- a/services/transcribestreaming/pom.xml +++ b/services/transcribestreaming/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT transcribestreaming AWS Java SDK :: Services :: AWS Transcribe Streaming diff --git a/services/transfer/pom.xml b/services/transfer/pom.xml index e355072abfb4..d02cb06ccf90 100644 --- a/services/transfer/pom.xml +++ b/services/transfer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT transfer AWS Java SDK :: Services :: Transfer diff --git a/services/transfer/src/main/resources/codegen-resources/customization.config b/services/transfer/src/main/resources/codegen-resources/customization.config index 5433b8e01c77..b82814760a08 100644 --- a/services/transfer/src/main/resources/codegen-resources/customization.config +++ b/services/transfer/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,6 @@ { - "verifiedSimpleMethods" : [ + "verifiedSimpleMethods": [ "listServers" - ] + ], + "useSraAuth": true } diff --git 
a/services/translate/pom.xml b/services/translate/pom.xml index 758286929d55..62342d428bf8 100644 --- a/services/translate/pom.xml +++ b/services/translate/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 translate diff --git a/services/translate/src/main/resources/codegen-resources/customization.config b/services/translate/src/main/resources/codegen-resources/customization.config index ecd583dc0b3b..ce448c40a06d 100644 --- a/services/translate/src/main/resources/codegen-resources/customization.config +++ b/services/translate/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,6 @@ { - "verifiedSimpleMethods" : [ + "verifiedSimpleMethods": [ "listTerminologies" - ] + ], + "useSraAuth": true } diff --git a/services/trustedadvisor/pom.xml b/services/trustedadvisor/pom.xml index 606ce4a23856..19eca50ba7fb 100644 --- a/services/trustedadvisor/pom.xml +++ b/services/trustedadvisor/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT trustedadvisor AWS Java SDK :: Services :: Trusted Advisor diff --git a/services/verifiedpermissions/pom.xml b/services/verifiedpermissions/pom.xml index 07a4a5f16366..738b6b135858 100644 --- a/services/verifiedpermissions/pom.xml +++ b/services/verifiedpermissions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT verifiedpermissions AWS Java SDK :: Services :: Verified Permissions diff --git a/services/verifiedpermissions/src/main/resources/codegen-resources/customization.config b/services/verifiedpermissions/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/verifiedpermissions/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/voiceid/pom.xml b/services/voiceid/pom.xml index e6e93e1fc193..8ce3b19738dd 100644 --- 
a/services/voiceid/pom.xml +++ b/services/voiceid/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT voiceid AWS Java SDK :: Services :: Voice ID diff --git a/services/voiceid/src/main/resources/codegen-resources/customization.config b/services/voiceid/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/voiceid/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/vpclattice/pom.xml b/services/vpclattice/pom.xml index 19ccba3e35ef..75fbaba3f0d8 100644 --- a/services/vpclattice/pom.xml +++ b/services/vpclattice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT vpclattice AWS Java SDK :: Services :: VPC Lattice diff --git a/services/vpclattice/src/main/resources/codegen-resources/customization.config b/services/vpclattice/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/vpclattice/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/waf/pom.xml b/services/waf/pom.xml index 9735091dba6d..2f3aa553a5c1 100644 --- a/services/waf/pom.xml +++ b/services/waf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT waf AWS Java SDK :: Services :: AWS WAF diff --git a/services/wafv2/pom.xml b/services/wafv2/pom.xml index 02e3dbf69aa7..64959e88ce2f 100644 --- a/services/wafv2/pom.xml +++ b/services/wafv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT wafv2 AWS Java SDK :: Services :: WAFV2 diff --git a/services/wafv2/src/main/resources/codegen-resources/customization.config b/services/wafv2/src/main/resources/codegen-resources/customization.config new file mode 100644 index 
000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/wafv2/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/wellarchitected/pom.xml b/services/wellarchitected/pom.xml index 82b30094ab13..cdd6329f58b7 100644 --- a/services/wellarchitected/pom.xml +++ b/services/wellarchitected/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT wellarchitected AWS Java SDK :: Services :: Well Architected diff --git a/services/wisdom/pom.xml b/services/wisdom/pom.xml index f7baa0fb0ebd..13f09e7a2723 100644 --- a/services/wisdom/pom.xml +++ b/services/wisdom/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT wisdom AWS Java SDK :: Services :: Wisdom diff --git a/services/workdocs/pom.xml b/services/workdocs/pom.xml index 6b50d9be09b2..8eedb71e3f61 100644 --- a/services/workdocs/pom.xml +++ b/services/workdocs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT workdocs AWS Java SDK :: Services :: Amazon WorkDocs diff --git a/services/workdocs/src/main/resources/codegen-resources/customization.config b/services/workdocs/src/main/resources/codegen-resources/customization.config index 304bdf6f931b..c755e0bdcdc5 100644 --- a/services/workdocs/src/main/resources/codegen-resources/customization.config +++ b/services/workdocs/src/main/resources/codegen-resources/customization.config @@ -1,7 +1,8 @@ { - "excludedSimpleMethods" : [ - "describeUsers", - "describeActivities", - "getResources" - ] + "excludedSimpleMethods": [ + "describeUsers", + "describeActivities", + "getResources" + ], + "useSraAuth": true } diff --git a/services/worklink/pom.xml b/services/worklink/pom.xml index c809206b4bd7..718ee8aeb88c 100644 --- a/services/worklink/pom.xml +++ b/services/worklink/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT worklink AWS Java SDK :: 
Services :: WorkLink diff --git a/services/workmail/pom.xml b/services/workmail/pom.xml index 3429fd15529f..d724decb0c5e 100644 --- a/services/workmail/pom.xml +++ b/services/workmail/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 workmail diff --git a/services/workmail/src/main/resources/codegen-resources/customization.config b/services/workmail/src/main/resources/codegen-resources/customization.config index 853d06179ea7..08b33a57f0e5 100644 --- a/services/workmail/src/main/resources/codegen-resources/customization.config +++ b/services/workmail/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,6 @@ { - "verifiedSimpleMethods" : [ + "verifiedSimpleMethods": [ "listOrganizations" - ] + ], + "useSraAuth": true } diff --git a/services/workmailmessageflow/pom.xml b/services/workmailmessageflow/pom.xml index 7c1e465f7a01..c7f59f54a138 100644 --- a/services/workmailmessageflow/pom.xml +++ b/services/workmailmessageflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT workmailmessageflow AWS Java SDK :: Services :: WorkMailMessageFlow diff --git a/services/workmailmessageflow/src/main/resources/codegen-resources/customization.config b/services/workmailmessageflow/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..fb50d0a7a6eb --- /dev/null +++ b/services/workmailmessageflow/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "useSraAuth": true +} diff --git a/services/workspaces/pom.xml b/services/workspaces/pom.xml index 44492794dcbe..12c6fd21b051 100644 --- a/services/workspaces/pom.xml +++ b/services/workspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT workspaces AWS Java SDK :: Services :: Amazon WorkSpaces diff --git a/services/workspaces/src/main/resources/codegen-resources/service-2.json 
b/services/workspaces/src/main/resources/codegen-resources/service-2.json index 61c9ea256631..e6522046c81a 100644 --- a/services/workspaces/src/main/resources/codegen-resources/service-2.json +++ b/services/workspaces/src/main/resources/codegen-resources/service-2.json @@ -3074,6 +3074,10 @@ "NextToken":{ "shape":"PaginationToken", "documentation":"

    If you received a NextToken from a previous call that was paginated, provide this token to receive the next set of results.

    " + }, + "WorkspaceName":{ + "shape":"WorkspaceName", + "documentation":"

    The name of the user-decoupled WorkSpace.

    " } } }, @@ -4955,6 +4959,10 @@ "shape":"BooleanObject", "documentation":"

    Indicates whether the data stored on the root volume is encrypted.

    " }, + "WorkspaceName":{ + "shape":"WorkspaceName", + "documentation":"

    The name of the user-decoupled WorkSpace.

    " + }, "WorkspaceProperties":{ "shape":"WorkspaceProperties", "documentation":"

    The properties of the WorkSpace.

    " @@ -5395,6 +5403,10 @@ "type":"list", "member":{"shape":"Workspace"} }, + "WorkspaceName":{ + "type":"string", + "pattern":"^[a-zA-Z0-9_()][a-zA-Z0-9_.()-]{1,63}$" + }, "WorkspaceProperties":{ "type":"structure", "members":{ @@ -5443,7 +5455,7 @@ }, "UserName":{ "shape":"UserName", - "documentation":"

    The user name of the user for the WorkSpace. This user name must exist in the Directory Service directory for the WorkSpace.

    " + "documentation":"

    The user name of the user for the WorkSpace. This user name must exist in the Directory Service directory for the WorkSpace.

    The reserved keyword, [UNDEFINED], is used when creating user-decoupled WorkSpaces.

    " }, "BundleId":{ "shape":"BundleId", @@ -5468,6 +5480,10 @@ "Tags":{ "shape":"TagList", "documentation":"

    The tags for the WorkSpace.

    " + }, + "WorkspaceName":{ + "shape":"WorkspaceName", + "documentation":"

    The name of the user-decoupled WorkSpace.

    " } }, "documentation":"

    Describes the information used to create a WorkSpace.

    " diff --git a/services/workspacesthinclient/pom.xml b/services/workspacesthinclient/pom.xml index c0c7ff1c1998..01355fa9e3aa 100644 --- a/services/workspacesthinclient/pom.xml +++ b/services/workspacesthinclient/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT workspacesthinclient AWS Java SDK :: Services :: Work Spaces Thin Client diff --git a/services/workspacesweb/pom.xml b/services/workspacesweb/pom.xml index ea59c8b2e78a..061deda0f2ff 100644 --- a/services/workspacesweb/pom.xml +++ b/services/workspacesweb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT workspacesweb AWS Java SDK :: Services :: Work Spaces Web diff --git a/services/xray/pom.xml b/services/xray/pom.xml index c89e400579ca..36ee45fb221d 100644 --- a/services/xray/pom.xml +++ b/services/xray/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT xray AWS Java SDK :: Services :: AWS X-Ray diff --git a/test/auth-tests/pom.xml b/test/auth-tests/pom.xml index 8b96f758697d..cf63013fa9d9 100644 --- a/test/auth-tests/pom.xml +++ b/test/auth-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/bundle-logging-bridge-binding-test/pom.xml b/test/bundle-logging-bridge-binding-test/pom.xml index 27a78a5e1638..e1edf2417df6 100644 --- a/test/bundle-logging-bridge-binding-test/pom.xml +++ b/test/bundle-logging-bridge-binding-test/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/codegen-generated-classes-test/pom.xml b/test/codegen-generated-classes-test/pom.xml index 181827f814bc..5fe13a7fce8c 100644 --- a/test/codegen-generated-classes-test/pom.xml +++ b/test/codegen-generated-classes-test/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 
2.24.10-SNAPSHOT ../../pom.xml @@ -235,6 +235,12 @@ ${awsjavasdk.version} test
    + + software.amazon.awssdk + netty-nio-client + ${awsjavasdk.version} + test + io.reactivex.rxjava2 rxjava @@ -305,6 +311,17 @@ true + + + org.apache.maven.plugins + maven-dependency-plugin + ${maven-dependency-plugin.version} + + false + false + + + diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/AuthSchemeInterceptorTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/AuthSchemeInterceptorTest.java new file mode 100644 index 000000000000..e187ab4435d5 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/AuthSchemeInterceptorTest.java @@ -0,0 +1,108 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.core.SelectedAuthScheme; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.http.auth.aws.scheme.AwsV4AuthScheme; +import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme; +import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; +import software.amazon.awssdk.http.auth.spi.signer.HttpSigner; +import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; +import software.amazon.awssdk.identity.spi.IdentityProvider; +import software.amazon.awssdk.identity.spi.IdentityProviders; +import software.amazon.awssdk.services.protocolrestjson.auth.scheme.ProtocolRestJsonAuthSchemeParams; +import software.amazon.awssdk.services.protocolrestjson.auth.scheme.ProtocolRestJsonAuthSchemeProvider; +import software.amazon.awssdk.services.protocolrestjson.auth.scheme.internal.ProtocolRestJsonAuthSchemeInterceptor; + +public class AuthSchemeInterceptorTest { + private static final ProtocolRestJsonAuthSchemeInterceptor INTERCEPTOR = new ProtocolRestJsonAuthSchemeInterceptor(); + + private Context.BeforeExecution mockContext; + + @BeforeEach + public void setup() { + mockContext = 
mock(Context.BeforeExecution.class); + } + + @Test + public void resolveAuthScheme_authSchemeSignerThrows_continuesToNextAuthScheme() { + ProtocolRestJsonAuthSchemeProvider mockAuthSchemeProvider = mock(ProtocolRestJsonAuthSchemeProvider.class); + List authSchemeOptions = Arrays.asList( + AuthSchemeOption.builder().schemeId(TestAuthScheme.SCHEME_ID).build(), + AuthSchemeOption.builder().schemeId(AwsV4AuthScheme.SCHEME_ID).build() + ); + when(mockAuthSchemeProvider.resolveAuthScheme(any(ProtocolRestJsonAuthSchemeParams.class))).thenReturn(authSchemeOptions); + + IdentityProviders mockIdentityProviders = mock(IdentityProviders.class); + when(mockIdentityProviders.identityProvider(any(Class.class))).thenReturn(AnonymousCredentialsProvider.create()); + + Map> authSchemes = new HashMap<>(); + authSchemes.put(AwsV4AuthScheme.SCHEME_ID, AwsV4AuthScheme.create()); + + TestAuthScheme notProvidedAuthScheme = spy(new TestAuthScheme()); + authSchemes.put(TestAuthScheme.SCHEME_ID, notProvidedAuthScheme); + + ExecutionAttributes attributes = new ExecutionAttributes(); + attributes.putAttribute(SdkExecutionAttribute.OPERATION_NAME, "GetFoo"); + attributes.putAttribute(SdkInternalExecutionAttribute.AUTH_SCHEME_RESOLVER, mockAuthSchemeProvider); + attributes.putAttribute(SdkInternalExecutionAttribute.IDENTITY_PROVIDERS, mockIdentityProviders); + attributes.putAttribute(SdkInternalExecutionAttribute.AUTH_SCHEMES, authSchemes); + + INTERCEPTOR.beforeExecution(mockContext, attributes); + + SelectedAuthScheme selectedAuthScheme = attributes.getAttribute(SdkInternalExecutionAttribute.SELECTED_AUTH_SCHEME); + + verify(notProvidedAuthScheme).signer(); + assertThat(selectedAuthScheme.authSchemeOption().schemeId()).isEqualTo(AwsV4AuthScheme.SCHEME_ID); + } + + private static class TestAuthScheme implements AuthScheme { + public static final String SCHEME_ID = "codegen-test-scheme"; + + @Override + public String schemeId() { + return SCHEME_ID; + } + + @Override + public IdentityProvider 
identityProvider(IdentityProviders providers) { + return providers.identityProvider(AwsCredentialsIdentity.class); + } + + @Override + public HttpSigner signer() { + throw new RuntimeException("Not on classpath"); + } + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/BlockingAsyncRequestResponseBodyResourceManagementTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/BlockingAsyncRequestResponseBodyResourceManagementTest.java new file mode 100644 index 000000000000..7818c3140b2e --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/BlockingAsyncRequestResponseBodyResourceManagementTest.java @@ -0,0 +1,206 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services; + +import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; +import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_TYPE; +import static io.netty.handler.codec.http.HttpHeaderValues.TEXT_PLAIN; +import static io.netty.handler.codec.http.HttpResponseStatus.OK; +import static org.assertj.core.api.Assertions.assertThat; + +import io.netty.bootstrap.ServerBootstrap; +import io.netty.buffer.Unpooled; +import io.netty.channel.Channel; +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.ServerSocketChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.DefaultHttpResponse; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpServerCodec; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.logging.LogLevel; +import io.netty.handler.logging.LoggingHandler; +import java.io.IOException; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Consumer; +import org.apache.commons.lang3.RandomStringUtils; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import 
software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; +import software.amazon.awssdk.core.retry.RetryPolicy; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; +import software.amazon.awssdk.services.protocolrestjson.model.StreamingOutputOperationRequest; +import software.amazon.awssdk.services.protocolrestjson.model.StreamingOutputOperationResponse; + +@Timeout(10) +public class BlockingAsyncRequestResponseBodyResourceManagementTest { + private ProtocolRestJsonAsyncClient client; + private Server server; + + + @AfterEach + void tearDownPerTest() throws InterruptedException { + server.shutdown(); + server = null; + client.close();; + + } + + @BeforeEach + void setUpPerTest() throws Exception { + server = new Server(); + server.init(); + + client = ProtocolRestJsonAsyncClient.builder() + .region(Region.US_WEST_2) + .credentialsProvider(AnonymousCredentialsProvider.create()) + .endpointOverride(URI.create("http://localhost:" + server.port())) + .overrideConfiguration(o -> o.retryPolicy(RetryPolicy.none())) + .httpClientBuilder(NettyNioAsyncHttpClient.builder().putChannelOption(ChannelOption.SO_RCVBUF, 8)) + .build(); + } + + + @Test + void blockingResponseTransformer_abort_shouldCloseUnderlyingConnection() throws IOException { + verifyConnection(r -> r.abort()); + } + + @Test + void blockingResponseTransformer_close_shouldCloseUnderlyingConnection() throws IOException { + Consumer> closeInputStream = closeInputStraem(); + verifyConnection(closeInputStream); + } + + + private static Consumer> closeInputStraem() { + Consumer> closeInputStream = r -> { + try { + r.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + }; + return closeInputStream; + } + + + void verifyConnection(Consumer> consumer) throws IOException { + + 
CompletableFuture> responseFuture = + client.streamingOutputOperation(StreamingOutputOperationRequest.builder().build(), + AsyncResponseTransformer.toBlockingInputStream()); + ResponseInputStream responseStream = responseFuture.join(); + + + consumer.accept(responseStream); + + try { + client.headOperation().join(); + } catch (Exception exception) { + // Doesn't matter if the request succeeds or not + } + + // Total of 2 connections got established. + assertThat(server.channels.size()).isEqualTo(2); + } + + private static class Server extends ChannelInitializer { + private static final byte[] CONTENT = ("{ " + + "\"foo\": " + RandomStringUtils.randomAscii(1024 * 1024 * 10) + + "}").getBytes(StandardCharsets.UTF_8); + private ServerBootstrap bootstrap; + private ServerSocketChannel serverSock; + private Set channels = ConcurrentHashMap.newKeySet(); + private final NioEventLoopGroup group = new NioEventLoopGroup(3); + + public void init() throws Exception { + bootstrap = new ServerBootstrap() + .channel(NioServerSocketChannel.class) + .group(group) + .childHandler(this); + + serverSock = (ServerSocketChannel) bootstrap.bind(0).sync().channel(); + } + + public void shutdown() throws InterruptedException { + group.shutdownGracefully().await(); + serverSock.close(); + } + + public int port() { + return serverSock.localAddress().getPort(); + } + + @Override + protected void initChannel(Channel ch) { + channels.add(ch); + ChannelPipeline pipeline = ch.pipeline(); + pipeline.addLast(new HttpServerCodec()); + pipeline.addLast(new BehaviorTestChannelHandler()); + pipeline.addLast(new LoggingHandler(LogLevel.INFO)); + } + + private class BehaviorTestChannelHandler extends ChannelDuplexHandler { + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) { + + if (!(msg instanceof HttpRequest)) { + return; + } + + HttpMethod method = ((HttpRequest) msg).method(); + + if (Objects.equals(method, HttpMethod.HEAD)) { + DefaultHttpResponse response = new 
DefaultHttpResponse(HttpVersion.HTTP_1_1, OK); + ctx.writeAndFlush(response); + return; + } + + FullHttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, OK, + Unpooled.wrappedBuffer(CONTENT)); + + response.headers() + .set(CONTENT_TYPE, TEXT_PLAIN) + .setInt(CONTENT_LENGTH, response.content().readableBytes()); + + ctx.writeAndFlush(response); + } + } + } +} diff --git a/test/crt-unavailable-tests/pom.xml b/test/crt-unavailable-tests/pom.xml new file mode 100644 index 000000000000..c5d467f06bcc --- /dev/null +++ b/test/crt-unavailable-tests/pom.xml @@ -0,0 +1,92 @@ + + + + + + aws-sdk-java-pom + software.amazon.awssdk + 2.24.10-SNAPSHOT + ../../pom.xml + + 4.0.0 + + crt-unavailable-tests + AWS Java SDK :: Test :: Crt Unavailable Tests + Test package for testing components that use CRT when CRT is not on the classpath. + + + + software.amazon.awssdk + bom-internal + ${project.version} + pom + import + + + + + + + software.amazon.awssdk + http-auth-aws + ${awsjavasdk.version} + test + + + org.assertj + assertj-core + test + + + org.hamcrest + hamcrest-all + test + + + org.junit.jupiter + junit-jupiter + test + + + org.junit.vintage + junit-vintage-engine + test + + + com.github.tomakehurst + wiremock-jre8 + test + + + org.apache.logging.log4j + log4j-api + test + + + org.apache.logging.log4j + log4j-core + test + + + org.apache.logging.log4j + log4j-slf4j-impl + test + + + + diff --git a/test/crt-unavailable-tests/src/test/java/software/amazon/awssdk/auth/aws/AwsV4aAuthSchemeTest.java b/test/crt-unavailable-tests/src/test/java/software/amazon/awssdk/auth/aws/AwsV4aAuthSchemeTest.java new file mode 100644 index 000000000000..ba407ce64bc4 --- /dev/null +++ b/test/crt-unavailable-tests/src/test/java/software/amazon/awssdk/auth/aws/AwsV4aAuthSchemeTest.java @@ -0,0 +1,28 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.auth.aws; + +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.http.auth.aws.scheme.AwsV4aAuthScheme; + +public class AwsV4aAuthSchemeTest { + @Test + public void signer_throwsRuntimeException() { + assertThatThrownBy(AwsV4aAuthScheme.create()::signer).hasMessageContaining("Could not load class"); + } +} diff --git a/test/http-client-tests/pom.xml b/test/http-client-tests/pom.xml index 5985eead9970..9f6e2fa3447c 100644 --- a/test/http-client-tests/pom.xml +++ b/test/http-client-tests/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../../pom.xml http-client-tests diff --git a/test/module-path-tests/pom.xml b/test/module-path-tests/pom.xml index e6b63d13dda7..e21f4ebff659 100644 --- a/test/module-path-tests/pom.xml +++ b/test/module-path-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/old-client-version-compatibility-test/pom.xml b/test/old-client-version-compatibility-test/pom.xml index a1a83d917fd0..47aff7593aec 100644 --- a/test/old-client-version-compatibility-test/pom.xml +++ b/test/old-client-version-compatibility-test/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../../pom.xml diff --git a/test/protocol-tests-core/pom.xml b/test/protocol-tests-core/pom.xml index bc0285855aac..50da88ff415f 100644 --- 
a/test/protocol-tests-core/pom.xml +++ b/test/protocol-tests-core/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-core-input.json b/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-core-input.json index 4c58440beb23..8528b322c376 100644 --- a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-core-input.json +++ b/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-core-input.json @@ -442,6 +442,11 @@ }, "then": { "serializedAs": { + "headers": { + "contains": { + "content-length": "8" + } + }, "body": { "equals": "contents" } diff --git a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-json-input.json b/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-json-input.json index fdf6882d7306..de141ecdbfa3 100644 --- a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-json-input.json +++ b/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-json-input.json @@ -52,6 +52,11 @@ }, "then": { "serializedAs": { + "headers": { + "contains": { + "Content-length": "22" + } + }, "body": { "jsonEquals": "{\"StringMember\": \"foo\"}" } diff --git a/test/protocol-tests/pom.xml b/test/protocol-tests/pom.xml index 01599586ccf0..c34751cee31f 100644 --- a/test/protocol-tests/pom.xml +++ b/test/protocol-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests/src/main/resources/codegen-resources/restjson/contenttype/service-2.json 
b/test/protocol-tests/src/main/resources/codegen-resources/restjson/contenttype/service-2.json index 65d3bc977488..04415d6a3f00 100644 --- a/test/protocol-tests/src/main/resources/codegen-resources/restjson/contenttype/service-2.json +++ b/test/protocol-tests/src/main/resources/codegen-resources/restjson/contenttype/service-2.json @@ -223,6 +223,9 @@ "HeadersOnlyEvent": { "shape": "HeadersOnlyEvent" }, + "EndEvent": { + "shape": "EndEvent" + }, "ImplicitPayloadAndHeadersEvent": { "shape": "ImplicitPayloadAndHeadersEvent" } @@ -280,6 +283,12 @@ }, "event": true }, + "EndEvent":{ + "type":"structure", + "members":{ + }, + "event":true + }, "BlobPayloadMember":{"type":"blob"}, "EventStream": { "type": "structure", diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/RestJsonEventStreamProtocolTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/RestJsonEventStreamProtocolTest.java index 53a19f98f5ae..0b2ddebb80e8 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/RestJsonEventStreamProtocolTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/RestJsonEventStreamProtocolTest.java @@ -15,10 +15,22 @@ package software.amazon.awssdk.protocol.tests; +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.any; +import static com.github.tomakehurst.wiremock.client.WireMock.anyUrl; +import static com.github.tomakehurst.wiremock.client.WireMock.equalTo; +import static com.github.tomakehurst.wiremock.client.WireMock.postRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.verify; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.in; +import com.github.tomakehurst.wiremock.junit5.WireMockRuntimeInfo; +import 
com.github.tomakehurst.wiremock.junit5.WireMockTest; +import io.reactivex.Flowable; import java.net.URI; import java.nio.charset.StandardCharsets; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import software.amazon.awssdk.core.SdkBytes; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; @@ -27,6 +39,7 @@ import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.protocols.json.AwsJsonProtocol; import software.amazon.awssdk.protocols.json.AwsJsonProtocolFactory; +import software.amazon.awssdk.services.protocolrestjsoncontenttype.ProtocolRestJsonContentTypeAsyncClient; import software.amazon.awssdk.services.protocolrestjsoncontenttype.model.BlobAndHeadersEvent; import software.amazon.awssdk.services.protocolrestjsoncontenttype.model.HeadersOnlyEvent; import software.amazon.awssdk.services.protocolrestjsoncontenttype.model.ImplicitPayloadAndHeadersEvent; @@ -38,9 +51,19 @@ import software.amazon.awssdk.services.protocolrestjsoncontenttype.transform.ImplicitPayloadAndHeadersEventMarshaller; import software.amazon.awssdk.services.protocolrestjsoncontenttype.transform.StringAndHeadersEventMarshaller; +@WireMockTest public class RestJsonEventStreamProtocolTest { private static final String EVENT_CONTENT_TYPE_HEADER = ":content-type"; + private ProtocolRestJsonContentTypeAsyncClient client; + + @BeforeEach + void setup(WireMockRuntimeInfo info) { + client = ProtocolRestJsonContentTypeAsyncClient.builder() + .endpointOverride(URI.create("http://localhost:" + info.getHttpPort())) + .build(); + } + @Test public void implicitPayloadAndHeaders_payloadMemberPresent() { ImplicitPayloadAndHeadersEventMarshaller marshaller = new ImplicitPayloadAndHeadersEventMarshaller(protocolFactory()); @@ -91,6 +114,18 @@ public void blobAndHeadersEvent() { assertThat(content).isEqualTo("hello rest-json"); } + @Test + public void containsEmptyEvent_shouldEncodeSuccessfully() { + 
stubFor(any(anyUrl()).willReturn(aResponse().withStatus(200))); + client.testEventStream(b -> { + }, Flowable.fromArray(InputEventStream.stringAndHeadersEventBuilder().stringPayloadMember( + "test").build(), + InputEventStream.endEventBuilder().build())).join(); + + verify(postRequestedFor(anyUrl()) + .withHeader("Content-Type", equalTo("application/vnd.amazon.eventstream"))); + } + @Test public void stringAndHeadersEvent() { StringAndHeadersEventMarshaller marshaller = new StringAndHeadersEventMarshaller(protocolFactory()); diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/contentlength/MarshallersAddContentLengthTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/contentlength/MarshallersAddContentLengthTest.java new file mode 100644 index 000000000000..8e8b44208ee3 --- /dev/null +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/contentlength/MarshallersAddContentLengthTest.java @@ -0,0 +1,136 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.protocol.tests.contentlength; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.anyUrl; +import static com.github.tomakehurst.wiremock.client.WireMock.equalTo; +import static com.github.tomakehurst.wiremock.client.WireMock.post; +import static com.github.tomakehurst.wiremock.client.WireMock.postRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.verify; +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; +import static software.amazon.awssdk.http.Header.CONTENT_LENGTH; + +import com.github.tomakehurst.wiremock.junit5.WireMockRuntimeInfo; +import com.github.tomakehurst.wiremock.junit5.WireMockTest; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.crt.AwsCrtHttpClient; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.services.protocolrestjson.model.OperationWithExplicitPayloadStructureResponse; +import software.amazon.awssdk.services.protocolrestjson.model.SimpleStruct; +import software.amazon.awssdk.services.protocolrestxml.ProtocolRestXmlClient; +import software.amazon.awssdk.services.protocolrestxml.model.OperationWithExplicitPayloadStringResponse; + +@WireMockTest +public class MarshallersAddContentLengthTest { + public static final String STRING_PAYLOAD = "TEST_STRING_PAYLOAD"; + + @Test + void jsonMarshallers_AddContentLength_for_explicitBinaryPayload(WireMockRuntimeInfo wireMock) { + 
stubSuccessfulResponse(); + CaptureRequestInterceptor captureRequestInterceptor = new CaptureRequestInterceptor(); + ProtocolRestJsonClient client = ProtocolRestJsonClient.builder() + .httpClient(AwsCrtHttpClient.builder().build()) + .overrideConfiguration(o -> o.addExecutionInterceptor(captureRequestInterceptor)) + .endpointOverride(URI.create("http://localhost:" + wireMock.getHttpPort())) + .build(); + client.operationWithExplicitPayloadBlob(p -> p.payloadMember(SdkBytes.fromString(STRING_PAYLOAD, + StandardCharsets.UTF_8))); + verify(postRequestedFor(anyUrl()).withHeader(CONTENT_LENGTH, equalTo(String.valueOf(STRING_PAYLOAD.length())))); + assertThat(captureRequestInterceptor.requestAfterMarshalling().firstMatchingHeader(CONTENT_LENGTH)) + .contains(String.valueOf(STRING_PAYLOAD.length())); + } + + @Test + void jsonMarshallers_AddContentLength_for_explicitStringPayload(WireMockRuntimeInfo wireMock) { + stubSuccessfulResponse(); + String expectedPayload = String.format("{\"StringMember\":\"%s\"}", STRING_PAYLOAD); + CaptureRequestInterceptor captureRequestInterceptor = new CaptureRequestInterceptor(); + ProtocolRestJsonClient client = ProtocolRestJsonClient.builder() + .httpClient(AwsCrtHttpClient.builder().build()) + .overrideConfiguration(o -> o.addExecutionInterceptor(captureRequestInterceptor)) + .endpointOverride(URI.create("http://localhost:" + wireMock.getHttpPort())) + .build(); + OperationWithExplicitPayloadStructureResponse response = + client.operationWithExplicitPayloadStructure(p -> p.payloadMember(SimpleStruct.builder().stringMember(STRING_PAYLOAD).build())); + verify(postRequestedFor(anyUrl()) + .withRequestBody(equalTo(expectedPayload)) + .withHeader(CONTENT_LENGTH, equalTo(String.valueOf(expectedPayload.length())))); + assertThat(captureRequestInterceptor.requestAfterMarshalling().firstMatchingHeader(CONTENT_LENGTH)) + .contains(String.valueOf(expectedPayload.length())); + } + + @Test + void 
xmlMarshallers_AddContentLength_for_explicitBinaryPayload(WireMockRuntimeInfo wireMock) { + stubSuccessfulResponse(); + CaptureRequestInterceptor captureRequestInterceptor = new CaptureRequestInterceptor(); + ProtocolRestXmlClient client = ProtocolRestXmlClient.builder() + .httpClient(AwsCrtHttpClient.builder().build()) + .overrideConfiguration(o -> o.addExecutionInterceptor(captureRequestInterceptor)) + .endpointOverride(URI.create("http://localhost:" + wireMock.getHttpPort())) + .build(); + client.operationWithExplicitPayloadBlob(r -> r.payloadMember(SdkBytes.fromString(STRING_PAYLOAD, + StandardCharsets.UTF_8))); + verify(postRequestedFor(anyUrl()).withRequestBody(equalTo(STRING_PAYLOAD)) + .withHeader(CONTENT_LENGTH, equalTo(String.valueOf(STRING_PAYLOAD.length())))); + assertThat(captureRequestInterceptor.requestAfterMarshalling().firstMatchingHeader(CONTENT_LENGTH)) + .contains(String.valueOf(STRING_PAYLOAD.length())); + } + + @Test + void xmlMarshallers_AddContentLength_for_explicitStringPayload(WireMockRuntimeInfo wireMock) { + stubSuccessfulResponse(); + String expectedPayload = STRING_PAYLOAD; + CaptureRequestInterceptor captureRequestInterceptor = new CaptureRequestInterceptor(); + ProtocolRestXmlClient client = ProtocolRestXmlClient.builder() + .httpClient(AwsCrtHttpClient.builder().build()) + .overrideConfiguration(o -> o.addExecutionInterceptor(captureRequestInterceptor)) + .endpointOverride(URI.create("http://localhost:" + wireMock.getHttpPort())) + .build(); + OperationWithExplicitPayloadStringResponse stringResponse = + client.operationWithExplicitPayloadString(p -> p.payloadMember(STRING_PAYLOAD)); + verify(postRequestedFor(anyUrl()) + .withRequestBody(equalTo(expectedPayload)) + .withHeader(CONTENT_LENGTH, equalTo(String.valueOf(expectedPayload.length())))); + assertThat(captureRequestInterceptor.requestAfterMarshalling().firstMatchingHeader(CONTENT_LENGTH)) + .contains(String.valueOf(expectedPayload.length())); + } + + private void 
stubSuccessfulResponse() { + stubFor(post(anyUrl()).willReturn(aResponse().withStatus(200))); + } + + private static class CaptureRequestInterceptor implements ExecutionInterceptor { + private SdkHttpRequest requestAfterMarshilling; + + public SdkHttpRequest requestAfterMarshalling() { + return requestAfterMarshilling; + } + + @Override + public void afterMarshalling(Context.AfterMarshalling context, ExecutionAttributes executionAttributes) { + this.requestAfterMarshilling = context.httpRequest(); + } + } +} diff --git a/test/region-testing/pom.xml b/test/region-testing/pom.xml index 7269952b4b3e..b799455ad6f3 100644 --- a/test/region-testing/pom.xml +++ b/test/region-testing/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/ruleset-testing-core/pom.xml b/test/ruleset-testing-core/pom.xml index cba84b18f050..ff0dfea132a5 100644 --- a/test/ruleset-testing-core/pom.xml +++ b/test/ruleset-testing-core/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/s3-benchmarks/pom.xml b/test/s3-benchmarks/pom.xml index 80202bb964b3..986396055459 100644 --- a/test/s3-benchmarks/pom.xml +++ b/test/s3-benchmarks/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/sdk-benchmarks/pom.xml b/test/sdk-benchmarks/pom.xml index 66c5ca445f74..1024ac1a4e52 100644 --- a/test/sdk-benchmarks/pom.xml +++ b/test/sdk-benchmarks/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../../pom.xml diff --git a/test/sdk-native-image-test/pom.xml b/test/sdk-native-image-test/pom.xml index 0885ba174310..dfca14d1d4fc 100644 --- a/test/sdk-native-image-test/pom.xml +++ b/test/sdk-native-image-test/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 
2.24.10-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/service-test-utils/pom.xml b/test/service-test-utils/pom.xml index 59f494327d66..d00aad8aed49 100644 --- a/test/service-test-utils/pom.xml +++ b/test/service-test-utils/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../../pom.xml service-test-utils diff --git a/test/stability-tests/pom.xml b/test/stability-tests/pom.xml index c504ddccd891..403fb96263ab 100644 --- a/test/stability-tests/pom.xml +++ b/test/stability-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/test-utils/pom.xml b/test/test-utils/pom.xml index 3f50db06c45c..db1197d214b8 100644 --- a/test/test-utils/pom.xml +++ b/test/test-utils/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../../pom.xml test-utils diff --git a/test/tests-coverage-reporting/pom.xml b/test/tests-coverage-reporting/pom.xml index 60f668e2a34d..f07e002fcdb8 100644 --- a/test/tests-coverage-reporting/pom.xml +++ b/test/tests-coverage-reporting/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT ../../pom.xml 4.0.0 @@ -291,6 +291,11 @@ imds ${awsjavasdk.version} + + software.amazon.awssdk + http-client-spi + ${awsjavasdk.version} + diff --git a/third-party/pom.xml b/third-party/pom.xml index e06fb2507ada..cc51a5871efa 100644 --- a/third-party/pom.xml +++ b/third-party/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT third-party diff --git a/third-party/third-party-jackson-core/pom.xml b/third-party/third-party-jackson-core/pom.xml index 0b22a821e2b1..770ee192dc02 100644 --- a/third-party/third-party-jackson-core/pom.xml +++ b/third-party/third-party-jackson-core/pom.xml @@ -20,7 +20,7 @@ third-party software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 
diff --git a/third-party/third-party-jackson-dataformat-cbor/pom.xml b/third-party/third-party-jackson-dataformat-cbor/pom.xml index 156c452e0e9c..c86cb3586c83 100644 --- a/third-party/third-party-jackson-dataformat-cbor/pom.xml +++ b/third-party/third-party-jackson-dataformat-cbor/pom.xml @@ -20,7 +20,7 @@ third-party software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/third-party/third-party-slf4j-api/pom.xml b/third-party/third-party-slf4j-api/pom.xml index 1531486f4510..105288c62554 100644 --- a/third-party/third-party-slf4j-api/pom.xml +++ b/third-party/third-party-slf4j-api/pom.xml @@ -20,7 +20,7 @@ third-party software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0 diff --git a/utils/pom.xml b/utils/pom.xml index 9760767a44df..6626b7463a02 100644 --- a/utils/pom.xml +++ b/utils/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.23.21-SNAPSHOT + 2.24.10-SNAPSHOT 4.0.0