Skip to content

Commit

Permalink
Release Lambda FunctionRecursionConfig, enabling customers to turn recursive loop detection on or off on individual functions. This release adds two new APIs, GetFunctionRecursionConfig and PutFunctionRecursionConfig.
Browse files Browse the repository at this point in the history

Add new attributes to the outputs of GetApplication and GetDatabase APIs.
Amazon Bedrock Batch Inference/ Model Invocation is a feature which allows customers to asynchronously run inference on a large set of records/files stored in S3.
This release adds additional search fields and provides sorting by multiple fields.
AWS CodeBuild now supports creating fleets with macOS platform for running builds.
  • Loading branch information
aws-sdk-cpp-automation committed Aug 19, 2024
1 parent 670c1d7 commit 611a3c0
Show file tree
Hide file tree
Showing 486 changed files with 17,885 additions and 13,164 deletions.
2 changes: 1 addition & 1 deletion VERSION
Original file line number Diff line number Diff line change
@@ -1 +1 @@
1.11.386
1.11.387
117 changes: 117 additions & 0 deletions generated/src/aws-cpp-sdk-bedrock/include/aws/bedrock/BedrockClient.h
Original file line number Diff line number Diff line change
Expand Up @@ -246,6 +246,41 @@ namespace Bedrock
return SubmitAsync(&BedrockClient::CreateModelCustomizationJob, request, handler, context);
}

/**
 * <p>Creates a job to invoke a model on multiple prompts (batch inference). Format
 * your data according to <a
 * href="https://docs.aws.amazon.com/bedrock/latest/userguide/batch-inference-prerq.html#batch-inference-data">Format
 * your inference data</a> and upload it to an Amazon S3 bucket. For more
 * information, see <a
 * href="https://docs.aws.amazon.com/bedrock/latest/userguide/batch-inference-create.html">Create
 * a batch inference job</a>.</p> <p>The response returns a <code>jobArn</code>
 * that you can use to stop or get details about the job. You can check the status
 * of the job by sending a <a
 * href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_GetModelInvocationJob.html">GetModelInvocationJob</a>
 * request.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/bedrock-2023-04-20/CreateModelInvocationJob">AWS
 * API Reference</a></p>
 */
virtual Model::CreateModelInvocationJobOutcome CreateModelInvocationJob(const Model::CreateModelInvocationJobRequest& request) const;

/**
 * Callable variant of CreateModelInvocationJob: schedules the request on the
 * client's executor and immediately returns a future, so the operation can run
 * in parallel with other requests.
 */
template<typename CreateModelInvocationJobRequestT = Model::CreateModelInvocationJobRequest>
Model::CreateModelInvocationJobOutcomeCallable CreateModelInvocationJobCallable(const CreateModelInvocationJobRequestT& request) const
{
  // Delegate to the shared submission plumbing; the future resolves with the outcome.
  auto operation = &BedrockClient::CreateModelInvocationJob;
  return SubmitCallable(operation, request);
}

/**
 * Async variant of CreateModelInvocationJob: enqueues the request on the
 * client's executor and invokes the supplied handler (with the optional caller
 * context) once the operation completes.
 */
template<typename CreateModelInvocationJobRequestT = Model::CreateModelInvocationJobRequest>
void CreateModelInvocationJobAsync(const CreateModelInvocationJobRequestT& request, const CreateModelInvocationJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
{
  auto operation = &BedrockClient::CreateModelInvocationJob;
  return SubmitAsync(operation, request, handler, context);
}

/**
* <p>Creates dedicated throughput for a base or custom model with the model units
* and for the duration that you specify. For pricing details, see <a
Expand Down Expand Up @@ -562,6 +597,33 @@ namespace Bedrock
return SubmitAsync(&BedrockClient::GetModelCustomizationJob, request, handler, context);
}

/**
 * <p>Gets details about a batch inference job. For more information, see <a
 * href="https://docs.aws.amazon.com/bedrock/latest/userguide/batch-inference-manage.html#batch-inference-view">View
 * details about a batch inference job</a> </p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/bedrock-2023-04-20/GetModelInvocationJob">AWS
 * API Reference</a></p>
 */
// Synchronous form; Callable and Async wrappers for this operation follow below.
virtual Model::GetModelInvocationJobOutcome GetModelInvocationJob(const Model::GetModelInvocationJobRequest& request) const;

/**
 * Callable variant of GetModelInvocationJob: schedules the request on the
 * client's executor and immediately returns a future, so the operation can run
 * in parallel with other requests.
 */
template<typename GetModelInvocationJobRequestT = Model::GetModelInvocationJobRequest>
Model::GetModelInvocationJobOutcomeCallable GetModelInvocationJobCallable(const GetModelInvocationJobRequestT& request) const
{
  // Delegate to the shared submission plumbing; the future resolves with the outcome.
  auto operation = &BedrockClient::GetModelInvocationJob;
  return SubmitCallable(operation, request);
}

/**
* An Async wrapper for GetModelInvocationJob that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
template<typename GetModelInvocationJobRequestT = Model::GetModelInvocationJobRequest>
void GetModelInvocationJobAsync(const GetModelInvocationJobRequestT& request, const GetModelInvocationJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
{
return SubmitAsync(&BedrockClient::GetModelInvocationJob, request, handler, context);
}

/**
* <p>Get the current configuration values for model invocation
* logging.</p><p><h3>See Also:</h3> <a
Expand Down Expand Up @@ -797,6 +859,33 @@ namespace Bedrock
return SubmitAsync(&BedrockClient::ListModelCustomizationJobs, request, handler, context);
}

/**
 * <p>Lists all batch inference jobs in the account. For more information, see <a
 * href="https://docs.aws.amazon.com/bedrock/latest/userguide/batch-inference-manage.html#batch-inference-view">View
 * details about a batch inference job</a>.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/bedrock-2023-04-20/ListModelInvocationJobs">AWS
 * API Reference</a></p>
 */
// The request is defaulted, so this list operation may be called with no arguments.
virtual Model::ListModelInvocationJobsOutcome ListModelInvocationJobs(const Model::ListModelInvocationJobsRequest& request = {}) const;

/**
 * Callable variant of ListModelInvocationJobs: schedules the (optionally
 * defaulted) request on the client's executor and immediately returns a future,
 * so the operation can run in parallel with other requests.
 */
template<typename ListModelInvocationJobsRequestT = Model::ListModelInvocationJobsRequest>
Model::ListModelInvocationJobsOutcomeCallable ListModelInvocationJobsCallable(const ListModelInvocationJobsRequestT& request = {}) const
{
  // Delegate to the shared submission plumbing; the future resolves with the outcome.
  auto operation = &BedrockClient::ListModelInvocationJobs;
  return SubmitCallable(operation, request);
}

/**
 * Async variant of ListModelInvocationJobs: enqueues the request on the
 * client's executor and invokes the supplied handler once the operation
 * completes. Note the handler-first parameter order here: it allows the
 * request (and context) to be defaulted for this list operation.
 */
template<typename ListModelInvocationJobsRequestT = Model::ListModelInvocationJobsRequest>
void ListModelInvocationJobsAsync(const ListModelInvocationJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr, const ListModelInvocationJobsRequestT& request = {}) const
{
  auto operation = &BedrockClient::ListModelInvocationJobs;
  return SubmitAsync(operation, request, handler, context);
}

/**
* <p>Lists the Provisioned Throughputs in the account. For more information, see
* <a
Expand Down Expand Up @@ -937,6 +1026,34 @@ namespace Bedrock
return SubmitAsync(&BedrockClient::StopModelCustomizationJob, request, handler, context);
}

/**
 * <p>Stops a batch inference job. You're only charged for tokens that were already
 * processed. For more information, see <a
 * href="https://docs.aws.amazon.com/bedrock/latest/userguide/batch-inference-manage.html#batch-inference-stop">Stop
 * a batch inference job</a>.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/bedrock-2023-04-20/StopModelInvocationJob">AWS
 * API Reference</a></p>
 */
// Synchronous form; Callable and Async wrappers for this operation follow below.
virtual Model::StopModelInvocationJobOutcome StopModelInvocationJob(const Model::StopModelInvocationJobRequest& request) const;

/**
 * Callable variant of StopModelInvocationJob: schedules the request on the
 * client's executor and immediately returns a future, so the operation can run
 * in parallel with other requests.
 */
template<typename StopModelInvocationJobRequestT = Model::StopModelInvocationJobRequest>
Model::StopModelInvocationJobOutcomeCallable StopModelInvocationJobCallable(const StopModelInvocationJobRequestT& request) const
{
  // Delegate to the shared submission plumbing; the future resolves with the outcome.
  auto operation = &BedrockClient::StopModelInvocationJob;
  return SubmitCallable(operation, request);
}

/**
* An Async wrapper for StopModelInvocationJob that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
template<typename StopModelInvocationJobRequestT = Model::StopModelInvocationJobRequest>
void StopModelInvocationJobAsync(const StopModelInvocationJobRequestT& request, const StopModelInvocationJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
{
return SubmitAsync(&BedrockClient::StopModelInvocationJob, request, handler, context);
}

/**
* <p>Associate tags with a resource. For more information, see <a
* href="https://docs.aws.amazon.com/bedrock/latest/userguide/tagging.html">Tagging
Expand Down
Loading

0 comments on commit 611a3c0

Please sign in to comment.