diff --git a/README.md b/README.md index 5e0e393..7a71e85 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ tools and applications which can work with both variants. | | OpenAI | Azure OpenAI | | ----------- | :----: | :----------: | -| Version | v2.0.0 | v2023-05-15 | +| Version | v2.0.0 | v2023-12-01-preview | | [Chat](https://platform.openai.com/docs/api-reference/chat) | X | X | | [Audio](https://platform.openai.com/docs/api-reference/audio) | X | | | [Completion](https://platform.openai.com/docs/api-reference/completions) | X | X | diff --git a/resources/azure_openai.json b/resources/azure_openai.json index 1738594..8cc3546 100644 --- a/resources/azure_openai.json +++ b/resources/azure_openai.json @@ -3,7 +3,7 @@ "info": { "title": "Azure OpenAI Service API", "description": "Azure OpenAI APIs for completions and search", - "version": "2023-05-15" + "version": "2023-12-01-preview" }, "servers": [ { @@ -47,7 +47,7 @@ "required": true, "schema": { "type": "string", - "example": "2023-05-15", + "example": "2023-12-01-preview", "description": "api version" } } @@ -130,12 +130,6 @@ "default": null, "nullable": true }, - "model": { - "type": "string", - "example": "davinci", - "nullable": true, - "description": "ID of the model to use. You can use the Models_List operation to see all of your available models, or see our Models_Get overview for descriptions of them." - }, "suffix": { "type": "string", "nullable": true, @@ -160,9 +154,7 @@ "type": "array", "items": { "type": "string", - "example": [ - "\n" - ], + "example": "\n", "nullable": false }, "description": "Array minimum size of 1 and maximum of 4" @@ -173,11 +165,6 @@ "type": "string", "nullable": true }, - "cache_level": { - "description": "can be used to disable any server-side caching, 0=no cache, 1=prompt prefix enabled, 2=full cache", - "type": "integer", - "nullable": true - }, "presence_penalty": { "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.", "type": "number", @@ -189,7 +176,7 @@ "default": 0 }, "best_of": { - "description": "Generates best_of completions server-side and returns the \"best\" (the one with the highest log probability per token). Results cannot be streamed.\nWhen used with n, best_of controls the number of candidate completions and n specifies how many to return – best_of must be greater than n.\nNote: Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for max_tokens and stop. Has maximum value of 128.", + "description": "Generates best_of completions server-side and returns the \"best\" (the one with the highest log probability per token). Results cannot be streamed.\nWhen used with n, best_of controls the number of candidate completions and n specifies how many to return - best_of must be greater than n.\nNote: Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for max_tokens and stop. 
Has maximum value of 128.", "type": "integer" } } @@ -221,6 +208,9 @@ "model": { "type": "string" }, + "prompt_filter_results": { + "$ref": "#/components/schemas/promptFilterResults" + }, "choices": { "type": "array", "items": { @@ -262,10 +252,14 @@ "type": "integer" } } - } + }, + "nullable": true }, "finish_reason": { "type": "string" + }, + "content_filter_results": { + "$ref": "#/components/schemas/contentFilterChoiceResults" } } } @@ -344,6 +338,11 @@ } } } + }, + "x-ms-examples": { + "Create a completion.": { + "$ref": "./examples/completions.json" + } } } }, @@ -368,7 +367,7 @@ "required": true, "schema": { "type": "string", - "example": "2023-05-15", + "example": "2023-12-01-preview", "description": "api version" } } @@ -412,11 +411,6 @@ "description": "input type of embedding search to use", "type": "string", "example": "query" - }, - "model": { - "type": "string", - "description": "ID of the model to use. You can use the Models_List operation to see all of your available models, or see our Models_Get overview for descriptions of them.", - "nullable": false } }, "required": [ @@ -491,6 +485,11 @@ } } } + }, + "x-ms-examples": { + "Create a embeddings.": { + "$ref": "./examples/embeddings.json" + } } } }, @@ -514,7 +513,7 @@ "required": true, "schema": { "type": "string", - "example": "2023-05-15", + "example": "2023-12-01-preview", "description": "api version" } } @@ -524,135 +523,7 @@ "content": { "application/json": { "schema": { - "type": "object", - "properties": { - "messages": { - "description": "The messages to generate chat completions for, in the chat format.", - "type": "array", - "minItems": 1, - "items": { - "type": "object", - "properties": { - "role": { - "type": "string", - "enum": [ - "system", - "user", - "assistant" - ], - "description": "The role of the author of this message." - }, - "content": { - "type": "string", - "description": "The contents of the message" - }, - "name": { - "type": "string", - "description": "The name of the user in a multi-user chat" - } - }, - "required": [ - "role", - "content" - ] - } - }, - "temperature": { - "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nWe generally recommend altering this or `top_p` but not both.", - "type": "number", - "minimum": 0, - "maximum": 2, - "default": 1, - "example": 1, - "nullable": true - }, - "top_p": { - "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\nWe generally recommend altering this or `temperature` but not both.", - "type": "number", - "minimum": 0, - "maximum": 1, - "default": 1, - "example": 1, - "nullable": true - }, - "n": { - "description": "How many chat completion choices to generate for each input message.", - "type": "integer", - "minimum": 1, - "maximum": 128, - "default": 1, - "example": 1, - "nullable": true - }, - "stream": { - "description": "If set, partial message deltas will be sent, like in ChatGPT. 
Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a `data: [DONE]` message.", - "type": "boolean", - "nullable": true, - "default": false - }, - "stop": { - "description": "Up to 4 sequences where the API will stop generating further tokens.", - "oneOf": [ - { - "type": "string", - "nullable": true - }, - { - "type": "array", - "items": { - "type": "string", - "nullable": false - }, - "minItems": 1, - "maxItems": 4, - "description": "Array minimum size of 1 and maximum of 4" - } - ], - "default": null - }, - "max_tokens": { - "description": "The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).", - "type": "integer", - "default": 4096 - }, - "presence_penalty": { - "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.", - "type": "number", - "default": 0, - "minimum": -2, - "maximum": 2 - }, - "frequency_penalty": { - "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.", - "type": "number", - "default": 0, - "minimum": -2, - "maximum": 2 - }, - "logit_bias": { - "description": "Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.", - "type": "object", - "nullable": true - }, - "user": { - "description": "A unique identifier representing your end-user, which can help Azure OpenAI to monitor and detect abuse.", - "type": "string", - "example": "user-1234", - "nullable": false - } - }, - "required": [ - "messages" - ] - }, - "example": { - "model": "gpt-35-turbo", - "messages": [ - { - "role": "user", - "content": "Hello!" - } - ] + "$ref": "#/components/schemas/createChatCompletionRequest" } } } @@ -663,135 +534,2157 @@ "content": { "application/json": { "schema": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "object": { - "type": "string" - }, - "created": { - "type": "integer", - "format": "unixtime" - }, - "model": { - "type": "string" - }, - "choices": { - "type": "array", - "items": { - "type": "object", - "properties": { - "index": { - "type": "integer" - }, - "message": { - "type": "object", - "properties": { - "role": { - "type": "string", - "enum": [ - "system", - "user", - "assistant" - ], - "description": "The role of the author of this message." 
- }, - "content": { - "type": "string", - "description": "The contents of the message" - } - }, - "required": [ - "role", - "content" - ] - }, - "finish_reason": { - "type": "string" - } - } - } - }, - "usage": { - "type": "object", - "properties": { - "prompt_tokens": { - "type": "integer" - }, - "completion_tokens": { - "type": "integer" - }, - "total_tokens": { - "type": "integer" - } - }, - "required": [ - "prompt_tokens", - "completion_tokens", - "total_tokens" - ] - } - }, - "required": [ - "id", - "object", - "created", - "model", - "choices" - ] - }, - "example": { - "id": "chatcmpl-123", - "object": "chat.completion", - "created": 1677652288, - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "\n\nHello there, how may I assist you today?" - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 9, - "completion_tokens": 12, - "total_tokens": 21 - } + "$ref": "#/components/schemas/createChatCompletionResponse" + } + } + }, + "headers": { + "apim-request-id": { + "description": "Request ID for troubleshooting purposes", + "schema": { + "type": "string" + } + } + } + }, + "default": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/errorResponse" + } + } + }, + "headers": { + "apim-request-id": { + "description": "Request ID for troubleshooting purposes", + "schema": { + "type": "string" } } } } + }, + "x-ms-examples": { + "Create a chat completion.": { + "$ref": "./examples/chat_completions.json" + } } } - } - }, - "components": { - "schemas": { - "errorResponse": { - "type": "object", - "properties": { - "error": { - "type": "object", - "properties": { - "code": { - "type": "string" - }, - "message": { - "type": "string" - }, - "param": { - "type": "string" - }, - "type": { - "type": "string" + }, + "/deployments/{deployment-id}/extensions/chat/completions": { + "post": { + "summary": "Using extensions to creates a completion for the chat messages.", + "operationId": "ExtensionsChatCompletions_Create", + "parameters": [ + { + "in": "path", + "name": "deployment-id", + "required": true, + "schema": { + "type": "string", + "description": "Deployment id of the model which was deployed." 
+ } + }, + { + "in": "query", + "name": "api-version", + "required": true, + "schema": { + "type": "string", + "example": "2023-12-01-preview", + "description": "api version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/extensionsChatCompletionsRequest" } } } - } + }, + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/extensionsChatCompletionsResponse" + } + } + }, + "headers": { + "apim-request-id": { + "description": "Request ID for troubleshooting purposes", + "schema": { + "type": "string" + } + } + } + }, + "default": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/errorResponse" + } + } + }, + "headers": { + "apim-request-id": { + "description": "Request ID for troubleshooting purposes", + "schema": { + "type": "string" + } + } + } + } + }, + "x-ms-examples": { + "Create a chat completion with Azure OpenAI extensions.": { + "$ref": "./examples/extensions_chat_completions.json" + }, + "Create a chat completion with Azure OpenAI extensions using AML index.": { + "$ref": "./examples/extensions_chat_completions_aml_index.json" + }, + "Create a chat completion with Azure OpenAI extensions using Azure Search (simple).": { + "$ref": "./examples/extensions_chat_completions_azure_search_minimum.json" + }, + "Create a chat completion with Azure OpenAI extensions using Azure Search (advanced).": { + "$ref": "./examples/extensions_chat_completions_azure_search_advanced.json" + }, + "Create a chat completion with Azure OpenAI extensions using Azure Search (image vector).": { + "$ref": "./examples/extensions_chat_completions_azure_search_image_vector.json" + }, + "Create a chat completion with Azure OpenAI extensions using CosmosDB.": { + "$ref": "./examples/extensions_chat_completions_cosmos_db.json" + }, + "Create a chat completion with Azure OpenAI extensions using ElasticSearch.": { + "$ref": "./examples/extensions_chat_completions_elasticsearch.json" + }, + "Create a chat completion with Azure OpenAI extensions using Pinecone.": { + "$ref": "./examples/extensions_chat_completions_pinecone.json" + } + } + } + }, + "/deployments/{deployment-id}/audio/transcriptions": { + "post": { + "summary": "Transcribes audio into the input language.", + "operationId": "Transcriptions_Create", + "parameters": [ + { + "in": "path", + "name": "deployment-id", + "required": true, + "schema": { + "type": "string", + "example": "whisper", + "description": "Deployment id of the whisper model." + } + }, + { + "in": "query", + "name": "api-version", + "required": true, + "schema": { + "type": "string", + "example": "2023-12-01-preview", + "description": "api version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "multipart/form-data": { + "schema": { + "$ref": "#/components/schemas/createTranscriptionRequest" + } + } + } + }, + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/audioResponse" + }, + { + "$ref": "#/components/schemas/audioVerboseResponse" + } + ] + } + }, + "text/plain": { + "schema": { + "type": "string", + "description": "Transcribed text in the output format (when response_format was one of text, vtt or srt)." 
+ } + } + } + } + }, + "x-ms-examples": { + "Create an audio transcription with json response format.": { + "$ref": "./examples/audio_transcription_object.json" + }, + "Create an audio transcription with text response format.": { + "$ref": "./examples/audio_transcription_text.json" + } + } + } + }, + "/deployments/{deployment-id}/audio/translations": { + "post": { + "summary": "Transcribes and translates input audio into English text.", + "operationId": "Translations_Create", + "parameters": [ + { + "in": "path", + "name": "deployment-id", + "required": true, + "schema": { + "type": "string", + "example": "whisper", + "description": "Deployment id of the whisper model which was deployed." + } + }, + { + "in": "query", + "name": "api-version", + "required": true, + "schema": { + "type": "string", + "example": "2023-12-01-preview", + "description": "api version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "multipart/form-data": { + "schema": { + "$ref": "#/components/schemas/createTranslationRequest" + } + } + } + }, + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/audioResponse" + }, + { + "$ref": "#/components/schemas/audioVerboseResponse" + } + ] + } + }, + "text/plain": { + "schema": { + "type": "string", + "description": "Transcribed text in the output format (when response_format was one of text, vtt or srt)." + } + } + } + } + }, + "x-ms-examples": { + "Create an audio translation with json response format.": { + "$ref": "./examples/audio_translation_object.json" + }, + "Create an audio translation with text response format.": { + "$ref": "./examples/audio_translation_text.json" + } + } + } + }, + "/deployments/{deployment-id}/images/generations": { + "post": { + "summary": "Generates a batch of images from a text caption on a given DALLE model deployment", + "operationId": "ImageGenerations_Create", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/imageGenerationsRequest" + } + } + } + }, + "parameters": [ + { + "in": "path", + "name": "deployment-id", + "required": true, + "schema": { + "type": "string", + "example": "dalle-deployment", + "description": "Deployment id of the dalle model which was deployed." 
+ } + }, + { + "in": "query", + "name": "api-version", + "required": true, + "schema": { + "type": "string", + "example": "2023-12-01-preview", + "description": "api version" + } + } + ], + "responses": { + "200": { + "description": "Ok", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/generateImagesResponse" + } + } + } + }, + "default": { + "description": "An error occurred.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/errorResponse" + } + } + } + } + }, + "x-ms-examples": { + "Create an image.": { + "$ref": "./examples/image_generation.json" + } + } + } + } + }, + "components": { + "schemas": { + "errorResponse": { + "type": "object", + "properties": { + "error": { + "$ref": "#/components/schemas/error" + } + } + }, + "errorBase": { + "type": "object", + "properties": { + "code": { + "type": "string" + }, + "message": { + "type": "string" + } + } + }, + "error": { + "type": "object", + "allOf": [ + { + "$ref": "#/components/schemas/errorBase" + } + ], + "properties": { + "code": { + "type": "string" + }, + "message": { + "type": "string" + }, + "param": { + "type": "string" + }, + "type": { + "type": "string" + }, + "inner_error": { + "$ref": "#/components/schemas/innerError" + } + } + }, + "innerError": { + "description": "Inner error with additional details.", + "type": "object", + "properties": { + "code": { + "$ref": "#/components/schemas/innerErrorCode" + }, + "content_filter_results": { + "$ref": "#/components/schemas/contentFilterPromptResults" + } + } + }, + "innerErrorCode": { + "description": "Error codes for the inner error object.", + "enum": [ + "ResponsibleAIPolicyViolation" + ], + "type": "string", + "x-ms-enum": { + "name": "InnerErrorCode", + "modelAsString": true, + "values": [ + { + "value": "ResponsibleAIPolicyViolation", + "description": "The prompt violated one of more content filter rules." + } + ] + } + }, + "contentFilterResultBase": { + "type": "object", + "properties": { + "filtered": { + "type": "boolean" + } + }, + "required": [ + "filtered" + ] + }, + "contentFilterSeverityResult": { + "type": "object", + "allOf": [ + { + "$ref": "#/components/schemas/contentFilterResultBase" + }, + { + "properties": { + "severity": { + "type": "string", + "enum": [ + "safe", + "low", + "medium", + "high" + ], + "x-ms-enum": { + "name": "ContentFilterSeverity", + "modelAsString": true, + "values": [ + { + "value": "safe", + "description": "General content or related content in generic or non-harmful contexts." + }, + { + "value": "low", + "description": "Harmful content at a low intensity and risk level." + }, + { + "value": "medium", + "description": "Harmful content at a medium intensity and risk level." + }, + { + "value": "high", + "description": "Harmful content at a high intensity and risk level." 
+ } + ] + } + } + } + } + ], + "required": [ + "severity", + "filtered" + ] + }, + "contentFilterDetectedResult": { + "type": "object", + "allOf": [ + { + "$ref": "#/components/schemas/contentFilterResultBase" + }, + { + "properties": { + "detected": { + "type": "boolean" + } + } + } + ], + "required": [ + "detected", + "filtered" + ] + }, + "contentFilterDetectedWithCitationResult": { + "type": "object", + "allOf": [ + { + "$ref": "#/components/schemas/contentFilterDetectedResult" + }, + { + "properties": { + "citation": { + "type": "object", + "properties": { + "URL": { + "type": "string" + }, + "license": { + "type": "string" + } + } + } + } + } + ], + "required": [ + "detected", + "filtered" + ] + }, + "contentFilterIdResult": { + "type": "object", + "allOf": [ + { + "$ref": "#/components/schemas/contentFilterResultBase" + }, + { + "properties": { + "id": { + "type": "string" + } + } + } + ], + "required": [ + "id", + "filtered" + ] + }, + "contentFilterResultsBase": { + "type": "object", + "description": "Information about the content filtering results.", + "properties": { + "sexual": { + "$ref": "#/components/schemas/contentFilterSeverityResult" + }, + "violence": { + "$ref": "#/components/schemas/contentFilterSeverityResult" + }, + "hate": { + "$ref": "#/components/schemas/contentFilterSeverityResult" + }, + "self_harm": { + "$ref": "#/components/schemas/contentFilterSeverityResult" + }, + "profanity": { + "$ref": "#/components/schemas/contentFilterDetectedResult" + }, + "custom_blocklists": { + "items": { + "$ref": "#/components/schemas/contentFilterIdResult" + }, + "type": "array" + }, + "error": { + "$ref": "#/components/schemas/errorBase" + } + } + }, + "contentFilterPromptResults": { + "type": "object", + "description": "Information about the content filtering category (hate, sexual, violence, self_harm), if it has been detected, as well as the severity level (very_low, low, medium, high-scale that determines the intensity and risk level of harmful content) and if it has been filtered or not. Information about jailbreak content and profanity, if it has been detected, and if it has been filtered or not. And information about customer block list, if it has been filtered and its id.", + "allOf": [ + { + "$ref": "#/components/schemas/contentFilterResultsBase" + }, + { + "properties": { + "jailbreak": { + "$ref": "#/components/schemas/contentFilterDetectedResult" + } + } + } + ] + }, + "contentFilterChoiceResults": { + "type": "object", + "description": "Information about the content filtering category (hate, sexual, violence, self_harm), if it has been detected, as well as the severity level (very_low, low, medium, high-scale that determines the intensity and risk level of harmful content) and if it has been filtered or not. Information about third party text and profanity, if it has been detected, and if it has been filtered or not. 
And information about customer block list, if it has been filtered and its id.", + "allOf": [ + { + "$ref": "#/components/schemas/contentFilterResultsBase" + }, + { + "properties": { + "protected_material_text": { + "$ref": "#/components/schemas/contentFilterDetectedResult" + } + } + }, + { + "properties": { + "protected_material_code": { + "$ref": "#/components/schemas/contentFilterDetectedWithCitationResult" + } + } + } + ] + }, + "promptFilterResult": { + "type": "object", + "description": "Content filtering results for a single prompt in the request.", + "properties": { + "prompt_index": { + "type": "integer" + }, + "content_filter_results": { + "$ref": "#/components/schemas/contentFilterPromptResults" + } + } + }, + "promptFilterResults": { + "type": "array", + "description": "Content filtering results for zero or more prompts in the request. In a streaming request, results for different prompts may arrive at different times or in different orders.", + "items": { + "$ref": "#/components/schemas/promptFilterResult" + } + }, + "chatCompletionsRequestCommon": { + "type": "object", + "properties": { + "temperature": { + "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nWe generally recommend altering this or `top_p` but not both.", + "type": "number", + "minimum": 0, + "maximum": 2, + "default": 1, + "example": 1, + "nullable": true + }, + "top_p": { + "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\nWe generally recommend altering this or `temperature` but not both.", + "type": "number", + "minimum": 0, + "maximum": 1, + "default": 1, + "example": 1, + "nullable": true + }, + "stream": { + "description": "If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a `data: [DONE]` message.", + "type": "boolean", + "nullable": true, + "default": false + }, + "stop": { + "description": "Up to 4 sequences where the API will stop generating further tokens.", + "oneOf": [ + { + "type": "string", + "nullable": true + }, + { + "type": "array", + "items": { + "type": "string", + "nullable": false + }, + "minItems": 1, + "maxItems": 4, + "description": "Array minimum size of 1 and maximum of 4" + } + ], + "default": null + }, + "max_tokens": { + "description": "The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).", + "type": "integer", + "default": 4096 + }, + "presence_penalty": { + "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.", + "type": "number", + "default": 0, + "minimum": -2, + "maximum": 2 + }, + "frequency_penalty": { + "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.", + "type": "number", + "default": 0, + "minimum": -2, + "maximum": 2 + }, + "logit_bias": { + "description": "Modify the likelihood of specified tokens appearing in the completion. 
Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.", + "type": "object", + "nullable": true + }, + "user": { + "description": "A unique identifier representing your end-user, which can help Azure OpenAI to monitor and detect abuse.", + "type": "string", + "example": "user-1234", + "nullable": false + } + } + }, + "createChatCompletionRequest": { + "type": "object", + "allOf": [ + { + "$ref": "#/components/schemas/chatCompletionsRequestCommon" + }, + { + "properties": { + "messages": { + "description": "A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb).", + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/components/schemas/chatCompletionRequestMessage" + } + }, + "n": { + "type": "integer", + "minimum": 1, + "maximum": 128, + "default": 1, + "example": 1, + "nullable": true, + "description": "How many chat completion choices to generate for each input message." + }, + "seed": { + "type": "integer", + "minimum": -9223372036854775808, + "maximum": 9223372036854775807, + "default": 0, + "example": 1, + "nullable": true, + "description": "If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend." + }, + "response_format": { + "type": "object", + "description": "An object specifying the format that the model must output. Used to enable JSON mode.", + "properties": { + "type": { + "$ref": "#/components/schemas/chatCompletionResponseFormat" + } + } + }, + "tools": { + "description": "A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.", + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/components/schemas/chatCompletionTool" + } + }, + "tool_choice": { + "$ref": "#/components/schemas/chatCompletionToolChoiceOption" + }, + "functions": { + "description": "Deprecated in favor of `tools`. A list of functions the model may generate JSON inputs for.", + "type": "array", + "minItems": 1, + "maxItems": 128, + "items": { + "$ref": "#/components/schemas/chatCompletionFunction" + } + }, + "function_call": { + "description": "Deprecated in favor of `tool_choice`. Controls how the model responds to function calls. \"none\" means the model does not call a function, and responds to the end-user. \"auto\" means the model can pick between an end-user or calling a function. Specifying a particular function via `{\"name\":\\ \"my_function\"}` forces the model to call that function. \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.", + "oneOf": [ + { + "type": "string", + "enum": [ + "none", + "auto" + ], + "description": "`none` means the model will not call a function and instead generates a message. 
`auto` means the model can pick between generating a message or calling a function." + }, + { + "type": "object", + "description": "Specifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function.", + "properties": { + "name": { + "type": "string", + "description": "The name of the function to call." + } + }, + "required": [ + "name" + ] + } + ] + } + } + } + ], + "required": [ + "messages" + ] + }, + "chatCompletionResponseFormat": { + "type": "string", + "enum": [ + "text", + "json_object" + ], + "default": "text", + "example": "json_object", + "nullable": true, + "description": "Setting to `json_object` enables JSON mode. This guarantees that the message the model generates is valid JSON.", + "x-ms-enum": { + "name": "ChatCompletionResponseFormat", + "modelAsString": true, + "values": [ + { + "value": "text", + "description": "Response format is a plain text string." + }, + { + "value": "json_object", + "description": "Response format is a JSON object." + } + ] + } + }, + "chatCompletionFunction": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64." + }, + "description": { + "type": "string", + "description": "The description of what the function does." + }, + "parameters": { + "$ref": "#/components/schemas/chatCompletionFunctionParameters" + } + }, + "required": [ + "name" + ] + }, + "chatCompletionFunctionParameters": { + "type": "object", + "description": "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.", + "additionalProperties": true + }, + "chatCompletionRequestMessage": { + "type": "object", + "properties": { + "role": { + "$ref": "#/components/schemas/chatCompletionRequestMessageRole" + } + }, + "discriminator": { + "propertyName": "role", + "mapping": { + "system": "#/components/schemas/chatCompletionRequestMessageSystem", + "user": "#/components/schemas/chatCompletionRequestMessageUser", + "assistant": "#/components/schemas/chatCompletionRequestMessageAssistant", + "tool": "#/components/schemas/chatCompletionRequestMessageTool", + "function": "#/components/schemas/chatCompletionRequestMessageFunction" + } + }, + "required": [ + "role" + ] + }, + "chatCompletionRequestMessageRole": { + "type": "string", + "enum": [ + "system", + "user", + "assistant", + "tool", + "function" + ], + "description": "The role of the messages author.", + "x-ms-enum": { + "name": "ChatCompletionRequestMessageRole", + "modelAsString": true, + "values": [ + { + "value": "system", + "description": "The message author role is system." + }, + { + "value": "user", + "description": "The message author role is user." + }, + { + "value": "assistant", + "description": "The message author role is assistant." + }, + { + "value": "tool", + "description": "The message author role is tool." + }, + { + "value": "function", + "description": "Deprecated. The message author role is function." 
+ } + ] + } + }, + "chatCompletionRequestMessageSystem": { + "allOf": [ + { + "$ref": "#/components/schemas/chatCompletionRequestMessage" + }, + { + "type": "object", + "properties": { + "content": { + "type": "string", + "description": "The contents of the message.", + "nullable": true + } + } + } + ], + "required": [ + "content" + ] + }, + "chatCompletionRequestMessageUser": { + "allOf": [ + { + "$ref": "#/components/schemas/chatCompletionRequestMessage" + }, + { + "type": "object", + "properties": { + "content": { + "oneOf": [ + { + "type": "string", + "description": "The contents of the message." + }, + { + "type": "array", + "description": "An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4-visual-preview` model.", + "minimum": 1, + "items": { + "$ref": "#/components/schemas/chatCompletionRequestMessageContentPart" + } + } + ], + "nullable": true + } + } + } + ], + "required": [ + "content" + ] + }, + "chatCompletionRequestMessageContentPart": { + "type": "object", + "properties": { + "type": { + "$ref": "#/components/schemas/chatCompletionRequestMessageContentPartType" + } + }, + "discriminator": { + "propertyName": "type", + "mapping": { + "text": "#/components/schemas/chatCompletionRequestMessageContentPartText", + "image_url": "#/components/schemas/chatCompletionRequestMessageContentPartImage" + } + }, + "required": [ + "type" + ] + }, + "chatCompletionRequestMessageContentPartType": { + "type": "string", + "enum": [ + "text", + "image_url" + ], + "description": "The type of the content part.", + "x-ms-enum": { + "name": "ChatCompletionRequestMessageContentPartType", + "modelAsString": true, + "values": [ + { + "value": "text", + "description": "The content part type is text." + }, + { + "value": "image_url", + "description": "The content part type is image_url." + } + ] + } + }, + "chatCompletionRequestMessageContentPartText": { + "allOf": [ + { + "$ref": "#/components/schemas/chatCompletionRequestMessageContentPart" + }, + { + "type": "object", + "properties": { + "text": { + "type": "string", + "description": "The text content." + } + } + } + ], + "required": [ + "text" + ] + }, + "chatCompletionRequestMessageContentPartImage": { + "allOf": [ + { + "$ref": "#/components/schemas/chatCompletionRequestMessageContentPart" + }, + { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "Either a URL of the image or the base64 encoded image data.", + "format": "uri" + }, + "detail": { + "$ref": "#/components/schemas/imageDetailLevel" + } + } + } + ], + "required": [ + "url" + ] + }, + "imageDetailLevel": { + "type": "string", + "description": "Specifies the detail level of the image.", + "enum": [ + "auto", + "low", + "high" + ], + "default": "auto", + "x-ms-enum": { + "name": "ImageDetailLevel", + "modelAsString": true, + "values": [ + { + "value": "auto", + "description": "The image detail level is auto." + }, + { + "value": "low", + "description": "The image detail level is low." + }, + { + "value": "high", + "description": "The image detail level is high." 
+ } + ] + } + }, + "chatCompletionRequestMessageAssistant": { + "allOf": [ + { + "$ref": "#/components/schemas/chatCompletionRequestMessage" + }, + { + "type": "object", + "properties": { + "content": { + "type": "string", + "description": "The contents of the message.", + "nullable": true + }, + "tool_calls": { + "type": "array", + "description": "The tool calls generated by the model, such as function calls.", + "items": { + "$ref": "#/components/schemas/chatCompletionMessageToolCall" + } + } + } + } + ], + "required": [ + "content" + ] + }, + "chatCompletionMessageToolCall": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the tool call." + }, + "type": { + "$ref": "#/components/schemas/toolCallType" + }, + "function": { + "type": "object", + "description": "The function that the model called.", + "properties": { + "name": { + "type": "string", + "description": "The name of the function to call." + }, + "arguments": { + "type": "string", + "description": "The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function." + } + }, + "required": [ + "name", + "arguments" + ] + } + }, + "required": [ + "id", + "type", + "function" + ] + }, + "toolCallType": { + "type": "string", + "enum": [ + "function" + ], + "description": "The type of the tool call, in this case `function`.", + "x-ms-enum": { + "name": "ToolCallType", + "modelAsString": true, + "values": [ + { + "value": "function", + "description": "The tool call type is function." + } + ] + } + }, + "chatCompletionRequestMessageTool": { + "allOf": [ + { + "$ref": "#/components/schemas/chatCompletionRequestMessage" + }, + { + "type": "object", + "nullable": true, + "properties": { + "tool_call_id": { + "type": "string", + "description": "Tool call that this message is responding to." + }, + "content": { + "type": "string", + "description": "The contents of the message.", + "nullable": true + } + } + } + ], + "required": [ + "tool_call_id", + "content" + ] + }, + "chatCompletionRequestMessageFunction": { + "allOf": [ + { + "$ref": "#/components/schemas/chatCompletionRequestMessage" + }, + { + "type": "object", + "description": "Deprecated. Message that represents a function.", + "nullable": true, + "properties": { + "role": { + "type": "string", + "enum": [ + "function" + ], + "description": "The role of the messages author, in this case `function`." + }, + "name": { + "type": "string", + "description": "The contents of the message." 
+ }, + "content": { + "type": "string", + "description": "The contents of the message.", + "nullable": true + } + } + } + ], + "required": [ + "function_call_id", + "content" + ] + }, + "createChatCompletionResponse": { + "type": "object", + "allOf": [ + { + "$ref": "#/components/schemas/chatCompletionsResponseCommon" + }, + { + "properties": { + "prompt_filter_results": { + "$ref": "#/components/schemas/promptFilterResults" + }, + "choices": { + "type": "array", + "items": { + "type": "object", + "allOf": [ + { + "$ref": "#/components/schemas/chatCompletionChoiceCommon" + }, + { + "properties": { + "message": { + "$ref": "#/components/schemas/chatCompletionResponseMessage" + }, + "content_filter_results": { + "$ref": "#/components/schemas/contentFilterChoiceResults" + } + } + } + ] + } + } + } + } + ], + "required": [ + "id", + "object", + "created", + "model", + "choices" + ] + }, + "chatCompletionResponseMessage": { + "type": "object", + "description": "A chat completion message generated by the model.", + "properties": { + "role": { + "$ref": "#/components/schemas/chatCompletionResponseMessageRole" + }, + "content": { + "type": "string", + "description": "The contents of the message.", + "nullable": true + }, + "tool_calls": { + "type": "array", + "description": "The tool calls generated by the model, such as function calls.", + "items": { + "$ref": "#/components/schemas/chatCompletionMessageToolCall" + } + }, + "function_call": { + "$ref": "#/components/schemas/chatCompletionFunctionCall" + } + } + }, + "chatCompletionResponseMessageRole": { + "type": "string", + "enum": [ + "assistant" + ], + "description": "The role of the author of the response message." + }, + "chatCompletionToolChoiceOption": { + "description": "Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that function.", + "oneOf": [ + { + "type": "string", + "description": "`none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function.", + "enum": [ + "none", + "auto" + ] + }, + { + "$ref": "#/components/schemas/chatCompletionNamedToolChoice" + } + ] + }, + "chatCompletionNamedToolChoice": { + "type": "object", + "description": "Specifies a tool the model should use. Use to force the model to call a specific function.", + "properties": { + "type": { + "type": "string", + "enum": [ + "function" + ], + "description": "The type of the tool. Currently, only `function` is supported." + }, + "function": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The name of the function to call." + } + }, + "required": [ + "name" + ] + } + } + }, + "chatCompletionFunctionCall": { + "type": "object", + "description": "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.", + "properties": { + "name": { + "type": "string", + "description": "The name of the function to call." + }, + "arguments": { + "type": "string", + "description": "The arguments to call the function with, as generated by the model in JSON format. 
Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function." + } + }, + "required": [ + "name", + "arguments" + ] + }, + "extensionsChatCompletionsRequest": { + "type": "object", + "description": "Request for the chat completions using extensions", + "required": [ + "messages" + ], + "allOf": [ + { + "$ref": "#/components/schemas/chatCompletionsRequestCommon" + }, + { + "properties": { + "messages": { + "type": "array", + "items": { + "$ref": "#/components/schemas/message" + } + }, + "dataSources": { + "type": "array", + "description": "The data sources to be used for the Azure OpenAI on your data feature.", + "items": { + "$ref": "#/components/schemas/dataSource" + } + }, + "enhancements": { + "type": "object", + "description": "The type of enhancements needed.", + "properties": { + "grounding": { + "type": "object", + "description": "Request object to specify if grounding enhancement is needed.", + "properties": { + "enabled": { + "type": "boolean", + "default": false + } + } + }, + "ocr": { + "type": "object", + "description": "Request object to specify if ocr enhancement is needed.", + "properties": { + "enabled": { + "type": "boolean", + "default": false + } + } + } + } + } + } + } + ], + "example": { + "dataSources": [ + { + "type": "AzureCognitiveSearch", + "parameters": { + "endpoint": "https://mysearchexample.search.windows.net", + "key": "***(admin key)", + "indexName": "my-chunk-index", + "fieldsMapping": { + "titleField": "productName", + "urlField": "productUrl", + "filepathField": "productFilePath", + "contentFields": [ + "productDescription" + ], + "contentFieldsSeparator": "\n" + }, + "topNDocuments": 5, + "queryType": "semantic", + "semanticConfiguration": "defaultConfiguration", + "inScope": true, + "roleInformation": "roleInformation" + } + } + ], + "messages": [ + { + "role": "user", + "content": "Where can I find a hiking place in Seattle?" + } + ], + "temperature": 0.9 + } + }, + "dataSource": { + "type": "object", + "description": "The data source to be used for the Azure OpenAI on your data feature.", + "properties": { + "type": { + "type": "string", + "description": "The data source type." + }, + "parameters": { + "type": "object", + "description": "The parameters to be used for the data source in runtime.", + "additionalProperties": true + } + }, + "required": [ + "type" + ] + }, + "message": { + "type": "object", + "description": "A chat message.", + "properties": { + "index": { + "type": "integer", + "description": "The index of the message in the conversation." + }, + "role": { + "type": "string", + "enum": [ + "system", + "user", + "assistant", + "tool" + ], + "description": "The role of the author of this message." + }, + "recipient": { + "type": "string", + "example": "Contoso.productsUsingGET", + "description": "The recipient of the message in the format of .. Present if and only if the recipient is tool." + }, + "content": { + "type": "string", + "description": "The contents of the message" + }, + "end_turn": { + "type": "boolean", + "description": "Whether the message ends the turn." 
+ }, + "context": { + "type": "object", + "description": "The conversation context", + "nullable": true, + "properties": { + "messages": { + "type": "array", + "description": "Messages exchanged between model and extensions prior to final message from model", + "minItems": 1, + "items": { + "$ref": "#/components/schemas/message" + }, + "nullable": true + } + } + } + }, + "required": [ + "role", + "content" + ] + }, + "chatCompletionsResponseCommon": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "A unique identifier for the chat completion." + }, + "object": { + "$ref": "#/components/schemas/chatCompletionResponseObject" + }, + "created": { + "type": "integer", + "format": "unixtime", + "description": "The Unix timestamp (in seconds) of when the chat completion was created." + }, + "model": { + "type": "string", + "description": "The model used for the chat completion." + }, + "usage": { + "$ref": "#/components/schemas/completionUsage" + }, + "system_fingerprint": { + "type": "string", + "description": "Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism." + } + }, + "required": [ + "id", + "object", + "created", + "model" + ] + }, + "chatCompletionResponseObject": { + "type": "string", + "description": "The object type.", + "enum": [ + "chat.completion" + ], + "x-ms-enum": { + "name": "ChatCompletionResponseObject", + "modelAsString": true, + "values": [ + { + "value": "chat.completion", + "description": "The object type is chat completion." + } + ] + } + }, + "completionUsage": { + "type": "object", + "description": "Usage statistics for the completion request.", + "properties": { + "prompt_tokens": { + "type": "integer", + "description": "Number of tokens in the prompt." + }, + "completion_tokens": { + "type": "integer", + "description": "Number of tokens in the generated completion." + }, + "total_tokens": { + "type": "integer", + "description": "Total number of tokens used in the request (prompt + completion)." + } + }, + "required": [ + "prompt_tokens", + "completion_tokens", + "total_tokens" + ] + }, + "chatCompletionTool": { + "type": "object", + "properties": { + "type": { + "$ref": "#/components/schemas/chatCompletionToolType" + }, + "function": { + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "A description of what the function does, used by the model to choose when and how to call the function." + }, + "name": { + "type": "string", + "description": "The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64." + }, + "parameters": { + "$ref": "#/components/schemas/chatCompletionFunctionParameters" + } + }, + "required": [ + "name", + "parameters" + ] + } + }, + "required": [ + "type", + "function" + ] + }, + "chatCompletionToolType": { + "type": "string", + "enum": [ + "function" + ], + "description": "The type of the tool. Currently, only `function` is supported.", + "x-ms-enum": { + "name": "ChatCompletionToolType", + "modelAsString": true, + "values": [ + { + "value": "function", + "description": "The tool type is function." 
+ } + ] + } + }, + "chatCompletionChoiceCommon": { + "type": "object", + "properties": { + "index": { + "type": "integer" + }, + "finish_reason": { + "type": "string" + } + } + }, + "extensionsChatCompletionChoice": { + "type": "object", + "allOf": [ + { + "$ref": "#/components/schemas/chatCompletionChoiceCommon" + }, + { + "properties": { + "message": { + "$ref": "#/components/schemas/message" + }, + "enhancements": { + "description": "The enhancement results returned by the service.", + "$ref": "#/components/schemas/enhancement" + } + } + } + ] + }, + "extensionsChatCompletionsResponse": { + "type": "object", + "description": "The response of the extensions chat completions.", + "allOf": [ + { + "$ref": "#/components/schemas/chatCompletionsResponseCommon" + }, + { + "properties": { + "choices": { + "type": "array", + "items": { + "$ref": "#/components/schemas/extensionsChatCompletionChoice" + } + } + } + } + ], + "example": { + "id": "1", + "object": "extensions.chat.completion", + "created": 1679201802, + "model": "gpt-3.5-turbo-0301", + "choices": [ + { + "index": 0, + "finish_reason": "stop", + "message": { + "role": "assistant", + "content": "Seattle is a great place for hiking! Here are some of the best hiking places in Seattle according to Contoso Traveler [doc1] and West Coast Traveler, Snow Lake, Mount Si, and Mount Tenerife [doc2]. I hope this helps! Let me know if you need more information.", + "end_turn": true, + "context": { + "messages": [ + { + "role": "tool", + "content": "{\"citations\":[{\"filepath\":\"ContosoTraveler.pdf\",\"content\":\"This is the content of the citation 1\"},{\"filepath\":\"WestCoastTraveler.html\",\"content\":\"This is the content of the citation 2\"},{\"content\":\"This is the content of the citation 3 without filepath\"}],\"intent\":\"hiking place in seattle\"}", + "end_turn": false + } + ] + } + } + } + ] + } + }, + "createTranslationRequest": { + "type": "object", + "description": "Translation request.", + "properties": { + "file": { + "type": "string", + "description": "The audio file to translate.", + "format": "binary" + }, + "prompt": { + "type": "string", + "description": "An optional text to guide the model's style or continue a previous audio segment. The prompt should be in English." + }, + "response_format": { + "$ref": "#/components/schemas/audioResponseFormat" + }, + "temperature": { + "type": "number", + "default": 0, + "description": "The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit." + } + }, + "required": [ + "file" + ] + }, + "audioResponse": { + "description": "Translation or transcription response when response_format was json", + "type": "object", + "properties": { + "text": { + "type": "string", + "description": "Translated or transcribed text." + } + }, + "required": [ + "text" + ] + }, + "audioVerboseResponse": { + "description": "Translation or transcription response when response_format was verbose_json", + "type": "object", + "allOf": [ + { + "$ref": "#/components/schemas/audioResponse" + }, + { + "properties": { + "task": { + "type": "string", + "description": "Type of audio task.", + "enum": [ + "transcribe", + "translate" + ], + "x-ms-enum": { + "modelAsString": true + } + }, + "language": { + "type": "string", + "description": "Language." 
+ }, + "duration": { + "type": "number", + "description": "Duration." + }, + "segments": { + "type": "array", + "items": { + "$ref": "#/components/schemas/audioSegment" + } + } + } + } + ], + "required": [ + "text" + ] + }, + "audioResponseFormat": { + "title": "AudioResponseFormat", + "description": "Defines the format of the output.", + "enum": [ + "json", + "text", + "srt", + "verbose_json", + "vtt" + ], + "type": "string", + "x-ms-enum": { + "modelAsString": true + } + }, + "createTranscriptionRequest": { + "type": "object", + "description": "Transcription request.", + "properties": { + "file": { + "type": "string", + "description": "The audio file object to transcribe.", + "format": "binary" + }, + "prompt": { + "type": "string", + "description": "An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language." + }, + "response_format": { + "$ref": "#/components/schemas/audioResponseFormat" + }, + "temperature": { + "type": "number", + "default": 0, + "description": "The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit." + }, + "language": { + "type": "string", + "description": "The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency." + } + }, + "required": [ + "file" + ] + }, + "audioSegment": { + "type": "object", + "description": "Transcription or translation segment.", + "properties": { + "id": { + "type": "integer", + "description": "Segment identifier." + }, + "seek": { + "type": "number", + "description": "Offset of the segment." + }, + "start": { + "type": "number", + "description": "Segment start offset." + }, + "end": { + "type": "number", + "description": "Segment end offset." + }, + "text": { + "type": "string", + "description": "Segment text." + }, + "tokens": { + "type": "array", + "items": { + "type": "number", + "nullable": false + }, + "description": "Tokens of the text." + }, + "temperature": { + "type": "number", + "description": "Temperature." + }, + "avg_logprob": { + "type": "number", + "description": "Average log probability." + }, + "compression_ratio": { + "type": "number", + "description": "Compression ratio." + }, + "no_speech_prob": { + "type": "number", + "description": "Probability of 'no speech'." 
+ } + } + }, + "imageQuality": { + "description": "The quality of the image that will be generated.", + "type": "string", + "enum": [ + "standard", + "hd" + ], + "default": "standard", + "x-ms-enum": { + "name": "Quality", + "modelAsString": true, + "values": [ + { + "value": "standard", + "description": "Standard quality creates images with standard quality.", + "name": "Standard" + }, + { + "value": "hd", + "description": "HD quality creates images with finer details and greater consistency across the image.", + "name": "HD" + } + ] + } + }, + "imagesResponseFormat": { + "description": "The format in which the generated images are returned.", + "type": "string", + "enum": [ + "url", + "b64_json" + ], + "default": "url", + "x-ms-enum": { + "name": "ImagesResponseFormat", + "modelAsString": true, + "values": [ + { + "value": "url", + "description": "The URL that provides temporary access to download the generated images.", + "name": "Url" + }, + { + "value": "b64_json", + "description": "The generated images are returned as base64 encoded string.", + "name": "Base64Json" + } + ] + } + }, + "imageSize": { + "description": "The size of the generated images.", + "type": "string", + "enum": [ + "1792x1024", + "1024x1792", + "1024x1024" + ], + "default": "1024x1024", + "x-ms-enum": { + "name": "Size", + "modelAsString": true, + "values": [ + { + "value": "1792x1024", + "description": "The desired size of the generated image is 1792x1024 pixels.", + "name": "Size1792x1024" + }, + { + "value": "1024x1792", + "description": "The desired size of the generated image is 1024x1792 pixels.", + "name": "Size1024x1792" + }, + { + "value": "1024x1024", + "description": "The desired size of the generated image is 1024x1024 pixels.", + "name": "Size1024x1024" + } + ] + } + }, + "imageStyle": { + "description": "The style of the generated images.", + "type": "string", + "enum": [ + "vivid", + "natural" + ], + "default": "vivid", + "x-ms-enum": { + "name": "Style", + "modelAsString": true, + "values": [ + { + "value": "vivid", + "description": "Vivid creates images that are hyper-realistic and dramatic.", + "name": "Vivid" + }, + { + "value": "natural", + "description": "Natural creates images that are more natural and less hyper-realistic.", + "name": "Natural" + } + ] + } + }, + "imageGenerationsRequest": { + "type": "object", + "properties": { + "prompt": { + "description": "A text description of the desired image(s). 
The maximum length is 4000 characters.", + "type": "string", + "format": "string", + "example": "a corgi in a field", + "minLength": 1 + }, + "n": { + "description": "The number of images to generate.", + "type": "integer", + "minimum": 1, + "maximum": 1, + "default": 1 + }, + "size": { + "$ref": "#/components/schemas/imageSize" + }, + "response_format": { + "$ref": "#/components/schemas/imagesResponseFormat" + }, + "user": { + "description": "A unique identifier representing your end-user, which can help to monitor and detect abuse.", + "type": "string", + "format": "string", + "example": "user123456" + }, + "quality": { + "$ref": "#/components/schemas/imageQuality" + }, + "style": { + "$ref": "#/components/schemas/imageStyle" + } + }, + "required": [ + "prompt" + ] + }, + "generateImagesResponse": { + "type": "object", + "properties": { + "created": { + "type": "integer", + "format": "unixtime", + "description": "The unix timestamp when the operation was created.", + "example": "1676540381" + }, + "data": { + "type": "array", + "description": "The result data of the operation, if successful", + "items": { + "$ref": "#/components/schemas/imageResult" + } + }, + "error": { + "$ref": "#/components/schemas/error" + } + }, + "required": [ + "created" + ] + }, + "imageResult": { + "type": "object", + "description": "The image url or encoded image if successful, and an error otherwise.", + "properties": { + "url": { + "type": "string", + "description": "The image url.", + "example": "https://www.contoso.com" + }, + "b64_json": { + "type": "string", + "description": "The base64 encoded image" + }, + "revised_prompt": { + "type": "string", + "description": "The prompt that was used to generate the image, if there was any revision to the prompt." + } + } + }, + "enhancement": { + "type": "object", + "properties": { + "grounding": { + "type": "object", + "description": "The grounding enhancement that returns the bounding box of the objects detected in the image.", + "properties": { + "lines": { + "type": "array", + "items": { + "$ref": "#/components/schemas/line" + } + } + }, + "required": [ + "lines" + ] + } + } + }, + "line": { + "type": "object", + "description": "A content line object consisting of an adjacent sequence of content elements, such as words and selection marks.", + "properties": { + "text": { + "type": "string" + }, + "spans": { + "type": "array", + "description": "An array of spans that represent detected objects and its bounding box information.", + "items": { + "$ref": "#/components/schemas/span" + } + } + }, + "required": [ + "text", + "spans" + ] + }, + "span": { + "type": "object", + "description": "A span object that represents a detected object and its bounding box information.", + "properties": { + "text": { + "type": "string", + "description": "The text content of the span that represents the detected object." + }, + "offset": { + "type": "integer", + "description": "The character offset within the text where the span begins. This offset is defined as the position of the first character of the span, counting from the start of the text as Unicode codepoints." + }, + "length": { + "type": "integer", + "description": "The length of the span in characters, measured in Unicode codepoints." + }, + "polygon": { + "type": "array", + "description": "An array of objects representing points in the polygon that encloses the detected object.", + "items": { + "type": "object", + "properties": { + "x": { + "type": "number", + "description": "The x-coordinate of the point." 
+ }, + "y": { + "type": "number", + "description": "The y-coordinate of the point." + } + } + } + } + }, + "required": [ + "text", + "offset", + "length", + "polygon" + ] } }, "securitySchemes": { diff --git a/src/wkok/openai_clojure/azure.clj b/src/wkok/openai_clojure/azure.clj index 3f659b8..fdccc1d 100644 --- a/src/wkok/openai_clojure/azure.clj +++ b/src/wkok/openai_clojure/azure.clj @@ -61,6 +61,6 @@ sse/perform-sse-capable-request]))))))) (defn patch-params [params] - {:api-version "2023-05-15" + {:api-version "2023-12-01-preview" :deployment-id (:model params) :martian.core/body (dissoc params :model)})
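
For context, a minimal sketch of how the bumped `2023-12-01-preview` api-version would be exercised from the Clojure side. It assumes the `wkok.openai-clojure.api/create-chat-completion` entry point and the `{:impl :azure}` option described in the project README, and that the `AZURE_OPENAI_API_KEY` / `AZURE_OPENAI_API_ENDPOINT` environment variables and the `gpt-35-turbo` deployment name are placeholders for your own configuration:

```clojure
(ns example.azure-chat
  (:require [wkok.openai-clojure.api :as api]))

;; Assumes AZURE_OPENAI_API_KEY and AZURE_OPENAI_API_ENDPOINT are set, and that
;; "gpt-35-turbo" names an existing deployment in that Azure OpenAI resource
;; (both are illustrative). With {:impl :azure}, patch-params rewrites :model
;; into the :deployment-id path parameter and pins api-version to
;; "2023-12-01-preview", as shown in the azure.clj hunk above.
(api/create-chat-completion
 {:model    "gpt-35-turbo"
  :messages [{:role "system" :content "You are a helpful assistant."}
             {:role "user"   :content "Hello!"}]}
 {:impl :azure})
```

Because `patch-params` maps `:model` to the Azure `deployment-id` path parameter and drops it from the request body, existing call sites should only need to point `:model` at a deployment name; the api-version bump itself requires no caller changes.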