From c40a26e72b0d16bacbee2e396cb86567717cc8f4 Mon Sep 17 00:00:00 2001
From: "Gusakovsky, Sergey"
Date: Sun, 5 Mar 2023 00:43:46 +0300
Subject: [PATCH] Release 1.1.6

---
 OpenAIService.podspec                         |  2 +-
 README.md                                     |  2 +-
 .../OpenAIGenerationImageBody.swift           | 12 +--
 .../Networking/MultipartFormDataRequest.swift | 82 +++++++++++++++++++
 .../Networking/OpenAIApiClient.swift          | 30 +++----
 .../Networking/OpenAIEndpoint.swift           |  2 +-
 .../Networking/OpenAIImageEditsBody.swift     | 77 +++++++++++++++++
 Sources/OpenAIService/OpenAIService.swift     | 67 +++++++++++----
 8 files changed, 229 insertions(+), 45 deletions(-)
 create mode 100644 Sources/OpenAIService/Networking/MultipartFormDataRequest.swift
 create mode 100644 Sources/OpenAIService/Networking/OpenAIImageEditsBody.swift

diff --git a/OpenAIService.podspec b/OpenAIService.podspec
index 29ba1fb..c34a996 100755
--- a/OpenAIService.podspec
+++ b/OpenAIService.podspec
@@ -1,6 +1,6 @@
 Pod::Spec.new do |spec|
   spec.name          = 'OpenAIService'
-  spec.version       = '1.1.5'
+  spec.version       = '1.1.6'
   spec.homepage      = 'https://github.com/sgusakovsky/OpenAIService'
   spec.license       = {
     :type => 'MIT',
diff --git a/README.md b/README.md
index 453f58a..9b4b62c 100644
--- a/README.md
+++ b/README.md
@@ -20,7 +20,7 @@ You can use Swift Package Manager to integrate the library by adding the followi
 
 You can use CocoaPods to integrate the library by adding the following dependency.
 
-`pod 'OpenAIService''`
+`pod 'OpenAIService'`
 
 ## Example Usage
 
diff --git a/Sources/OpenAIService/Models/Images/Generation/OpenAIGenerationImageBody.swift b/Sources/OpenAIService/Models/Images/Generation/OpenAIGenerationImageBody.swift
index a66f540..1cfe436 100644
--- a/Sources/OpenAIService/Models/Images/Generation/OpenAIGenerationImageBody.swift
+++ b/Sources/OpenAIService/Models/Images/Generation/OpenAIGenerationImageBody.swift
@@ -15,21 +15,21 @@ public struct OpenAIGenerationImageBody: Encodable {
     public let prompt: String
 
     /// The number of images to generate. Must be between 1 and 10.
-    public let imageCount: Int?
+    public let imageCount: Int
 
     /// The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024
-    public let size: OpenAIGenerationImageSize?
+    public let size: OpenAIGenerationImageSize
 
     /// The format in which the generated images are returned. Must be one of url or b64_json.
-    public let responseFormat: OpenAIGenerationImageResponseFormat?
+    public let responseFormat: OpenAIGenerationImageResponseFormat
 
     public let user: String?
 
     public init(
         prompt: String,
-        imageCount: Int? = 1,
-        size: OpenAIGenerationImageSize? = .large,
-        responseFormat: OpenAIGenerationImageResponseFormat? = .url,
+        imageCount: Int = 1,
+        size: OpenAIGenerationImageSize = .large,
+        responseFormat: OpenAIGenerationImageResponseFormat = .url,
         user: String? = nil
     ) {
         self.prompt = prompt
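
Note (editor's annotation, not part of the patch): with the optionals removed above, a
caller still only has to pass a prompt; the remaining fields now carry non-optional
defaults. A minimal usage sketch, assuming only the public API visible in this diff:

    // Uses the defaults: imageCount = 1, size = .large, responseFormat = .url
    let body = OpenAIGenerationImageBody(prompt: "A watercolor fox")

    // Equivalent call with every default spelled out
    let explicit = OpenAIGenerationImageBody(
        prompt: "A watercolor fox",
        imageCount: 1,
        size: .large,
        responseFormat: .url,
        user: nil
    )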
diff --git a/Sources/OpenAIService/Networking/MultipartFormDataRequest.swift b/Sources/OpenAIService/Networking/MultipartFormDataRequest.swift
new file mode 100644
index 0000000..c4b5713
--- /dev/null
+++ b/Sources/OpenAIService/Networking/MultipartFormDataRequest.swift
@@ -0,0 +1,82 @@
+//
+//  MultipartFormDataRequest.swift
+//  OpenAIDemo
+//
+//  Created by Gusakovsky, Sergey on 5.03.23.
+//
+
+import Foundation
+
+struct MultipartFormDataRequest {
+    private let boundary: String = UUID().uuidString
+    private let body = NSMutableData()
+    let endpoint: OpenAIEndpoint
+
+    init(endpoint: OpenAIEndpoint) {
+        self.endpoint = endpoint
+    }
+
+    func asURLRequest() -> URLRequest? {
+        guard let baseUrl = URL(string: endpoint.baseURL()) else {
+            return nil
+        }
+
+        guard var urlComponents = URLComponents(url: baseUrl, resolvingAgainstBaseURL: true) else {
+            return nil
+        }
+
+        urlComponents.path = endpoint.path
+
+        guard let url = urlComponents.url else {
+            return nil
+        }
+
+        var request = URLRequest(url: url)
+
+        request.httpMethod = endpoint.method.rawValue
+        request.setValue("multipart/form-data; boundary=\(boundary)", forHTTPHeaderField: "Content-Type")
+
+        self.body.append("--\(boundary)--")
+        request.httpBody = self.body as Data
+
+        return request
+    }
+
+    func addTextField(named name: String, value: String) {
+        self.body.append(textFormField(named: name, value: value))
+    }
+
+    func addDataField(named name: String, formData: FormData) {
+        self.body.append(dataFormField(named: name, formData: formData))
+    }
+
+    private func textFormField(named name: String, value: String) -> String {
+        var fieldString = "--\(boundary)\r\n"
+        fieldString += "Content-Disposition: form-data; name=\"\(name)\"\r\n"
+        fieldString += "\r\n"
+        fieldString += "\(value)\r\n"
+
+        return fieldString
+    }
+
+    private func dataFormField(named name: String, formData: FormData) -> Data {
+        let fieldData = NSMutableData()
+
+        fieldData.append("--\(boundary)\r\n")
+        fieldData.append("Content-Disposition: form-data; name=\"\(name)\"; filename=\"\(formData.fileName)\"\r\n")
+        fieldData.append("Content-Type: \(formData.mimeType)\r\n")
+        fieldData.append("\r\n")
+        fieldData.append(formData.data)
+        fieldData.append("\r\n")
+
+        return fieldData as Data
+    }
+}
+
+extension NSMutableData {
+    func append(_ string: String) {
+        if let data = string.data(using: .utf8) {
+            self.append(data)
+        }
+    }
+}
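
Note (editor's annotation, not part of the patch): the builder above emits one part per
field, so a request with one text field and one file field serializes roughly as follows,
where BOUNDARY stands for the generated UUID string and each line ends in CRLF:

    --BOUNDARY
    Content-Disposition: form-data; name="prompt"

    Add a red hat
    --BOUNDARY
    Content-Disposition: form-data; name="image"; filename="image.png"
    Content-Type: image/png

    <binary PNG bytes>
    --BOUNDARY--

Also worth noting: asURLRequest() appends the closing "--BOUNDARY--" to the shared body
each time it runs, so it is meant to be called exactly once per
MultipartFormDataRequest instance.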
diff --git a/Sources/OpenAIService/Networking/OpenAIApiClient.swift b/Sources/OpenAIService/Networking/OpenAIApiClient.swift
index 747022a..d407978 100644
--- a/Sources/OpenAIService/Networking/OpenAIApiClient.swift
+++ b/Sources/OpenAIService/Networking/OpenAIApiClient.swift
@@ -80,35 +80,27 @@ class OpenAIApiClient {
         return request
     }
 
-    func prepareMultipartFormDataRequest<BodyType: Encodable>(
+    func prepareMultipartFormDataRequest(
         _ endpoint: OpenAIEndpoint,
-        body: BodyType,
+        body: [String: Any],
         config: OpenAIConfiguration
     ) -> URLRequest? {
-        guard let baseUrl = URL(string: endpoint.baseURL()) else {
-            return nil
-        }
 
-        guard var urlComponents = URLComponents(url: baseUrl, resolvingAgainstBaseURL: true) else {
-            return nil
-        }
+        let multipartRequest = MultipartFormDataRequest(endpoint: endpoint)
 
-        urlComponents.path = endpoint.path
+        for (key, value) in body {
+            if let dataValue = value as? FormData {
+                multipartRequest.addDataField(named: key, formData: dataValue)
+            } else {
+                multipartRequest.addTextField(named: key, value: "\(value)")
+            }
+        }
 
-        guard let url = urlComponents.url else {
+        guard var request = multipartRequest.asURLRequest() else {
             return nil
         }
 
-        var request = URLRequest(url: url)
-
-        request.httpMethod = endpoint.method.rawValue
         request.setValue("Bearer \(config.apiKey)", forHTTPHeaderField: "Authorization")
-        request.setValue("application/json", forHTTPHeaderField: "Content-Type")
-
-        let encoder = JSONEncoder()
-        if let encoded = try? encoder.encode(body) {
-            request.httpBody = encoded
-        }
 
         return request
     }
diff --git a/Sources/OpenAIService/Networking/OpenAIEndpoint.swift b/Sources/OpenAIService/Networking/OpenAIEndpoint.swift
index cfd040f..36ba014 100644
--- a/Sources/OpenAIService/Networking/OpenAIEndpoint.swift
+++ b/Sources/OpenAIService/Networking/OpenAIEndpoint.swift
@@ -25,7 +25,7 @@ enum OpenAIEndpoint {
         case .imagesGenerations:
             return "/v1/images/generations"
        case .imageEdits:
-            return "/v1/images/generations"
+            return "/v1/images/edits"
         }
     }
 
diff --git a/Sources/OpenAIService/Networking/OpenAIImageEditsBody.swift b/Sources/OpenAIService/Networking/OpenAIImageEditsBody.swift
new file mode 100644
index 0000000..113791a
--- /dev/null
+++ b/Sources/OpenAIService/Networking/OpenAIImageEditsBody.swift
@@ -0,0 +1,77 @@
+//
+//  OpenAIImageEditsBody.swift
+//  OpenAIDemo
+//
+//  Created by Gusakovsky, Sergey on 5.03.23.
+//
+
+import Foundation
+#if os(iOS)
+import UIKit
+#endif
+
+public struct OpenAIImageEditsBody {
+    public let image: FormData
+    public let mask: FormData
+    public let prompt: String
+    public let numberOfImages: Int
+    public let size: OpenAIGenerationImageSize
+    public let responseFormat: OpenAIGenerationImageResponseFormat
+    public let user: String?
+
+    public init(
+        image: Data,
+        mask: Data,
+        prompt: String,
+        numberOfImages: Int = 1,
+        size: OpenAIGenerationImageSize = .large,
+        responseFormat: OpenAIGenerationImageResponseFormat = .url,
+        user: String? = nil
+    ) {
+        self.image = FormData(data: image, mimeType: "image/png", fileName: "image.png")
+        self.mask = FormData(data: mask, mimeType: "image/png", fileName: "image.png")
+        self.prompt = prompt
+        self.numberOfImages = numberOfImages
+        self.size = size
+        self.responseFormat = responseFormat
+        self.user = user
+    }
+
+    #if os(iOS)
+    public init?(
+        image: UIImage,
+        mask: UIImage,
+        prompt: String,
+        numberOfImages: Int = 1,
+        size: OpenAIGenerationImageSize = .large,
+        responseFormat: OpenAIGenerationImageResponseFormat = .url,
+        user: String? = nil
+    ) {
+        guard let imageData = image.pngData() else { return nil }
+        guard let maskData = mask.pngData() else { return nil }
+        self.image = FormData(data: imageData, mimeType: "image/png", fileName: "image.png")
+        self.mask = FormData(data: maskData, mimeType: "image/png", fileName: "image.png")
+        self.prompt = prompt
+        self.numberOfImages = numberOfImages
+        self.size = size
+        self.responseFormat = responseFormat
+        self.user = user
+    }
+    #endif
+
+    public var body: [String: Any] {
+        var result: [String: Any] = [
+            "image": self.image,
+            "mask": self.mask,
+            "prompt": self.prompt,
+            "n": self.numberOfImages,
+            "size": self.size.rawValue,
+            "response_format": self.responseFormat.rawValue
+        ]
+        if let user = self.user {
+            result["user"] = user
+        }
+
+        return result
+    }
+}
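
Note (editor's annotation, not part of the patch): a usage sketch for the new body type;
`inputImage` and `maskImage` are hypothetical UIImage values, and the mask is expected to
be a PNG whose fully transparent areas mark the region to repaint:

    // Failable initializer: returns nil if either pngData() call fails.
    guard let editsBody = OpenAIImageEditsBody(
        image: inputImage,   // hypothetical UIImage
        mask: maskImage,     // hypothetical UIImage
        prompt: "Add a red hat",
        numberOfImages: 2
    ) else { return }

    // editsBody.body flattens to the multipart fields: "image" and "mask" as
    // FormData values, plus "prompt", "n", "size", and "response_format" as text.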
diff --git a/Sources/OpenAIService/OpenAIService.swift b/Sources/OpenAIService/OpenAIService.swift
index df53237..92dc473 100644
--- a/Sources/OpenAIService/OpenAIService.swift
+++ b/Sources/OpenAIService/OpenAIService.swift
@@ -71,9 +71,7 @@ public final class OpenAIService {
 
     /// Send a Edit request to the OpenAI API
     /// - Parameters:
-    ///   - input: The input text to use as a starting point for the edit.
-    ///   - model: The AI Model to Use. Set to `OpenAIEditsModelType.feature(.davinci)` by default which is the most capable model
-    ///   - instruction: The instruction that tells the model how to edit the prompt.
+    ///   - body: Body of the edits request
     ///   - completionHandler: Returns an OpenAIEditsResponse Data Model
     public func sendEdits(
         with body: OpenAIEditsBody,
@@ -96,9 +94,7 @@ public final class OpenAIService {
 
     /// Send a Image generation request to the OpenAI API
     /// - Parameters:
-    ///   - prompt: A text description of the desired image(s). The maximum length is 1000 characters.
-    ///   - imageSize: Size of expected image to Use. Set to `OpenAIGenerationImageSize.large` by default.
-    ///   - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+    ///   - body: Body of the image generation request
     ///   - completionHandler: Returns an OpenAIGenerationImageResponse Data Model
     public func sendImageGeneration(
         with body: OpenAIGenerationImageBody,
@@ -119,14 +115,33 @@ public final class OpenAIService {
             completionHandler: completionHandler
         )
     }
 
+    /// Send an Image edits request to the OpenAI API
+    /// - Parameters:
+    ///   - body: Body of the image edits request
+    ///   - completionHandler: Returns an OpenAIGenerationImageResponse Data Model
+    public func sendImageEdits(
+        with body: OpenAIImageEditsBody,
+        networkQueue: DispatchQueue = .global(qos: .background),
+        responseQueue: DispatchQueue = .main,
+        completionHandler: @escaping (Result<OpenAIGenerationImageResponse, OpenAIAPIError>) -> Void
+    ) {
+        let endpoint = OpenAIEndpoint.imageEdits
+        guard let request = apiClient.prepareMultipartFormDataRequest(endpoint, body: body.body, config: config) else {
+            completionHandler(.failure(.genericError(error: RequestError())))
+            return
+        }
+
+        apiClient.makeRequest(
+            request: request,
+            networkQueue: networkQueue,
+            responseQueue: responseQueue,
+            completionHandler: completionHandler
+        )
+    }
 
     /// Send a Completion to the OpenAI API
     /// - Parameters:
-    ///   - prompt: The Text Prompt
-    ///   - model: The AI Model to Use. Set to `OpenAICompletionModelType.gpt3(.davinci)` by default which is the most capable model
-    ///   - maxTokens: The limit character for the returned response, defaults to 16 as per the API
-    ///   - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
-    ///   - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+    ///   - body: Body of the completion request
     /// - Returns: Returns an OpenAICompletionResponse Data Model
     @available(swift 5.5)
     @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
@@ -170,9 +185,7 @@ public final class OpenAIService {
 
     /// Send a Edit request to the OpenAI API
     /// - Parameters:
-    ///   - input: The input text to use as a starting point for the edit.
-    ///   - model: The AI Model to Use. Set to `OpenAIEditsModelType.feature(.davinci)` by default which is the most capable model
-    ///   - instruction: The instruction that tells the model how to edit the prompt.
+    ///   - body: Body of the edits request
     /// - Returns: Returns an OpenAIEditsResponse Data Model
     @available(swift 5.5)
     @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
     public func sendEdits(
         with body: OpenAIEditsBody,
         networkQueue: DispatchQueue = .global(qos: .background),
         responseQueue: DispatchQueue = .main
     ) async throws -> OpenAIEditsResponse {
         return try await withCheckedThrowingContinuation { continuation in
             sendEdits(
                 with: body,
                 networkQueue: networkQueue,
                 responseQueue: responseQueue
             ) { result in
                 continuation.resume(with: result)
             }
         }
     }
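
Note (editor's annotation, not part of the patch): a sketch of the new
completion-handler API added above, assuming `service` is a configured OpenAIService
and `editsBody` is the OpenAIImageEditsBody from the earlier annotation:

    service.sendImageEdits(with: editsBody) { result in
        switch result {
        case .success(let response):
            print(response)   // OpenAIGenerationImageResponse
        case .failure(let error):
            print(error)
        }
    }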
@@ -194,9 +207,7 @@ public final class OpenAIService {
 
     /// Send a Image generation request to the OpenAI API
     /// - Parameters:
-    ///   - prompt: A text description of the desired image(s). The maximum length is 1000 characters.
-    ///   - imageSize: Size of expected image to Use. Set to `OpenAIGenerationImageSize.large` by default.
-    ///   - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+    ///   - body: Body of the image generation request
     /// - Returns: Returns an OpenAIGenerationImageResponse Data Model
     @available(swift 5.5)
     @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
     public func sendImageGeneration(
         with body: OpenAIGenerationImageBody,
         networkQueue: DispatchQueue = .global(qos: .background),
         responseQueue: DispatchQueue = .main
     ) async throws -> OpenAIGenerationImageResponse {
         return try await withCheckedThrowingContinuation { continuation in
             sendImageGeneration(
                 with: body,
                 networkQueue: networkQueue,
                 responseQueue: responseQueue
             ) { result in
                 continuation.resume(with: result)
@@ -215,4 +226,26 @@ public final class OpenAIService {
             }
         }
     }
+
+    /// Send an Image edits request to the OpenAI API
+    /// - Parameters:
+    ///   - body: Body of the image edits request
+    /// - Returns: Returns an OpenAIGenerationImageResponse Data Model
+    @available(swift 5.5)
+    @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
+    public func sendImageEdits(
+        with body: OpenAIImageEditsBody,
+        networkQueue: DispatchQueue = .global(qos: .background),
+        responseQueue: DispatchQueue = .main
+    ) async throws -> OpenAIGenerationImageResponse {
+        return try await withCheckedThrowingContinuation { continuation in
+            sendImageEdits(
+                with: body,
+                networkQueue: networkQueue,
+                responseQueue: responseQueue
+            ) { result in
+                continuation.resume(with: result)
+            }
+        }
+    }
 }
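
Note (editor's annotation, not part of the patch): the async overload above only wraps
the completion-handler variant in withCheckedThrowingContinuation, so an equivalent call
site, assuming the same `service` and `editsBody`, is simply:

    Task {
        do {
            let response = try await service.sendImageEdits(with: editsBody)
            print(response)
        } catch {
            print(error)
        }
    }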