initial commit

Sam
2023-03-26 17:31:42 -04:00
commit e3b5b090fb
51 changed files with 4222 additions and 0 deletions

OpenAISwift/LICENSE

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2022 Adam Rush
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -0,0 +1,85 @@
//
// File.swift
//
//
// Created by Bogdan Farca on 02.03.2023.
//
import Foundation
/// An enumeration of possible roles in a chat conversation.
public enum ChatRole: String, Codable {
/// The role for the system that manages the chat interface.
case system
/// The role for the human user who initiates the chat.
case user
/// The role for the artificial assistant who responds to the user.
case assistant
}
/// A structure that represents a single message in a chat conversation.
public struct ChatMessage: Codable {
/// The role of the sender of the message.
public let role: ChatRole
/// The content of the message.
public let content: String
/// Creates a new chat message with a given role and content.
/// - Parameters:
/// - role: The role of the sender of the message.
/// - content: The content of the message.
public init(role: ChatRole, content: String) {
self.role = role
self.content = content
}
}
/// A structure that represents a chat conversation.
public struct ChatConversation: Encodable {
/// A unique identifier for the end user who initiates the chat. Optional; can help OpenAI monitor and detect abuse.
let user: String?
/// The messages to generate chat completions for. Ordered chronologically from oldest to newest.
let messages: [ChatMessage]
/// The ID of the model used by the assistant to generate responses. See OpenAI documentation for details on which models work with the Chat API.
let model: String
/// A parameter that controls how random or deterministic the responses are, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Optional, defaults to 1.
let temperature: Double?
/// A parameter that controls how diverse or narrow-minded the responses are, between 0 and 1. Higher values like 0.9 mean only the tokens comprising the top 90% probability mass are considered, while lower values like 0.1 mean only the top 10%. Optional, defaults to 1.
let topProbabilityMass: Double?
/// How many chat completion choices to generate for each input message. Optional, defaults to 1.
let choices: Int?
/// An array of up to 4 sequences where the API will stop generating further tokens. Optional.
let stop: [String]?
/// The maximum number of tokens to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length. Optional.
let maxTokens: Int?
/// A parameter that penalizes new tokens based on whether they appear in the text so far, between -2 and 2. Positive values increase the model's likelihood to talk about new topics. Optional, defaults to 0.
let presencePenalty: Double?
/// A parameter that penalizes new tokens based on their existing frequency in the text so far, between -2 and 2. Positive values decrease the model's likelihood to repeat the same line verbatim. Optional, defaults to 0.
let frequencyPenalty: Double?
/// Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID in the OpenAI Tokenizer, not English words) to an associated bias value from -100 to 100. Values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
let logitBias: [Int: Double]?
enum CodingKeys: String, CodingKey {
case user
case messages
case model
case temperature
case topProbabilityMass = "top_p"
case choices = "n"
case stop
case maxTokens = "max_tokens"
case presencePenalty = "presence_penalty"
case frequencyPenalty = "frequency_penalty"
case logitBias = "logit_bias"
}
}
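
As a quick sanity check of the CodingKeys mapping above, here is a minimal sketch (module-internal, since ChatConversation's memberwise initializer is not public; the prompt values are hypothetical) that encodes a conversation and prints the JSON the API would receive:

import Foundation

let conversation = ChatConversation(user: nil,
                                    messages: [ChatMessage(role: .user, content: "Hello")],
                                    model: "gpt-3.5-turbo",
                                    temperature: 0.7,
                                    topProbabilityMass: nil,
                                    choices: 1,
                                    stop: nil,
                                    maxTokens: 64,
                                    presencePenalty: nil,
                                    frequencyPenalty: nil,
                                    logitBias: nil)

let encoder = JSONEncoder()
encoder.outputFormatting = .prettyPrinted
if let data = try? encoder.encode(conversation),
   let json = String(data: data, encoding: .utf8) {
    // Keys come out as "top_p", "max_tokens", "presence_penalty", etc.;
    // nil optionals are omitted from the payload entirely.
    print(json)
}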


@@ -0,0 +1,19 @@
//
// Created by Adam Rush - OpenAISwift
//
import Foundation
struct Command: Encodable {
let prompt: String
let model: String
let maxTokens: Int
let temperature: Double
enum CodingKeys: String, CodingKey {
case prompt
case model
case maxTokens = "max_tokens"
case temperature
}
}


@@ -0,0 +1,21 @@
//
// ImageGeneration.swift
//
//
// Created by Arjun Dureja on 2023-03-11.
//
import Foundation
struct ImageGeneration: Encodable {
let prompt: String
let n: Int
let size: ImageSize
let user: String?
}
public enum ImageSize: String, Codable {
case size1024 = "1024x1024"
case size512 = "512x512"
case size256 = "256x256"
}
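
A minimal, module-internal sketch (the prompt is hypothetical) of the payload this model produces; the ImageSize raw values are the exact strings the images endpoint accepts:

import Foundation

let body = ImageGeneration(prompt: "A watercolor fox", n: 2, size: .size512, user: nil)
if let data = try? JSONEncoder().encode(body),
   let json = String(data: data, encoding: .utf8) {
    // Roughly {"prompt":"A watercolor fox","n":2,"size":"512x512"};
    // key order may vary, and the nil user field is omitted.
    print(json)
}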


@@ -0,0 +1,11 @@
//
// Created by Adam Rush - OpenAISwift
//
import Foundation
struct Instruction: Encodable {
let instruction: String
let model: String
let input: String
}


@@ -0,0 +1,39 @@
//
// Created by Adam Rush - OpenAISwift
//
import Foundation
public protocol Payload: Codable { }
public struct OpenAI<T: Payload>: Codable {
public let object: String?
public let model: String?
public let choices: [T]?
public let usage: UsageResult?
public let data: [T]?
}
public struct TextResult: Payload {
public let text: String
}
public struct MessageResult: Payload {
public let message: ChatMessage
}
public struct UsageResult: Codable {
public let promptTokens: Int
public let completionTokens: Int
public let totalTokens: Int
enum CodingKeys: String, CodingKey {
case promptTokens = "prompt_tokens"
case completionTokens = "completion_tokens"
case totalTokens = "total_tokens"
}
}
public struct UrlResult: Payload {
public let url: String
}
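
Because every endpoint reuses the generic OpenAI&lt;T&gt; wrapper, decoding differs only in the Payload type. A minimal sketch, with a hand-written stand-in for a real chat reply:

import Foundation

let sample = """
{
  "object": "chat.completion",
  "model": "gpt-3.5-turbo",
  "choices": [{"message": {"role": "assistant", "content": "Hi there!"}}],
  "usage": {"prompt_tokens": 9, "completion_tokens": 3, "total_tokens": 12}
}
""".data(using: .utf8)!

// The absent "data" field simply decodes to nil on the optional property.
let response = try? JSONDecoder().decode(OpenAI<MessageResult>.self, from: sample)
print(response?.choices?.first?.message.content ?? "decode failed") // Hi there!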


@@ -0,0 +1,120 @@
//
// OpenAIModelType.swift
//
//
// Created by Yash Shah on 06/12/2022.
//
import Foundation
/// The type of model used to generate the output
public enum OpenAIModelType {
/// ``GPT3`` Family of Models
case gpt3(GPT3)
/// ``Codex`` Family of Models
case codex(Codex)
/// ``Feature`` Family of Models
case feature(Feature)
/// ``Chat`` Family of Models
case chat(Chat)
/// Other Custom Models
case other(String)
public var modelName: String {
switch self {
case .gpt3(let model): return model.rawValue
case .codex(let model): return model.rawValue
case .feature(let model): return model.rawValue
case .chat(let model): return model.rawValue
case .other(let modelName): return modelName
}
}
/// A set of models that can understand and generate natural language
///
/// [GPT-3 Models OpenAI API Docs](https://beta.openai.com/docs/models/gpt-3)
public enum GPT3: String {
/// Most capable GPT-3 model. Can do any task the other models can do, often with higher quality, longer output and better instruction-following. Also supports inserting completions within text.
///
/// > Model Name: text-davinci-003
case davinci = "text-davinci-003"
/// Very capable, but faster and lower cost than GPT3 ``davinci``.
///
/// > Model Name: text-curie-001
case curie = "text-curie-001"
/// Capable of straightforward tasks, very fast, and lower cost.
///
/// > Model Name: text-babbage-001
case babbage = "text-babbage-001"
/// Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost.
///
/// > Model Name: text-ada-001
case ada = "text-ada-001"
}
/// A set of models that can understand and generate code, including translating natural language to code
///
/// [Codex Models OpenAI API Docs](https://beta.openai.com/docs/models/codex)
///
/// > Limited Beta
public enum Codex: String {
/// Most capable Codex model. Particularly good at translating natural language to code. In addition to completing code, also supports inserting completions within code.
///
/// > Model Name: code-davinci-002
case davinci = "code-davinci-002"
/// Almost as capable as ``davinci`` Codex, but slightly faster. This speed advantage may make it preferable for real-time applications.
///
/// > Model Name: code-cushman-001
case cushman = "code-cushman-001"
}
/// A set of models that are feature specific.
///
/// For example using the Edits endpoint requires a specific data model
///
/// You can read the [API Docs](https://beta.openai.com/docs/guides/completion/editing-text)
public enum Feature: String {
/// > Model Name: text-davinci-edit-001
case davinci = "text-davinci-edit-001"
}
/// A set of models for the new chat completions
/// You can read the [API Docs](https://platform.openai.com/docs/api-reference/chat/create)
public enum Chat: String {
/// Most capable GPT-3.5 model and optimized for chat at 1/10th the cost of text-davinci-003. Will be updated with our latest model iteration.
/// > Model Name: gpt-3.5-turbo
case chatgpt = "gpt-3.5-turbo"
/// Snapshot of gpt-3.5-turbo from March 1st 2023. Unlike gpt-3.5-turbo, this model will not receive updates, and will only be supported for a three month period ending on June 1st 2023.
/// > Model Name: gpt-3.5-turbo-0301
case chatgpt0301 = "gpt-3.5-turbo-0301"
/// More capable than any GPT-3.5 model, able to do more complex tasks, and optimized for chat. Will be updated with our latest model iteration.
/// > Model Name: gpt-4
case gpt4 = "gpt-4"
/// Snapshot of gpt-4 from March 14th 2023. Unlike gpt-4, this model will not receive updates, and will only be supported for a three month period ending on June 14th 2023.
/// > Model Name: gpt-4-0314
case gpt4_0314 = "gpt-4-0314"
/// Same capabilities as the base gpt-4 mode but with 4x the context length. Will be updated with our latest model iteration.
/// > Model Name: gpt-4-32k
case gpt4_32k = "gpt-4-32k"
/// Snapshot of gpt-4-32k from March 14th 2023. Unlike gpt-4-32k, this model will not receive updates, and will only be supported for a three month period ending on June 14th 2023.
/// > Model Name: gpt-4-32k-0314
case gpt4_32k_0314 = "gpt-4-32k-0314"
}
}
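
A quick illustration of how the nested cases resolve to the raw model strings the API expects, including the custom escape hatch ("my-fine-tune" is a hypothetical model name):

let models: [OpenAIModelType] = [.gpt3(.davinci), .chat(.chatgpt), .other("my-fine-tune")]
for model in models {
    print(model.modelName)
}
// text-davinci-003
// gpt-3.5-turbo
// my-fine-tune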


@@ -0,0 +1,41 @@
//
// Created by Adam Rush - OpenAISwift
//
import Foundation
enum Endpoint {
case completions
case edits
case chat
case images
}
extension Endpoint {
var path: String {
switch self {
case .completions:
return "/v1/completions"
case .edits:
return "/v1/edits"
case .chat:
return "/v1/chat/completions"
case .images:
return "/v1/images/generations"
}
}
var method: String {
switch self {
case .completions, .edits, .chat, .images:
return "POST"
}
}
func baseURL() -> String {
switch self {
case .completions, .edits, .chat, .images:
return "https://api.openai.com"
}
}
}
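
A minimal, module-internal sketch (Endpoint is not public) of how these pieces combine into a request URL, mirroring what prepareRequest does in OpenAISwift.swift below:

import Foundation

let endpoint = Endpoint.chat
var components = URLComponents(string: endpoint.baseURL())! // known-valid URL string
components.path = endpoint.path
print(endpoint.method, components.url!.absoluteString)
// POST https://api.openai.com/v1/chat/completions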


@@ -0,0 +1,298 @@
import Foundation
#if canImport(FoundationNetworking) && canImport(FoundationXML)
import FoundationNetworking
import FoundationXML
#endif
public enum OpenAIError: Error {
case genericError(error: Error)
case decodingError(error: Error)
}
public class OpenAISwift {
fileprivate(set) var token: String?
fileprivate let config: Config
/// Configuration object for the client
public struct Config {
/// Initialiser
/// - Parameter session: the session to use for network requests.
public init(session: URLSession = URLSession.shared) {
self.session = session
}
let session: URLSession
}
public init(authToken: String, config: Config = Config()) {
self.token = authToken
self.config = config
}
}
extension OpenAISwift {
/// Send a Completion to the OpenAI API
/// - Parameters:
/// - prompt: The Text Prompt
/// - model: The AI Model to Use. Set to `OpenAIModelType.gpt3(.davinci)` by default which is the most capable model
/// - maxTokens: The maximum number of tokens in the returned response, defaults to 16 as per the API
/// - completionHandler: Returns an OpenAI Data Model
public func sendCompletion(with prompt: String, model: OpenAIModelType = .gpt3(.davinci), maxTokens: Int = 16, temperature: Double = 1, completionHandler: @escaping (Result<OpenAI<TextResult>, OpenAIError>) -> Void) {
let endpoint = Endpoint.completions
let body = Command(prompt: prompt, model: model.modelName, maxTokens: maxTokens, temperature: temperature)
let request = prepareRequest(endpoint, body: body)
makeRequest(request: request) { result in
switch result {
case .success(let success):
do {
let res = try JSONDecoder().decode(OpenAI<TextResult>.self, from: success)
completionHandler(.success(res))
} catch {
completionHandler(.failure(.decodingError(error: error)))
}
case .failure(let failure):
completionHandler(.failure(.genericError(error: failure)))
}
}
}
/// Send an Edit request to the OpenAI API
/// - Parameters:
/// - instruction: The Instruction For Example: "Fix the spelling mistake"
/// - model: The Model to use, the only supported model is `text-davinci-edit-001`
/// - input: The Input For Example "My nam is Adam"
/// - completionHandler: Returns an OpenAI Data Model
public func sendEdits(with instruction: String, model: OpenAIModelType = .feature(.davinci), input: String = "", completionHandler: @escaping (Result<OpenAI<TextResult>, OpenAIError>) -> Void) {
let endpoint = Endpoint.edits
let body = Instruction(instruction: instruction, model: model.modelName, input: input)
let request = prepareRequest(endpoint, body: body)
makeRequest(request: request) { result in
switch result {
case .success(let success):
do {
let res = try JSONDecoder().decode(OpenAI<TextResult>.self, from: success)
completionHandler(.success(res))
} catch {
completionHandler(.failure(.decodingError(error: error)))
}
case .failure(let failure):
completionHandler(.failure(.genericError(error: failure)))
}
}
}
/// Send a Chat request to the OpenAI API
/// - Parameters:
/// - messages: Array of `ChatMessages`
/// - model: The Model to use; defaults to `gpt-3.5-turbo`, and any model in the Chat family works with this endpoint
/// - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
/// - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or topProbabilityMass but not both.
/// - topProbabilityMass: The OpenAI API equivalent of the "top_p" parameter. An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.
/// - choices: How many chat completion choices to generate for each input message.
/// - stop: Up to 4 sequences where the API will stop generating further tokens.
/// - maxTokens: The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
/// - presencePenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
/// - frequencyPenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
/// - logitBias: Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID in the OpenAI Tokenizer, not English words) to an associated bias value from -100 to 100. Values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
/// - completionHandler: Returns an OpenAI Data Model
public func sendChat(with messages: [ChatMessage],
model: OpenAIModelType = .chat(.chatgpt),
user: String? = nil,
temperature: Double? = 1,
topProbabilityMass: Double? = 0,
choices: Int? = 1,
stop: [String]? = nil,
maxTokens: Int? = nil,
presencePenalty: Double? = 0,
frequencyPenalty: Double? = 0,
logitBias: [Int: Double]? = nil,
completionHandler: @escaping (Result<OpenAI<MessageResult>, OpenAIError>) -> Void) {
let endpoint = Endpoint.chat
let body = ChatConversation(user: user,
messages: messages,
model: model.modelName,
temperature: temperature,
topProbabilityMass: topProbabilityMass,
choices: choices,
stop: stop,
maxTokens: maxTokens,
presencePenalty: presencePenalty,
frequencyPenalty: frequencyPenalty,
logitBias: logitBias)
let request = prepareRequest(endpoint, body: body)
makeRequest(request: request) { result in
switch result {
case .success(let success):
do {
let res = try JSONDecoder().decode(OpenAI<MessageResult>.self, from: success)
completionHandler(.success(res))
} catch {
completionHandler(.failure(.decodingError(error: error)))
}
case .failure(let failure):
completionHandler(.failure(.genericError(error: failure)))
}
}
}
/// Send an Image generation request to the OpenAI API
/// - Parameters:
/// - prompt: The Text Prompt
/// - numImages: The number of images to generate, defaults to 1
/// - size: The size of the image, defaults to 1024x1024. There are two other options: 512x512 and 256x256
/// - user: An optional unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
/// - completionHandler: Returns an OpenAI Data Model
public func sendImages(with prompt: String, numImages: Int = 1, size: ImageSize = .size1024, user: String? = nil, completionHandler: @escaping (Result<OpenAI<UrlResult>, OpenAIError>) -> Void) {
let endpoint = Endpoint.images
let body = ImageGeneration(prompt: prompt, n: numImages, size: size, user: user)
let request = prepareRequest(endpoint, body: body)
makeRequest(request: request) { result in
switch result {
case .success(let success):
do {
let res = try JSONDecoder().decode(OpenAI<UrlResult>.self, from: success)
completionHandler(.success(res))
} catch {
completionHandler(.failure(.decodingError(error: error)))
}
case .failure(let failure):
completionHandler(.failure(.genericError(error: failure)))
}
}
}
private func makeRequest(request: URLRequest, completionHandler: @escaping (Result<Data, Error>) -> Void) {
let session = config.session
let task = session.dataTask(with: request) { (data, response, error) in
if let error = error {
completionHandler(.failure(error))
} else if let data = data {
completionHandler(.success(data))
}
}
task.resume()
}
private func prepareRequest<BodyType: Encodable>(_ endpoint: Endpoint, body: BodyType) -> URLRequest {
var urlComponents = URLComponents(url: URL(string: endpoint.baseURL())!, resolvingAgainstBaseURL: true)
urlComponents?.path = endpoint.path
var request = URLRequest(url: urlComponents!.url!)
request.httpMethod = endpoint.method
if let token = self.token {
request.setValue("Bearer \(token)", forHTTPHeaderField: "Authorization")
}
request.setValue("application/json", forHTTPHeaderField: "content-type")
let encoder = JSONEncoder()
if let encoded = try? encoder.encode(body) {
request.httpBody = encoded
}
return request
}
}
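
A hedged sketch of the callback-based API above ("YOUR_API_KEY" is a placeholder and the prompt is hypothetical):

import OpenAISwift

let client = OpenAISwift(authToken: "YOUR_API_KEY")
client.sendCompletion(with: "Write a haiku about Swift", maxTokens: 32) { result in
    switch result {
    case .success(let response):
        // choices is optional, so unwrap defensively.
        print(response.choices?.first?.text ?? "no text returned")
    case .failure(let error):
        print("OpenAI error:", error)
    }
}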
extension OpenAISwift {
/// Send a Completion to the OpenAI API
/// - Parameters:
/// - prompt: The Text Prompt
/// - model: The AI Model to Use. Set to `OpenAIModelType.gpt3(.davinci)` by default which is the most capable model
/// - maxTokens: The maximum number of tokens in the returned response, defaults to 16 as per the API
/// - temperature: Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Defaults to 1
/// - Returns: Returns an OpenAI Data Model
@available(swift 5.5)
@available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
public func sendCompletion(with prompt: String, model: OpenAIModelType = .gpt3(.davinci), maxTokens: Int = 16, temperature: Double = 1) async throws -> OpenAI<TextResult> {
return try await withCheckedThrowingContinuation { continuation in
sendCompletion(with: prompt, model: model, maxTokens: maxTokens, temperature: temperature) { result in
continuation.resume(with: result)
}
}
}
/// Send an Edit request to the OpenAI API
/// - Parameters:
/// - instruction: The Instruction For Example: "Fix the spelling mistake"
/// - model: The Model to use, the only supported model is `text-davinci-edit-001`
/// - input: The Input For Example "My nam is Adam"
/// - Returns: Returns an OpenAI Data Model
@available(swift 5.5)
@available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
public func sendEdits(with instruction: String, model: OpenAIModelType = .feature(.davinci), input: String = "") async throws -> OpenAI<TextResult> {
return try await withCheckedThrowingContinuation { continuation in
sendEdits(with: instruction, model: model, input: input) { result in
continuation.resume(with: result)
}
}
}
/// Send a Chat request to the OpenAI API
/// - Parameters:
/// - messages: Array of `ChatMessages`
/// - model: The Model to use; defaults to `gpt-3.5-turbo`, and any model in the Chat family works with this endpoint
/// - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
/// - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or topProbabilityMass but not both.
/// - topProbabilityMass: The OpenAI API equivalent of the "top_p" parameter. An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.
/// - choices: How many chat completion choices to generate for each input message.
/// - stop: Up to 4 sequences where the API will stop generating further tokens.
/// - maxTokens: The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
/// - presencePenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
/// - frequencyPenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
/// - logitBias: Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID in the OpenAI Tokenizer, not English words) to an associated bias value from -100 to 100. Values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
/// - Returns: Returns an OpenAI Data Model
@available(swift 5.5)
@available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
public func sendChat(with messages: [ChatMessage],
model: OpenAIModelType = .chat(.chatgpt),
user: String? = nil,
temperature: Double? = 1,
topProbabilityMass: Double? = 0,
choices: Int? = 1,
stop: [String]? = nil,
maxTokens: Int? = nil,
presencePenalty: Double? = 0,
frequencyPenalty: Double? = 0,
logitBias: [Int: Double]? = nil) async throws -> OpenAI<MessageResult> {
return try await withCheckedThrowingContinuation { continuation in
sendChat(with: messages,
model: model,
user: user,
temperature: temperature,
topProbabilityMass: topProbabilityMass,
choices: choices,
stop: stop,
maxTokens: maxTokens,
presencePenalty: presencePenalty,
frequencyPenalty: frequencyPenalty,
logitBias: logitBias) { result in
continuation.resume(with: result)
}
}
}
/// Send an Image generation request to the OpenAI API
/// - Parameters:
/// - prompt: The Text Prompt
/// - numImages: The number of images to generate, defaults to 1
/// - size: The size of the image, defaults to 1024x1024. There are two other options: 512x512 and 256x256
/// - user: An optional unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
/// - Returns: Returns an OpenAI Data Model
@available(swift 5.5)
@available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
public func sendImages(with prompt: String, numImages: Int = 1, size: ImageSize = .size1024, user: String? = nil) async throws -> OpenAI<UrlResult> {
return try await withCheckedThrowingContinuation { continuation in
sendImages(with: prompt, numImages: numImages, size: size, user: user) { result in
continuation.resume(with: result)
}
}
}
}
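
And the async/await flavour, under the same placeholder-key assumption, on a platform meeting the availability requirements above:

import OpenAISwift

let client = OpenAISwift(authToken: "YOUR_API_KEY")
Task {
    do {
        // Defaulted parameters (temperature, stop, penalties, ...) are omitted here.
        let result = try await client.sendChat(
            with: [ChatMessage(role: .user, content: "Say hello in one word.")],
            model: .chat(.chatgpt),
            maxTokens: 16)
        print(result.choices?.first?.message.content ?? "no choices returned")
    } catch {
        print("Request failed:", error)
    }
}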