initial commit

This commit is contained in:
Sam
2023-03-26 17:31:42 -04:00
commit e3b5b090fb
51 changed files with 4222 additions and 0 deletions

View File

@ -0,0 +1,85 @@
//
// File.swift
//
//
// Created by Bogdan Farca on 02.03.2023.
//
import Foundation
/// An enumeration of possible roles in a chat conversation.
/// The participants that can author a message in a chat conversation.
public enum ChatRole: String, Codable {
    /// The system, which primes or steers the assistant's behavior.
    case system = "system"
    /// The human end user driving the conversation.
    case user = "user"
    /// The artificial assistant that replies to the user.
    case assistant = "assistant"
}
/// A structure that represents a single message in a chat conversation.
/// A single utterance in a chat conversation: an author role paired with text.
public struct ChatMessage: Codable {
    /// Who authored this message.
    public let role: ChatRole
    /// The text body of the message.
    public let content: String

    /// Creates a message with the given author and text.
    /// - Parameters:
    ///   - role: Who is sending the message.
    ///   - content: The text of the message.
    public init(role: ChatRole, content: String) {
        self.content = content
        self.role = role
    }
}
/// A structure that represents a chat conversation.
/// A structure that represents a chat conversation request.
public struct ChatConversation: Encodable {
    /// The name or identifier of the user who initiates the chat. Optional if not provided by the user interface.
    let user: String?

    /// The messages to generate chat completions for, ordered chronologically from oldest to newest.
    let messages: [ChatMessage]

    /// The ID of the model the assistant uses to generate responses. See the OpenAI documentation for which models work with the Chat API.
    let model: String

    /// Controls how random or deterministic responses are, between 0 and 2. Higher values like 0.8 make output more random; lower values like 0.2 make it more focused. Optional, defaults to 1.
    let temperature: Double?

    /// Nucleus-sampling parameter between 0 and 1: only the tokens comprising the given top probability mass are considered (e.g. 0.1 keeps the top 10%). Optional, defaults to 1.
    let topProbabilityMass: Double?

    /// How many chat completion choices to generate for each input message. Optional, defaults to 1.
    let choices: Int?

    /// Up to 4 sequences where the API will stop generating further tokens. Optional.
    let stop: [String]?

    /// The maximum number of tokens to generate in the chat completion. Input plus generated tokens are limited by the model's context length. Optional.
    let maxTokens: Int?

    /// Penalizes new tokens based on whether they already appear in the text so far, between -2 and 2. Positive values increase the model's likelihood to talk about new topics. Optional, defaults to 0.
    let presencePenalty: Double?

    /// Penalizes new tokens based on their existing frequency in the text so far, between -2 and 2. Positive values decrease the model's likelihood to repeat the same line verbatim. Optional, defaults to 0.
    let frequencyPenalty: Double?

    /// Modifies the likelihood of specified tokens appearing in the completion. Maps token IDs (from the OpenAI tokenizer, not English words) to a bias from -100 to 100: values between -1 and 1 nudge selection, while -100 or 100 effectively ban or force the token.
    let logitBias: [Int: Double]?

    /// Maps Swift property names onto the snake_case/abbreviated field names the API expects.
    enum CodingKeys: String, CodingKey {
        case user, messages, model, temperature, stop
        case topProbabilityMass = "top_p"
        case choices = "n"
        case maxTokens = "max_tokens"
        case presencePenalty = "presence_penalty"
        case frequencyPenalty = "frequency_penalty"
        case logitBias = "logit_bias"
    }
}

View File

@ -0,0 +1,19 @@
//
// Created by Adam Rush - OpenAISwift
//
import Foundation
/// The request body for a text-completion call.
struct Command: Encodable {
    /// The text the model should complete.
    let prompt: String
    /// Identifier of the model to run.
    let model: String
    /// Upper bound on the number of tokens to generate.
    let maxTokens: Int
    /// Sampling temperature; higher values yield more random output.
    let temperature: Double

    /// Renames `maxTokens` to the API's snake_case field; other keys pass through unchanged.
    enum CodingKeys: String, CodingKey {
        case prompt, model, temperature
        case maxTokens = "max_tokens"
    }
}

View File

@ -0,0 +1,21 @@
//
// ImageGeneration.swift
//
//
// Created by Arjun Dureja on 2023-03-11.
//
import Foundation
/// The request body for an image-generation call.
struct ImageGeneration: Encodable {
    /// The text description of the desired image(s).
    let prompt: String
    /// The number of images to request (the API's `n` parameter).
    let n: Int
    /// The dimensions of the generated images.
    let size: ImageSize
    /// An optional end-user identifier, if supplied by the caller.
    let user: String?
}
/// The supported output dimensions for generated images, expressed as "WxH" strings.
public enum ImageSize: String, Codable {
    /// 256 x 256 pixels — the smallest supported size.
    case size256 = "256x256"
    /// 512 x 512 pixels.
    case size512 = "512x512"
    /// 1024 x 1024 pixels — the largest supported size.
    case size1024 = "1024x1024"
}

View File

@ -0,0 +1,11 @@
//
// Created by Adam Rush - OpenAISwift
//
import Foundation
/// The request body for an edit call: an instruction applied to an input text.
struct Instruction: Encodable {
    /// The instruction that tells the model how to modify the input.
    let instruction: String
    /// Identifier of the model to use.
    let model: String
    /// The input text the instruction operates on.
    let input: String
}

View File

@ -0,0 +1,39 @@
//
// Created by Adam Rush - OpenAISwift
//
import Foundation
/// A marker protocol adopted by every result payload type that can appear in an `OpenAI` response envelope.
public protocol Payload: Codable { }
/// The generic top-level envelope of an API response; `T` is the payload type carried in `choices` or `data`.
public struct OpenAI<T: Payload>: Codable {
    /// The object-type string returned by the API, when present.
    public let object: String?
    /// The model that produced this response, when present.
    public let model: String?
    /// The generated completion choices, when the endpoint returns them.
    public let choices: [T]?
    /// Token-usage accounting for the request, when provided.
    public let usage: UsageResult?
    /// The result items (e.g. from image endpoints), when provided.
    public let data: [T]?
}
/// A completion choice containing plain generated text.
public struct TextResult: Payload {
    /// The generated text.
    public let text: String
}
/// A chat completion choice wrapping a single generated message.
public struct MessageResult: Payload {
    /// The generated chat message.
    public let message: ChatMessage
}
/// Token accounting reported by the API for a single request.
public struct UsageResult: Codable {
    /// Tokens consumed by the prompt.
    public let promptTokens: Int
    /// Tokens generated in the completion.
    public let completionTokens: Int
    /// The total token count reported by the API.
    public let totalTokens: Int

    /// Maps the API's snake_case field names onto Swift property names.
    enum CodingKeys: String, CodingKey {
        case promptTokens = "prompt_tokens"
        case completionTokens = "completion_tokens"
        case totalTokens = "total_tokens"
    }
}
/// A result item carrying the URL of a generated asset (e.g. an image).
public struct UrlResult: Payload {
    /// The URL string pointing at the generated resource.
    public let url: String
}

View File

@ -0,0 +1,120 @@
//
// OpenAIModelType.swift
//
//
// Created by Yash Shah on 06/12/2022.
//
import Foundation
/// The type of model used to generate the output
/// The type of model used to generate the output
public enum OpenAIModelType {
    /// ``GPT3`` Family of Models
    case gpt3(GPT3)

    /// ``Codex`` Family of Models
    case codex(Codex)

    /// ``Feature`` Family of Models
    case feature(Feature)

    /// ``Chat`` Family of Models
    case chat(Chat)

    /// Other Custom Models
    case other(String)

    /// The raw model-identifier string to send to the API for this model.
    public var modelName: String {
        switch self {
        case .gpt3(let model): return model.rawValue
        case .codex(let model): return model.rawValue
        case .feature(let model): return model.rawValue
        case .chat(let model): return model.rawValue
        case .other(let modelName): return modelName
        }
    }

    /// A set of models that can understand and generate natural language
    ///
    /// [GPT-3 Models OpenAI API Docs](https://beta.openai.com/docs/models/gpt-3)
    public enum GPT3: String {
        /// Most capable GPT-3 model. Can do any task the other models can do, often with higher quality, longer output and better instruction-following. Also supports inserting completions within text.
        ///
        /// > Model Name: text-davinci-003
        case davinci = "text-davinci-003"

        /// Very capable, but faster and lower cost than GPT3 ``davinci``.
        ///
        /// > Model Name: text-curie-001
        case curie = "text-curie-001"

        /// Capable of straightforward tasks, very fast, and lower cost.
        ///
        /// > Model Name: text-babbage-001
        case babbage = "text-babbage-001"

        /// Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost.
        ///
        /// > Model Name: text-ada-001
        case ada = "text-ada-001"
    }

    /// A set of models that can understand and generate code, including translating natural language to code
    ///
    /// [Codex Models OpenAI API Docs](https://beta.openai.com/docs/models/codex)
    ///
    /// > Limited Beta
    public enum Codex: String {
        /// Most capable Codex model. Particularly good at translating natural language to code. In addition to completing code, also supports inserting completions within code.
        ///
        /// > Model Name: code-davinci-002
        case davinci = "code-davinci-002"

        /// Almost as capable as ``davinci`` Codex, but slightly faster. This speed advantage may make it preferable for real-time applications.
        ///
        /// > Model Name: code-cushman-001
        case cushman = "code-cushman-001"
    }

    /// A set of models that are feature specific.
    ///
    /// For example using the Edits endpoint requires a specific data model
    ///
    /// You can read the [API Docs](https://beta.openai.com/docs/guides/completion/editing-text)
    public enum Feature: String {
        /// > Model Name: text-davinci-edit-001
        case davinci = "text-davinci-edit-001"
    }

    /// A set of models for the new chat completions
    /// You can read the [API Docs](https://platform.openai.com/docs/api-reference/chat/create)
    public enum Chat: String {
        /// Most capable GPT-3.5 model and optimized for chat at 1/10th the cost of text-davinci-003. Will be updated with our latest model iteration.
        /// > Model Name: gpt-3.5-turbo
        case chatgpt = "gpt-3.5-turbo"

        /// Snapshot of gpt-3.5-turbo from March 1st 2023. Unlike gpt-3.5-turbo, this model will not receive updates, and will only be supported for a three month period ending on June 1st 2023.
        /// > Model Name: gpt-3.5-turbo-0301
        case chatgpt0301 = "gpt-3.5-turbo-0301"

        /// More capable than any GPT-3.5 model, able to do more complex tasks, and optimized for chat. Will be updated with our latest model iteration.
        /// > Model Name: gpt-4
        case gpt4 = "gpt-4"

        /// Snapshot of gpt-4 from March 14th 2023. Unlike gpt-4, this model will not receive updates, and will only be supported for a three month period ending on June 14th 2023.
        /// > Model Name: gpt-4-0314
        case gpt4_0314 = "gpt-4-0314"

        /// Same capabilities as the base gpt-4 model but with 4x the context length. Will be updated with our latest model iteration.
        /// > Model Name: gpt-4-32k
        case gpt4_32k = "gpt-4-32k"

        /// Snapshot of gpt-4-32k from March 14th 2023. Unlike gpt-4-32k, this model will not receive updates, and will only be supported for a three month period ending on June 14th 2023.
        /// > Model Name: gpt-4-32k-0314
        case gpt4_32k_0314 = "gpt-4-32k-0314"
    }
}