openai

package
v0.0.0-...-8a3d196
Warning: This package is not in the latest version of its module.
Published: Feb 12, 2024 License: BSD-3-Clause Imports: 11 Imported by: 3

Documentation


Constants

This section is empty.

Variables

var Err429 = errors.New("ratelimit or overload")
var PricingPer1000TokensPerModel = map[Model][]float64{
	GPT4_128k_Preview:        {0.01, 0.03},
	GPT4_128k_Vision_Preview: {0.01, 0.03},
	GPT4_8k:                  {0.03, 0.06},
	GPT4_8k_0613:             {0.03, 0.06},
	GPT4_32k:                 {0.06, 0.12},
	GPT4_32k_0613:            {0.06, 0.12},

	GPT3_5_turbo_4k:      {0.0015, 0.0020},
	GPT3_5_turbo_4k_0301: {0.0015, 0.0020},
	GPT3_5_turbo_4k_0613: {0.0015, 0.0020},

	GPT3_5_turbo_16k:      {0.0030, 0.0040},
	GPT3_5_turbo_16k_0613: {0.0030, 0.0040},

	TextDavinci3_4k:    {0.02, 0.02},
	TextDavinci2_4k:    {0.02, 0.02},
	TextDavinci_1_Edit: {0.02, 0.02},
	CodeDavinci2_8k:    {0.02, 0.02},

	Embedding_V3_1536:       {0.00002, 0.00002},
	Embedding_V3_3072:       {0.00013, 0.00013},
	Text_Embedding_Ada_2_8k: {0.0001, 0.0001},
}

Order in each array: prompt price first, then completion price, in USD per 1,000 tokens.
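
For example, pricing a gpt-4 call with 1,000 prompt tokens and 500 completion tokens by hand (a sketch, assuming the package is imported as openai and fmt is imported; see also (*Usage).ComputePrice below):

prices := openai.PricingPer1000TokensPerModel[openai.GPT4_8k]
promptCost := 1000.0 / 1000 * prices[0]    // 1.0 * 0.03 = 0.03
completionCost := 500.0 / 1000 * prices[1] // 0.5 * 0.06 = 0.03
fmt.Println(promptCost+completionCost, "USD") // 0.06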

Functions

func CountTokens

func CountTokens(prompt string, m Model) (int, error)

func CountTokensCompletion

func CountTokensCompletion(req *ChatCompletionRequest) (int, error)

func FormatPrompt

func FormatPrompt(p string) string

func GetMaxRemainingTokens

func GetMaxRemainingTokens(prompt string, m Model) (int, error)

GetMaxRemainingTokens uses OpenAI's tiktoken to compute the token count, so python3 must be installed. Watch out for function definitions, which count toward the model's context length: I did not find documentation of the syntax OpenAI uses to tokenize function definitions, so their token count can only be approximated.
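
A sketch of using the remaining budget to cap a completion, assuming the package is imported as openai, a default key has been set with Init, and log is imported:

prompt := "Summarize this architecture decision record in three bullet points."

remaining, err := openai.GetMaxRemainingTokens(prompt, openai.TextDavinci3_4k)
if err != nil {
	log.Fatal(err)
}

req := &openai.CompletionRequest{
	Model:     openai.TextDavinci3_4k,
	Prompt:    prompt,
	MaxTokens: remaining, // leave the rest of the context window for the completion
}
_ = req // pass to CreateCompletion as usual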

func GetMaxRemainingTokensChatCompletion

func GetMaxRemainingTokensChatCompletion(req *ChatCompletionRequest) (int, error)

func Init

func Init(defaultApiKey string) error
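
Init registers a default key so the per-request APIKEY fields can stay empty. A minimal sketch (the environment variable name is an assumption; os and log imports assumed):

if err := openai.Init(os.Getenv("OPENAI_API_KEY")); err != nil {
	log.Fatal(err)
}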

func IsErrorContextLengthOverflow

func IsErrorContextLengthOverflow(err string) (bool, int, error)

IsErrorContextLengthOverflow returns true, together with the delta to apply to the MaxTokens request parameter, if the error is a context length overflow; otherwise it returns false.
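
A hedged retry sketch; whether the returned delta should be added to or subtracted from MaxTokens is not stated here, so the adjustment below is an assumption:

req := &openai.ChatCompletionRequest{
	Model:     openai.GPT4_8k,
	Messages:  []openai.ChatCompletionMessage{{Role: openai.User, Content: "Explain quantum error correction in detail."}},
	MaxTokens: 7000,
}

resp, err := openai.CreateChatCompletion(req)
if err != nil {
	if overflow, delta, perr := openai.IsErrorContextLengthOverflow(err.Error()); perr == nil && overflow {
		req.MaxTokens += delta // assumed: delta is applied additively
		resp, err = openai.CreateChatCompletion(req)
	}
}
if err != nil {
	log.Fatal(err)
}
fmt.Println(resp.Choices[0].FinishReason)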

Types

type APIError

type APIError struct {
	Code       *string `json:"code,omitempty"`
	Message    string  `json:"message"`
	Param      *string `json:"param,omitempty"`
	Type       string  `json:"type"`
	StatusCode int     `json:"-"`
}

APIError provides error information returned by the OpenAI API.

func (*APIError) Error

func (e *APIError) Error() string

type ChatCompletionChoice

type ChatCompletionChoice struct {
	Index        int                   `json:"index"`
	Message      ChatCompletionMessage `json:"message"`
	FinishReason string                `json:"finish_reason"`
}

type ChatCompletionFunction

type ChatCompletionFunction struct {
	Name        string              `json:"name,omitempty"`
	Description string              `json:"description,omitempty"`
	Parameters  *FunctionParameters `json:"parameters,omitempty"`
}

type ChatCompletionMessage

type ChatCompletionMessage struct {
	Role      MessageRole `json:"role"`
	Content   interface{} `json:"content"` // string, []ContentPart, or null
	ToolCalls []*ToolCall `json:"tool_calls,omitempty"`
}

type ChatCompletionRequest

type ChatCompletionRequest struct {
	APIKEY     string `json:"-"`
	MaxRetries int    `json:"-"`

	Model          Model                    `json:"model"`
	Messages       []ChatCompletionMessage  `json:"messages"`
	Tools          []ChatCompletionToolCall `json:"tools,omitempty"`
	ResponseFormat *ResponseFormat          `json:"response_format,omitempty"`
	Seed           *int                     `json:"seed,omitempty"`
	ToolChoice     interface{}              `json:"tool_choice,omitempty"` // string or object
	MaxTokens      int                      `json:"max_tokens,omitempty"`

	Temperature      float32        `json:"temperature,omitempty"`
	TopP             float32        `json:"top_p,omitempty"`
	N                int            `json:"n,omitempty"`
	Stream           bool           `json:"stream,omitempty"`
	Stop             []string       `json:"stop,omitempty"`
	PresencePenalty  float32        `json:"presence_penalty,omitempty"`
	FrequencyPenalty float32        `json:"frequency_penalty,omitempty"`
	LogitBias        map[string]int `json:"logit_bias,omitempty"`
	User             string         `json:"user,omitempty"`
}

type ChatCompletionResponse

type ChatCompletionResponse struct {
	ID                string                 `json:"id"`
	Choices           []ChatCompletionChoice `json:"choices"`
	FinishReason      string                 `json:"finish_reason"`
	Created           int64                  `json:"created"`
	Model             string                 `json:"model"`
	SystemFingerprint string                 `json:"system_fingerprint"`
	Object            string                 `json:"object"`
	Usage             Usage                  `json:"usage"`

	Price float64 `json:"price,omitempty"`
}

ChatCompletionResponse represents a response structure for the chat completion API.

func CreateChatCompletion

func CreateChatCompletion(req *ChatCompletionRequest) (*ChatCompletionResponse, error)
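
A complete minimal call; the import path is a placeholder since the module path is elided in this listing:

package main

import (
	"fmt"
	"log"
	"os"

	openai "example.com/openai" // hypothetical import path; substitute this module's real one
)

func main() {
	if err := openai.Init(os.Getenv("OPENAI_API_KEY")); err != nil {
		log.Fatal(err)
	}

	resp, err := openai.CreateChatCompletion(&openai.ChatCompletionRequest{
		Model: openai.GPT4_8k,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.System, Content: "You are a terse assistant."},
			{Role: openai.User, Content: "What is the capital of Australia?"},
		},
		MaxTokens: 64,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Content is declared as interface{}; for plain text replies it holds a string.
	fmt.Println(resp.Choices[0].Message.Content)
	fmt.Printf("cost: $%.5f\n", resp.Price)
}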

type ChatCompletionToolCall

type ChatCompletionToolCall struct {
	Type     string                  `json:"type"` // Currently, only "function" is supported
	Function *ChatCompletionFunction `json:"function"`
}

type CompletionChoice

type CompletionChoice struct {
	Text         string        `json:"text"`
	Index        int           `json:"index"`
	FinishReason string        `json:"finish_reason"`
	LogProbs     LogprobResult `json:"logprobs"`
}

CompletionChoice represents one of the possible completions.

type CompletionRequest

type CompletionRequest struct {
	// Only required if no default api key was initialized
	APIKEY string `json:"-"`

	MaxRetries int `json:"-"`

	Model Model `json:"model"`

	// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
	Prompt any `json:"prompt,omitempty"`

	Suffix           string         `json:"suffix,omitempty"`
	MaxTokens        int            `json:"max_tokens,omitempty"`
	Temperature      float32        `json:"temperature,omitempty"`
	TopP             float32        `json:"top_p,omitempty"`
	N                int            `json:"n,omitempty"`
	Stream           bool           `json:"stream,omitempty"`
	LogProbs         int            `json:"logprobs,omitempty"`
	Echo             bool           `json:"echo,omitempty"`
	Stop             []string       `json:"stop,omitempty"`
	PresencePenalty  float32        `json:"presence_penalty,omitempty"`
	FrequencyPenalty float32        `json:"frequency_penalty,omitempty"`
	BestOf           int            `json:"best_of,omitempty"`
	LogitBias        map[string]int `json:"logit_bias,omitempty"`
	User             string         `json:"user,omitempty"`
}

CompletionRequest represents a request structure for the completion API.

type CompletionResponse

type CompletionResponse struct {
	ID      string             `json:"id"`
	Object  string             `json:"object"`
	Created int64              `json:"created"`
	Model   string             `json:"model"`
	Choices []CompletionChoice `json:"choices"`
	Usage   Usage              `json:"usage"`

	Price float64 `json:"price,omitempty"`
}

CompletionResponse represents a response structure for the completion API.

func CreateCompletion

func CreateCompletion(req *CompletionRequest) (*CompletionResponse, error)
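
A legacy completion sketch (same import and Init assumptions as the chat example above):

resp, err := openai.CreateCompletion(&openai.CompletionRequest{
	Model:     openai.TextDavinci3_4k,
	Prompt:    "Write a haiku about the sea.",
	MaxTokens: 64,
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(resp.Choices[0].Text)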

type ContentPart

type ContentPart struct {
	Type string `json:"type"`           // "text" or "image_url"
	Text string `json:"text,omitempty"` // for text type
	// For image_url type
	URL    string `json:"url,omitempty"`
	Detail string `json:"detail,omitempty"`
}
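
Because ChatCompletionMessage.Content accepts either a string or []ContentPart, an image prompt for the vision preview model can be built like this (a sketch; the image URL is a placeholder):

msg := openai.ChatCompletionMessage{
	Role: openai.User,
	Content: []openai.ContentPart{
		{Type: "text", Text: "Describe this image in one sentence."},
		{Type: "image_url", URL: "https://example.com/photo.jpg", Detail: "low"},
	},
}

req := &openai.ChatCompletionRequest{
	Model:     openai.GPT4_128k_Vision_Preview,
	Messages:  []openai.ChatCompletionMessage{msg},
	MaxTokens: 128,
}
_ = req // pass to CreateChatCompletion as usual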

type ContextLength

type ContextLength int
const (
	Context4K   ContextLength = 4096
	Context8K   ContextLength = 8192
	Context16K  ContextLength = 16384
	Context32K  ContextLength = 32768
	Context128K ContextLength = 128000
)

type EditsChoice

type EditsChoice struct {
	Text  string `json:"text"`
	Index int    `json:"index"`
}

EditsChoice represents one of the possible edits.

type EditsRequest

type EditsRequest struct {
	// Only required if no default api key was initialized
	APIKEY     string `json:"-"`
	MaxRetries int    `json:"-"`

	Model       Model   `json:"model,omitempty"`
	Input       string  `json:"input,omitempty"`
	Instruction string  `json:"instruction,omitempty"`
	N           int     `json:"n,omitempty"`
	Temperature float32 `json:"temperature,omitempty"`
	TopP        float32 `json:"top_p,omitempty"`
}

EditsRequest represents a request structure for the Edits API.

type EditsResponse

type EditsResponse struct {
	Object  string        `json:"object"`
	Created int64         `json:"created"`
	Usage   Usage         `json:"usage"`
	Choices []EditsChoice `json:"choices"`

	Price float64 `json:"price,omitempty"`
}

EditsResponse represents a response structure for the Edits API.

func CreateEdit

func CreateEdit(req *EditsRequest) (*EditsResponse, error)

type Embedding

type Embedding struct {
	Object    string    `json:"object"`
	Embedding []float32 `json:"embedding"`
	Index     int       `json:"index"`
}

type EmbeddingRequest

type EmbeddingRequest struct {
	// Only required if no default api key was initialized
	APIKEY     string `json:"-"`
	MaxRetries int    `json:"-"`

	Model Model `json:"model"`

	// Input text to get embeddings for, encoded as a string or array of tokens.
	// To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays.
	// Each input must not exceed 8192 tokens in length.
	Input interface{} `json:"input"`

	// Only supported in text-embedding-3 and later models
	Dimensions int `json:"dimensions,omitempty"`

	User string `json:"user,omitempty"`
}

type EmbeddingResponse

type EmbeddingResponse struct {
	Object string      `json:"object"`
	Data   []Embedding `json:"data"`
	Model  string      `json:"model"`
	Usage  Usage       `json:"usage"`

	Price float64 `json:"price,omitempty"`
}

func CreateEmbedding

func CreateEmbedding(req *EmbeddingRequest) (*EmbeddingResponse, error)
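
An embedding sketch; Dimensions is only honored by the text-embedding-3 models, as noted above:

resp, err := openai.CreateEmbedding(&openai.EmbeddingRequest{
	Model:      openai.Embedding_V3_1536,
	Input:      []string{"first document", "second document"},
	Dimensions: 256, // optional down-projection, text-embedding-3 models only
})
if err != nil {
	log.Fatal(err)
}
for _, e := range resp.Data {
	fmt.Println(e.Index, len(e.Embedding))
}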

type ErrorResponse

type ErrorResponse struct {
	Error *APIError `json:"error,omitempty"`
}

type Function

type Function struct {
	Name      string `json:"name"`
	Arguments string `json:"arguments"` // JSON format, validation needed
}

type FunctionParameters

type FunctionParameters struct {
	Type       string                      `json:"type"` // object
	Properties map[string]FunctionProperty `json:"properties"`
	Required   []string                    `json:"required,omitempty"`
}

type FunctionProperty

type FunctionProperty struct {
	Type        string   `json:"type"` // string, number, integer, array, boolean, object, null, anyof
	Description string   `json:"description,omitempty"`
	Enum        []string `json:"enum,omitempty"`
	// For arrays
	Items *FunctionProperty `json:"items,omitempty"`
	// For objects
	Properties map[string]FunctionProperty `json:"properties,omitempty"`
	Required   []string                    `json:"required,omitempty"`
}
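
A sketch declaring a single tool with these parameter types; get_weather is a hypothetical function, not part of the package:

weather := &openai.ChatCompletionFunction{
	Name:        "get_weather",
	Description: "Get the current weather for a city",
	Parameters: &openai.FunctionParameters{
		Type: "object",
		Properties: map[string]openai.FunctionProperty{
			"city": {Type: "string", Description: "City name, e.g. Paris"},
			"unit": {Type: "string", Enum: []string{"celsius", "fahrenheit"}},
		},
		Required: []string{"city"},
	},
}

req := &openai.ChatCompletionRequest{
	Model:    openai.GPT4_8k,
	Messages: []openai.ChatCompletionMessage{{Role: openai.User, Content: "Weather in Paris?"}},
	Tools:    []openai.ChatCompletionToolCall{{Type: "function", Function: weather}},
}

resp, err := openai.CreateChatCompletion(req)
if err != nil {
	log.Fatal(err)
}
for _, call := range resp.Choices[0].Message.ToolCalls {
	// Arguments is a JSON string and, per the field comment on Function, needs validation.
	fmt.Println(call.Function.Name, call.Function.Arguments)
}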

type ListModelsObject

type ListModelsObject struct {
	Object     string `json:"object"`
	ID         string `json:"id"`
	Created    int    `json:"created"`
	OwnedBy    string `json:"owned_by"`
	Permission []struct {
		ID                 string      `json:"id"`
		Object             string      `json:"object"`
		Created            int         `json:"created"`
		AllowCreateEngine  bool        `json:"allow_create_engine"`
		AllowSampling      bool        `json:"allow_sampling"`
		AllowLogprobs      bool        `json:"allow_logprobs"`
		AllowSearchIndices bool        `json:"allow_search_indices"`
		AllowView          bool        `json:"allow_view"`
		AllowFineTuning    bool        `json:"allow_fine_tuning"`
		Organization       string      `json:"organization"`
		Group              interface{} `json:"group"`
		IsBlocking         bool        `json:"is_blocking"`
	} `json:"permission"`
	Root   string      `json:"root"`
	Parent interface{} `json:"parent"`
}

type ListModelsRequest

type ListModelsRequest struct {
	// Only required if no default api key was initialized
	APIKEY     string `json:"-"`
	MaxRetries int    `json:"-"`
}

type ListModelsResponse

type ListModelsResponse struct {
	Data   []ListModelsObject `json:"data"`
	Object string             `json:"object"`
}

func ListModels

func ListModels(req *ListModelsRequest) (*ListModelsResponse, error)
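
Listing available models only needs a key (sketch; assumes Init has set a default key):

models, err := openai.ListModels(&openai.ListModelsRequest{})
if err != nil {
	log.Fatal(err)
}
for _, m := range models.Data {
	fmt.Println(m.ID, m.OwnedBy)
}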

type LogprobResult

type LogprobResult struct {
	Tokens        []string             `json:"tokens"`
	TokenLogprobs []float32            `json:"token_logprobs"`
	TopLogprobs   []map[string]float32 `json:"top_logprobs"`
	TextOffset    []int                `json:"text_offset"`
}

LogprobResult represents the logprob result of a Choice.

type MessageRole

type MessageRole string
const (
	System    MessageRole = "system"
	User      MessageRole = "user"
	Assistant MessageRole = "assistant"
	Tool      MessageRole = "tool"
)

type Model

type Model string
const (
	Text_Embedding_Ada_2_8k Model = "text-embedding-ada-002"

	Embedding_V3_3072 Model = "text-embedding-3-large"

	Embedding_V3_1536 Model = "text-embedding-3-small"

	GPT4_128k_Preview Model = "gpt-4-0125-preview"

	GPT4_128k_Vision_Preview Model = "gpt-4-vision-preview"

	GPT4_8k Model = "gpt-4"

	GPT4_8k_0613 Model = "gpt-4-0613"

	GPT4_32k Model = "gpt-4-32k"

	GPT4_32k_0613 Model = "gpt-4-32k-0613"

	GPT3_5_turbo_4k Model = "gpt-3.5-turbo"

	GPT3_5_turbo_16k Model = "gpt-3.5-turbo-16k"

	GPT3_5_turbo_4k_0613 Model = "gpt-3.5-turbo-0613"

	GPT3_5_turbo_16k_0613 Model = "gpt-3.5-turbo-16k-0613"

	GPT3_5_turbo_4k_0301 Model = "gpt-3.5-turbo-0301"

	TextDavinci3_4k Model = "text-davinci-003"

	TextDavinci2_4k Model = "text-davinci-002"

	TextDavinci_1_Edit Model = "text-davinci-edit-001"

	CodeDavinci2_8k Model = "code-davinci-002"

	DallE3 Model = "dall-e-3"

	DallE2 Model = "dall-e-2"
)

func (Model) GetContextLength

func (m Model) GetContextLength() ContextLength

func (Model) GetSimilarWithNextContextLength

func (m Model) GetSimilarWithNextContextLength() (bool, Model)
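
A sketch of falling back to a sibling model with a larger context window when a prompt does not fit (strings import assumed; the exact model mapping is up to the package):

prompt := strings.Repeat("lorem ipsum ", 2000)

m := openai.GPT3_5_turbo_4k
tokens, err := openai.CountTokens(prompt, m)
if err != nil {
	log.Fatal(err)
}
if tokens >= int(m.GetContextLength()) {
	if ok, larger := m.GetSimilarWithNextContextLength(); ok {
		m = larger // e.g. gpt-3.5-turbo to gpt-3.5-turbo-16k (assumed mapping)
	}
}
fmt.Println("using model:", m)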

type ModerateRequest

type ModerateRequest struct {
	// Only required if no default api key was initialized
	APIKEY     string `json:"-"`
	MaxRetries int    `json:"-"`

	Input string `json:"input"`
}

type ModerateResponse

type ModerateResponse struct {
	ID      string   `json:"id"`
	Model   string   `json:"model"`
	Results []Result `json:"results"`
}

func Moderate

func Moderate(req *ModerateRequest) (*ModerateResponse, error)
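
A moderation sketch:

mod, err := openai.Moderate(&openai.ModerateRequest{Input: "some user-supplied text"})
if err != nil {
	log.Fatal(err)
}
for _, r := range mod.Results {
	if r.Flagged {
		fmt.Println("flagged categories:", r.Categories)
	}
}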

type RequestError

type RequestError struct {
	StatusCode int
	Err        error
}

RequestError provides information about generic request errors.

func (*RequestError) Error

func (e *RequestError) Error() string

func (*RequestError) Unwrap

func (e *RequestError) Unwrap() error

type ResponseFormat

type ResponseFormat struct {
	Type string `json:"type"` // "text" or "json_object"
}
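
Requesting JSON mode on a model that supports it (sketch; per the OpenAI API, the prompt should also mention JSON explicitly):

req := &openai.ChatCompletionRequest{
	Model: openai.GPT4_128k_Preview,
	Messages: []openai.ChatCompletionMessage{
		{Role: openai.System, Content: "Reply with a JSON object containing an \"answer\" key."},
		{Role: openai.User, Content: "What is 2+2?"},
	},
	ResponseFormat: &openai.ResponseFormat{Type: "json_object"},
}
_ = req // pass to CreateChatCompletion; the reply content is then a JSON string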

type Result

type Result struct {
	Categories     map[string]bool    `json:"categories"`
	CategoryScores map[string]float64 `json:"category_scores"`
	Flagged        bool               `json:"flagged"`
}

type ToolCall

type ToolCall struct {
	ID       string    `json:"id"`
	Type     string    `json:"type"` // Currently, only "function" is supported
	Function *Function `json:"function"`
}

type Usage

type Usage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

Usage represents the total token usage per request to OpenAI.

func (*Usage) ComputePrice

func (u *Usage) ComputePrice(m Model) float64
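
ComputePrice prices a usage record for a given model (presumably via PricingPer1000TokensPerModel); responses returned by this package also carry a ready-made Price field. A sketch:

u := openai.Usage{PromptTokens: 1200, CompletionTokens: 300}
fmt.Printf("$%.5f\n", u.ComputePrice(openai.GPT4_8k))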
