65 lines
1.4 KiB
Crystal
65 lines
1.4 KiB
Crystal
|
|
|
|
module AIUtils
|
|
# OpenAI chat-completion models known to this client.
#
# `to_s` yields the exact API model identifier (e.g. "gpt-3.5-turbo"),
# and `from_s?` performs the reverse, case-insensitive lookup.
enum OpenAIModel
  Gpt_4
  Gpt_3_5_Turbo
  Gpt_3_5_Turbo_16k

  # API identifier for this model.
  #
  # An exhaustive `case` (instead of indexing a parallel array by
  # `self.value`) means adding a member without a string here is a
  # compile-time error, and no array is allocated per call.
  def to_s : String
    case self
    in Gpt_4             then "gpt-4"
    in Gpt_3_5_Turbo     then "gpt-3.5-turbo"
    in Gpt_3_5_Turbo_16k then "gpt-3.5-turbo-16k"
    end
  end

  # Keep the IO-writing overload consistent with `to_s`: string
  # interpolation and `puts` use `to_s(io)`, which would otherwise
  # still print the raw member name ("Gpt_4").
  def to_s(io : IO) : Nil
    io << to_s
  end

  # Case-insensitive lookup of a model from its API identifier.
  #
  # Returns nil when *str_model* names no known model.
  def self.from_s?(str_model : String) : OpenAIModel?
    values.find do |openai_model|
      openai_model.to_s.downcase == str_model.downcase
    end
  end
end
|
|
|
|
# Interaction modes offered by the OpenAI API.
enum OpenAIMode
  Chat
  Complete
  Insert

  # Looks up a mode by name, ignoring case ("chat" => Chat).
  #
  # Thin wrapper over the built-in `Enum.parse?`; returns nil for
  # unrecognized input.
  def self.from_s?(str_mode) : OpenAIMode?
    parse?(str_mode)
  end
end
|
|
|
|
# Context-window size, in tokens, for *model*.
#
# Exhaustive `case` replaces the per-call hash literal: no allocation,
# and a newly added `OpenAIModel` member becomes a compile-time error
# here instead of silently falling through to a 0-token limit.
def self.context_token_limit(model : OpenAIModel) : UInt32
  case model
  in OpenAIModel::Gpt_4             then 1024_u32 * 8_u32
  in OpenAIModel::Gpt_3_5_Turbo     then 1024_u32 * 4_u32
  in OpenAIModel::Gpt_3_5_Turbo_16k then 1024_u32 * 16_u32
  end
end
|
|
|
|
# Default request parameters for the OpenAI chat-completion client.
#
# All values are overridable per instance via the generated setters.
class GptConfig
  # Model and interaction mode.
  property model : OpenAIModel = OpenAIModel::Gpt_3_5_Turbo
  property mode : OpenAIMode = OpenAIMode::Chat

  # Token budgets: how much context is sent and how long the reply may be.
  property prompt_size : Int32 = 2_700
  property max_tokens : Int32 = 384

  # Sampling parameters.
  property temperature : Float64 = 0.8
  property presence_penalty : Float64 = 1.0
  property frequency_penalty : Float64 = 1.0

  # NOTE(review): `ENV.fetch` raises `KeyError` at instantiation when
  # OPENAI_API_KEY is unset — confirm that failing fast here is intended.
  property openai_key : String = ENV.fetch("OPENAI_API_KEY")
end
|
|
end
|
|
|