# OptionParser (stdlib) is used below for command-line parsing.
require "option_parser"
require "./config"
require "./requests/chat"
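
# Storyteller drives the CLI: it parses options, loads a prompt file,
# optionally requests a completion from the OpenAI chat API, and writes
# the resulting prompt back out.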
class Storyteller
  property config = AppConfig.new

  def initialize()
  end
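
  # Prepares the run: parses the configuration, then the command-line
  # arguments, so CLI flags take precedence.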
  def prepare()
    self._parse_config()
    self._parse_command_line(ARGV)
  end
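
  # Configuration-file parsing; currently a no-op.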
  private def _parse_config()
  end
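
  # Builds the OptionParser: input/output paths, color, verbosity and
  # dry-run switches, plus the GPT request parameters (model, temperature,
  # penalties, max tokens).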
  private def _parse_command_line(args)
    parser = OptionParser.new do |parser|
      parser.banner = "Usage: storyteller [options]"

      parser.separator("Options:")

      parser.on("-i FILE", "--input=FILE", "Path to input file") do |file|
        @config.input_file_path = file
      end

      parser.on("-h", "--help", "Show this help") do
        puts parser
        exit
      end

      parser.on("-n", "--no-color", "Disable color output") do
        @config.use_color = false
      end

      parser.on("-o FILE", "--output=FILE", "Path to output file") do |file|
        # Writing to a file implies uncolored output.
        @config.use_color = false
        @config.output_file_path = file
      end

      parser.on("-v", "--verbose", "Be verbose") do
        @config.verbose = true
      end

      parser.on("--dry-run", "Don't call the API") do
        @config.make_request = false
      end

      parser.separator("GPT options")

      parser.on("--gpt-model MODEL", "GPT model (...) (default: gpt-3.5-turbo)") do |chosen_model|
        result_model = AIUtils::OpenAIModel.from_s?(chosen_model.downcase)
        if result_model.nil?
          STDERR.puts "ERROR: unknown model #{chosen_model}"
          exit 1
        end
        @config.gpt_config.model = result_model unless result_model.nil?
      end

      # parser.on("--gpt-mode MODE", "GPT mode (chat,insert,complete) (default: chat)") do |chosen_mode|
      #   result_mode = AIUtils::OpenAIMode.from_s(chosen_mode.downcase)
      #   if result_mode.nil?
      #     STDERR.puts "ERROR: unknown mode #{chosen_mode}"
      #     exit 1
      #   end
      #   @config.gpt_config.mode = result_mode unless result_mode.nil?
      # end

      parser.on("--gpt-temperature TEMPERATURE", "GPT Temperature (default #{@config.gpt_config.temperature})") do |temperature|
        @config.gpt_config.temperature = temperature.to_f
      end
      parser.on("--gpt-presence-penalty PENALTY", "GPT Presence Penalty (default #{@config.gpt_config.presence_penalty})") do |presence_penalty|
        @config.gpt_config.presence_penalty = presence_penalty.to_f
      end
      parser.on("--gpt-frequency-penalty PENALTY", "GPT Frequency Penalty (default #{@config.gpt_config.frequency_penalty})") do |frequency_penalty|
        @config.gpt_config.frequency_penalty = frequency_penalty.to_f
      end
      parser.on("--gpt-max-tokens TOKENS", "GPT Max Tokens (default #{@config.gpt_config.max_tokens})") do |max_tokens|
        @config.gpt_config.max_tokens = max_tokens.to_i
      end
    end
    parser.parse(args)
  end
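
  # Opens the configured input file when a path was given, parses it into
  # a Prompt via read_file, and returns the prompt.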
  def load_file()
    # Read file and initialize zones
    if !@config.input_file_path.empty?
      # puts "d: Using input file #{input_file_path}"
      @config.input_file = File.open(@config.input_file_path)
    end
    prompt = self.read_file(@config.input_file)
    @config.input_file.close
    prompt
  end
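
  # Main entry point: load the prompt, complete it, and write the result
  # to the configured output.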
  def run()
    original_prompt = self.load_file()

    # Build and (optionally) send the GPT request
    prompt = self.complete(original_prompt, @config.make_request, @config.verbose)
    pp prompt if @config.verbose
    exit 0 if !@config.make_request

    if !@config.output_file_path.empty?
      # puts "d: Using output file #{output_file_path}"
      @config.output_file = File.open(@config.output_file_path, "w")
    end
    self.write_file(@config.output_file, prompt, @config.use_color)
    @config.output_file.close
  end
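
  # Builds the chat messages for the prompt and, unless this is a dry run,
  # performs the API request in one fiber while another prints progress dots.
  # Returns the prompt unchanged on dry runs.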
  def complete(prompt : Prompt, make_request : Bool, verbose : Bool)
    builder = Builder::OpenAIChat.new(verbose: verbose)
    context_token_limit = AIUtils.context_token_limit(@config.gpt_config.model)
    messages = builder.build(prompt, context_token_limit)

    STDERR.puts "model = #{@config.gpt_config.model}"
    STDERR.puts "context_token_limit = #{context_token_limit}"
    STDERR.puts messages if verbose

    return prompt if !make_request

    channel_ready = Channel(Bool).new
    channel_tick = Channel(Bool).new
    # sp = Spin.new(0.5, Spinner::Charset[:progress])

    # Progress fiber: prints a dot every second and aborts after two minutes.
    spawn do
      tick = 0
      loop do
        tick += 1
        print "."
        if tick > (2 * 60)
          print "(timeout)"
          exit 1
        end
        # channel_tick.send(true)
        sleep 1.seconds
      end
    end

    # TODO:
    # request = Request::Chat.new(config: @config.gpt_config)
    # result = request.perform(message)

    # Request fiber: performs the API call, appends the first completion to
    # the current zone, and signals channel_ready when done.
    spawn do
      request = Requests::ChatRequest.new(config: @config.gpt_config)
      result = request.perform(messages)

      pp result.choices if @config.verbose
      prompt.present_zone.content << result.choices.first["message"]["content"]
      channel_ready.send(true)
    rescue ex : KeyError
      puts "(openai error)"
      STDERR.puts "ERROR: #{ex.message}"
      exit 1
    rescue ex : IO::Error
      puts "(network error)"
      STDERR.puts "ERROR: #{ex.message}"
      exit 1
    rescue ex : Socket::Addrinfo::Error
      puts "(network error)"
      STDERR.puts "ERROR: #{ex.message}"
      exit 1
    end

    # sp.start
    channel_ready.receive
    channel_ready.close
    # sp.stop

    prompt
  end
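
  # Reads the whole input and parses it into a Prompt.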
  def read_file(input_file : IO::FileDescriptor)
    content = input_file.gets_to_end

    # puts "d: building parser"
    parser = Parser::PromptString.new
    # puts "d: parsing"
    prompt = parser.parse(content)
    # pp prompt
  end
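
  # Serializes the prompt (with or without color) and writes it to output_file.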
  def write_file(output_file : IO::FileDescriptor, prompt : Prompt, use_color : Bool)
    # STDERR.puts "d: building builder"
    builder = ContextBuilder::PromptString.new(use_color)
    # STDERR.puts "d: building"
    text = builder.build(prompt)
    output_file.write_string(text.to_slice)
  end
  def display_completion(completion : String)
    # Code to display the completion
  end
end