ai-chat 0.2.3 → 0.3.0
- checksums.yaml +4 -4
- data/README.md +143 -76
- data/ai-chat.gemspec +8 -5
- data/lib/ai/amazing_print.rb +7 -1
- data/lib/ai/chat.rb +452 -81
- data/lib/ai/http.rb +45 -0
- data/lib/prompts/schema_generator.md +123 -0
- metadata +58 -12
data/lib/ai/chat.rb
CHANGED
@@ -4,9 +4,15 @@ require "base64"
 require "json"
 require "marcel"
 require "openai"
+require "ostruct"
 require "pathname"
 require "stringio"
 require "fileutils"
+require "tty-spinner"
+require "timeout"
+
+require_relative "http"
+include AI::Http
 
 module AI
   # :reek:MissingSafeMethod { exclude: [ generate! ] }
@@ -16,34 +22,72 @@ module AI
   # :reek:IrresponsibleModule
   class Chat
     # :reek:Attribute
-    attr_accessor :messages, :model, :
+    attr_accessor :background, :code_interpreter, :conversation_id, :image_generation, :image_folder, :messages, :model, :proxy, :previous_response_id, :web_search
     attr_reader :reasoning_effort, :client, :schema
 
     VALID_REASONING_EFFORTS = [:low, :medium, :high].freeze
+    PROXY_URL = "https://prepend.me/".freeze
 
     def initialize(api_key: nil, api_key_env_var: "OPENAI_API_KEY")
-      api_key
+      @api_key = api_key || ENV.fetch(api_key_env_var)
      @messages = []
       @reasoning_effort = nil
       @model = "gpt-4.1-nano"
-      @client = OpenAI::Client.new(api_key: api_key)
+      @client = OpenAI::Client.new(api_key: @api_key)
       @previous_response_id = nil
+      @proxy = false
       @image_generation = false
       @image_folder = "./images"
     end
 
+    def self.generate_schema!(description, api_key: nil, api_key_env_var: "OPENAI_API_KEY", proxy: false)
+      @api_key ||= ENV.fetch(api_key_env_var)
+      prompt_path = File.expand_path("../prompts/schema_generator.md", __dir__)
+      system_prompt = File.open(prompt_path).read
+
+      json = if proxy
+        uri = URI(PROXY_URL + "api.openai.com/v1/responses")
+        parameters = {
+          model: "o4-mini",
+          input: [
+            {role: :system, content: system_prompt},
+            {role: :user, content: description},
+          ],
+          text: {format: {type: "json_object"}},
+          reasoning: {effort: "high"}
+        }
+
+        send_request(uri, content_type: "json", parameters: parameters, method: "post")
+      else
+        client = OpenAI::Client.new(api_key: api_key)
+        response = client.responses.create(
+          model: "o4-mini",
+          input: [
+            {role: :system, content: system_prompt},
+            {role: :user, content: description}
+          ],
+          text: {format: {type: "json_object"}},
+          reasoning: {effort: "high"}
+        )
+
+        output_text = response.output_text
+        JSON.parse(output_text)
+      end
+      JSON.pretty_generate(json)
+    end
+
     # :reek:TooManyStatements
     # :reek:NilCheck
-    def add(content, role: "user", response: nil, image: nil, images: nil, file: nil, files: nil)
+    def add(content, role: "user", response: nil, status: nil, image: nil, images: nil, file: nil, files: nil)
       if image.nil? && images.nil? && file.nil? && files.nil?
-
-
-
-
-
-
-
-
+        message = {
+          role: role,
+          content: content,
+          response: response
+        }
+        message[:content] = content if content
+        message[:status] = status if status
+        messages.push(message)
       else
         text_and_files_array = [
           {
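The new class-level generate_schema! turns a plain-English description into a pretty-printed JSON schema string, using o4-mini at high reasoning effort. A minimal usage sketch; the require path and the explicit api_key: argument are assumptions, not shown in this diff:

    require "ai-chat"

    schema = AI::Chat.generate_schema!(
      "An object with a 'title' string and an array of 'tags' strings",
      api_key: ENV.fetch("OPENAI_API_KEY")
    )
    puts schema  # pretty-printed JSON schema string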
@@ -76,7 +120,8 @@ module AI
         messages.push(
           {
             role: role,
-            content: text_and_files_array
+            content: text_and_files_array,
+            status: status
           }
         )
       end
@@ -90,53 +135,32 @@ module AI
       add(message, role: "user", image: image, images: images, file: file, files: files)
     end
 
-    def assistant(message, response: nil)
-      add(message, role: "assistant", response: response)
+    def assistant(message, response: nil, status: nil)
+      add(message, role: "assistant", response: response, status: status)
     end
 
     # :reek:NilCheck
     # :reek:TooManyStatements
     def generate!
+      validate_api_key
       response = create_response
+      parse_response(response)
 
-
-
-
-      response_usage = response.usage.to_h.slice(:input_tokens, :output_tokens, :total_tokens)
-
-      chat_response = {
-        id: response.id,
-        model: response.model,
-        usage: response_usage,
-        total_tokens: response_usage[:total_tokens],
-        images: image_filenames
-      }
-
-      message = if schema
-        if text_response.nil? || text_response.empty?
-          raise ArgumentError, "No text content in response to parse as JSON for schema: #{schema.inspect}"
-        end
-        JSON.parse(text_response, symbolize_names: true)
-      else
-        text_response
-      end
+      self.previous_response_id = last.dig(:response, :id) unless (conversation_id && !background)
+      last
+    end
 
-
-
+    # :reek:BooleanParameter
+    # :reek:ControlParameter
+    # :reek:DuplicateMethodCall
+    # :reek:TooManyStatements
+    def get_response(wait: false, timeout: 600)
+      response = if wait
+        wait_for_response(timeout)
       else
-
-        {
-          role: "assistant",
-          content: message,
-          images: image_filenames,
-          response: chat_response
-        }.compact
-      )
+        retrieve_response(previous_response_id)
       end
-
-      self.previous_response_id = chat_response[:id]
-
-      message
+      parse_response(response)
     end
 
     # :reek:NilCheck
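generate! now delegates parsing to parse_response and returns the last message, and the new get_response pairs with background mode: generate! kicks off the response, then get_response(wait: true) polls until it completes, using the spinner and backoff helpers added later in this diff. A sketch of the intended flow, inferred from the methods above:

    chat = AI::Chat.new
    chat.background = true
    chat.user("Summarize the history of Ruby in one paragraph.")
    chat.generate!                        # returns quickly; status not yet :completed

    message = chat.get_response(wait: true, timeout: 600)
    message[:status]                      # => :completed
    message[:content]                     # => the completed text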
@@ -172,15 +196,64 @@ module AI
       messages.last
     end
 
+    def items(order: :asc)
+      raise "No conversation_id set. Call generate! first to create a conversation." unless conversation_id
+
+      if proxy
+        uri = URI(PROXY_URL + "api.openai.com/v1/conversations/#{conversation_id}/items?order=#{order.to_s}")
+        response_hash = send_request(uri, content_type: "json", method: "get")
+
+        if response_hash.key?(:data)
+          response_hash.dig(:data).map do |hash|
+            # Transform values to allow expected symbols that non-proxied request returns
+
+            hash.transform_values! do |value|
+              if hash.key(value) == :type
+                value.to_sym
+              else
+                value
+              end
+            end
+          end
+          response_hash
+        end
+        # Convert to Struct to allow same interface as non-proxied request
+        create_deep_struct(response_hash)
+      else
+        client.conversations.items.list(conversation_id, order: order)
+      end
+    end
+
+    def verbose
+      page = items
+
+      box_width = 78
+      inner_width = box_width - 4
+
+      puts
+      puts "┌#{"─" * (box_width - 2)}┐"
+      puts "│ Conversation: #{conversation_id.ljust(inner_width - 14)} │"
+      puts "│ Items: #{page.data.length.to_s.ljust(inner_width - 7)} │"
+      puts "└#{"─" * (box_width - 2)}┘"
+      puts
+
+      ap page.data, limit: 10, indent: 2
+    end
+
     def inspect
       "#<#{self.class.name} @messages=#{messages.inspect} @model=#{@model.inspect} @schema=#{@schema.inspect} @reasoning_effort=#{@reasoning_effort.inspect}>"
     end
 
     # Support for Ruby's pp (pretty print)
+    # :reek:TooManyStatements
+    # :reek:NilCheck
+    # :reek:FeatureEnvy
+    # :reek:DuplicateMethodCall
+    # :reek:UncommunicativeParameterName
     def pretty_print(q)
-      q.group(1, "#<#{self.class}",
+      q.group(1, "#<#{self.class}", ">") do
        q.breakable
-
+
        # Show messages with truncation
        q.text "@messages="
        truncated_messages = @messages.map do |msg|
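items pages through the server-side conversation history and verbose pretty-prints it; both depend on the conversation_id that generate! now creates when none is set. A usage sketch; the item fields shown are assumptions based on the parsing code elsewhere in this diff:

    chat = AI::Chat.new
    chat.user("Hello!")
    chat.generate!                  # creates a conversation on first use

    page = chat.items(order: :desc)
    page.data.each { |item| puts item.type }

    chat.verbose                    # boxed summary, then an ap dump of the items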
@@ -191,7 +264,7 @@ module AI
          truncated_msg
        end
        q.pp truncated_messages
-
+
        # Show other instance variables (except sensitive ones)
        skip_vars = [:@messages, :@api_key, :@client]
        instance_variables.sort.each do |var|
@@ -207,10 +280,10 @@ module AI
      end
    end
 
-
    private
 
    class InputClassificationError < StandardError; end
+    class WrongAPITokenUsedError < StandardError; end
 
    # :reek:FeatureEnvy
    # :reek:ManualDispatch
@@ -225,21 +298,128 @@ module AI
       end
     end
 
+    def create_conversation
+      self.conversation_id = if proxy
+        uri = URI(PROXY_URL + "api.openai.com/v1/conversations")
+        response = send_request(uri, content_type: "json", method: "post")
+        response.dig(:id)
+      else
+        conversation = client.conversations.create
+        conversation.id
+      end
+    end
+
     # :reek:TooManyStatements
     def create_response
       parameters = {
         model: model
       }
 
+      parameters[:background] = background if background
       parameters[:tools] = tools unless tools.empty?
       parameters[:text] = schema if schema
       parameters[:reasoning] = {effort: reasoning_effort} if reasoning_effort
-
+
+      if previous_response_id && conversation_id
+        warn "Both conversation_id and previous_response_id are set. Using previous_response_id for forking. Only set one."
+        parameters[:previous_response_id] = previous_response_id
+      elsif previous_response_id
+        parameters[:previous_response_id] = previous_response_id
+      elsif conversation_id
+        parameters[:conversation] = conversation_id
+      else
+        create_conversation
+      end
 
       messages_to_send = prepare_messages_for_api
       parameters[:input] = strip_responses(messages_to_send) unless messages_to_send.empty?
 
-
+      if proxy
+        uri = URI(PROXY_URL + "api.openai.com/v1/responses")
+        send_request(uri, content_type: "json", parameters: parameters, method: "post")
+      else
+        client.responses.create(**parameters)
+      end
+    end
+
+    # :reek:NilCheck
+    # :reek:TooManyStatements
+    def parse_response(response)
+      if proxy && response.is_a?(Hash)
+        response_messages = response.dig(:output).select do |output|
+          output.dig(:type) == "message"
+        end
+
+        message_contents = response_messages.map do |message|
+          message.dig(:content)
+        end.flatten
+
+        output_texts = message_contents.select do |content|
+          content[:type] == "output_text"
+        end
+
+        text_response = output_texts.map { |output| output[:text] }.join
+        response_id = response.dig(:id)
+        response_status = response.dig(:status).to_sym
+        response_model = response.dig(:model)
+        response_usage = response.dig(:usage)&.slice(:input_tokens, :output_tokens, :total_tokens)
+
+        if response.key?(:conversation)
+          self.conversation_id = response.dig(:conversation, :id)
+        end
+      else
+        text_response = response.output_text
+        response_id = response.id
+        response_status = response.status
+        response_model = response.model
+        response_usage = response.usage.to_h.slice(:input_tokens, :output_tokens, :total_tokens)
+
+        if response.conversation
+          self.conversation_id = response.conversation.id
+        end
+      end
+      image_filenames = extract_and_save_images(response) + extract_and_save_files(response)
+
+      chat_response = {
+        id: response_id,
+        model: response_model,
+        usage: response_usage || {},
+        total_tokens: response_usage&.fetch(:total_tokens, 0),
+        images: image_filenames
+      }.compact
+
+      response_content = if schema
+        if text_response.nil? || text_response.empty?
+          raise ArgumentError, "No text content in response to parse as JSON for schema: #{schema.inspect}"
+        end
+        JSON.parse(text_response, symbolize_names: true)
+      else
+        text_response
+      end
+
+      existing_message_position = messages.find_index do |message|
+        message.dig(:response, :id) == response_id
+      end
+
+      message = {
+        role: "assistant",
+        content: response_content,
+        response: chat_response,
+        status: response_status
+      }
+
+      message.store(:images, image_filenames) unless image_filenames.empty?
+
+      if existing_message_position
+        messages[existing_message_position] = message
+      else
+        messages.push(message)
+        message
+      end
+    end
+
+    def cancel_request
+      client.responses.cancel(previous_response_id)
     end
 
     def prepare_messages_for_api
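create_response now resolves conversation state with a fixed precedence: previous_response_id wins (forking from a specific response), then conversation_id, and when neither is set a new conversation is created. Setting both triggers the warning above; a sketch with hypothetical IDs:

    chat = AI::Chat.new
    chat.conversation_id = "conv_abc123"        # hypothetical ID
    chat.previous_response_id = "resp_def456"   # hypothetical ID
    chat.user("Hi")
    chat.generate!
    # warns: "Both conversation_id and previous_response_id are set. ..."
    # and sends previous_response_id, ignoring the conversation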
@@ -389,19 +569,12 @@ module AI
       if image_generation
         tools_list << {type: "image_generation"}
       end
+      if code_interpreter
+        tools_list << {type: "code_interpreter", container: {type: "auto"}}
+      end
       tools_list
     end
 
-    # :reek:UtilityFunction
-    # :reek:ManualDispatch
-    def extract_text_from_response(response)
-      response.output.flat_map { |output|
-        output.respond_to?(:content) ? output.content : []
-      }.compact.find { |content|
-        content.is_a?(OpenAI::Models::Responses::ResponseOutputText)
-      }&.text
-    end
-
     # :reek:FeatureEnvy
     # :reek:UtilityFunction
     def wrap_schema_if_needed(schema)
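The new code_interpreter flag registers a code-interpreter tool with an auto-managed container; files the model writes are downloaded by extract_and_save_files (added below) and surface on the message under :images alongside generated images. A sketch; the prompt and resulting filename are illustrative:

    chat = AI::Chat.new
    chat.code_interpreter = true
    chat.user("Create a CSV of the first 10 square numbers.")
    message = chat.generate!
    message[:images]   # e.g. ["./images/<timestamp>_<response_id>/squares.csv"]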
@@ -432,37 +605,235 @@ module AI
     def extract_and_save_images(response)
       image_filenames = []
 
-
-
-
+      if proxy
+        image_outputs = response.dig(:output).select { |output|
+          output.dig(:type) == "image_generation_call"
+        }
+      else
+        image_outputs = response.output.select { |output|
+          output.respond_to?(:type) && output.type == :image_generation_call
+        }
+      end
 
       return image_filenames if image_outputs.empty?
 
+      response_id = proxy ? response.dig(:id) : response.id
+      subfolder_path = create_images_folder(response_id)
+
+      image_outputs.each_with_index do |output, index|
+        if proxy
+          next unless output.key?(:result) && output.dig(:result)
+        else
+          next unless output.respond_to?(:result) && output.result
+        end
+
+        warn_if_file_fails_to_save do
+          result = proxy ? output.dig(:result) : output.result
+          image_data = Base64.strict_decode64(result)
+
+          filename = "#{(index + 1).to_s.rjust(3, "0")}.png"
+          file_path = File.join(subfolder_path, filename)
+
+          File.binwrite(file_path, image_data)
+
+          image_filenames << file_path
+        end
+      end
+
+      image_filenames
+    end
+
+    def create_images_folder(response_id)
       # ISO 8601 basic format with centisecond precision
       timestamp = Time.now.strftime("%Y%m%dT%H%M%S%2N")
 
-      subfolder_name = "#{timestamp}_#{
+      subfolder_name = "#{timestamp}_#{response_id}"
       subfolder_path = File.join(image_folder || "./images", subfolder_name)
       FileUtils.mkdir_p(subfolder_path)
+      subfolder_path
+    end
 
-
-
+    def warn_if_file_fails_to_save
+      yield
+    rescue => error
+      warn "Failed to save image: #{error.message}"
+    end
 
-
-
+    def validate_api_key
+      openai_api_key_used = @api_key.start_with?("sk-proj")
+      proxy_api_key_used = !openai_api_key_used
+      proxy_enabled = proxy
+      proxy_disabled = !proxy
+
+      if openai_api_key_used && proxy_enabled
+        raise WrongAPITokenUsedError, <<~STRING
+          It looks like you're using an official API key from OpenAI with proxying enabled. When proxying is enabled you must use an OpenAI API key from prepend.me. Please disable proxy or update your API key before generating a response.
+        STRING
+      elsif proxy_api_key_used && proxy_disabled
+        raise WrongAPITokenUsedError, <<~STRING
+          It looks like you're using an unofficial OpenAI API key from prepend.me. When using an unofficial API key you must enable proxy before generating a response. Proxying is currently disabled, please enable it before generating a response.
+
+          Example:
+
+          chat = AI::Chat.new
+          chat.proxy = true
+          chat.user(...)
+          chat.generate!
+        STRING
+      end
+    end
 
-
-
+    # :reek:FeatureEnvy
+    # :reek:ManualDispatch
+    # :reek:NestedIterators
+    # :reek:TooManyStatements
+    def extract_and_save_files(response)
+      filenames = []
+
+      if proxy
+        message_outputs = response.dig(:output).select do |output|
+          output.dig(:type) == "message"
+        end
+
+        outputs_with_annotations = message_outputs.map do |message|
+          message.dig(:content).find do |content|
+            content.dig(:annotations).length.positive?
+          end
+        end.compact
+      else
+        message_outputs = response.output.select do |output|
+          output.respond_to?(:type) && output.type == :message
+        end
+
+        outputs_with_annotations = message_outputs.map do |message|
+          message.content.find do |content|
+            content.respond_to?(:annotations) && content.annotations.length.positive?
+          end
+        end.compact
+      end
 
-
+      return filenames if outputs_with_annotations.empty?
 
-
-
-
+      response_id = proxy ? response.dig(:id) : response.id
+      subfolder_path = create_images_folder(response_id)
+
+      if proxy
+        annotations = outputs_with_annotations.map do |output|
+          output.dig(:annotations).find do |annotation|
+            annotation.key?(:filename)
+          end
+        end.compact
+
+        annotations.each do |annotation|
+          container_id = annotation.dig(:container_id)
+          file_id = annotation.dig(:file_id)
+          filename = annotation.dig(:filename)
+
+          warn_if_file_fails_to_save do
+            file_content = retrieve_file(file_id, container_id: container_id)
+            file_path = File.join(subfolder_path, filename)
+            File.binwrite(file_path, file_content)
+            filenames << file_path
+          end
+        end
+      else
+        annotations = outputs_with_annotations.map do |output|
+          output.annotations.find do |annotation|
+            annotation.respond_to?(:filename)
+          end
+        end.compact
+
+        annotations.each do |annotation|
+          container_id = annotation.container_id
+          file_id = annotation.file_id
+          filename = annotation.filename
+
+          warn_if_file_fails_to_save do
+            file_content = retrieve_file(file_id, container_id: container_id)
+            file_path = File.join(subfolder_path, filename)
+            File.open(file_path, "wb") do |file|
+              file.write(file_content.read)
+            end
+            filenames << file_path
+          end
         end
       end
+      filenames
+    end
 
-
+    # This is similar to ActiveJob's :polynomially_longer retry option
+    # :reek:DuplicateMethodCall
+    # :reek:UtilityFunction
+    def calculate_wait(executions)
+      # cap the maximum wait time to ~110 seconds
+      executions = executions.clamp(1..10)
+      jitter = 0.15
+      ((executions**2) + (Kernel.rand * (executions**2) * jitter)) + 2
+    end
+
+    def timeout_request(duration)
+      Timeout.timeout(duration) do
+        yield
+      end
+    rescue Timeout::Error
+      client.responses.cancel(previous_response_id)
+    end
+
+    # :reek:DuplicateMethodCall
+    # :reek:TooManyStatements
+    def wait_for_response(timeout)
+      spinner = TTY::Spinner.new("[:spinner] Thinking ...", format: :dots)
+      spinner.auto_spin
+      api_response = retrieve_response(previous_response_id)
+      number_of_times_polled = 0
+      response = timeout_request(timeout) do
+        status = if api_response.respond_to?(:status)
+          api_response.status
+        else
+          api_response.dig(:status)&.to_sym
+        end
+
+        while status != :completed
+          some_amount_of_seconds = calculate_wait(number_of_times_polled)
+          sleep some_amount_of_seconds
+          number_of_times_polled += 1
+          api_response = retrieve_response(previous_response_id)
+          status = if api_response.respond_to?(:status)
+            api_response.status
+          else
+            api_response.dig(:status)&.to_sym
+          end
+        end
+        api_response
+      end
+
+      status = if api_response.respond_to?(:status)
+        api_response.status
+      else
+        api_response.dig(:status).to_sym
+      end
+      exit_message = status == :cancelled ? "request timed out" : "done!"
+      spinner.stop(exit_message)
+      response
+    end
+
+    def retrieve_response(previous_response_id)
+      if proxy
+        uri = URI(PROXY_URL + "api.openai.com/v1/responses/#{previous_response_id}")
+        send_request(uri, content_type: "json", method: "get")
+      else
+        client.responses.retrieve(previous_response_id)
+      end
+    end
+
+    def retrieve_file(file_id, container_id: nil)
+      if proxy
+        uri = URI(PROXY_URL + "api.openai.com/v1/containers/#{container_id}/files/#{file_id}/content")
+        send_request(uri, method: "get")
+      else
+        container_content = client.containers.files.content
+        file_content = container_content.retrieve(file_id, container_id: container_id)
+      end
     end
   end
 end
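Taken together, the proxy plumbing rewrites every call to https://prepend.me/api.openai.com/... and dispatches it through AI::Http#send_request instead of the official client, with validate_api_key rejecting mismatched key/proxy combinations up front. An end-to-end sketch, assuming a prepend.me key in OPENAI_API_KEY:

    chat = AI::Chat.new    # reads OPENAI_API_KEY
    chat.proxy = true      # required for prepend.me keys (see validate_api_key)
    chat.user("Hello!")
    chat.generate!         # POSTs to https://prepend.me/api.openai.com/v1/responses

Polling backoff comes from calculate_wait: poll n waits n² + 2 seconds plus up to 15% jitter on the n² term, so roughly 3 s on the first poll and about 102-117 s by the tenth (n is clamped to 1..10).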