ai-chat 0.4.0 → 0.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +220 -141
- data/ai-chat.gemspec +3 -3
- data/lib/ai/amazing_print.rb +25 -26
- data/lib/ai/chat.rb +145 -271
- data/lib/ai/items.rb +54 -0
- data/lib/ai/message.rb +23 -0
- data/lib/ai-chat.rb +11 -0
- metadata +8 -5
- data/lib/ai/http.rb +0 -45
data/lib/ai/chat.rb
CHANGED
@@ -11,9 +11,6 @@ require "fileutils"
 require "tty-spinner"
 require "timeout"

-require_relative "http"
-include AI::Http
-
 module AI
   # :reek:MissingSafeMethod { exclude: [ generate! ] }
   # :reek:TooManyMethods
@@ -22,21 +19,22 @@ module AI
   # :reek:IrresponsibleModule
   class Chat
     # :reek:Attribute
-    attr_accessor :background, :code_interpreter, :conversation_id, :image_generation, :image_folder, :messages, :model, :
-    attr_reader :client, :last_response_id, :schema, :schema_file
+    attr_accessor :background, :code_interpreter, :conversation_id, :image_generation, :image_folder, :messages, :model, :reasoning_effort, :web_search
+    attr_reader :client, :last_response_id, :proxy, :schema, :schema_file

-
+    BASE_PROXY_URL = "https://prepend.me/api.openai.com/v1"

     def initialize(api_key: nil, api_key_env_var: "OPENAI_API_KEY")
       @api_key = api_key || ENV.fetch(api_key_env_var)
+      @proxy = false
       @messages = []
       @reasoning_effort = nil
-      @model = "gpt-5.
+      @model = "gpt-5.2"
       @client = OpenAI::Client.new(api_key: @api_key)
       @last_response_id = nil
-      @proxy = false
       @image_generation = false
       @image_folder = "./images"
+      @api_key_validated = false
     end

     def self.generate_schema!(description, location: "schema.json", api_key: nil, api_key_env_var: "OPENAI_API_KEY", proxy: false)
@@ -44,34 +42,25 @@ module AI
       prompt_path = File.expand_path("../prompts/schema_generator.md", __dir__)
       system_prompt = File.read(prompt_path)

-
-
-
-
-        input: [
-          {role: :system, content: system_prompt},
-          {role: :user, content: description}
-        ],
-        text: {format: {type: "json_object"}},
-        reasoning: {effort: "high"}
-      }
+      options = {
+        api_key: api_key,
+        base_url: proxy ? BASE_PROXY_URL : nil
+      }.compact

-
-
-
-
-
-
-
-
-
-
-
-
+      client = OpenAI::Client.new(**options)
+      response = client.responses.create(
+        model: "gpt-5.2",
+        input: [
+          {role: :system, content: system_prompt},
+          {role: :user, content: description}
+        ],
+        text: {format: {type: "json_object"}},
+        reasoning: {effort: "high"}
+      )
+
+      output_text = response.output_text
+      json = JSON.parse(output_text)

-      output_text = response.output_text
-      JSON.parse(output_text)
-      end
       content = JSON.pretty_generate(json)
       if location
         path = Pathname.new(location)
@@ -84,15 +73,12 @@ module AI
     # :reek:TooManyStatements
     # :reek:NilCheck
     def add(content, role: "user", response: nil, status: nil, image: nil, images: nil, file: nil, files: nil)
-      if image.nil? && images.nil? && file.nil? && files.nil?
-
-
-
-
-
-        message[:content] = content if content
-        message[:status] = status if status
-        messages.push(message)
+      message = if image.nil? && images.nil? && file.nil? && files.nil?
+        msg = Message[role: role]
+        msg[:content] = content if content
+        msg[:response] = response if response
+        msg[:status] = status if status
+        msg
       else
         text_and_files_array = [
           {
@@ -122,14 +108,15 @@ module AI
           text_and_files_array.push(process_file_input(file))
         end

-
-
-
-
-
-          }
-        )
+        Message[
+          role: role,
+          content: text_and_files_array,
+          status: status
+        ]
       end
+
+      messages.push(message)
+      message
     end

     def system(message)
@@ -147,7 +134,7 @@ module AI
     # :reek:NilCheck
     # :reek:TooManyStatements
     def generate!
-      validate_api_key
+      validate_api_key unless @api_key_validated
       response = create_response
       parse_response(response)

@@ -168,6 +155,19 @@ module AI
       parse_response(response)
     end

+    def proxy=(value)
+      @proxy = value
+      if value
+        @client = OpenAI::Client.new(
+          api_key: @api_key,
+          base_url: BASE_PROXY_URL
+        )
+      else
+        @client = OpenAI::Client.new(api_key: @api_key)
+      end
+      value
+    end
+
     def schema=(value)
       if value.is_a?(String)
         parsed = JSON.parse(value, symbolize_names: true)
@@ -189,88 +189,54 @@ module AI
       messages.last
     end

-    def
+    def get_items(order: :asc)
       raise "No conversation_id set. Call generate! first to create a conversation." unless conversation_id

-
-
-
-
-        if response_hash.key?(:data)
-          response_hash.dig(:data).map do |hash|
-            # Transform values to allow expected symbols that non-proxied request returns
-
-            hash.transform_values! do |value|
-              if hash.key(value) == :type
-                value.to_sym
-              else
-                value
-              end
-            end
-          end
-          response_hash
-        end
-        # Convert to Struct to allow same interface as non-proxied request
-        create_deep_struct(response_hash)
-      else
-        client.conversations.items.list(conversation_id, order: order)
-      end
+      raw_items = client.conversations.items.list(conversation_id, order: order)
+
+      Items.new(raw_items, conversation_id: conversation_id)
     end

-    def
-
+    def inspectable_attributes
+      attrs = []

-
-
+      # 1. Model and reasoning (configuration)
+      attrs << [:@model, @model]
+      attrs << [:@reasoning_effort, @reasoning_effort]

-
-
-
-      puts "│ Items: #{page.data.length.to_s.ljust(inner_width - 7)} │"
-      puts "└#{"─" * (box_width - 2)}┘"
-      puts
+      # 2. Conversation state
+      attrs << [:@conversation_id, @conversation_id]
+      attrs << [:@last_response_id, @last_response_id] if @last_response_id

-
+      # 3. Messages (the main content, without response details)
+      display_messages = @messages.map { |msg| msg.except(:response) }
+      attrs << [:@messages, display_messages]
+
+      # 4. Optional features (only if enabled/changed from default)
+      attrs << [:@proxy, @proxy] if @proxy != false
+      attrs << [:@image_generation, @image_generation] if @image_generation != false
+      attrs << [:@image_folder, @image_folder] if @image_folder != "./images"
+
+      # 5. Optional state (only if set)
+      attrs << [:@background, @background] if @background
+      attrs << [:@code_interpreter, @code_interpreter] if @code_interpreter
+      attrs << [:@web_search, @web_search] if @web_search
+      attrs << [:@schema, @schema] if @schema
+      attrs << [:@schema_file, @schema_file] if @schema_file
+
+      attrs
     end

     def inspect
-
+      ai(plain: !$stdout.tty?, multiline: true)
     end

-
-
-
-
-
-
-    def pretty_print(q)
-      q.group(1, "#<#{self.class}", ">") do
-        q.breakable
-
-        # Show messages with truncation
-        q.text "@messages="
-        truncated_messages = @messages.map do |msg|
-          truncated_msg = msg.dup
-          if msg[:content].is_a?(String) && msg[:content].length > 80
-            truncated_msg[:content] = msg[:content][0..77] + "..."
-          end
-          truncated_msg
-        end
-        q.pp truncated_messages
-
-        # Show other instance variables (except sensitive ones)
-        skip_vars = [:@messages, :@api_key, :@client]
-        instance_variables.sort.each do |var|
-          next if skip_vars.include?(var)
-          value = instance_variable_get(var)
-          unless value.nil?
-            q.text ","
-            q.breakable
-            q.text "#{var}="
-            q.pp value
-          end
-        end
-      end
+    def to_html
+      AI.wrap_html(ai(html: true, multiline: true))
+    end
+
+    def pretty_inspect
+      "#{inspect}\n"
     end

     private
@@ -293,14 +259,8 @@ module AI
     end

     def create_conversation
-
-
-      response = send_request(uri, content_type: "json", method: "post")
-      response.dig(:id)
-      else
-        conversation = client.conversations.create
-        conversation.id
-      end
+      conversation = client.conversations.create
+      self.conversation_id = conversation.id
     end

     # :reek:TooManyStatements
@@ -312,7 +272,7 @@ module AI
       parameters[:background] = background if background
       parameters[:tools] = tools unless tools.empty?
       parameters[:text] = schema if schema
-      parameters[:reasoning] = {effort: reasoning_effort} if reasoning_effort
+      parameters[:reasoning] = {effort: reasoning_effort, summary: "auto"} if reasoning_effort

       create_conversation unless conversation_id
       parameters[:conversation] = conversation_id
@@ -320,50 +280,22 @@ module AI
       messages_to_send = prepare_messages_for_api
       parameters[:input] = strip_responses(messages_to_send) unless messages_to_send.empty?

-
-        uri = URI(PROXY_URL + "api.openai.com/v1/responses")
-        send_request(uri, content_type: "json", parameters: parameters, method: "post")
-      else
-        client.responses.create(**parameters)
-      end
+      client.responses.create(**parameters)
     end

     # :reek:NilCheck
     # :reek:TooManyStatements
     def parse_response(response)
-
-
-
-
-
-        message_contents = response_messages.map do |message|
-          message.dig(:content)
-        end.flatten
+      text_response = response.output_text
+      response_id = response.id
+      response_status = response.status
+      response_model = response.model
+      response_usage = response.usage.to_h.slice(:input_tokens, :output_tokens, :total_tokens)

-
-
-        end
-
-        text_response = output_texts.map { |output| output[:text] }.join
-        response_id = response.dig(:id)
-        response_status = response.dig(:status).to_sym
-        response_model = response.dig(:model)
-        response_usage = response.dig(:usage)&.slice(:input_tokens, :output_tokens, :total_tokens)
-
-        if response.key?(:conversation)
-          self.conversation_id = response.dig(:conversation, :id)
-        end
-      else
-        text_response = response.output_text
-        response_id = response.id
-        response_status = response.status
-        response_model = response.model
-        response_usage = response.usage.to_h.slice(:input_tokens, :output_tokens, :total_tokens)
-
-        if response.conversation
-          self.conversation_id = response.conversation.id
-        end
+      if response.conversation
+        self.conversation_id = response.conversation.id
       end
+
       image_filenames = extract_and_save_images(response) + extract_and_save_files(response)

       chat_response = {
@@ -387,12 +319,12 @@ module AI
         message.dig(:response, :id) == response_id
       end

-      message =
+      message = Message[
         role: "assistant",
         content: response_content,
         response: chat_response,
         status: response_status
-
+      ]

       message.store(:images, image_filenames) unless image_filenames.empty?

@@ -400,8 +332,9 @@ module AI
         messages[existing_message_position] = message
       else
         messages.push(message)
-        message
       end
+
+      message
     end

     def cancel_request
@@ -591,30 +524,20 @@ module AI
     def extract_and_save_images(response)
       image_filenames = []

-      image_outputs =
-        response.dig(:output).select { |output|
-          output.dig(:type) == "image_generation_call"
-        }
-      else
-        response.output.select { |output|
+      image_outputs = response.output.select { |output|
         output.respond_to?(:type) && output.type == :image_generation_call
       }
-      end

       return image_filenames if image_outputs.empty?

-      response_id =
+      response_id = response.id
       subfolder_path = create_images_folder(response_id)

       image_outputs.each_with_index do |output, index|
-
-        next unless output.key?(:result) && output.dig(:result)
-        else
-        next unless output.respond_to?(:result) && output.result
-        end
+        next unless output.respond_to?(:result) && output.result

         warn_if_file_fails_to_save do
-          result =
+          result = output.result
           image_data = Base64.strict_decode64(result)

           filename = "#{(index + 1).to_s.rjust(3, "0")}.png"
@@ -646,27 +569,20 @@ module AI
     end

     def validate_api_key
-
-
-
-
-
-
-
-          It looks like you're using an official API key from OpenAI with proxying enabled. When proxying is enabled you must use an OpenAI API key from prepend.me. Please disable proxy or update your API key before generating a response.
+      # Simple API call to validate the token
+      client.models.list
+      @api_key_validated = true
+    rescue OpenAI::Errors::AuthenticationError
+      message = if proxy
+        <<~STRING
+          It looks like you're using an invalid API key. Proxying is enabled, so you must use an OpenAI API key from prepend.me. Please disable proxy or update your API key before generating a response.
         STRING
-
-
-          It looks like you're using an
-
-          Example:
-
-          chat = AI::Chat.new
-          chat.proxy = true
-          chat.user(...)
-          chat.generate!
+      else
+        <<~STRING
+          It looks like you're using an invalid API key. Check to make sure your API key is valid before generating a response.
         STRING
       end
+      raise WrongAPITokenUsedError, message, cause: nil
     end

     # :reek:FeatureEnvy
@@ -676,72 +592,40 @@ module AI
     def extract_and_save_files(response)
       filenames = []

-
-
-
-      end
+      message_outputs = response.output.select do |output|
+        output.respond_to?(:type) && output.type == :message
+      end

-
-
-
-        end
-      end.compact
-      else
-        message_outputs = response.output.select do |output|
-          output.respond_to?(:type) && output.type == :message
+      outputs_with_annotations = message_outputs.map do |message|
+        message.content.find do |content|
+          content.respond_to?(:annotations) && content.annotations.length.positive?
         end
-
-        outputs_with_annotations = message_outputs.map do |message|
-          message.content.find do |content|
-            content.respond_to?(:annotations) && content.annotations.length.positive?
-          end
-        end.compact
-      end
+      end.compact

       return filenames if outputs_with_annotations.empty?

-      response_id =
+      response_id = response.id
       subfolder_path = create_images_folder(response_id)

-
-      annotations
-
-          annotation.key?(:filename)
-        end
-      end.compact
-
-      annotations.each do |annotation|
-        container_id = annotation.dig(:container_id)
-        file_id = annotation.dig(:file_id)
-        filename = annotation.dig(:filename)
-
-        warn_if_file_fails_to_save do
-          file_content = retrieve_file(file_id, container_id: container_id)
-          file_path = File.join(subfolder_path, filename)
-          File.binwrite(file_path, file_content)
-          filenames << file_path
-        end
+      annotations = outputs_with_annotations.map do |output|
+        output.annotations.find do |annotation|
+          annotation.respond_to?(:filename)
         end
-
-
-
-
-
-
-
-
-
-
-
-
-      warn_if_file_fails_to_save do
-        file_content = retrieve_file(file_id, container_id: container_id)
-        file_path = File.join(subfolder_path, filename)
-        File.binwrite(file_path, file_content.read)
-        filenames << file_path
-      end
+      end.compact
+
+      annotations.each do |annotation|
+        container_id = annotation.container_id
+        file_id = annotation.file_id
+        filename = annotation.filename
+
+        warn_if_file_fails_to_save do
+          file_content = retrieve_file(file_id, container_id: container_id)
+          file_path = File.join(subfolder_path, filename)
+          File.binwrite(file_path, file_content.read)
+          filenames << file_path
+        end
       end
+
       filenames
     end

@@ -802,22 +686,12 @@ module AI
     end

     def retrieve_response(response_id)
-
-      uri = URI(PROXY_URL + "api.openai.com/v1/responses/#{response_id}")
-      send_request(uri, content_type: "json", method: "get")
-      else
-        client.responses.retrieve(response_id)
-      end
+      client.responses.retrieve(response_id)
     end

     def retrieve_file(file_id, container_id: nil)
-
-
-      send_request(uri, method: "get")
-      else
-        container_content = client.containers.files.content
-        container_content.retrieve(file_id, container_id: container_id)
-      end
+      container_content = client.containers.files.content
+      container_content.retrieve(file_id, container_id: container_id)
     end
   end
 end
data/lib/ai/items.rb
ADDED
@@ -0,0 +1,54 @@
+# frozen_string_literal: true
+
+require "delegate"
+
+module AI
+  class Items < SimpleDelegator
+    def initialize(response, conversation_id:)
+      super(response)
+      @conversation_id = conversation_id
+    end
+
+    def to_html
+      AI.wrap_html(build_output(html: true))
+    end
+
+    def inspect
+      build_output(html: false, plain: !$stdout.tty?)
+    end
+
+    def pretty_inspect
+      "#{inspect}\n"
+    end
+
+    def pretty_print(q)
+      q.output << inspect
+    end
+
+    private
+
+    def build_output(html: false, plain: false)
+      box = build_box
+      items_output = data.ai(html: html, plain: plain, limit: 100, indent: 2, index: true)
+
+      if html
+        "<pre>#{box}</pre>\n#{items_output}"
+      else
+        "#{box}\n#{items_output}"
+      end
+    end
+
+    def build_box
+      box_width = 78
+      inner_width = box_width - 4
+
+      lines = []
+      lines << "┌#{"─" * (box_width - 2)}┐"
+      lines << "│ Conversation: #{@conversation_id.to_s.ljust(inner_width - 14)} │"
+      lines << "│ Items: #{data.length.to_s.ljust(inner_width - 7)} │"
+      lines << "└#{"─" * (box_width - 2)}┘"
+
+      lines.join("\n")
+    end
+  end
+end
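
Items wraps the raw conversation-items list in a SimpleDelegator, so delegated calls such as data still work, while inspect/to_html add the boxed summary that chat.rb previously printed inline. A sketch of how it is reached, assuming a conversation already exists on the chat:

    items = chat.get_items(order: :asc)  # AI::Items delegating to the raw list
    items.data.length                    # forwarded to the underlying response
    puts items.inspect                   # box (conversation id, item count) plus amazing_print output
    items.to_html                        # the same output in a styled <pre> via AI.wrap_html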
data/lib/ai/message.rb
ADDED
@@ -0,0 +1,23 @@
+# frozen_string_literal: true
+
+module AI
+  class Message < Hash
+    def inspect
+      ai(plain: !$stdout.tty?, index: false)
+    end
+
+    def pretty_inspect
+      "#{inspect}\n"
+    end
+
+    # IRB's ColorPrinter calls pretty_print and re-colorizes text,
+    # which escapes our ANSI codes. Write directly to output to bypass.
+    def pretty_print(q)
+      q.output << inspect
+    end
+
+    def to_html
+      AI.wrap_html(ai(html: true, index: false))
+    end
+  end
+end
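
Because Message subclasses Hash, chat.rb can build messages with the Hash[] constructor (Message[role: role]) and keep ordinary hash reads and writes, while inspect and friends route through amazing_print's ai. An illustrative sketch; the field values here are placeholders:

    msg = AI::Message[role: "user", content: "Hi"]  # Hash::[] constructor
    msg[:status] = "completed"                      # still behaves like an ordinary Hash
    msg.inspect                                     # amazing_print output, plain when $stdout is not a TTY
    msg.to_html                                     # <pre>-wrapped HTML via AI.wrap_html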
data/lib/ai-chat.rb
CHANGED
@@ -1,3 +1,14 @@
+module AI
+  HTML_PRE_STYLE = "background-color: #1e1e1e; color: #d4d4d4; padding: 1em; white-space: pre-wrap; word-wrap: break-word;"
+
+  def self.wrap_html(html)
+    html = html.gsub("<pre>", "<pre style=\"#{HTML_PRE_STYLE}\">")
+    html.respond_to?(:html_safe) ? html.html_safe : html
+  end
+end
+
+require_relative "ai/message"
+require_relative "ai/items"
 require_relative "ai/chat"

 # Load amazing_print extension if amazing_print is available