rails_ai 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.rspec_status +96 -0
- data/AGENT_GUIDE.md +513 -0
- data/Appraisals +49 -0
- data/COMMERCIAL_LICENSE_TEMPLATE.md +92 -0
- data/FEATURES.md +204 -0
- data/LEGAL_PROTECTION_GUIDE.md +222 -0
- data/LICENSE +62 -0
- data/LICENSE_SUMMARY.md +74 -0
- data/MIT-LICENSE +62 -0
- data/PERFORMANCE.md +300 -0
- data/PROVIDERS.md +495 -0
- data/README.md +454 -0
- data/Rakefile +11 -0
- data/SPEED_OPTIMIZATIONS.md +217 -0
- data/STRUCTURE.md +139 -0
- data/USAGE_GUIDE.md +288 -0
- data/app/channels/ai_stream_channel.rb +33 -0
- data/app/components/ai/prompt_component.rb +25 -0
- data/app/controllers/concerns/ai/context_aware.rb +77 -0
- data/app/controllers/concerns/ai/streaming.rb +41 -0
- data/app/helpers/ai_helper.rb +164 -0
- data/app/jobs/ai/generate_embedding_job.rb +25 -0
- data/app/jobs/ai/generate_summary_job.rb +25 -0
- data/app/models/concerns/ai/embeddable.rb +38 -0
- data/app/views/rails_ai/dashboard/index.html.erb +51 -0
- data/config/routes.rb +19 -0
- data/lib/generators/rails_ai/install/install_generator.rb +38 -0
- data/lib/rails_ai/agents/agent_manager.rb +258 -0
- data/lib/rails_ai/agents/agent_team.rb +243 -0
- data/lib/rails_ai/agents/base_agent.rb +331 -0
- data/lib/rails_ai/agents/collaboration.rb +238 -0
- data/lib/rails_ai/agents/memory.rb +116 -0
- data/lib/rails_ai/agents/message_bus.rb +95 -0
- data/lib/rails_ai/agents/specialized_agents.rb +391 -0
- data/lib/rails_ai/agents/task_queue.rb +111 -0
- data/lib/rails_ai/cache.rb +14 -0
- data/lib/rails_ai/config.rb +40 -0
- data/lib/rails_ai/context.rb +7 -0
- data/lib/rails_ai/context_analyzer.rb +86 -0
- data/lib/rails_ai/engine.rb +48 -0
- data/lib/rails_ai/events.rb +9 -0
- data/lib/rails_ai/image_context.rb +110 -0
- data/lib/rails_ai/performance.rb +231 -0
- data/lib/rails_ai/provider.rb +8 -0
- data/lib/rails_ai/providers/anthropic_adapter.rb +256 -0
- data/lib/rails_ai/providers/base.rb +60 -0
- data/lib/rails_ai/providers/dummy_adapter.rb +29 -0
- data/lib/rails_ai/providers/gemini_adapter.rb +509 -0
- data/lib/rails_ai/providers/openai_adapter.rb +535 -0
- data/lib/rails_ai/providers/secure_anthropic_adapter.rb +206 -0
- data/lib/rails_ai/providers/secure_openai_adapter.rb +284 -0
- data/lib/rails_ai/railtie.rb +48 -0
- data/lib/rails_ai/redactor.rb +12 -0
- data/lib/rails_ai/security/api_key_manager.rb +82 -0
- data/lib/rails_ai/security/audit_logger.rb +46 -0
- data/lib/rails_ai/security/error_handler.rb +62 -0
- data/lib/rails_ai/security/input_validator.rb +176 -0
- data/lib/rails_ai/security/secure_file_handler.rb +45 -0
- data/lib/rails_ai/security/secure_http_client.rb +177 -0
- data/lib/rails_ai/security.rb +0 -0
- data/lib/rails_ai/version.rb +5 -0
- data/lib/rails_ai/window_context.rb +103 -0
- data/lib/rails_ai.rb +502 -0
- data/monitoring/ci_setup_guide.md +214 -0
- data/monitoring/enhanced_monitoring_script.rb +237 -0
- data/monitoring/google_alerts_setup.md +42 -0
- data/monitoring_log_20250921.txt +0 -0
- data/monitoring_script.rb +161 -0
- data/rails_ai.gemspec +54 -0
- data/scripts/security_scanner.rb +353 -0
- data/setup_monitoring.sh +163 -0
- data/wiki/API-Documentation.md +734 -0
- data/wiki/Architecture-Overview.md +672 -0
- data/wiki/Contributing-Guide.md +407 -0
- data/wiki/Development-Setup.md +532 -0
- data/wiki/Home.md +278 -0
- data/wiki/Installation-Guide.md +527 -0
- data/wiki/Quick-Start.md +186 -0
- data/wiki/README.md +135 -0
- data/wiki/Release-Process.md +467 -0
- metadata +385 -0
@@ -0,0 +1,535 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require "net/http"
|
4
|
+
require "json"
|
5
|
+
require "base64"
|
6
|
+
|
7
|
+
module RailsAi
|
8
|
+
module Providers
|
9
|
+
class OpenAIAdapter < Base
|
10
|
+
OPENAI_API_BASE = "https://api.openai.com/v1"
|
11
|
+
|
12
|
+
# Builds the adapter. The API key defaults to the OPENAI_API_KEY environment
# variable (ENV.fetch raises KeyError when it is unset, failing fast at boot
# rather than on the first request); an explicit key may be injected for
# testing or multi-tenant setups without changing existing callers.
def initialize(api_key: ENV.fetch("OPENAI_API_KEY"))
  @api_key = api_key
  # Explicit super() so the new keyword argument is not forwarded to Base.
  super()
end
|
16
|
+
|
17
|
+
# Text-based operations
|
18
|
+
# Sends a chat-completion request and returns the assistant's reply text.
# Sampling options (:max_tokens, :temperature, :top_p, :frequency_penalty,
# :presence_penalty) have defaults; any other opts are forwarded untouched.
def chat!(messages:, model:, **opts)
  return "(stubbed) #{messages.last[:content]}" if RailsAi.config.stub_responses

  tuning = {
    max_tokens: opts[:max_tokens] || RailsAi.config.token_limit,
    temperature: opts[:temperature] || 0.7,
    top_p: opts[:top_p] || 1.0,
    frequency_penalty: opts[:frequency_penalty] || 0.0,
    presence_penalty: opts[:presence_penalty] || 0.0
  }
  payload = { model: model, messages: messages }
            .merge(tuning)
            .merge(opts.except(*tuning.keys))

  make_request("chat/completions", payload).dig("choices", 0, "message", "content")
end
|
37
|
+
|
38
|
+
# Streams a chat completion, invoking on_token with each text delta as it
# arrives. Same tuning defaults as chat!; :stream is forced on.
def stream_chat!(messages:, model:, **opts, &on_token)
  return on_token.call("(stubbed stream)") if RailsAi.config.stub_responses

  tuning = {
    max_tokens: opts[:max_tokens] || RailsAi.config.token_limit,
    temperature: opts[:temperature] || 0.7,
    top_p: opts[:top_p] || 1.0,
    frequency_penalty: opts[:frequency_penalty] || 0.0,
    presence_penalty: opts[:presence_penalty] || 0.0,
    stream: true
  }
  payload = { model: model, messages: messages }
            .merge(tuning)
            .merge(opts.except(*tuning.keys))

  make_streaming_request("chat/completions", payload) do |chunk|
    token = chunk.dig("choices", 0, "delta", "content")
    on_token.call(token) if token
  end
end
|
59
|
+
|
60
|
+
# Returns one embedding vector (Array of Floats) per input text.
# A single text is sent as a scalar input, multiple texts as a batch;
# both shapes come back normalised to an array of vectors.
def embed!(texts:, model:, **opts)
  return Array.new(texts.length) { [0.0] * 1536 } if RailsAi.config.stub_responses

  single = texts.length == 1
  payload = { model: model, input: single ? texts.first : texts, **opts }
  response = make_request("embeddings", payload)

  if single
    [response.dig("data", 0, "embedding")]
  else
    response.dig("data").map { |item| item["embedding"] }
  end
end
|
86
|
+
|
87
|
+
# Image generation - DALL-E 3 and DALL-E 2
|
88
|
+
# Generates an image via images/generations and returns it as a base64 data
# URI. If the API responds with a URL, the asset is downloaded and inlined so
# callers always get the same return shape.
def generate_image!(prompt:, model: "dall-e-3", size: "1024x1024", quality: "standard", **opts)
  return "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==" if RailsAi.config.stub_responses

  payload = {
    model: model,
    prompt: prompt,
    size: size,
    quality: quality,
    n: opts[:n] || 1,
    response_format: opts[:response_format] || "url",
    **opts.except(:n, :response_format)
  }
  response = make_request("images/generations", payload)

  data = response.dig("data", 0, "url") || response.dig("data", 0, "b64_json")
  raise "Image generation failed: No image data in response" unless data

  data.start_with?("http") ? convert_url_to_base64(data) : "data:image/png;base64,#{data}"
end
|
117
|
+
|
118
|
+
# Edits an image (optionally constrained by a mask) via images/edits and
# returns the result as a base64 data URI.
def edit_image!(image:, prompt:, mask: nil, size: "1024x1024", **opts)
  return "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==" if RailsAi.config.stub_responses

  form_data = {
    image: prepare_image_file(image),
    prompt: prompt,
    size: size,
    n: opts[:n] || 1,
    response_format: opts[:response_format] || "url",
    **opts.except(:n, :response_format)
  }
  form_data[:mask] = prepare_image_file(mask) if mask

  response = make_form_request("images/edits", form_data)

  data = response.dig("data", 0, "url") || response.dig("data", 0, "b64_json")
  raise "Image editing failed: No image data in response" unless data

  data.start_with?("http") ? convert_url_to_base64(data) : "data:image/png;base64,#{data}"
end
|
146
|
+
|
147
|
+
# Requests a variation of an existing image via images/variations and returns
# the result as a base64 data URI.
def create_variation!(image:, size: "1024x1024", **opts)
  return "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==" if RailsAi.config.stub_responses

  form_data = {
    image: prepare_image_file(image),
    size: size,
    n: opts[:n] || 1,
    response_format: opts[:response_format] || "url",
    **opts.except(:n, :response_format)
  }
  response = make_form_request("images/variations", form_data)

  data = response.dig("data", 0, "url") || response.dig("data", 0, "b64_json")
  raise "Image variation failed: No image data in response" unless data

  data.start_with?("http") ? convert_url_to_base64(data) : "data:image/png;base64,#{data}"
end
|
171
|
+
|
172
|
+
# Video generation - Sora and other video models
|
173
|
+
# Generates a video via video/generations and returns it as a base64 data URI
# (video/mp4). URL responses are downloaded and inlined.
def generate_video!(prompt:, model: "sora", duration: 5, **opts)
  return "data:video/mp4;base64,AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1wNDEAAAAIZnJlZQAAAB1tZGF0AQAAARxtYXNrAAAAAG1wNDEAAAAAIG1kYXQ=" if RailsAi.config.stub_responses

  payload = {
    model: model,
    prompt: prompt,
    duration: duration,
    size: opts[:size] || "1280x720",
    quality: opts[:quality] || "standard",
    **opts.except(:size, :quality)
  }
  response = make_request("video/generations", payload)

  data = response.dig("data", 0, "url") || response.dig("data", 0, "b64_json")
  raise "Video generation failed: No video data in response" unless data

  data.start_with?("http") ? convert_url_to_base64(data, "video/mp4") : "data:video/mp4;base64,#{data}"
end
|
199
|
+
|
200
|
+
# Edits a video via video/edits and returns the result as a base64 data URI
# (video/mp4).
def edit_video!(video:, prompt:, **opts)
  return "data:video/mp4;base64,AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1wNDEAAAAIZnJlZQAAAB1tZGF0AQAAARxtYXNrAAAAAG1wNDEAAAAAIG1kYXQ=" if RailsAi.config.stub_responses

  form_data = {
    video: prepare_video_file(video),
    prompt: prompt,
    **opts
  }
  response = make_form_request("video/edits", form_data)

  data = response.dig("data", 0, "url") || response.dig("data", 0, "b64_json")
  raise "Video editing failed: No video data in response" unless data

  data.start_with?("http") ? convert_url_to_base64(data, "video/mp4") : "data:video/mp4;base64,#{data}"
end
|
222
|
+
|
223
|
+
# Audio generation - TTS models
|
224
|
+
# Synthesises speech via audio/speech (TTS) and returns an audio/mp3 data URI.
# The endpoint returns raw audio bytes rather than JSON, so the response is
# expected to be a String.
def generate_speech!(text:, model: "tts-1", voice: "alloy", **opts)
  return "data:audio/mp3;base64,SUQzBAAAAAAAI1RTU0UAAAAPAAADTGF2ZjU4Ljc2LjEwMAAAAAAAAAAAAAAA//tQxAADB8AhSmAhIIEVWWWU" if RailsAi.config.stub_responses

  payload = {
    model: model,
    input: text,
    voice: voice,
    response_format: opts[:response_format] || "mp3",
    speed: opts[:speed] || 1.0,
    **opts.except(:response_format, :speed)
  }
  response = make_request("audio/speech", payload)

  raise "Speech generation failed: No audio data in response" unless response.is_a?(String)

  "data:audio/mp3;base64,#{Base64.strict_encode64(response)}"
end
|
246
|
+
|
247
|
+
# Transcribes audio via audio/transcriptions (Whisper) and returns the text.
#
# audio - data URI, file path, or IO (see prepare_audio_file).
# opts  - :language, :prompt, :response_format (default "json"),
#         :temperature (default 0.0); anything else is forwarded.
#
# Fix: :language and :prompt were previously included even when nil, which
# make_form_request serialised as empty-string form fields; .compact drops
# absent optional parameters entirely.
def transcribe_audio!(audio:, model: "whisper-1", **opts)
  return "[stubbed transcription]" if RailsAi.config.stub_responses

  form_data = {
    file: prepare_audio_file(audio),
    model: model,
    language: opts[:language],
    prompt: opts[:prompt],
    response_format: opts[:response_format] || "json",
    temperature: opts[:temperature] || 0.0,
    **opts.except(:language, :prompt, :response_format, :temperature)
  }.compact

  response = make_form_request("audio/transcriptions", form_data)

  # Plain-text response formats come back as a String; JSON formats as a Hash.
  response.is_a?(String) ? response : response.dig("text")
end
|
268
|
+
|
269
|
+
# Multimodal analysis - GPT-4 Vision and other vision models
|
270
|
+
# Runs a vision-capable chat model over one image plus a text prompt and
# returns the model's textual answer.
def analyze_image!(image:, prompt:, model: "gpt-4o", **opts)
  return "[stubbed] Image analysis: #{prompt}" if RailsAi.config.stub_responses

  user_content = [
    { type: "text", text: prompt },
    { type: "image_url", image_url: { url: prepare_image_for_vision(image) } }
  ]

  payload = {
    model: model,
    messages: [{ role: "user", content: user_content }],
    max_tokens: opts[:max_tokens] || RailsAi.config.token_limit,
    temperature: opts[:temperature] || 0.7,
    **opts.except(:max_tokens, :temperature)
  }
  response = make_request("chat/completions", payload)

  response.dig("choices", 0, "message", "content")
end
|
307
|
+
|
308
|
+
# "Analyzes" a video by sending a representative frame (see
# prepare_video_for_vision — currently a placeholder image) to a vision chat
# model along with the prompt, returning the model's textual answer.
def analyze_video!(video:, prompt:, model: "gpt-4o", **opts)
  return "[stubbed] Video analysis: #{prompt}" if RailsAi.config.stub_responses

  user_content = [
    { type: "text", text: "#{prompt}\n\nAnalyze this video content:" },
    { type: "image_url", image_url: { url: prepare_video_for_vision(video) } }
  ]

  payload = {
    model: model,
    messages: [{ role: "user", content: user_content }],
    max_tokens: opts[:max_tokens] || RailsAi.config.token_limit,
    temperature: opts[:temperature] || 0.7,
    **opts.except(:max_tokens, :temperature)
  }
  response = make_request("chat/completions", payload)

  response.dig("choices", 0, "message", "content")
end
|
346
|
+
|
347
|
+
private
|
348
|
+
|
349
|
+
# POSTs a JSON payload to the OpenAI API.
#
# Returns a parsed Hash for JSON responses, or the raw body String for
# binary responses. Raises RuntimeError (with the API's error body) on any
# non-success HTTP status.
#
# Fix: the previous version always JSON.parsed 200 bodies, but the
# audio/speech endpoint returns raw audio bytes — generate_speech! could
# never see a String and TTS raised JSON::ParserError instead. Branch on
# Content-Type, mirroring make_form_request.
def make_request(endpoint, payload)
  uri = URI("#{OPENAI_API_BASE}/#{endpoint}")

  http = Net::HTTP.new(uri.host, uri.port)
  http.use_ssl = true

  request = Net::HTTP::Post.new(uri)
  request["Authorization"] = "Bearer #{@api_key}"
  request["Content-Type"] = "application/json"
  request.body = payload.to_json

  response = http.request(request)

  # Accept any 2xx, not just exactly "200".
  unless response.is_a?(Net::HTTPSuccess)
    error_body = begin
      JSON.parse(response.body)
    rescue JSON::ParserError
      response.body
    end
    raise "OpenAI API error (#{response.code}): #{error_body}"
  end

  if response["content-type"]&.include?("application/json")
    JSON.parse(response.body)
  else
    response.body
  end
end
|
369
|
+
|
370
|
+
# POSTs multipart form data (file uploads) to the OpenAI API.
#
# form_data - Hash of field name => value; IO-like values (respond_to?(:read))
#             are sent as file parts, everything else as plain fields.
# Returns a parsed Hash for JSON responses or the raw body String otherwise.
# Raises RuntimeError on non-success HTTP status.
#
# Fixes: nil values are now skipped (previously serialised as "" fields),
# and file parts get a plausible extension — "file.image"/"file.file" style
# names are rejected by extension-checking endpoints such as transcription.
def make_form_request(endpoint, form_data)
  uri = URI("#{OPENAI_API_BASE}/#{endpoint}")

  http = Net::HTTP.new(uri.host, uri.port)
  http.use_ssl = true

  request = Net::HTTP::Post.new(uri)
  request["Authorization"] = "Bearer #{@api_key}"

  # Field name -> upload extension. NOTE(review): the true format of the
  # uploaded bytes is not detected; these are best-effort defaults.
  extensions = { image: "png", mask: "png", video: "mp4", file: "mp3", audio: "mp3" }

  form = []
  form_data.each do |key, value|
    next if value.nil? # drop absent optional parameters entirely

    if value.respond_to?(:read)
      ext = extensions.fetch(key.to_sym, "bin")
      form << [key.to_s, value, { filename: "file.#{ext}" }]
    else
      form << [key.to_s, value.to_s]
    end
  end

  request.set_form(form, "multipart/form-data")

  response = http.request(request)

  unless response.is_a?(Net::HTTPSuccess)
    error_body = begin
      JSON.parse(response.body)
    rescue JSON::ParserError
      response.body
    end
    raise "OpenAI API error (#{response.code}): #{error_body}"
  end

  if response["content-type"]&.include?("application/json")
    JSON.parse(response.body)
  else
    response.body
  end
end
|
406
|
+
|
407
|
+
# POSTs a JSON payload and yields each parsed server-sent-event JSON object
# to the block as it streams in.
#
# Fix: the previous version split each network chunk on "\n" independently,
# so an SSE "data: {...}" line split across two chunks was silently dropped
# (both halves fail JSON parsing). A buffer now carries the trailing partial
# line from one chunk into the next; only complete lines are consumed.
def make_streaming_request(endpoint, payload, &block)
  uri = URI("#{OPENAI_API_BASE}/#{endpoint}")

  http = Net::HTTP.new(uri.host, uri.port)
  http.use_ssl = true

  request = Net::HTTP::Post.new(uri)
  request["Authorization"] = "Bearer #{@api_key}"
  request["Content-Type"] = "application/json"
  request.body = payload.to_json

  http.request(request) do |response|
    unless response.code == "200"
      error_body = begin
        JSON.parse(response.body)
      rescue JSON::ParserError
        response.body
      end
      raise "OpenAI API error (#{response.code}): #{error_body}"
    end

    buffer = +"" # mutable despite frozen_string_literal
    response.read_body do |chunk|
      buffer << chunk
      # Consume only complete lines; keep any trailing partial line buffered.
      while (line = buffer.slice!(/\A[^\n]*\n/))
        line.chomp!
        next if line.empty?
        next unless line.start_with?("data: ")

        data = line[6..] # strip the "data: " prefix
        next if data == "[DONE]"

        begin
          block.call(JSON.parse(data))
        rescue JSON::ParserError
          next # skip malformed events
        end
      end
    end
  end
end
|
444
|
+
|
445
|
+
# Normalises an image argument for upload: a data URI is decoded into an
# in-memory IO, a path string is opened in binary mode, and anything else
# (assumed to already be IO-like) passes through unchanged.
def prepare_image_file(image)
  return image unless image.is_a?(String)

  if image.start_with?("data:image/")
    encoded = image.split(",")[1]
    StringIO.new(Base64.decode64(encoded))
  else
    File.open(image, "rb")
  end
end
|
459
|
+
|
460
|
+
# Normalises a video argument for upload: a data URI is decoded into an
# in-memory IO, a path string is opened in binary mode, and anything else
# (assumed to already be IO-like) passes through unchanged.
def prepare_video_file(video)
  return video unless video.is_a?(String)

  if video.start_with?("data:video/")
    encoded = video.split(",")[1]
    StringIO.new(Base64.decode64(encoded))
  else
    File.open(video, "rb")
  end
end
|
474
|
+
|
475
|
+
# Normalises an audio argument for upload: a data URI is decoded into an
# in-memory IO, a path string is opened in binary mode, and anything else
# (assumed to already be IO-like) passes through unchanged.
def prepare_audio_file(audio)
  return audio unless audio.is_a?(String)

  if audio.start_with?("data:audio/")
    encoded = audio.split(",")[1]
    StringIO.new(Base64.decode64(encoded))
  else
    File.open(audio, "rb")
  end
end
|
489
|
+
|
490
|
+
# Converts an image argument into a data-URI string for the vision chat API.
# Existing data URIs pass through; file paths and IO objects are base64
# encoded inline.
#
# Fix: File.read opens in text mode, which can mangle binary image bytes
# (newline translation on Windows); File.binread reads the raw bytes.
# NOTE(review): non-PNG inputs are still labelled image/png — the MIME type
# is not detected from content; confirm the API tolerates the mismatch.
def prepare_image_for_vision(image)
  if image.is_a?(String)
    return image if image.start_with?("data:image/")

    "data:image/png;base64,#{Base64.strict_encode64(File.binread(image))}"
  else
    "data:image/png;base64,#{Base64.strict_encode64(image.read)}"
  end
end
|
505
|
+
|
506
|
+
# Placeholder "frame extraction" for video analysis: real frame grabbing is
# not implemented, so every input maps to the same 1x1 transparent PNG data
# URI for the vision API.
#
# Fix: the original's non-String branch returned a subtly corrupted copy of
# this base64 string ("Fc5J" instead of "FcSJ" — not valid PNG data); all
# inputs now share the one valid placeholder.
def prepare_video_for_vision(_video)
  "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg=="
end
|
521
|
+
|
522
|
+
# Downloads a resource and re-encodes it as a base64 data URI with the given
# MIME type. Raises RuntimeError when the fetch does not succeed.
#
# Fix: success was previously tested as exactly code == "200"; any other 2xx
# (e.g. 203 from a proxy) was treated as failure. Net::HTTPSuccess covers
# the whole 2xx class.
# NOTE(review): redirects are not followed — a 3xx here raises; confirm the
# asset URLs returned by the API resolve directly.
def convert_url_to_base64(url, mime_type = "image/png")
  response = Net::HTTP.get_response(URI(url))

  unless response.is_a?(Net::HTTPSuccess)
    raise "Failed to fetch image from URL: #{response.code}"
  end

  "data:#{mime_type};base64,#{Base64.strict_encode64(response.body)}"
end
|
533
|
+
end
|
534
|
+
end
|
535
|
+
end
|