rails_ai 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. checksums.yaml +7 -0
  2. data/.rspec_status +96 -0
  3. data/AGENT_GUIDE.md +513 -0
  4. data/Appraisals +49 -0
  5. data/COMMERCIAL_LICENSE_TEMPLATE.md +92 -0
  6. data/FEATURES.md +204 -0
  7. data/LEGAL_PROTECTION_GUIDE.md +222 -0
  8. data/LICENSE +62 -0
  9. data/LICENSE_SUMMARY.md +74 -0
  10. data/MIT-LICENSE +62 -0
  11. data/PERFORMANCE.md +300 -0
  12. data/PROVIDERS.md +495 -0
  13. data/README.md +454 -0
  14. data/Rakefile +11 -0
  15. data/SPEED_OPTIMIZATIONS.md +217 -0
  16. data/STRUCTURE.md +139 -0
  17. data/USAGE_GUIDE.md +288 -0
  18. data/app/channels/ai_stream_channel.rb +33 -0
  19. data/app/components/ai/prompt_component.rb +25 -0
  20. data/app/controllers/concerns/ai/context_aware.rb +77 -0
  21. data/app/controllers/concerns/ai/streaming.rb +41 -0
  22. data/app/helpers/ai_helper.rb +164 -0
  23. data/app/jobs/ai/generate_embedding_job.rb +25 -0
  24. data/app/jobs/ai/generate_summary_job.rb +25 -0
  25. data/app/models/concerns/ai/embeddable.rb +38 -0
  26. data/app/views/rails_ai/dashboard/index.html.erb +51 -0
  27. data/config/routes.rb +19 -0
  28. data/lib/generators/rails_ai/install/install_generator.rb +38 -0
  29. data/lib/rails_ai/agents/agent_manager.rb +258 -0
  30. data/lib/rails_ai/agents/agent_team.rb +243 -0
  31. data/lib/rails_ai/agents/base_agent.rb +331 -0
  32. data/lib/rails_ai/agents/collaboration.rb +238 -0
  33. data/lib/rails_ai/agents/memory.rb +116 -0
  34. data/lib/rails_ai/agents/message_bus.rb +95 -0
  35. data/lib/rails_ai/agents/specialized_agents.rb +391 -0
  36. data/lib/rails_ai/agents/task_queue.rb +111 -0
  37. data/lib/rails_ai/cache.rb +14 -0
  38. data/lib/rails_ai/config.rb +40 -0
  39. data/lib/rails_ai/context.rb +7 -0
  40. data/lib/rails_ai/context_analyzer.rb +86 -0
  41. data/lib/rails_ai/engine.rb +48 -0
  42. data/lib/rails_ai/events.rb +9 -0
  43. data/lib/rails_ai/image_context.rb +110 -0
  44. data/lib/rails_ai/performance.rb +231 -0
  45. data/lib/rails_ai/provider.rb +8 -0
  46. data/lib/rails_ai/providers/anthropic_adapter.rb +256 -0
  47. data/lib/rails_ai/providers/base.rb +60 -0
  48. data/lib/rails_ai/providers/dummy_adapter.rb +29 -0
  49. data/lib/rails_ai/providers/gemini_adapter.rb +509 -0
  50. data/lib/rails_ai/providers/openai_adapter.rb +535 -0
  51. data/lib/rails_ai/providers/secure_anthropic_adapter.rb +206 -0
  52. data/lib/rails_ai/providers/secure_openai_adapter.rb +284 -0
  53. data/lib/rails_ai/railtie.rb +48 -0
  54. data/lib/rails_ai/redactor.rb +12 -0
  55. data/lib/rails_ai/security/api_key_manager.rb +82 -0
  56. data/lib/rails_ai/security/audit_logger.rb +46 -0
  57. data/lib/rails_ai/security/error_handler.rb +62 -0
  58. data/lib/rails_ai/security/input_validator.rb +176 -0
  59. data/lib/rails_ai/security/secure_file_handler.rb +45 -0
  60. data/lib/rails_ai/security/secure_http_client.rb +177 -0
  61. data/lib/rails_ai/security.rb +0 -0
  62. data/lib/rails_ai/version.rb +5 -0
  63. data/lib/rails_ai/window_context.rb +103 -0
  64. data/lib/rails_ai.rb +502 -0
  65. data/monitoring/ci_setup_guide.md +214 -0
  66. data/monitoring/enhanced_monitoring_script.rb +237 -0
  67. data/monitoring/google_alerts_setup.md +42 -0
  68. data/monitoring_log_20250921.txt +0 -0
  69. data/monitoring_script.rb +161 -0
  70. data/rails_ai.gemspec +54 -0
  71. data/scripts/security_scanner.rb +353 -0
  72. data/setup_monitoring.sh +163 -0
  73. data/wiki/API-Documentation.md +734 -0
  74. data/wiki/Architecture-Overview.md +672 -0
  75. data/wiki/Contributing-Guide.md +407 -0
  76. data/wiki/Development-Setup.md +532 -0
  77. data/wiki/Home.md +278 -0
  78. data/wiki/Installation-Guide.md +527 -0
  79. data/wiki/Quick-Start.md +186 -0
  80. data/wiki/README.md +135 -0
  81. data/wiki/Release-Process.md +467 -0
  82. metadata +385 -0
@@ -0,0 +1,509 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "net/http"
4
+ require "json"
5
+ require "base64"
6
+
7
+ module RailsAi
8
+ module Providers
9
+ class GeminiAdapter < Base
10
+ GEMINI_API_BASE = "https://generativelanguage.googleapis.com/v1beta"
11
+
12
+ def initialize
13
+ @api_key = ENV.fetch("GEMINI_API_KEY")
14
+ super
15
+ end
16
+
17
+ # Text-based operations
18
# Performs a one-shot chat completion against Gemini's generateContent
# endpoint.
#
# @param messages [Array<Hash>] OpenAI-style messages (:role, :content)
# @param model [String] Gemini model name, e.g. "gemini-2.0-flash-exp"
# @param opts [Hash] :max_tokens, :temperature, :top_p, :top_k plus any
#   extra generationConfig fields, passed through verbatim
# @return [String, nil] the first candidate's text, if any
def chat!(messages:, model:, **opts)
  return "(stubbed) #{messages.last[:content]}" if RailsAi.config.stub_responses

  # Map the handled option keys onto Gemini's camelCase config names and
  # forward anything else untouched.
  generation_config = {
    maxOutputTokens: opts[:max_tokens] || RailsAi.config.token_limit,
    temperature: opts[:temperature] || 0.7,
    topP: opts[:top_p] || 0.8,
    topK: opts[:top_k] || 40,
    **opts.except(:max_tokens, :temperature, :top_p, :top_k)
  }

  payload = {
    contents: convert_messages_to_gemini(messages),
    generationConfig: generation_config
  }

  response = make_request("models/#{model}:generateContent", payload)
  response.dig("candidates", 0, "content", "parts", 0, "text")
end
40
+
41
# Streams a chat completion, invoking +on_token+ once per text fragment as
# Gemini produces it.
#
# @param messages [Array<Hash>] OpenAI-style messages (:role, :content)
# @param model [String] Gemini model name
# @yieldparam token [String] incremental text emitted by the model
def stream_chat!(messages:, model:, **opts, &on_token)
  return on_token.call("(stubbed stream)") if RailsAi.config.stub_responses

  generation_config = {
    maxOutputTokens: opts[:max_tokens] || RailsAi.config.token_limit,
    temperature: opts[:temperature] || 0.7,
    topP: opts[:top_p] || 0.8,
    topK: opts[:top_k] || 40,
    **opts.except(:max_tokens, :temperature, :top_p, :top_k)
  }

  payload = {
    contents: convert_messages_to_gemini(messages),
    generationConfig: generation_config
  }

  make_streaming_request("models/#{model}:streamGenerateContent", payload) do |chunk|
    token = chunk.dig("candidates", 0, "content", "parts", 0, "text")
    on_token.call(token) if token
  end
end
64
+
65
# Computes embeddings for one or more texts via Gemini's embedContent
# endpoint, one request per text.
#
# BUG FIX: the previous implementation first issued a combined embedContent
# call with every text packed into one content's parts, then — for multi-text
# input — threw that response away and re-requested each text individually.
# The combined call was pure waste (and not a valid batch request; Gemini's
# batch API is batchEmbedContents). One request per text preserves the
# return shape for both single and multi-text input.
#
# @param texts [Array<String>] texts to embed
# @param model [String] embedding model name
# @return [Array<Array<Float>>] one embedding vector per input text
def embed!(texts:, model:, **opts)
  return Array.new(texts.length) { [0.0] * 768 } if RailsAi.config.stub_responses

  texts.map do |text|
    response = make_request(
      "models/#{model}:embedContent",
      { content: { parts: [{ text: text }] } }
    )
    response.dig("embedding", "values")
  end
end
96
+
97
# Generates an image from a text prompt via generateContent and returns it
# as a PNG data URL.
#
# @param prompt [String] description of the desired image
# @param model [String] Gemini model name
# @return [String] "data:image/png;base64,..." data URL
# @raise [RuntimeError] when the response carries no inline image data
def generate_image!(prompt:, model: "gemini-2.0-flash-exp", **opts)
  return "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==" if RailsAi.config.stub_responses

  response = make_request(
    "models/#{model}:generateContent",
    {
      contents: [
        { parts: [{ text: "Generate an image: #{prompt}" }] }
      ],
      generationConfig: {
        maxOutputTokens: opts[:max_tokens] || 1000,
        temperature: opts[:temperature] || 0.7,
        # CONSISTENCY FIX: strip keys already mapped above (as chat! does);
        # previously they leaked into generationConfig verbatim, producing
        # invalid fields such as "max_tokens".
        **opts.except(:max_tokens, :temperature)
      }
    }
  )

  image_data = response.dig("candidates", 0, "content", "parts", 0, "inlineData", "data")
  raise "Image generation failed: No image data in response" unless image_data

  # NOTE(review): the mime type is assumed to be PNG — confirm against the
  # response's inlineData.mimeType if other formats appear.
  "data:image/png;base64,#{image_data}"
end
131
+
132
# Approximates image editing: Gemini has no dedicated edit endpoint, so the
# source image is attached inline together with an edit instruction and the
# regenerated image is returned.
#
# @param image [String, #read] data URL, raw bytes, or IO-like object
# @param prompt [String] edit instruction
# @return [String] "data:image/png;base64,..." data URL
# @raise [RuntimeError] when the response carries no inline image data
def edit_image!(image:, prompt:, **opts)
  return "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==" if RailsAi.config.stub_responses

  contents = [
    {
      parts: [
        { text: "Edit this image: #{prompt}. Show the edited version." },
        {
          inlineData: {
            mimeType: detect_image_type(image),
            data: extract_base64_data(image)
          }
        }
      ]
    }
  ]

  response = make_request(
    "models/gemini-2.0-flash-exp:generateContent",
    {
      contents: contents,
      generationConfig: {
        maxOutputTokens: opts[:max_tokens] || 1000,
        temperature: opts[:temperature] || 0.7,
        # CONSISTENCY FIX: strip keys already mapped above (as chat! does);
        # previously they leaked into generationConfig as invalid fields.
        **opts.except(:max_tokens, :temperature)
      }
    }
  )

  image_data = response.dig("candidates", 0, "content", "parts", 0, "inlineData", "data")
  raise "Image editing failed: No image data in response" unless image_data

  "data:image/png;base64,#{image_data}"
end
172
+
173
# Produces a stylistic variation of an image by attaching it inline with a
# fixed variation instruction.
#
# @param image [String, #read] data URL, raw bytes, or IO-like object
# @return [String] "data:image/png;base64,..." data URL
# @raise [RuntimeError] when the response carries no inline image data
def create_variation!(image:, **opts)
  return "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==" if RailsAi.config.stub_responses

  contents = [
    {
      parts: [
        { text: "Create a variation of this image with similar style but different composition." },
        {
          inlineData: {
            mimeType: detect_image_type(image),
            data: extract_base64_data(image)
          }
        }
      ]
    }
  ]

  response = make_request(
    "models/gemini-2.0-flash-exp:generateContent",
    {
      contents: contents,
      generationConfig: {
        maxOutputTokens: opts[:max_tokens] || 1000,
        # Slightly higher default temperature than editing, to encourage
        # diverse variations.
        temperature: opts[:temperature] || 0.8,
        # CONSISTENCY FIX: strip keys already mapped above (as chat! does);
        # previously they leaked into generationConfig as invalid fields.
        **opts.except(:max_tokens, :temperature)
      }
    }
  )

  image_data = response.dig("candidates", 0, "content", "parts", 0, "inlineData", "data")
  raise "Image variation failed: No image data in response" unless image_data

  "data:image/png;base64,#{image_data}"
end
212
+
213
# Requests video generation via generateContent and returns the result as an
# MP4 data URL.
#
# NOTE(review): whether this model actually returns inline video data from a
# plain text prompt is unverified here — confirm against current Gemini docs
# (video generation is typically a separate Veo API).
#
# @param prompt [String] description of the desired video
# @param model [String] Gemini model name
# @return [String] "data:video/mp4;base64,..." data URL
# @raise [RuntimeError] when the response carries no inline video data
def generate_video!(prompt:, model: "gemini-2.0-flash-exp", **opts)
  return "data:video/mp4;base64,AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1wNDEAAAAIZnJlZQAAAB1tZGF0AQAAARxtYXNrAAAAAG1wNDEAAAAAIG1kYXQ=" if RailsAi.config.stub_responses

  response = make_request(
    "models/#{model}:generateContent",
    {
      contents: [
        { parts: [{ text: "Generate a video: #{prompt}" }] }
      ],
      generationConfig: {
        maxOutputTokens: opts[:max_tokens] || 1000,
        temperature: opts[:temperature] || 0.7,
        # CONSISTENCY FIX: strip keys already mapped above (as chat! does);
        # previously they leaked into generationConfig as invalid fields.
        **opts.except(:max_tokens, :temperature)
      }
    }
  )

  video_data = response.dig("candidates", 0, "content", "parts", 0, "inlineData", "data")
  raise "Video generation failed: No video data in response" unless video_data

  "data:video/mp4;base64,#{video_data}"
end
244
+
245
# Approximates video editing by attaching the source video inline with an
# edit instruction and returning the regenerated video.
#
# @param video [String, #read] data URL, raw bytes, or IO-like object
# @param prompt [String] edit instruction
# @return [String] "data:video/mp4;base64,..." data URL
# @raise [RuntimeError] when the response carries no inline video data
def edit_video!(video:, prompt:, **opts)
  return "data:video/mp4;base64,AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1wNDEAAAAIZnJlZQAAAB1tZGF0AQAAARxtYXNrAAAAAG1wNDEAAAAAIG1kYXQ=" if RailsAi.config.stub_responses

  contents = [
    {
      parts: [
        { text: "Edit this video: #{prompt}. Show the edited version." },
        {
          inlineData: {
            # NOTE(review): mime type is assumed to be MP4 regardless of the
            # actual input — confirm.
            mimeType: "video/mp4",
            data: extract_base64_data(video)
          }
        }
      ]
    }
  ]

  response = make_request(
    "models/gemini-2.0-flash-exp:generateContent",
    {
      contents: contents,
      generationConfig: {
        maxOutputTokens: opts[:max_tokens] || 1000,
        temperature: opts[:temperature] || 0.7,
        # CONSISTENCY FIX: strip keys already mapped above (as chat! does);
        # previously they leaked into generationConfig as invalid fields.
        **opts.except(:max_tokens, :temperature)
      }
    }
  )

  video_data = response.dig("candidates", 0, "content", "parts", 0, "inlineData", "data")
  raise "Video editing failed: No video data in response" unless video_data

  "data:video/mp4;base64,#{video_data}"
end
283
+
284
# Requests speech synthesis via generateContent and returns the result as an
# MP3 data URL.
#
# NOTE(review): whether this model returns inline audio from a plain text
# prompt is unverified here — confirm against current Gemini TTS docs.
#
# @param text [String] text to speak
# @param model [String] Gemini model name
# @return [String] "data:audio/mp3;base64,..." data URL
# @raise [RuntimeError] when the response carries no inline audio data
def generate_speech!(text:, model: "gemini-2.0-flash-exp", **opts)
  return "data:audio/mp3;base64,SUQzBAAAAAAAI1RTU0UAAAAPAAADTGF2ZjU4Ljc2LjEwMAAAAAAAAAAAAAAA//tQxAADB8AhSmAhIIEVWWWU" if RailsAi.config.stub_responses

  response = make_request(
    "models/#{model}:generateContent",
    {
      contents: [
        { parts: [{ text: "Generate speech for: #{text}" }] }
      ],
      generationConfig: {
        maxOutputTokens: opts[:max_tokens] || 1000,
        temperature: opts[:temperature] || 0.7,
        # CONSISTENCY FIX: strip keys already mapped above (as chat! does);
        # previously they leaked into generationConfig as invalid fields.
        **opts.except(:max_tokens, :temperature)
      }
    }
  )

  audio_data = response.dig("candidates", 0, "content", "parts", 0, "inlineData", "data")
  raise "Speech generation failed: No audio data in response" unless audio_data

  "data:audio/mp3;base64,#{audio_data}"
end
315
+
316
# Transcribes audio by attaching it inline to a generateContent request.
#
# @param audio [String, #read] data URL, raw bytes, or IO-like object
# @param model [String] Gemini model name
# @return [String, nil] the transcription text, if any
def transcribe_audio!(audio:, model: "gemini-2.0-flash-exp", **opts)
  return "[stubbed transcription]" if RailsAi.config.stub_responses

  contents = [
    {
      parts: [
        { text: "Transcribe this audio:" },
        {
          inlineData: {
            # NOTE(review): mime type is assumed to be MP3 regardless of the
            # actual input — confirm.
            mimeType: "audio/mp3",
            data: extract_base64_data(audio)
          }
        }
      ]
    }
  ]

  response = make_request(
    "models/#{model}:generateContent",
    {
      contents: contents,
      generationConfig: {
        maxOutputTokens: opts[:max_tokens] || 1000,
        # Low default temperature keeps transcription near-deterministic.
        temperature: opts[:temperature] || 0.1,
        # CONSISTENCY FIX: strip keys already mapped above (as chat! does);
        # previously they leaked into generationConfig as invalid fields.
        **opts.except(:max_tokens, :temperature)
      }
    }
  )

  response.dig("candidates", 0, "content", "parts", 0, "text")
end
349
+
350
# Multimodal analysis: asks Gemini a question about an image supplied inline.
#
# @param image [String, #read] data URL, raw bytes, or IO-like object
# @param prompt [String] question/instruction about the image
# @param model [String] Gemini model name
# @return [String, nil] the first candidate's text, if any
def analyze_image!(image:, prompt:, model: "gemini-2.0-flash-exp", **opts)
  return "[stubbed] Image analysis: #{prompt}" if RailsAi.config.stub_responses

  image_part = {
    inlineData: {
      mimeType: detect_image_type(image),
      data: extract_base64_data(image)
    }
  }

  payload = {
    contents: [{ parts: [{ text: prompt }, image_part] }],
    generationConfig: {
      maxOutputTokens: opts[:max_tokens] || RailsAi.config.token_limit,
      temperature: opts[:temperature] || 0.7,
      topP: opts[:top_p] || 0.8,
      topK: opts[:top_k] || 40,
      **opts.except(:max_tokens, :temperature, :top_p, :top_k)
    }
  }

  response = make_request("models/#{model}:generateContent", payload)
  response.dig("candidates", 0, "content", "parts", 0, "text")
end
384
+
385
# Multimodal analysis: asks Gemini a question about a video supplied inline.
#
# @param video [String, #read] data URL, raw bytes, or IO-like object
# @param prompt [String] question/instruction about the video
# @param model [String] Gemini model name
# @return [String, nil] the first candidate's text, if any
def analyze_video!(video:, prompt:, model: "gemini-2.0-flash-exp", **opts)
  return "[stubbed] Video analysis: #{prompt}" if RailsAi.config.stub_responses

  video_part = {
    inlineData: {
      mimeType: "video/mp4",
      data: extract_base64_data(video)
    }
  }

  payload = {
    contents: [{ parts: [{ text: prompt }, video_part] }],
    generationConfig: {
      maxOutputTokens: opts[:max_tokens] || RailsAi.config.token_limit,
      temperature: opts[:temperature] || 0.7,
      topP: opts[:top_p] || 0.8,
      topK: opts[:top_k] || 40,
      **opts.except(:max_tokens, :temperature, :top_p, :top_k)
    }
  }

  response = make_request("models/#{model}:generateContent", payload)
  response.dig("candidates", 0, "content", "parts", 0, "text")
end
418
+
419
+ private
420
+
421
# Performs a synchronous POST to the Gemini REST API and returns the parsed
# JSON response body.
#
# FIX: the old error path used the inline `rescue` modifier
# (`JSON.parse(...) rescue response.body`), which silently swallows every
# StandardError — now only JSON::ParserError is rescued.
#
# NOTE(review): the API key travels in the query string; Google also accepts
# an "x-goog-api-key" header, which keeps credentials out of URL-based logs —
# consider switching.
#
# @param endpoint [String] path relative to GEMINI_API_BASE,
#   e.g. "models/x:generateContent"
# @param payload [Hash] request body, serialized to JSON
# @return [Hash] parsed response body
# @raise [RuntimeError] with status code and body on any non-200 response
def make_request(endpoint, payload)
  uri = URI("#{GEMINI_API_BASE}/#{endpoint}?key=#{@api_key}")

  http = Net::HTTP.new(uri.host, uri.port)
  http.use_ssl = true

  request = Net::HTTP::Post.new(uri)
  request["Content-Type"] = "application/json"
  request.body = payload.to_json

  response = http.request(request)
  return JSON.parse(response.body) if response.code == "200"

  error_body =
    begin
      JSON.parse(response.body)
    rescue JSON::ParserError
      response.body
    end
  raise "Gemini API error (#{response.code}): #{error_body}"
end
440
+
441
# Performs a streaming POST to the Gemini REST API, yielding each parsed
# server-sent-event payload to the block.
#
# BUG FIX: streamGenerateContent only emits "data: ..." SSE lines when
# alt=sse is requested; without it the API streams a JSON array and the
# line parser below never yields a single chunk.
#
# @param endpoint [String] path relative to GEMINI_API_BASE
# @param payload [Hash] request body, serialized to JSON
# @yieldparam parsed [Hash] one parsed SSE data payload
# @raise [RuntimeError] with status code and body on any non-200 response
def make_streaming_request(endpoint, payload, &block)
  uri = URI("#{GEMINI_API_BASE}/#{endpoint}?key=#{@api_key}&alt=sse")

  http = Net::HTTP.new(uri.host, uri.port)
  http.use_ssl = true

  request = Net::HTTP::Post.new(uri)
  request["Content-Type"] = "application/json"
  request.body = payload.to_json

  http.request(request) do |response|
    unless response.code == "200"
      error_body =
        begin
          JSON.parse(response.body)
        rescue JSON::ParserError
          response.body
        end
      raise "Gemini API error (#{response.code}): #{error_body}"
    end

    # ROBUSTNESS: a network chunk can split an SSE line in half, so buffer
    # across read_body callbacks and only parse complete lines.
    buffer = +""
    response.read_body do |chunk|
      buffer << chunk
      while (newline = buffer.index("\n"))
        line = buffer.slice!(0..newline).chomp
        next if line.empty?
        next unless line.start_with?("data: ")

        data = line[6..] # strip the "data: " prefix
        next if data == "[DONE]"

        begin
          block.call(JSON.parse(data))
        rescue JSON::ParserError
          next # skip malformed payloads rather than aborting the stream
        end
      end
    end
  end
end
477
+
478
# Translates OpenAI-style chat messages into Gemini's "contents" format.
#
# Gemini uses the role "model" where OpenAI uses "assistant"; every other
# role (including "system", which Gemini's contents array does not accept)
# is mapped to "user".
#
# @param messages [Array<Hash>] hashes with :role and :content
# @return [Array<Hash>] Gemini contents entries
def convert_messages_to_gemini(messages)
  messages.map do |message|
    # ROBUSTNESS: accept both string and symbol roles — callers commonly pass
    # role: :assistant, which the previous string-only comparison mapped to
    # "user" by mistake.
    role = message[:role].to_s == "assistant" ? "model" : "user"
    { role: role, parts: [{ text: message[:content] }] }
  end
end
486
+
487
# Extracts the MIME type from a data URL ("data:image/png;base64,..."),
# falling back to "image/png" for anything else (plain strings, file
# objects, nil).
#
# @param image [Object] candidate image value
# @return [String] MIME type, e.g. "image/jpeg"
def detect_image_type(image)
  return "image/png" unless image.is_a?(String) && image.start_with?("data:image/")

  # "data:image/jpeg;base64,..." -> "data:image/jpeg" -> "image/jpeg"
  image.split(";").first.split(":").last
end
498
+
499
# Normalizes an image/audio/video argument into a bare base64 payload.
#
# Accepts a data URL ("data:image/png;base64,AAAA"), an IO-like object
# (File, StringIO, upload), or a raw binary String.
#
# BUG FIX: a plain String without a "base64," marker previously fell through
# to `image.read` and crashed with NoMethodError; raw strings are now
# encoded directly, and unsupported values raise a descriptive ArgumentError.
#
# @param image [String, #read] source value
# @return [String] base64 payload
# @raise [ArgumentError] when the value is neither a String nor IO-like
def extract_base64_data(image)
  if image.is_a?(String) && image.include?("base64,")
    # Split with a limit so a payload that itself contains "base64," stays intact.
    image.split("base64,", 2)[1]
  elsif image.respond_to?(:read)
    Base64.strict_encode64(image.read)
  elsif image.is_a?(String)
    # NOTE(review): assumed to be raw binary; if callers ever pass bare
    # base64 strings here this would double-encode — confirm.
    Base64.strict_encode64(image)
  else
    raise ArgumentError, "expected a data URL, raw String, or IO-like object, got #{image.class}"
  end
end
507
+ end
508
+ end
509
+ end