rails_ai 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. checksums.yaml +7 -0
  2. data/.rspec_status +96 -0
  3. data/AGENT_GUIDE.md +513 -0
  4. data/Appraisals +49 -0
  5. data/COMMERCIAL_LICENSE_TEMPLATE.md +92 -0
  6. data/FEATURES.md +204 -0
  7. data/LEGAL_PROTECTION_GUIDE.md +222 -0
  8. data/LICENSE +62 -0
  9. data/LICENSE_SUMMARY.md +74 -0
  10. data/MIT-LICENSE +62 -0
  11. data/PERFORMANCE.md +300 -0
  12. data/PROVIDERS.md +495 -0
  13. data/README.md +454 -0
  14. data/Rakefile +11 -0
  15. data/SPEED_OPTIMIZATIONS.md +217 -0
  16. data/STRUCTURE.md +139 -0
  17. data/USAGE_GUIDE.md +288 -0
  18. data/app/channels/ai_stream_channel.rb +33 -0
  19. data/app/components/ai/prompt_component.rb +25 -0
  20. data/app/controllers/concerns/ai/context_aware.rb +77 -0
  21. data/app/controllers/concerns/ai/streaming.rb +41 -0
  22. data/app/helpers/ai_helper.rb +164 -0
  23. data/app/jobs/ai/generate_embedding_job.rb +25 -0
  24. data/app/jobs/ai/generate_summary_job.rb +25 -0
  25. data/app/models/concerns/ai/embeddable.rb +38 -0
  26. data/app/views/rails_ai/dashboard/index.html.erb +51 -0
  27. data/config/routes.rb +19 -0
  28. data/lib/generators/rails_ai/install/install_generator.rb +38 -0
  29. data/lib/rails_ai/agents/agent_manager.rb +258 -0
  30. data/lib/rails_ai/agents/agent_team.rb +243 -0
  31. data/lib/rails_ai/agents/base_agent.rb +331 -0
  32. data/lib/rails_ai/agents/collaboration.rb +238 -0
  33. data/lib/rails_ai/agents/memory.rb +116 -0
  34. data/lib/rails_ai/agents/message_bus.rb +95 -0
  35. data/lib/rails_ai/agents/specialized_agents.rb +391 -0
  36. data/lib/rails_ai/agents/task_queue.rb +111 -0
  37. data/lib/rails_ai/cache.rb +14 -0
  38. data/lib/rails_ai/config.rb +40 -0
  39. data/lib/rails_ai/context.rb +7 -0
  40. data/lib/rails_ai/context_analyzer.rb +86 -0
  41. data/lib/rails_ai/engine.rb +48 -0
  42. data/lib/rails_ai/events.rb +9 -0
  43. data/lib/rails_ai/image_context.rb +110 -0
  44. data/lib/rails_ai/performance.rb +231 -0
  45. data/lib/rails_ai/provider.rb +8 -0
  46. data/lib/rails_ai/providers/anthropic_adapter.rb +256 -0
  47. data/lib/rails_ai/providers/base.rb +60 -0
  48. data/lib/rails_ai/providers/dummy_adapter.rb +29 -0
  49. data/lib/rails_ai/providers/gemini_adapter.rb +509 -0
  50. data/lib/rails_ai/providers/openai_adapter.rb +535 -0
  51. data/lib/rails_ai/providers/secure_anthropic_adapter.rb +206 -0
  52. data/lib/rails_ai/providers/secure_openai_adapter.rb +284 -0
  53. data/lib/rails_ai/railtie.rb +48 -0
  54. data/lib/rails_ai/redactor.rb +12 -0
  55. data/lib/rails_ai/security/api_key_manager.rb +82 -0
  56. data/lib/rails_ai/security/audit_logger.rb +46 -0
  57. data/lib/rails_ai/security/error_handler.rb +62 -0
  58. data/lib/rails_ai/security/input_validator.rb +176 -0
  59. data/lib/rails_ai/security/secure_file_handler.rb +45 -0
  60. data/lib/rails_ai/security/secure_http_client.rb +177 -0
  61. data/lib/rails_ai/security.rb +0 -0
  62. data/lib/rails_ai/version.rb +5 -0
  63. data/lib/rails_ai/window_context.rb +103 -0
  64. data/lib/rails_ai.rb +502 -0
  65. data/monitoring/ci_setup_guide.md +214 -0
  66. data/monitoring/enhanced_monitoring_script.rb +237 -0
  67. data/monitoring/google_alerts_setup.md +42 -0
  68. data/monitoring_log_20250921.txt +0 -0
  69. data/monitoring_script.rb +161 -0
  70. data/rails_ai.gemspec +54 -0
  71. data/scripts/security_scanner.rb +353 -0
  72. data/setup_monitoring.sh +163 -0
  73. data/wiki/API-Documentation.md +734 -0
  74. data/wiki/Architecture-Overview.md +672 -0
  75. data/wiki/Contributing-Guide.md +407 -0
  76. data/wiki/Development-Setup.md +532 -0
  77. data/wiki/Home.md +278 -0
  78. data/wiki/Installation-Guide.md +527 -0
  79. data/wiki/Quick-Start.md +186 -0
  80. data/wiki/README.md +135 -0
  81. data/wiki/Release-Process.md +467 -0
  82. metadata +385 -0
data/lib/rails_ai/events.rb
@@ -0,0 +1,9 @@
+ # frozen_string_literal: true
+
+ module RailsAi
+   module Events
+     def self.log!(kind:, name:, payload: {}, latency_ms: nil)
+       ActiveSupport::Notifications.instrument("rails_ai.#{kind}", {name:, payload:, latency_ms:, user_id: RailsAi::Context.user_id, request_id: RailsAi::Context.request_id})
+     end
+   end
+ end
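
Since log! emits through ActiveSupport::Notifications, any standard subscriber picks these events up. A minimal subscriber sketch (the logging destination and format are illustrative):

  ActiveSupport::Notifications.subscribe(/\Arails_ai\./) do |name, start, finish, _id, payload|
    # Fall back to wall-clock duration when the caller didn't pass latency_ms.
    elapsed_ms = ((finish - start) * 1000).round(1)
    Rails.logger.info("[#{name}] #{payload[:name]} latency=#{payload[:latency_ms] || elapsed_ms}ms user=#{payload[:user_id]} request=#{payload[:request_id]}")
  end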
data/lib/rails_ai/image_context.rb
@@ -0,0 +1,110 @@
+ # frozen_string_literal: true
+
+ module RailsAi
+   class ImageContext
+     attr_reader :metadata, :source, :dimensions, :file_size, :format, :created_at
+
+     def initialize(image_data, metadata: {})
+       @metadata = metadata || {}
+       @source = determine_source(image_data)
+       @dimensions = extract_dimensions(image_data)
+       @file_size = extract_file_size(image_data)
+       @format = extract_format(image_data)
+       @created_at = current_time
+     end
+
+     def to_h
+       {
+         source: source,
+         dimensions: dimensions,
+         file_size: file_size,
+         format: format,
+         created_at: created_at.iso8601,
+         metadata: metadata,
+         analysis_ready: analysis_ready?
+       }
+     end
+
+     def analysis_ready?
+       dimensions.present? && format.present?
+     end
+
+     def self.from_file(image_file)
+       new(image_file, metadata: extract_file_metadata(image_file))
+     end
+
+     def self.from_url(image_url)
+       new(image_url, metadata: { url: image_url })
+     end
+
+     def self.from_base64(image_data)
+       new(image_data, metadata: { encoding: 'base64' })
+     end
+
+     private
+
+     def current_time
+       if defined?(Time.current)
+         Time.current
+       else
+         Time.now
+       end
+     end
+
+     def determine_source(image_data)
+       case image_data
+       when String
+         if image_data.start_with?('data:')
+           'base64'
+         elsif image_data.start_with?('http')
+           'url'
+         else
+           'file_path'
+         end
+       when File, ActionDispatch::Http::UploadedFile
+         'uploaded_file'
+       else
+         'unknown'
+       end
+     end
+
+     def extract_dimensions(image_data)
+       # This would need an image processing library (see the sketch below
+       # the diff); for now, return a placeholder.
+       { width: 'unknown', height: 'unknown' }
+     end
+
+     def extract_file_size(image_data)
+       case image_data
+       when String
+         image_data.bytesize
+       when File, ActionDispatch::Http::UploadedFile
+         image_data.size
+       else
+         'unknown'
+       end
+     end
+
+     def extract_format(image_data)
+       case image_data
+       when String
+         if image_data.start_with?('data:')
+           image_data.split(';')[0].split(':')[1]
+         else
+           File.extname(image_data).downcase[1..-1]
+         end
+       when File, ActionDispatch::Http::UploadedFile
+         image_data.content_type&.split('/')&.last
+       else
+         'unknown'
+       end
+     end
+
+     def self.extract_file_metadata(image_file)
+       {
+         filename: image_file.respond_to?(:original_filename) ? image_file.original_filename : 'unknown',
+         content_type: image_file.respond_to?(:content_type) ? image_file.content_type : 'unknown'
+       }
+     end
+   end
+ end
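
extract_dimensions is an acknowledged placeholder above. One way to fill it in, as a sketch assuming the mini_magick gem (not a dependency of this gem) and a file-path input:

  require "mini_magick"

  def extract_dimensions(image_data)
    return { width: 'unknown', height: 'unknown' } unless image_data.is_a?(String) && File.exist?(image_data)

    image = MiniMagick::Image.open(image_data)
    { width: image.width, height: image.height }
  rescue MiniMagick::Error
    # Unreadable or non-image files keep the placeholder shape.
    { width: 'unknown', height: 'unknown' }
  end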
data/lib/rails_ai/performance.rb
@@ -0,0 +1,231 @@
+ # frozen_string_literal: true
+
+ require "concurrent"
+ require "digest"
+ require "zlib"
+
+ module RailsAi
+   module Performance
+     # Connection pooling for HTTP clients
+     class ConnectionPool
+       def initialize(size: 10)
+         @pool = Concurrent::Array.new(size) { create_connection }
+         @semaphore = Concurrent::Semaphore.new(size)
+       end
+
+       def with_connection
+         @semaphore.acquire
+         connection = @pool.pop
+         yield(connection)
+       ensure
+         @pool.push(connection) if connection
+         @semaphore.release
+       end
+
+       private
+
+       def create_connection
+         # Placeholder; a real pool would construct an HTTP client here.
+         Object.new
+       end
+     end
+
+     # Request batching for multiple operations
+     class BatchProcessor
+       def initialize(batch_size: 10, flush_interval: 0.1)
+         @batch_size = batch_size
+         @flush_interval = flush_interval
+         @queue = Concurrent::Array.new
+         @mutex = Mutex.new
+         @last_flush = Time.now
+       end
+
+       def add_operation(operation)
+         @mutex.synchronize do
+           @queue << operation
+           flush_if_needed
+         end
+       end
+
+       private
+
+       def flush_if_needed
+         return unless should_flush?
+
+         operations = @queue.shift(@batch_size)
+         process_batch(operations) if operations.any?
+         @last_flush = Time.now
+       end
+
+       def should_flush?
+         @queue.size >= @batch_size ||
+           (Time.now - @last_flush) > @flush_interval
+       end
+
+       def process_batch(operations)
+         # Run the batch in parallel; value! propagates any failure
+         operations.map do |operation|
+           Concurrent::Future.execute { operation.call }
+         end.each(&:value!)
+       end
+     end
+
+     # Memory-efficient streaming
+     class StreamProcessor
+       def initialize(chunk_size: 1024)
+         @chunk_size = chunk_size
+       end
+
+       def process_stream(stream)
+         buffer = String.new(capacity: @chunk_size)
+
+         stream.each_chunk(@chunk_size) do |chunk|
+           buffer << chunk
+
+           if buffer.bytesize >= @chunk_size
+             yield(buffer.dup)
+             buffer.clear
+           end
+         end
+
+         yield(buffer) if buffer.bytesize > 0
+       end
+     end
+
+     # Smart caching with compression
+     class SmartCache
+       def initialize(compression_threshold: 1024)
+         @compression_threshold = compression_threshold
+       end
+
+       def fetch(key, **opts, &block)
+         return block.call unless block_given?
+
+         if defined?(Rails) && Rails.cache
+           compressed_key = compress_key(key)
+           cached = Rails.cache.fetch(compressed_key, **opts) do
+             compress_if_needed(block.call)
+           end
+           # A cache hit may return the compressed envelope; unwrap it.
+           decompress_if_needed(cached)
+         else
+           block.call
+         end
+       end
+
+       private
+
+       def compress_key(key)
+         # Hash the key for consistent length and distribution
+         Digest::MD5.hexdigest(key.inspect)
+       end
+
+       def compress_if_needed(data)
+         return data unless data.is_a?(String) && data.bytesize > @compression_threshold
+
+         compressed = Zlib::Deflate.deflate(data)
+         { compressed: true, data: compressed }
+       end
+
+       def decompress_if_needed(data)
+         return data unless data.is_a?(Hash) && data[:compressed]
+
+         Zlib::Inflate.inflate(data[:data])
+       end
+     end
+
+     # Request deduplication
+     class RequestDeduplicator
+       def initialize
+         @pending_requests = Concurrent::Hash.new
+         @mutex = Mutex.new
+       end
+
+       def deduplicate(key, &block)
+         # Register (or reuse) the in-flight future under the lock, then wait
+         # outside it so concurrent callers with the same key actually share
+         # one underlying request.
+         future = @mutex.synchronize do
+           @pending_requests[key] ||= Concurrent::Future.execute(&block)
+         end
+
+         begin
+           future.value
+         ensure
+           @mutex.synchronize { @pending_requests.delete(key) }
+         end
+       end
+     end
+
+     # Performance monitoring
+     class PerformanceMonitor
+       def initialize
+         @metrics = Concurrent::Hash.new
+       end
+
+       def measure(operation, &block)
+         start_time = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+         memory_before = memory_usage
+
+         result = block.call
+
+         duration = Process.clock_gettime(Process::CLOCK_MONOTONIC) - start_time
+         memory_after = memory_usage
+
+         record_metric(operation, duration, memory_after - memory_before)
+         result
+       end
+
+       def metrics
+         @metrics.dup
+       end
+
+       private
+
+       def record_metric(operation, duration, memory_delta)
+         @metrics[operation] ||= {
+           count: 0,
+           total_duration: 0.0,
+           total_memory: 0,
+           min_duration: Float::INFINITY,
+           max_duration: 0.0
+         }
+
+         metric = @metrics[operation]
+         metric[:count] += 1
+         metric[:total_duration] += duration
+         metric[:total_memory] += memory_delta
+         metric[:min_duration] = [metric[:min_duration], duration].min
+         metric[:max_duration] = [metric[:max_duration], duration].max
+       end
+
+       def memory_usage
+         `ps -o rss= -p #{Process.pid}`.to_i * 1024
+       rescue
+         0
+       end
+     end
+
+     # Lazy loading for providers
+     class LazyProvider
+       def initialize(&provider_factory)
+         @provider_factory = provider_factory
+         @provider = nil
+         @mutex = Mutex.new
+       end
+
+       def method_missing(method, *args, **kwargs, &block)
+         @mutex.synchronize do
+           @provider ||= @provider_factory.call
+         end
+         @provider.public_send(method, *args, **kwargs, &block)
+       end
+
+       def respond_to_missing?(method, include_private = false)
+         @mutex.synchronize do
+           @provider ||= @provider_factory.call
+         end
+         @provider.respond_to?(method, include_private)
+       end
+     end
+   end
+ end
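
A short usage sketch tying the pieces above together (expensive_api_call is a stand-in for a real provider request; the wiring is illustrative, not how the gem composes these classes internally):

  monitor = RailsAi::Performance::PerformanceMonitor.new
  dedup   = RailsAi::Performance::RequestDeduplicator.new

  # Concurrent callers passing the same key share one in-flight request.
  result = monitor.measure(:chat_completion) do
    dedup.deduplicate("chat:gpt-4o:hello") { expensive_api_call }
  end

  monitor.metrics[:chat_completion]
  # => { count: 1, total_duration: ..., total_memory: ..., min_duration: ..., max_duration: ... }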
data/lib/rails_ai/provider.rb
@@ -0,0 +1,8 @@
+ # frozen_string_literal: true
+
+ module RailsAi
+   class Provider
+     class RateLimited < StandardError; end
+     class UnsafeInputError < StandardError; end
+   end
+ end
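
These exception classes give callers a stable surface to rescue on regardless of the underlying provider. A hypothetical call site (adapter, messages, and model are assumed to be in scope; the backoff policy is illustrative):

  retries = 0
  begin
    adapter.chat!(messages: messages, model: model)
  rescue RailsAi::Provider::RateLimited
    retries += 1
    retries <= 3 ? (sleep(2**retries); retry) : raise
  rescue RailsAi::Provider::UnsafeInputError => e
    Rails.logger.warn("Prompt rejected: #{e.message}")
  end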
data/lib/rails_ai/providers/anthropic_adapter.rb
@@ -0,0 +1,256 @@
+ # frozen_string_literal: true
+
+ require "net/http"
+ require "json"
+ require "base64"
+
+ module RailsAi
+   module Providers
+     class AnthropicAdapter < Base
+       ANTHROPIC_API_BASE = "https://api.anthropic.com/v1"
+
+       def initialize
+         @api_key = ENV.fetch("ANTHROPIC_API_KEY")
+         super
+       end
+
+       # Text-based operations
+       def chat!(messages:, model:, **opts)
+         return "(stubbed) #{messages.last[:content]}" if RailsAi.config.stub_responses
+
+         # Convert OpenAI-style messages to Anthropic's format
+         anthropic_messages = convert_messages_to_anthropic(messages)
+
+         response = make_request(
+           "messages",
+           {
+             model: model,
+             max_tokens: opts[:max_tokens] || RailsAi.config.token_limit,
+             messages: anthropic_messages,
+             temperature: opts[:temperature] || 1.0,
+             top_p: opts[:top_p] || 1.0,
+             top_k: opts[:top_k] || 0,
+             stop_sequences: opts[:stop_sequences] || [],
+             **opts.except(:max_tokens, :temperature, :top_p, :top_k, :stop_sequences)
+           }
+         )
+
+         response.dig("content", 0, "text")
+       end
+
+       def stream_chat!(messages:, model:, **opts, &on_token)
+         return on_token.call("(stubbed stream)") if RailsAi.config.stub_responses
+
+         anthropic_messages = convert_messages_to_anthropic(messages)
+
+         make_streaming_request(
+           "messages",
+           {
+             model: model,
+             max_tokens: opts[:max_tokens] || RailsAi.config.token_limit,
+             messages: anthropic_messages,
+             temperature: opts[:temperature] || 1.0,
+             top_p: opts[:top_p] || 1.0,
+             top_k: opts[:top_k] || 0,
+             stop_sequences: opts[:stop_sequences] || [],
+             stream: true,
+             **opts.except(:max_tokens, :temperature, :top_p, :top_k, :stop_sequences, :stream)
+           }
+         ) do |chunk|
+           text = chunk.dig("delta", "text")
+           on_token.call(text) if text
+         end
+       end
+
+       def embed!(texts:, model:, **opts)
+         return Array.new(texts.length) { [0.0] * 1024 } if RailsAi.config.stub_responses
+
+         # Anthropic has no embedding API. This placeholder returns random
+         # vectors; a real implementation would delegate to a dedicated
+         # embedding service.
+         texts.map do |_text|
+           Array.new(1024) { rand(-1.0..1.0) }
+         end
+       end
+
+       # Image generation - Anthropic doesn't have image generation
+       def generate_image!(prompt:, model:, **opts)
+         return "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==" if RailsAi.config.stub_responses
+         raise NotImplementedError, "Anthropic doesn't support image generation. Use OpenAI or Gemini for image generation."
+       end
+
+       def edit_image!(image:, prompt:, **opts)
+         return "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==" if RailsAi.config.stub_responses
+         raise NotImplementedError, "Anthropic doesn't support image editing. Use OpenAI or Gemini for image editing."
+       end
+
+       def create_variation!(image:, **opts)
+         return "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==" if RailsAi.config.stub_responses
+         raise NotImplementedError, "Anthropic doesn't support image variations. Use OpenAI or Gemini for image variations."
+       end
+
+       # Video generation - Anthropic doesn't have video generation
+       def generate_video!(prompt:, model:, **opts)
+         return "data:video/mp4;base64,AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1wNDEAAAAIZnJlZQAAAB1tZGF0AQAAARxtYXNrAAAAAG1wNDEAAAAAIG1kYXQ=" if RailsAi.config.stub_responses
+         raise NotImplementedError, "Anthropic doesn't support video generation. Use OpenAI or Gemini for video generation."
+       end
+
+       def edit_video!(video:, prompt:, **opts)
+         return "data:video/mp4;base64,AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1wNDEAAAAIZnJlZQAAAB1tZGF0AQAAARxtYXNrAAAAAG1wNDEAAAAAIG1kYXQ=" if RailsAi.config.stub_responses
+         raise NotImplementedError, "Anthropic doesn't support video editing. Use OpenAI or Gemini for video editing."
+       end
+
+       # Audio generation - Anthropic doesn't have audio generation
+       def generate_speech!(text:, model:, **opts)
+         return "data:audio/mp3;base64,SUQzBAAAAAAAI1RTU0UAAAAPAAADTGF2ZjU4Ljc2LjEwMAAAAAAAAAAAAAAA//tQxAADB8AhSmAhIIEVWWWU" if RailsAi.config.stub_responses
+         raise NotImplementedError, "Anthropic doesn't support speech generation. Use OpenAI or Gemini for speech generation."
+       end
+
+       def transcribe_audio!(audio:, model:, **opts)
+         return "[stubbed transcription]" if RailsAi.config.stub_responses
+         raise NotImplementedError, "Anthropic doesn't support audio transcription. Use OpenAI or Gemini for audio transcription."
+       end
+
+       # Multimodal analysis - Anthropic supports image analysis with Claude 3 Vision
+       def analyze_image!(image:, prompt:, model: "claude-3-5-sonnet-20241022", **opts)
+         return "[stubbed] Image analysis: #{prompt}" if RailsAi.config.stub_responses
+
+         messages = [
+           {
+             role: "user",
+             content: [
+               {
+                 type: "image",
+                 source: {
+                   type: "base64",
+                   media_type: detect_image_type(image),
+                   data: extract_base64_data(image)
+                 }
+               },
+               {
+                 type: "text",
+                 text: prompt
+               }
+             ]
+           }
+         ]
+
+         response = make_request(
+           "messages",
+           {
+             model: model,
+             max_tokens: opts[:max_tokens] || RailsAi.config.token_limit,
+             messages: messages,
+             temperature: opts[:temperature] || 1.0,
+             top_p: opts[:top_p] || 1.0,
+             top_k: opts[:top_k] || 0,
+             **opts.except(:max_tokens, :temperature, :top_p, :top_k)
+           }
+         )
+
+         response.dig("content", 0, "text")
+       end
+
+       def analyze_video!(video:, prompt:, model:, **opts)
+         return "[stubbed] Video analysis: #{prompt}" if RailsAi.config.stub_responses
+         raise NotImplementedError, "Anthropic doesn't support video analysis. Use OpenAI or Gemini for video analysis."
+       end
+
+       private
+
+       def make_request(endpoint, payload)
+         uri = URI("#{ANTHROPIC_API_BASE}/#{endpoint}")
+
+         http = Net::HTTP.new(uri.host, uri.port)
+         http.use_ssl = true
+
+         request = Net::HTTP::Post.new(uri)
+         request["x-api-key"] = @api_key
+         request["Content-Type"] = "application/json"
+         request["anthropic-version"] = "2023-06-01"
+         request.body = payload.to_json
+
+         response = http.request(request)
+
+         if response.code == "200"
+           JSON.parse(response.body)
+         else
+           error_body = JSON.parse(response.body) rescue response.body
+           raise "Anthropic API error (#{response.code}): #{error_body}"
+         end
+       end
+
+       def make_streaming_request(endpoint, payload, &block)
+         uri = URI("#{ANTHROPIC_API_BASE}/#{endpoint}")
+
+         http = Net::HTTP.new(uri.host, uri.port)
+         http.use_ssl = true
+
+         request = Net::HTTP::Post.new(uri)
+         request["x-api-key"] = @api_key
+         request["Content-Type"] = "application/json"
+         request["anthropic-version"] = "2023-06-01"
+         request.body = payload.to_json
+
+         http.request(request) do |response|
+           if response.code == "200"
+             response.read_body do |chunk|
+               # Parse server-sent event lines from the streaming response
+               chunk.split("\n").each do |line|
+                 next if line.empty?
+                 next unless line.start_with?("data: ")
+
+                 data = line[6..-1] # Remove "data: " prefix
+                 next if data == "[DONE]"
+
+                 begin
+                   parsed = JSON.parse(data)
+                   block.call(parsed)
+                 rescue JSON::ParserError
+                   # Skip invalid JSON chunks
+                   next
+                 end
+               end
+             end
+           else
+             error_body = JSON.parse(response.body) rescue response.body
+             raise "Anthropic API error (#{response.code}): #{error_body}"
+           end
+         end
+       end
+
+       def convert_messages_to_anthropic(messages)
+         # Anthropic's Messages API has no "system" role inside the messages
+         # array, so any non-assistant turn is sent as "user".
+         messages.map do |message|
+           {
+             role: message[:role] == "assistant" ? "assistant" : "user",
+             content: message[:content]
+           }
+         end
+       end
+
+       def detect_image_type(image)
+         if image.is_a?(String)
+           if image.start_with?("data:image/")
+             image.split(";")[0].split(":")[1]
+           else
+             "image/png" # default
+           end
+         else
+           "image/png" # default for file objects
+         end
+       end
+
+       def extract_base64_data(image)
+         if image.is_a?(String) && image.include?("base64,")
+           image.split("base64,")[1]
+         else
+           # For file objects, read and encode
+           Base64.strict_encode64(image.read)
+         end
+       end
+     end
+   end
+ end
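
A call-site sketch: the adapter accepts OpenAI-style message hashes and converts them before hitting the Messages API (the model identifier is one of Anthropic's published names):

  adapter = RailsAi::Providers::AnthropicAdapter.new # reads ANTHROPIC_API_KEY from the environment

  reply = adapter.chat!(
    messages: [{ role: "user", content: "Name three uses of Redis." }],
    model: "claude-3-5-sonnet-20241022",
    max_tokens: 256
  )

  adapter.stream_chat!(
    messages: [{ role: "user", content: "Count to five." }],
    model: "claude-3-5-sonnet-20241022"
  ) { |token| print(token) }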
data/lib/rails_ai/providers/base.rb
@@ -0,0 +1,60 @@
+ # frozen_string_literal: true
+
+ module RailsAi
+   module Providers
+     class Base
+       # Text-based AI operations
+       def chat!(messages:, model:, **opts)
+         raise NotImplementedError
+       end
+
+       def stream_chat!(messages:, model:, **opts, &on_token)
+         raise NotImplementedError
+       end
+
+       def embed!(texts:, model:, **opts)
+         raise NotImplementedError
+       end
+
+       # Image generation
+       def generate_image!(prompt:, model: "dall-e-3", size: "1024x1024", quality: "standard", **opts)
+         raise NotImplementedError
+       end
+
+       def edit_image!(image:, prompt:, mask: nil, size: "1024x1024", **opts)
+         raise NotImplementedError
+       end
+
+       def create_variation!(image:, size: "1024x1024", **opts)
+         raise NotImplementedError
+       end
+
+       # Video generation
+       def generate_video!(prompt:, model: "sora", duration: 5, **opts)
+         raise NotImplementedError
+       end
+
+       def edit_video!(video:, prompt:, **opts)
+         raise NotImplementedError
+       end
+
+       # Audio generation
+       def generate_speech!(text:, model: "tts-1", voice: "alloy", **opts)
+         raise NotImplementedError
+       end
+
+       def transcribe_audio!(audio:, model: "whisper-1", **opts)
+         raise NotImplementedError
+       end
+
+       # Multimodal operations
+       def analyze_image!(image:, prompt:, model: "gpt-4-vision-preview", **opts)
+         raise NotImplementedError
+       end
+
+       def analyze_video!(video:, prompt:, model: "gpt-4-vision-preview", **opts)
+         raise NotImplementedError
+       end
+     end
+   end
+ end
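
Base defines the contract an adapter fills in; a custom adapter only needs to override what it supports, since everything else keeps raising NotImplementedError from Base. The EchoAdapter below is a hypothetical minimal example:

  module RailsAi
    module Providers
      class EchoAdapter < Base
        # The one capability this adapter provides.
        def chat!(messages:, model:, **opts)
          "echo(#{model}): #{messages.last[:content]}"
        end
      end
    end
  end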
data/lib/rails_ai/providers/dummy_adapter.rb
@@ -0,0 +1,29 @@
+ # frozen_string_literal: true
+
+ module RailsAi
+   module Providers
+     class DummyAdapter < Base
+       # Text-based operations
+       def chat!(messages:, model:, **opts) = "[dummy] #{messages.last[:content].to_s.reverse}"
+       def stream_chat!(messages:, model:, **opts, &on_token) = messages.last[:content].chars.each { |c| on_token.call(c) }
+       def embed!(texts:, model:, **opts) = texts.map { |t| [t.length.to_f] }
+
+       # Image generation
+       def generate_image!(prompt:, model:, **opts) = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg=="
+       def edit_image!(image:, prompt:, **opts) = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg=="
+       def create_variation!(image:, **opts) = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg=="
+
+       # Video generation
+       def generate_video!(prompt:, model:, **opts) = "data:video/mp4;base64,AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1wNDEAAAAIZnJlZQAAAB1tZGF0AQAAARxtYXNrAAAAAG1wNDEAAAAAIG1kYXQ="
+       def edit_video!(video:, prompt:, **opts) = "data:video/mp4;base64,AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1wNDEAAAAIZnJlZQAAAB1tZGF0AQAAARxtYXNrAAAAAG1wNDEAAAAAIG1kYXQ="
+
+       # Audio generation
+       def generate_speech!(text:, model:, **opts) = "data:audio/mp3;base64,SUQzBAAAAAAAI1RTU0UAAAAPAAADTGF2ZjU4Ljc2LjEwMAAAAAAAAAAAAAAA//tQxAADB8AhSmAhIIEVWWWU"
+       def transcribe_audio!(audio:, model:, **opts) = "[dummy transcription] #{audio.class.name}"
+
+       # Multimodal analysis
+       def analyze_image!(image:, prompt:, model:, **opts) = "[dummy] Image analysis: #{prompt}"
+       def analyze_video!(video:, prompt:, model:, **opts) = "[dummy] Video analysis: #{prompt}"
+     end
+   end
+ end
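
The dummy adapter gives deterministic, offline responses, which makes it handy in tests. From the definitions above:

  adapter = RailsAi::Providers::DummyAdapter.new

  adapter.chat!(messages: [{ role: "user", content: "hello" }], model: "test")
  # => "[dummy] olleh"

  adapter.embed!(texts: ["ab", "abcd"], model: "test")
  # => [[2.0], [4.0]]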