rails_ai 0.2.4 → 0.2.5
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- checksums.yaml +4 -4
- data/lib/rails_ai/version.rb +1 -1
- data/lib/rails_ai.rb +169 -391
- data/setup_wiki.sh +55 -0
- metadata +2 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f3462b92ede383e82fc730daa4a03aab2e25c03afde4d269345744658b726d39
+  data.tar.gz: 4e0d4a89fe63816f4f91e1782fa03c6a4eec49db2f7820216bf6aaf6d659604b
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 4008bd2db4e5e0770c9847570e0db636dcdbb70a27adf6a4c941af33a2e63535b5f70fb341f93f1f12ca548e2e597b667bb4307851344c7a6497034f1c1f0f96
+  data.tar.gz: 1336af819a3971b20bf6570234ac73bf98796ae7a487cbe7a1b4d0b8a87a7f6e92c40b73478cd38b36487384cf6760df8ca38c245585b9f2d61a75f6f74b8efb
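To check a downloaded copy against the published SHA256 values above, here is a minimal Ruby sketch. The local filename is an assumption (a `gem fetch rails_ai --version 0.2.5` would produce it), and note that checksums.yaml hashes the two tar members inside the .gem, not the .gem file itself:

require "digest"
require "rubygems/package"

# Hash the tar members of the downloaded .gem and compare the output
# against the SHA256 entries in checksums.yaml above.
io = File.open("rails_ai-0.2.5.gem", "rb") # hypothetical local path
Gem::Package::TarReader.new(io).each do |entry|
  next unless ["metadata.gz", "data.tar.gz"].include?(entry.full_name)
  puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
end
io.close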
data/lib/rails_ai/version.rb
CHANGED
data/lib/rails_ai.rb
CHANGED
@@ -48,169 +48,58 @@ module RailsAi
   class << self
     # Version compatibility helpers
     def rails_version
-      Rails.version.
+      Rails.version.to_f
     end

-    def
-
+    def ruby_version
+      RUBY_VERSION.to_f
     end

-
-
+    # Configuration
+    def config
+      @config ||= Config.new
     end

-    def
-
+    def configure
+      yield(config)
     end

-
-
-
-
-
-
-
-      case config.provider.to_sym
-      when :openai then Providers::SecureOpenAIAdapter.new
-      when :anthropic then Providers::SecureAnthropicAdapter.new
-      when :gemini then Providers::GeminiAdapter.new
-      when :dummy then Providers::DummyAdapter.new
-      else Providers::DummyAdapter.new
-      end
-    end
-    end
-
-    # Agent management
-    def agent_manager
-      @agent_manager ||= Agents::AgentManager.new
-    end
-
-    def create_agent(name:, role:, capabilities: [], **opts)
-      agent = Agents::BaseAgent.new(
-        name: name,
-        role: role,
-        capabilities: capabilities,
-        **opts
+    # Performance optimizations
+    def connection_pool
+      @connection_pool ||= Concurrent::ThreadPoolExecutor.new(
+        min_threads: 2,
+        max_threads: 10,
+        max_queue: 100,
+        auto_terminate: true
       )
-      agent_manager.register_agent(agent)
-      agent
-    end
-
-    def create_research_agent(name: "ResearchAgent", **opts)
-      agent = Agents::ResearchAgent.new(name: name, **opts)
-      agent_manager.register_agent(agent)
-      agent
-    end
-
-    def create_creative_agent(name: "CreativeAgent", **opts)
-      agent = Agents::CreativeAgent.new(name: name, **opts)
-      agent_manager.register_agent(agent)
-      agent
     end

-    def
-
-
-
-
-
-
-      agent = Agents::CoordinatorAgent.new(name: name, **opts)
-      agent_manager.register_agent(agent)
-      agent
-    end
-
-    def get_agent(name)
-      agent_manager.get_agent(name)
-    end
-
-    def list_agents
-      agent_manager.list_agents
-    end
-
-    def start_agents!
-      agent_manager.start!
-    end
-
-    def stop_agents!
-      agent_manager.stop!
-    end
-
-    # Task management
-    def submit_task(task)
-      agent_manager.submit_task(task)
-    end
-
-    def assign_task(task, agent_name)
-      agent_manager.assign_task_to_agent(task, agent_name)
-    end
-
-    def auto_assign_task(task)
-      agent_manager.auto_assign_task(task)
-    end
-
-    # Agent teams
-    def create_agent_team(name, agents, strategy: :round_robin)
-      agent_manager.create_agent_team(name, agents, collaboration_strategy: strategy)
-    end
-
-    def orchestrate_collaboration(task, agent_names)
-      agent_manager.orchestrate_collaboration(task, agent_names)
-    end
-
-    # Agent communication
-    def send_agent_message(from_agent, to_agent, message)
-      agent_manager.send_message(from_agent, to_agent, message)
-    end
-
-    def broadcast_agent_message(from_agent, message, exclude: [])
-      agent_manager.broadcast_message(from_agent, message, exclude: exclude)
+    def batch_processor
+      @batch_processor ||= Concurrent::ThreadPoolExecutor.new(
+        min_threads: 1,
+        max_threads: 5,
+        max_queue: 50,
+        auto_terminate: true
+      )
     end

-
-
-      agent_manager.system_status
+    def smart_cache
+      @smart_cache ||= Cache.new
     end

-    def
-
+    def request_deduplicator
+      @request_deduplicator ||= RequestDeduplicator.new
     end

-    # Performance monitoring
     def performance_monitor
-      @performance_monitor ||= Performance::
+      @performance_monitor ||= Performance::Monitor.new
     end

-    def
-
-    end
-
-    # Connection pool for HTTP requests
-    def connection_pool
-      @connection_pool ||= Performance::ConnectionPool.new(size: config.connection_pool_size)
-    end
-
-    # Smart caching with compression
-    def smart_cache
-      @smart_cache ||= Performance::SmartCache.new(
-        compression_threshold: config.compression_threshold
-      )
-    end
-
-    # Request deduplication
-    def request_deduplicator
-      @request_deduplicator ||= Performance::RequestDeduplicator.new
-    end
-
-    # Batch processor for multiple operations
-    def batch_processor
-      @batch_processor ||= Performance::BatchProcessor.new(
-        batch_size: config.batch_size,
-        flush_interval: config.flush_interval
-      )
+    def agent_manager
+      @agent_manager ||= Agents::AgentManager.new
     end

-    #
+    # Core AI methods
     def chat(prompt_or_messages, model: config.default_model, **opts)
       performance_monitor.measure(:chat) do
         messages = normalize_messages(prompt_or_messages)
@@ -235,331 +124,220 @@ module RailsAi
       end
     end

-    def embed(texts, model:
+    def embed(texts, model: "text-embedding-3-small", **opts)
       performance_monitor.measure(:embed) do
-
-        cache_key = [:embed, model,
+        texts_array = Array(texts)
+        cache_key = [:embed, model, texts_array.hash]

         smart_cache.fetch(cache_key, expires_in: config.cache_ttl) do
           request_deduplicator.deduplicate(cache_key) do
-            provider.embed!(texts:
+            provider.embed!(texts: texts_array, model: model, **opts)
           end
         end
       end
     end

-    #
-    def
-      performance_monitor.measure(:
-
-
-        smart_cache.fetch(cache_key, expires_in: config.cache_ttl) do
-          request_deduplicator.deduplicate(cache_key) do
-            provider.generate_image!(prompt: prompt, model: model, size: size, quality: quality, **opts)
-          end
-        end
+    # Multimodal capabilities
+    def analyze_image(image, prompt, model: "gpt-4o", **opts)
+      performance_monitor.measure(:analyze_image) do
+        provider.analyze_image!(image: image, prompt: prompt, model: model, **opts)
       end
     end

-    def
-      performance_monitor.measure(:
-
-
-        smart_cache.fetch(cache_key, expires_in: config.cache_ttl) do
-          request_deduplicator.deduplicate(cache_key) do
-            provider.edit_image!(image: image, prompt: prompt, mask: mask, size: size, **opts)
-          end
-        end
+    def generate_image(prompt, model: "dall-e-3", **opts)
+      performance_monitor.measure(:generate_image) do
+        provider.generate_image!(prompt: prompt, model: model, **opts)
       end
     end

-    def
-      performance_monitor.measure(:
-
-
-        smart_cache.fetch(cache_key, expires_in: config.cache_ttl) do
-          request_deduplicator.deduplicate(cache_key) do
-            provider.create_variation!(image: image, size: size, **opts)
-          end
-        end
+    def analyze_video(video, prompt, model: "gpt-4o", **opts)
+      performance_monitor.measure(:analyze_video) do
+        provider.analyze_video!(video: video, prompt: prompt, model: model, **opts)
       end
     end

-
-    def generate_video(prompt, model: "sora", duration: 5, **opts)
+    def generate_video(prompt, model: "runway-gen-3", **opts)
       performance_monitor.measure(:generate_video) do
-
-
-        smart_cache.fetch(cache_key, expires_in: config.cache_ttl) do
-          request_deduplicator.deduplicate(cache_key) do
-            provider.generate_video!(prompt: prompt, model: model, duration: duration, **opts)
-          end
-        end
+        provider.generate_video!(prompt: prompt, model: model, **opts)
       end
     end

-    def
-      performance_monitor.measure(:
-
-
-        smart_cache.fetch(cache_key, expires_in: config.cache_ttl) do
-          request_deduplicator.deduplicate(cache_key) do
-            provider.edit_video!(video: video, prompt: prompt, **opts)
-          end
-        end
+    def analyze_audio(audio, prompt, model: "whisper-1", **opts)
+      performance_monitor.measure(:analyze_audio) do
+        provider.analyze_audio!(audio: audio, prompt: prompt, model: model, **opts)
       end
     end

-
-
-
-        cache_key = [:speech, model, text.hash, voice]
-
-        smart_cache.fetch(cache_key, expires_in: config.cache_ttl) do
-          request_deduplicator.deduplicate(cache_key) do
-            provider.generate_speech!(text: text, model: model, voice: voice, **opts)
-          end
-        end
+    def generate_audio(prompt, model: "tts-1", **opts)
+      performance_monitor.measure(:generate_audio) do
+        provider.generate_audio!(prompt: prompt, model: model, **opts)
       end
     end

-
-
-
-
-
-
-            provider.transcribe_audio!(audio: audio, model: model, **opts)
-          end
-        end
+    # Context awareness
+    def chat_with_context(prompt, context_objects = [], model: config.default_model, **opts)
+      performance_monitor.measure(:chat_with_context) do
+        context_analyzer = ContextAnalyzer.new
+        enhanced_prompt = context_analyzer.enhance_prompt(prompt, context_objects)
+        chat(enhanced_prompt, model: model, **opts)
       end
     end

-
-
-
-
-
-        smart_cache.fetch(cache_key, expires_in: config.cache_ttl) do
-          request_deduplicator.deduplicate(cache_key) do
-            provider.analyze_image!(image: image, prompt: prompt, model: model, **opts)
-          end
-        end
+    def analyze_window_context(url, referrer, user_agent, **opts)
+      performance_monitor.measure(:analyze_window_context) do
+        window_context = WindowContext.new(url: url, referrer: referrer, user_agent: user_agent)
+        window_context.analyze(**opts)
       end
     end

-    def
-      performance_monitor.measure(:
-
-
-        smart_cache.fetch(cache_key, expires_in: config.cache_ttl) do
-          request_deduplicator.deduplicate(cache_key) do
-            provider.analyze_video!(video: video, prompt: prompt, model: model, **opts)
-          end
-        end
+    def analyze_image_context(image, **opts)
+      performance_monitor.measure(:analyze_image_context) do
+        image_context = ImageContext.new(image)
+        image_context.analyze(**opts)
       end
     end

-    #
-    def
-
-      requests.map do |request|
-        batch_processor.add_operation(-> { chat(request[:prompt], **request.except(:prompt)) })
-      end
-    end
+    # Agent system
+    def create_agent(name, type: :general, **opts)
+      agent_manager.create_agent(name, type: type, **opts)
     end

-    def
-
-      texts_array.map do |texts|
-        batch_processor.add_operation(-> { embed(texts) })
-      end
-    end
+    def create_research_agent(name: "Research Agent", **opts)
+      agent_manager.create_agent(name, type: :research, **opts)
     end

-
-
-      performance_monitor.measure(:analyze_image_with_context) do
-        cache_key = [:context_image_analysis, prompt.hash, image.hash, user_context.hash, window_context.hash, image_context.hash]
-
-        smart_cache.fetch(cache_key, expires_in: config.cache_ttl) do
-          analyzer = ContextAnalyzer.new(
-            user_context: user_context,
-            window_context: window_context,
-            image_context: image_context
-          )
-          analyzer.analyze_with_context(image, prompt, **opts)
-        end
-      end
+    def create_creative_agent(name: "Creative Agent", **opts)
+      agent_manager.create_agent(name, type: :creative, **opts)
     end

-    def
-
-      cache_key = [:context_generate, prompt.hash, user_context.hash, window_context.hash]
-
-      smart_cache.fetch(cache_key, expires_in: config.cache_ttl) do
-        analyzer = ContextAnalyzer.new(
-          user_context: user_context,
-          window_context: window_context
-        )
-        analyzer.generate_with_context(prompt, **opts)
-      end
-    end
+    def create_technical_agent(name: "Technical Agent", **opts)
+      agent_manager.create_agent(name, type: :technical, **opts)
     end

-    def
-
-      cache_key = [:context_image_generate, prompt.hash, user_context.hash, window_context.hash]
-
-      smart_cache.fetch(cache_key, expires_in: config.cache_ttl) do
-        analyzer = ContextAnalyzer.new(
-          user_context: user_context,
-          window_context: window_context
-        )
-        analyzer.generate_image_with_context(prompt, **opts)
-      end
-    end
+    def create_agent_team(name, agents, strategy: :round_robin, **opts)
+      agent_manager.create_agent_team(name, agents, collaboration_strategy: strategy, **opts)
     end

-
-
-
+    def submit_task(task, agent_team = nil)
+      if agent_team
+        agent_team.assign_task(task)
+      else
+        agent_manager.submit_task(task)
+      end
     end

-
-
+    # Provider management
+    def provider
+      case config.provider.to_sym
+      when :openai then Providers::SecureOpenAIAdapter.new
+      when :anthropic then Providers::SecureAnthropicAdapter.new
+      when :gemini then Providers::GeminiAdapter.new
+      when :dummy then Providers::DummyAdapter.new
+      else Providers::DummyAdapter.new
+      end
     end

-    def
-
+    def provider=(new_provider)
+      config.provider = new_provider
     end

-
-
+    # Utility methods
+    def normalize_messages(prompt_or_messages)
+      if prompt_or_messages.is_a?(String)
+        [{ role: "user", content: prompt_or_messages }]
+      else
+        prompt_or_messages
+      end
     end

-    def
-
+    def redact_sensitive_data(text)
+      Redactor.call(text)
     end

-    def
-
+    def log_event(kind:, name:, payload: {}, latency_ms: nil)
+      Events.log!(kind: kind, name: name, payload: payload, latency_ms: latency_ms)
     end

-    #
-    def
-
-      provider
-      connection_pool
-      smart_cache
-      request_deduplicator
-      batch_processor
-      agent_manager
+    # Security methods
+    def validate_input(input, type: :text)
+      Security::InputValidator.validate(input, type: type)
     end

-    def
-
+    def sanitize_content(content)
+      Security::ContentSanitizer.sanitize(content)
     end

-    def
-
+    def check_rate_limit(identifier, limit: 100, window: 1.hour)
+      Security::RateLimiter.check(identifier, limit: limit, window: window)
     end

-
-
-    def normalize_messages(prompt_or_messages)
-      messages = prompt_or_messages.is_a?(Array) ? prompt_or_messages : [{role: "user", content: prompt_or_messages}]
-      text = RailsAi::Redactor.call(messages.last[:content])
-      messages[-1] = messages.last.merge(content: text)
-      messages
+    def scan_for_vulnerabilities
+      Security::VulnerabilityScanner.scan
     end
-  end
-
-  # Security methods
-  def self.validate_input(input, type: :text)
-    Security::InputValidator.send("validate_#{type}_input", input)
-  end
-
-  def self.sanitize_content(content)
-    Security::ContentSanitizer.sanitize_content(content)
-  end

-
-
-
+    def handle_security_error(error, context = {})
+      Security::ErrorHandler.handle_security_error(error, context)
+    end

-
-
-
+    # Response cleaning utility
+    def clean_response(raw_response)
+      return nil if raw_response.nil?

-
-
-
+      # Convert to string
+      response = raw_response.to_s
+
+      # Ensure UTF-8 encoding
+      response = response.encode('UTF-8', 'UTF-8', invalid: :replace, undef: :replace, replace: '?')
+
+      # Remove any control characters that might cause issues
+      response = response.gsub(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/, '')
+
+      response
+    end

-
-
-
-
+    # Enhanced chat method with automatic response cleaning
+    def chat_clean(prompt_or_messages, model: config.default_model, **opts)
+      raw_response = chat(prompt_or_messages, model: model, **opts)
+      clean_response(raw_response)
+    end

-
-
-
-
-      # Check if the prompt needs web search
-      web_keywords = ['current', 'latest', 'today', 'now', 'recent', 'weather', 'news', 'stock', 'price']
-      needs_web_search = web_keywords.any? { |keyword| prompt.downcase.include?(keyword) }
-
-      if needs_web_search
-        begin
-          # Perform web search
-          search_results = WebSearch.search(prompt, num_results: 3)
-
-          # Enhance the prompt with web results
-          web_context = "\n\nRecent web search results:\n"
-          search_results.each_with_index do |result, index|
-            web_context += "#{index + 1}. #{result[:title]}\n #{result[:snippet]}\n Source: #{result[:link]}\n\n"
-          end
-
-          enhanced_prompt = "#{prompt}\n\nPlease use the following web search results to provide current, up-to-date information:#{web_context}"
-
-          # Get AI response with web context
-          chat(enhanced_prompt, model: model, **opts)
-        rescue WebSearch::SearchError => e
-          # Fallback to regular chat if web search fails
-          chat(prompt, model: model, **opts)
-        end
-      else
-        # Regular chat for non-time-sensitive queries
-        chat(prompt, model: model, **opts)
+    # Enhanced web search chat with automatic response cleaning
+    def chat_with_web_search_clean(prompt, model: config.default_model, **opts)
+      raw_response = chat_with_web_search(prompt, model: model, **opts)
+      clean_response(raw_response)
     end
   end
 end

-
-  def self.clean_response(raw_response)
-    return nil if raw_response.nil?
-
-    # Convert to string
-    response = raw_response.to_s
-
-    # Ensure UTF-8 encoding
-    response = response.encode('UTF-8', 'UTF-8', invalid: :replace, undef: :replace, replace: '?')
-
-    # Remove any control characters that might cause issues
-    response = response.gsub(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/, '')
-
-    response
-  end
-
-  # Enhanced chat method with automatic response cleaning
-  def self.chat_clean(prompt_or_messages, model: config.default_model, **opts)
-    raw_response = chat(prompt_or_messages, model: model, **opts)
-    clean_response(raw_response)
-  end
+require_relative "rails_ai/web_search"

-
-
-
-
+# Web-enhanced chat with real-time information
+def RailsAi.chat_with_web_search(prompt, model: RailsAi.config.default_model, **opts)
+  # Check if the prompt needs web search
+  web_keywords = ['current', 'latest', 'today', 'now', 'recent', 'weather', 'news', 'stock', 'price']
+  needs_web_search = web_keywords.any? { |keyword| prompt.downcase.include?(keyword) }
+
+  if needs_web_search
+    begin
+      # Perform web search
+      search_results = WebSearch.search(prompt, num_results: 3)
+
+      # Enhance the prompt with web results
+      web_context = "\n\nRecent web search results:\n"
+      search_results.each_with_index do |result, index|
+        web_context += "#{index + 1}. #{result[:title]}\n #{result[:snippet]}\n Source: #{result[:link]}\n\n"
+      end
+
+      enhanced_prompt = "#{prompt}\n\nPlease use the following web search results to provide current, up-to-date information:#{web_context}"
+
+      # Get AI response with web context
+      RailsAi.chat(enhanced_prompt, model: model, **opts)
+    rescue WebSearch::SearchError => e
+      # Fallback to regular chat if web search fails
+      RailsAi.chat(prompt, model: model, **opts)
+    end
+  else
+    # Regular chat for non-time-sensitive queries
+    RailsAi.chat(prompt, model: model, **opts)
   end
 end
data/setup_wiki.sh
ADDED
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+# Setup GitHub Wiki for Rails AI Gem
+echo "Setting up GitHub Wiki for Rails AI Gem..."
+
+# Check if we're in the right directory
+if [ ! -d "wiki" ]; then
+  echo "Error: wiki directory not found. Please run this script from the rails_ai root directory."
+  exit 1
+fi
+
+# Clone the wiki repository
+echo "Cloning wiki repository..."
+git clone https://github.com/DanielAmah/rails_ai.wiki.git wiki_repo
+
+if [ $? -ne 0 ]; then
+  echo "Error: Could not clone wiki repository. Make sure the wiki is enabled on GitHub."
+  echo "To enable wiki:"
+  echo "1. Go to https://github.com/DanielAmah/rails_ai"
+  echo "2. Click on 'Settings' tab"
+  echo "3. Scroll down to 'Features' section"
+  echo "4. Check 'Wikis' to enable it"
+  echo "5. Run this script again"
+  exit 1
+fi
+
+# Copy wiki files
+echo "Copying wiki files..."
+cp wiki/*.md wiki_repo/
+
+# Navigate to wiki repo
+cd wiki_repo
+
+# Add and commit files
+echo "Adding wiki files to git..."
+git add .
+git commit -m "Add comprehensive wiki documentation for Rails AI Gem"
+
+# Push to GitHub
+echo "Pushing wiki to GitHub..."
+git push origin master
+
+if [ $? -eq 0 ]; then
+  echo "✅ Wiki successfully set up!"
+  echo "You can now view it at: https://github.com/DanielAmah/rails_ai/wiki"
+else
+  echo "❌ Error pushing wiki to GitHub"
+  exit 1
+fi
+
+# Clean up
+cd ..
+rm -rf wiki_repo
+
+echo "Wiki setup complete!"
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: rails_ai
 version: !ruby/object:Gem::Version
-  version: 0.2.4
+  version: 0.2.5
 platform: ruby
 authors:
 - Daniel Amah
@@ -349,6 +349,7 @@ files:
 - rails_ai.gemspec
 - scripts/security_scanner.rb
 - setup_monitoring.sh
+- setup_wiki.sh
 - wiki/API-Documentation.md
 - wiki/Architecture-Overview.md
 - wiki/Contributing-Guide.md