aia 0.9.15 → 0.9.17
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.version +1 -1
- data/CHANGELOG.md +62 -0
- data/README.md +77 -0
- data/docs/faq.md +83 -1
- data/docs/guides/local-models.md +304 -0
- data/docs/guides/models.md +157 -0
- data/lib/aia/chat_processor_service.rb +20 -5
- data/lib/aia/directives/models.rb +135 -5
- data/lib/aia/ruby_llm_adapter.rb +174 -19
- data/lib/aia/session.rb +27 -16
- data/lib/extensions/ruby_llm/provider_fix.rb +34 -0
- data/mkdocs.yml +1 -0
- metadata +31 -1
data/lib/aia/ruby_llm_adapter.rb
CHANGED
```diff
@@ -1,6 +1,7 @@
 # lib/aia/ruby_llm_adapter.rb
 
 require 'async'
+require_relative '../extensions/ruby_llm/provider_fix'
 
 module AIA
   class RubyLLMAdapter
```
```diff
@@ -42,6 +43,8 @@ module AIA
 
       # --- Custom OpenAI Endpoint ---
       # Use this for Azure OpenAI, proxies, or self-hosted models via OpenAI-compatible APIs.
+      # For osaurus: Use model name prefix "osaurus/" and set OSAURUS_API_BASE env var
+      # For LM Studio: Use model name prefix "lms/" and set LMS_API_BASE env var
       config.openai_api_base = ENV.fetch('OPENAI_API_BASE', nil) # e.g., "https://your-azure.openai.azure.com"
 
       # --- Default Models ---
```
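As the new comments describe, local backends are selected by a prefix on the model name; the adapter strips the prefix before talking to the server (see the next hunk). A minimal illustration, using hypothetical model identifiers that are not taken from the gem's docs:

```ruby
# Illustrative model names only; the prefix picks the local backend,
# the remainder is passed through to that server unchanged.
models = [
  'ollama/llama3.2',        # routed to the Ollama provider
  'osaurus/llama-3.2-3b',   # OpenAI-compatible server at OSAURUS_API_BASE
  'lms/qwen2.5-coder-7b',   # OpenAI-compatible server at LMS_API_BASE
  'gpt-4o-mini'             # no prefix: resolved by RubyLLM as before
]
```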
```diff
@@ -83,7 +86,35 @@ module AIA
 
       @models.each do |model_name|
         begin
-          chat = RubyLLM.chat(model: model_name)
+          # Check if this is a local provider model and handle it specially
+          if model_name.start_with?('ollama/')
+            # For Ollama models, extract the actual model name and use assume_model_exists
+            actual_model = model_name.sub('ollama/', '')
+            chat = RubyLLM.chat(model: actual_model, provider: 'ollama', assume_model_exists: true)
+          elsif model_name.start_with?('osaurus/')
+            # For Osaurus models (OpenAI-compatible), create a custom context with the right API base
+            actual_model = model_name.sub('osaurus/', '')
+            custom_config = RubyLLM.config.dup
+            custom_config.openai_api_base = ENV.fetch('OSAURUS_API_BASE', 'http://localhost:11434/v1')
+            custom_config.openai_api_key = 'dummy' # Local servers don't need a real API key
+            context = RubyLLM::Context.new(custom_config)
+            chat = context.chat(model: actual_model, provider: 'openai', assume_model_exists: true)
+          elsif model_name.start_with?('lms/')
+            # For LM Studio models (OpenAI-compatible), create a custom context with the right API base
+            actual_model = model_name.sub('lms/', '')
+            lms_api_base = ENV.fetch('LMS_API_BASE', 'http://localhost:1234/v1')
+
+            # Validate model exists in LM Studio
+            validate_lms_model!(actual_model, lms_api_base)
+
+            custom_config = RubyLLM.config.dup
+            custom_config.openai_api_base = lms_api_base
+            custom_config.openai_api_key = 'dummy' # Local servers don't need a real API key
+            context = RubyLLM::Context.new(custom_config)
+            chat = context.chat(model: actual_model, provider: 'openai', assume_model_exists: true)
+          else
+            chat = RubyLLM.chat(model: model_name)
+          end
           valid_chats[model_name] = chat
         rescue StandardError => e
           failed_models << "#{model_name}: #{e.message}"
```
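The osaurus and LM Studio branches above share one pattern: duplicate the global RubyLLM config, point `openai_api_base` at the local endpoint, and build the chat through a `RubyLLM::Context` with `assume_model_exists: true` so no registry lookup is attempted. A condensed sketch of that pattern, with a hypothetical helper name that is not part of the gem:

```ruby
# Hypothetical helper distilled from the branches above; not part of aia itself.
# The 'dummy' key mirrors the diff, since local servers do not check API keys.
def build_local_openai_chat(model, api_base)
  config = RubyLLM.config.dup
  config.openai_api_base = api_base
  config.openai_api_key  = 'dummy'
  RubyLLM::Context.new(config).chat(model: model, provider: 'openai', assume_model_exists: true)
end

# e.g. build_local_openai_chat('qwen2.5-coder-7b',
#        ENV.fetch('LMS_API_BASE', 'http://localhost:1234/v1'))
```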
```diff
@@ -212,33 +243,55 @@ module AIA
 
 
     def chat(prompt)
-      if @models.size == 1
+      puts "[DEBUG RubyLLMAdapter.chat] Received prompt class: #{prompt.class}" if AIA.config.debug
+      puts "[DEBUG RubyLLMAdapter.chat] Prompt inspect: #{prompt.inspect[0..500]}..." if AIA.config.debug
+      puts "[DEBUG RubyLLMAdapter.chat] Models: #{@models.inspect}" if AIA.config.debug
+
+      result = if @models.size == 1
         # Single model - use the original behavior
         single_model_chat(prompt, @models.first)
       else
         # Multiple models - use concurrent processing
         multi_model_chat(prompt)
       end
+
+      puts "[DEBUG RubyLLMAdapter.chat] Returning result class: #{result.class}" if AIA.config.debug
+      puts "[DEBUG RubyLLMAdapter.chat] Result inspect: #{result.inspect[0..500]}..." if AIA.config.debug
+      result
     end
 
     def single_model_chat(prompt, model_name)
+      puts "[DEBUG single_model_chat] Model name: #{model_name}" if AIA.config.debug
       chat_instance = @chats[model_name]
+      puts "[DEBUG single_model_chat] Chat instance: #{chat_instance.class}" if AIA.config.debug
+
       modes = chat_instance.model.modalities
+      puts "[DEBUG single_model_chat] Modalities: #{modes.inspect}" if AIA.config.debug
 
       # TODO: Need to consider how to handle multi-mode models
-      if modes.text_to_text?
+      result = if modes.text_to_text?
+        puts "[DEBUG single_model_chat] Using text_to_text_single" if AIA.config.debug
         text_to_text_single(prompt, model_name)
       elsif modes.image_to_text?
+        puts "[DEBUG single_model_chat] Using image_to_text_single" if AIA.config.debug
         image_to_text_single(prompt, model_name)
       elsif modes.text_to_image?
+        puts "[DEBUG single_model_chat] Using text_to_image_single" if AIA.config.debug
         text_to_image_single(prompt, model_name)
       elsif modes.text_to_audio?
+        puts "[DEBUG single_model_chat] Using text_to_audio_single" if AIA.config.debug
         text_to_audio_single(prompt, model_name)
       elsif modes.audio_to_text?
+        puts "[DEBUG single_model_chat] Using audio_to_text_single" if AIA.config.debug
         audio_to_text_single(prompt, model_name)
       else
+        puts "[DEBUG single_model_chat] No matching modality!" if AIA.config.debug
         # TODO: what else can be done?
+        "Error: No matching modality for model #{model_name}"
       end
+
+      puts "[DEBUG single_model_chat] Result class: #{result.class}" if AIA.config.debug
+      result
     end
 
     def multi_model_chat(prompt)
```
```diff
@@ -263,7 +316,7 @@ module AIA
 
     def format_multi_model_results(results)
       use_consensus = should_use_consensus_mode?
-
+
       if use_consensus
         # Generate consensus response using primary model
         generate_consensus_response(results)
```
```diff
@@ -288,7 +341,7 @@ module AIA
       begin
         # Have the primary model generate the consensus
         consensus_result = primary_chat.ask(consensus_prompt).content
-
+
         # Format the consensus response
         "from: #{primary_model} (consensus)\n#{consensus_result}"
       rescue StandardError => e
```
```diff
@@ -329,7 +382,7 @@ module AIA
     def format_individual_responses(results)
       # For metrics support, return a special structure if all results have token info
       has_metrics = results.values.all? { |r| r.respond_to?(:input_tokens) && r.respond_to?(:output_tokens) }
-
+
       if has_metrics && AIA.config.show_metrics
         # Return structured data that preserves metrics for multi-model
         format_multi_model_with_metrics(results)
```
```diff
@@ -350,17 +403,17 @@ module AIA
         output.join("\n")
       end
     end
-
+
     def format_multi_model_with_metrics(results)
       # Create a composite response that includes all model responses and metrics
       formatted_content = []
       metrics_data = []
-
+
       results.each do |model_name, result|
         formatted_content << "from: #{model_name}"
         formatted_content << result.content
         formatted_content << ""
-
+
         # Collect metrics for each model
         metrics_data << {
           model_id: model_name,
```
```diff
@@ -368,20 +421,20 @@ module AIA
           output_tokens: result.output_tokens
         }
       end
-
+
       # Return a special MultiModelResponse that ChatProcessorService can handle
       MultiModelResponse.new(formatted_content.join("\n"), metrics_data)
     end
-
+
     # Helper class to carry multi-model response with metrics
     class MultiModelResponse
       attr_reader :content, :metrics_list
-
+
       def initialize(content, metrics_list)
         @content = content
         @metrics_list = metrics_list
       end
-
+
       def multi_model?
         true
       end
```
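Per the comments above, `ChatProcessorService` can distinguish this composite result from a plain response via `multi_model?` and read per-model token counts from `metrics_list`. A minimal sketch of how a caller might consume it, assuming only the structure shown in this hunk (the method name here is hypothetical):

```ruby
# Sketch only: walks the metrics hashes built in format_multi_model_with_metrics.
def print_multi_model_metrics(response)
  return unless response.respond_to?(:multi_model?) && response.multi_model?

  puts response.content
  response.metrics_list.each do |m|
    puts "#{m[:model_id]}: #{m[:input_tokens]} in / #{m[:output_tokens]} out"
  end
end
```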
```diff
@@ -415,7 +468,7 @@ module AIA
 
 
     # Clear the chat context/history
-    # Needed for the //clear
+    # Needed for the //clear and //restore directives
     def clear_context
       @chats.each do |model_name, chat|
         # Option 1: Directly clear the messages array in the current chat object
```
```diff
@@ -430,16 +483,65 @@ module AIA
       # This ensures any shared state is reset
       RubyLLM.instance_variable_set(:@chat, nil) if RubyLLM.instance_variable_defined?(:@chat)
 
-      # Option 3:
-
+      # Option 3: Try to create fresh chat instances, but don't exit on failure
+      # This is safer for use in directives like //restore
+      old_chats = @chats
+      @chats = {} # First clear the chats hash
 
       begin
         @models.each do |model_name|
-
+          # Try to recreate each chat, but if it fails, keep the old one
+          begin
+            # Check if this is a local provider model and handle it specially
+            if model_name.start_with?('ollama/')
+              actual_model = model_name.sub('ollama/', '')
+              @chats[model_name] = RubyLLM.chat(model: actual_model, provider: 'ollama', assume_model_exists: true)
+            elsif model_name.start_with?('osaurus/')
+              actual_model = model_name.sub('osaurus/', '')
+              custom_config = RubyLLM.config.dup
+              custom_config.openai_api_base = ENV.fetch('OSAURUS_API_BASE', 'http://localhost:11434/v1')
+              custom_config.openai_api_key = 'dummy'
+              context = RubyLLM::Context.new(custom_config)
+              @chats[model_name] = context.chat(model: actual_model, provider: 'openai', assume_model_exists: true)
+            elsif model_name.start_with?('lms/')
+              actual_model = model_name.sub('lms/', '')
+              lms_api_base = ENV.fetch('LMS_API_BASE', 'http://localhost:1234/v1')
+
+              # Validate model exists in LM Studio
+              validate_lms_model!(actual_model, lms_api_base)
+
+              custom_config = RubyLLM.config.dup
+              custom_config.openai_api_base = lms_api_base
+              custom_config.openai_api_key = 'dummy'
+              context = RubyLLM::Context.new(custom_config)
+              @chats[model_name] = context.chat(model: actual_model, provider: 'openai', assume_model_exists: true)
+            else
+              @chats[model_name] = RubyLLM.chat(model: model_name)
+            end
+
+            # Re-add tools if they were previously loaded
+            if @tools && !@tools.empty? && @chats[model_name].model&.supports_functions?
+              @chats[model_name].with_tools(*@tools)
+            end
+          rescue StandardError => e
+            # If we can't create a new chat, keep the old one but clear its context
+            warn "Warning: Could not recreate chat for #{model_name}: #{e.message}. Keeping existing instance."
+            @chats[model_name] = old_chats[model_name]
+            # Clear the old chat's messages if possible
+            if @chats[model_name] && @chats[model_name].instance_variable_defined?(:@messages)
+              @chats[model_name].instance_variable_set(:@messages, [])
+            end
+          end
         end
       rescue StandardError => e
-
-
+        # If something went terribly wrong, restore the old chats but clear their contexts
+        warn "Warning: Error during context clearing: #{e.message}. Attempting to recover."
+        @chats = old_chats
+        @chats.each_value do |chat|
+          if chat.instance_variable_defined?(:@messages)
+            chat.instance_variable_set(:@messages, [])
+          end
+        end
       end
 
       # Option 4: Call official clear_history method if it exists
```
```diff
@@ -498,6 +600,44 @@ module AIA
     end
 
 
+    def validate_lms_model!(model_name, api_base)
+      require 'net/http'
+      require 'json'
+
+      # Build the /v1/models endpoint URL
+      uri = URI("#{api_base.gsub(%r{/v1/?$}, '')}/v1/models")
+
+      begin
+        response = Net::HTTP.get_response(uri)
+
+        unless response.is_a?(Net::HTTPSuccess)
+          raise "Cannot connect to LM Studio at #{api_base}. Is LM Studio running?"
+        end
+
+        data = JSON.parse(response.body)
+        available_models = data['data']&.map { |m| m['id'] } || []
+
+        unless available_models.include?(model_name)
+          error_msg = "❌ '#{model_name}' is not a valid LM Studio model.\n\n"
+          if available_models.empty?
+            error_msg += "No models are currently loaded in LM Studio.\n"
+            error_msg += "Please load a model in LM Studio first."
+          else
+            error_msg += "Available LM Studio models:\n"
+            available_models.each { |m| error_msg += "  - lms/#{m}\n" }
+          end
+          raise error_msg
+        end
+      rescue JSON::ParserError => e
+        raise "Invalid response from LM Studio at #{api_base}: #{e.message}"
+      rescue StandardError => e
+        # Re-raise our custom error messages, wrap others
+        raise if e.message.start_with?('❌')
+        raise "Error connecting to LM Studio: #{e.message}"
+      end
+    end
+
+
     def extract_models_config
       models_config = AIA.config.model
 
```
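`validate_lms_model!` leans on LM Studio's OpenAI-compatible `GET /v1/models` endpoint, whose `data` array lists the loaded models by `id`. A small sketch of the extraction step on a representative (not captured) payload, with illustrative model ids:

```ruby
require 'json'

# Representative response body; the actual ids depend on what is loaded in LM Studio.
body = '{"object":"list","data":[{"id":"qwen2.5-coder-7b"},{"id":"llama-3.2-3b"}]}'

data = JSON.parse(body)
available_models = data['data']&.map { |m| m['id'] } || []
# => ["qwen2.5-coder-7b", "llama-3.2-3b"]

puts available_models.include?('qwen2.5-coder-7b') # true  -> validation passes
puts available_models.include?('missing-model')    # false -> error listing lms/-prefixed names
```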
```diff
@@ -531,15 +671,30 @@ module AIA
     def text_to_text_single(prompt, model_name)
       chat_instance = @chats[model_name]
       text_prompt = extract_text_prompt(prompt)
+
+      puts "[DEBUG RubyLLMAdapter] Sending to model #{model_name}: #{text_prompt[0..100]}..." if AIA.config.debug
+
       response = if AIA.config.context_files.empty?
                    chat_instance.ask(text_prompt)
                  else
                    chat_instance.ask(text_prompt, with: AIA.config.context_files)
                  end
 
+      # Debug output to understand the response structure
+      puts "[DEBUG RubyLLMAdapter] Response class: #{response.class}" if AIA.config.debug
+      puts "[DEBUG RubyLLMAdapter] Response inspect: #{response.inspect[0..500]}..." if AIA.config.debug
+
+      if response.respond_to?(:content)
+        puts "[DEBUG RubyLLMAdapter] Response content: #{response.content[0..200]}..." if AIA.config.debug
+      else
+        puts "[DEBUG RubyLLMAdapter] Response (no content method): #{response.to_s[0..200]}..." if AIA.config.debug
+      end
+
       # Return the full response object to preserve token information
       response
     rescue StandardError => e
+      puts "[DEBUG RubyLLMAdapter] Error in text_to_text_single: #{e.class} - #{e.message}" if AIA.config.debug
+      puts "[DEBUG RubyLLMAdapter] Backtrace: #{e.backtrace[0..5].join("\n")}" if AIA.config.debug
       e.message
     end
 
```
data/lib/aia/session.rb
CHANGED
```diff
@@ -418,23 +418,23 @@ module AIA
 
     def handle_clear_directive
       # The directive processor has called context_manager.clear_context
-      # but we need
+      # but we need to also clear the LLM client's context
 
       # First, clear the context manager's context
       @context_manager.clear_context(keep_system_prompt: true)
 
       # Second, try clearing the client's context
       if AIA.config.client && AIA.config.client.respond_to?(:clear_context)
-
+        begin
+          AIA.config.client.clear_context
+        rescue => e
+          STDERR.puts "Warning: Error clearing client context: #{e.message}"
+          # Continue anyway - the context manager has been cleared which is the main goal
+        end
       end
 
-      #
-      #
-      begin
-        AIA.config.client = AIA::RubyLLMAdapter.new
-      rescue => e
-        STDERR.puts "Error reinitializing client: #{e.message}"
-      end
+      # Note: We intentionally do NOT reinitialize the client here
+      # as that could cause termination if model initialization fails
 
       @ui_presenter.display_info("Chat context cleared.")
       nil
```
```diff
@@ -448,16 +448,27 @@ module AIA
     def handle_restore_directive(directive_output)
       # If the restore was successful, we also need to refresh the client's context
       if directive_output.start_with?("Context restored")
-        #
+        # Clear the client's context without reinitializing the entire adapter
+        # This avoids the risk of exiting if model initialization fails
         if AIA.config.client && AIA.config.client.respond_to?(:clear_context)
-
+          begin
+            AIA.config.client.clear_context
+          rescue => e
+            STDERR.puts "Warning: Error clearing client context after restore: #{e.message}"
+            # Continue anyway - the context manager has been restored which is the main goal
+          end
         end
 
-        #
-
-
-
-
+        # Rebuild the conversation in the LLM client from the restored context
+        # This ensures the LLM's internal state matches what we restored
+        if AIA.config.client && @context_manager
+          begin
+            restored_context = @context_manager.get_context
+            # The client's context has been cleared, so we can safely continue
+            # The next interaction will use the restored context from context_manager
+          rescue => e
+            STDERR.puts "Warning: Error syncing restored context: #{e.message}"
+          end
         end
       end
 
```
data/lib/extensions/ruby_llm/provider_fix.rb
ADDED
```diff
@@ -0,0 +1,34 @@
+# lib/extensions/ruby_llm/provider_fix.rb
+#
+# Monkey patch to fix LM Studio compatibility with RubyLLM Provider
+# LM Studio sometimes returns response.body as a String that fails JSON parsing
+# This causes "String does not have #dig method" errors in parse_error
+
+module RubyLLM
+  class Provider
+    # Override the parse_error method to handle String responses from LM Studio
+    def parse_error(response)
+      return if response.body.empty?
+
+      body = try_parse_json(response.body)
+
+      # Be more explicit about type checking to prevent String#dig errors
+      case body
+      when Hash
+        # Only call dig if we're certain it's a Hash
+        body.dig('error', 'message')
+      when Array
+        # Only call dig on array elements if they're Hashes
+        body.filter_map do |part|
+          part.is_a?(Hash) ? part.dig('error', 'message') : part.to_s
+        end.join('. ')
+      else
+        # For Strings or any other type, convert to string
+        body.to_s
+      end
+    rescue StandardError => e
+      # Fallback in case anything goes wrong
+      "Error parsing response: #{e.message}"
+    end
+  end
+end
```
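The failure this patch guards against comes from calling `#dig` on a `String`: when the parsed body is not a Hash or Array, the raw text presumably comes back unchanged, and `String` does not implement `#dig`. A standalone sketch (outside RubyLLM) of how the `case` above treats each body type:

```ruby
# Standalone illustration of the type handling in parse_error above; hypothetical helper name.
def error_message_from(body)
  case body
  when Hash  then body.dig('error', 'message')
  when Array then body.filter_map { |p| p.is_a?(Hash) ? p.dig('error', 'message') : p.to_s }.join('. ')
  else            body.to_s # Strings (and anything else) no longer raise NoMethodError
  end
end

puts error_message_from({ 'error' => { 'message' => 'model not loaded' } }) # "model not loaded"
puts error_message_from('upstream timeout')                                 # "upstream timeout"
puts 'plain string'.respond_to?(:dig)                                       # false -> why the patch is needed
```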
data/mkdocs.yml
CHANGED
```diff
@@ -151,6 +151,7 @@ nav:
     - Getting Started: guides/getting-started.md
     - Chat Mode: guides/chat.md
     - Working with Models: guides/models.md
+    - Local Models: guides/local-models.md
     - Available Models: guides/available-models.md
     - Image Generation: guides/image-generation.md
     - Tools Integration: guides/tools.md
```
metadata
CHANGED
```diff
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: aia
 version: !ruby/object:Gem::Version
-  version: 0.9.15
+  version: 0.9.17
 platform: ruby
 authors:
 - Dewayne VanHoozer
```
```diff
@@ -289,6 +289,20 @@ dependencies:
   - - ">="
     - !ruby/object:Gem::Version
       version: '0'
+- !ruby/object:Gem::Dependency
+  name: simplecov_lcov_formatter
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: tocer
   requirement: !ruby/object:Gem::Requirement
```
```diff
@@ -303,6 +317,20 @@ dependencies:
   - - ">="
     - !ruby/object:Gem::Version
       version: '0'
+- !ruby/object:Gem::Dependency
+  name: webmock
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 description: 'AIA is a revolutionary CLI console application that brings multi-model
   AI capabilities to your command line, supporting 20+ providers including OpenAI,
   Anthropic, and Google. Run multiple AI models simultaneously for comparison, get
```
```diff
@@ -354,6 +382,7 @@ files:
 - docs/guides/getting-started.md
 - docs/guides/image-generation.md
 - docs/guides/index.md
+- docs/guides/local-models.md
 - docs/guides/models.md
 - docs/guides/tools.md
 - docs/index.md
```
```diff
@@ -416,6 +445,7 @@ files:
 - lib/extensions/openstruct_merge.rb
 - lib/extensions/ruby_llm/.irbrc
 - lib/extensions/ruby_llm/modalities.rb
+- lib/extensions/ruby_llm/provider_fix.rb
 - lib/refinements/string.rb
 - main.just
 - mcp_servers/README.md
```