aia 0.9.15 → 0.9.16

This diff compares the contents of two publicly released versions of the package, as published to its public registry, and is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: acca553714c1fe19ed2a152a5ca500691d0435817369e60a1d773b9996d02802
-  data.tar.gz: f97fd1bc43cf1d8d1a8d2364313ad59a89462c31b71648c28828faa3ddc36ab9
+  metadata.gz: d93978bc1dc5486b24cb460ecdd13bbcefa83c3ce7a6cb3b7b7454c21b66e979
+  data.tar.gz: 169d574acd4e9b4127ba8f47526e4767bc86cd5e7c3c144f8f79de2f2dfefa25
 SHA512:
-  metadata.gz: 3b54ffdbcc2268e9a509f4c144182249c2d02cc520b0f1f37d7ebe8a4e79400cd2e23a96f77660019c5fb205a0bdf5df8eff14c3809f24c58886aee1fbde306f
-  data.tar.gz: 57a0a68ddedd0bdd69a5f25802cb9a09f8c300829d2e48c2f6025147eae3b29c861d47004614dc33108cf3a403605ee98ad9c0c239aae47e05afb8def40fb738
+  metadata.gz: 82b26b797a06a1af89e96ced21b8a182a9f9e6fd31e1cb20c8ad5e070b045f31430f89f8c3fc82d79d5714363d3289e80cdbf716bc6a9f86d4b6f59cd9a6a96e
+  data.tar.gz: 6c2f8953be44fc7d98fe2bef0680831f09947ee75c9d60b251ecdb243057da83a836de904a4ed45e4776280cad4d048a9485a249c692213aec9496fe05eb8cf7
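A `.gem` archive is itself a tar file containing `metadata.gz`, `data.tar.gz`, and this `checksums.yaml.gz`, so the hashes above can be checked against an unpacked copy. A minimal sketch using Ruby's standard `digest` library; the expected value is the 0.9.16 `metadata.gz` SHA256 from above, and the local path assumes you have already run `tar -xf aia-0.9.16.gem` in the current directory:

```ruby
require 'digest'

# Compare a locally unpacked gem component against the published SHA256.
expected = 'd93978bc1dc5486b24cb460ecdd13bbcefa83c3ce7a6cb3b7b7454c21b66e979'
actual   = Digest::SHA256.file('metadata.gz').hexdigest

puts(actual == expected ? 'metadata.gz checksum OK' : 'metadata.gz checksum MISMATCH')
```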
data/.version CHANGED
@@ -1 +1 @@
-0.9.15
+0.9.16
data/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
 # Changelog
 ## [Unreleased]
+
+### [0.9.16] 2025-09-26
+
+#### New Features
+- **NEW FEATURE**: Added support for Ollama AI provider
+- **NEW FEATURE**: Added support for Osaurus AI provider
+- **NEW FEATURE**: Added support for LM Studio AI provider
+
+#### Improvements
+- **ENHANCEMENT**: Expanded AI provider ecosystem with three new local/self-hosted model options
+- **ENHANCEMENT**: Improved flexibility for users preferring local LLM deployments
+
 ## Released
 ### [0.9.15] 2025-09-21
 
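The three providers added in 0.9.16 are selected by model-name prefix. The summary below is assembled from the Ruby adapter changes later in this diff; Ollama is routed through RubyLLM's native provider, while the other two go through OpenAI-compatible endpoints:

| Prefix | Backend | Endpoint override | Default API base |
|---|---|---|---|
| `ollama/` | Ollama, via RubyLLM's native provider | n/a (RubyLLM default) | n/a |
| `osaurus/` | Osaurus, OpenAI-compatible | `OSAURUS_API_BASE` | `http://localhost:11434/v1` |
| `lms/` | LM Studio, OpenAI-compatible | `LMS_API_BASE` | `http://localhost:1234/v1` |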
@@ -42,6 +42,8 @@ module AIA
 
       # --- Custom OpenAI Endpoint ---
       # Use this for Azure OpenAI, proxies, or self-hosted models via OpenAI-compatible APIs.
+      # For osaurus: Use model name prefix "osaurus/" and set OSAURUS_API_BASE env var
+      # For LM Studio: Use model name prefix "lms/" and set LMS_API_BASE env var
       config.openai_api_base = ENV.fetch('OPENAI_API_BASE', nil) # e.g., "https://your-azure.openai.azure.com"
 
       # --- Default Models ---
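For context, `openai_api_base` is RubyLLM's standard knob for redirecting its OpenAI client. A minimal sketch of pointing it at a local OpenAI-compatible server, assuming RubyLLM's documented `configure` block; the fallback URL mirrors the LM Studio default used later in this diff, and the key handling matches the adapter's approach:

```ruby
require 'ruby_llm'

RubyLLM.configure do |config|
  # Any OpenAI-compatible server works here; LM Studio's conventional
  # local address is used as an illustrative fallback.
  config.openai_api_base = ENV.fetch('OPENAI_API_BASE', 'http://localhost:1234/v1')
  config.openai_api_key  = ENV.fetch('OPENAI_API_KEY', 'dummy') # local servers don't validate keys
end
```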
@@ -83,7 +85,30 @@ module AIA
 
       @models.each do |model_name|
         begin
-          chat = RubyLLM.chat(model: model_name)
+          # Check if this is a local provider model and handle it specially
+          if model_name.start_with?('ollama/')
+            # For Ollama models, extract the actual model name and use assume_model_exists
+            actual_model = model_name.sub('ollama/', '')
+            chat = RubyLLM.chat(model: actual_model, provider: 'ollama', assume_model_exists: true)
+          elsif model_name.start_with?('osaurus/')
+            # For Osaurus models (OpenAI-compatible), create a custom context with the right API base
+            actual_model = model_name.sub('osaurus/', '')
+            custom_config = RubyLLM.config.dup
+            custom_config.openai_api_base = ENV.fetch('OSAURUS_API_BASE', 'http://localhost:11434/v1')
+            custom_config.openai_api_key = 'dummy' # Local servers don't need a real API key
+            context = RubyLLM::Context.new(custom_config)
+            chat = context.chat(model: actual_model, provider: 'openai', assume_model_exists: true)
+          elsif model_name.start_with?('lms/')
+            # For LM Studio models (OpenAI-compatible), create a custom context with the right API base
+            actual_model = model_name.sub('lms/', '')
+            custom_config = RubyLLM.config.dup
+            custom_config.openai_api_base = ENV.fetch('LMS_API_BASE', 'http://localhost:1234/v1')
+            custom_config.openai_api_key = 'dummy' # Local servers don't need a real API key
+            context = RubyLLM::Context.new(custom_config)
+            chat = context.chat(model: actual_model, provider: 'openai', assume_model_exists: true)
+          else
+            chat = RubyLLM.chat(model: model_name)
+          end
           valid_chats[model_name] = chat
         rescue StandardError => e
           failed_models << "#{model_name}: #{e.message}"
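One detail worth noting: the Osaurus fallback `http://localhost:11434/v1` is the port conventionally used by Ollama, so running both side by side presumably requires setting `OSAURUS_API_BASE` explicitly. Below is a standalone sketch of the LM Studio branch above, using only calls that appear in this diff (`RubyLLM.config.dup`, `RubyLLM::Context.new`, `context.chat`, `chat.ask`); the model name is a placeholder for whatever the local server has loaded:

```ruby
require 'ruby_llm'

# Duplicate the global config so the custom endpoint stays scoped
# to this one chat rather than leaking into other RubyLLM calls.
custom_config = RubyLLM.config.dup
custom_config.openai_api_base = ENV.fetch('LMS_API_BASE', 'http://localhost:1234/v1')
custom_config.openai_api_key  = 'dummy' # local servers don't validate keys

context = RubyLLM::Context.new(custom_config)
chat = context.chat(model: 'qwen2.5-7b-instruct', # placeholder model name
                    provider: 'openai',
                    assume_model_exists: true)

puts chat.ask('Reply with one short sentence.').content
```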
@@ -263,7 +288,7 @@ module AIA
 
     def format_multi_model_results(results)
       use_consensus = should_use_consensus_mode?
-      
+
       if use_consensus
         # Generate consensus response using primary model
         generate_consensus_response(results)
@@ -288,7 +313,7 @@ module AIA
       begin
         # Have the primary model generate the consensus
         consensus_result = primary_chat.ask(consensus_prompt).content
-        
+
         # Format the consensus response
         "from: #{primary_model} (consensus)\n#{consensus_result}"
       rescue StandardError => e
@@ -329,7 +354,7 @@ module AIA
     def format_individual_responses(results)
       # For metrics support, return a special structure if all results have token info
       has_metrics = results.values.all? { |r| r.respond_to?(:input_tokens) && r.respond_to?(:output_tokens) }
-      
+
       if has_metrics && AIA.config.show_metrics
         # Return structured data that preserves metrics for multi-model
         format_multi_model_with_metrics(results)
@@ -350,17 +375,17 @@ module AIA
         output.join("\n")
       end
     end
-    
+
     def format_multi_model_with_metrics(results)
       # Create a composite response that includes all model responses and metrics
       formatted_content = []
       metrics_data = []
-      
+
       results.each do |model_name, result|
         formatted_content << "from: #{model_name}"
         formatted_content << result.content
         formatted_content << ""
-        
+
         # Collect metrics for each model
         metrics_data << {
           model_id: model_name,
@@ -368,20 +393,20 @@ module AIA
           output_tokens: result.output_tokens
         }
       end
-      
+
       # Return a special MultiModelResponse that ChatProcessorService can handle
       MultiModelResponse.new(formatted_content.join("\n"), metrics_data)
     end
-    
+
     # Helper class to carry multi-model response with metrics
     class MultiModelResponse
       attr_reader :content, :metrics_list
-      
+
       def initialize(content, metrics_list)
         @content = content
         @metrics_list = metrics_list
       end
-      
+
       def multi_model?
         true
       end
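A sketch of how a consumer such as `ChatProcessorService` might use the `MultiModelResponse` defined above. The hand-built content and token counts are illustrative, and the class is referenced without its enclosing namespace, which this diff does not show:

```ruby
# Illustrative only: the content string and metrics hashes mimic what
# format_multi_model_with_metrics assembles from real results.
response = MultiModelResponse.new(
  "from: gpt-4o\nHello there.\n\nfrom: ollama/llama3\nHi!\n",
  [
    { model_id: 'gpt-4o',        input_tokens: 12, output_tokens: 4 },
    { model_id: 'ollama/llama3', input_tokens: 12, output_tokens: 3 }
  ]
)

if response.respond_to?(:multi_model?) && response.multi_model?
  puts response.content
  response.metrics_list.each do |m|
    puts format('%s: %d in / %d out', m[:model_id], m[:input_tokens], m[:output_tokens])
  end
end
```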
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: aia
 version: !ruby/object:Gem::Version
-  version: 0.9.15
+  version: 0.9.16
 platform: ruby
 authors:
 - Dewayne VanHoozer