aia 0.9.17 → 0.9.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: ae5c7e9497837763b36226f8fc44aeb7dffe692c964e450938ff83aea043c10a
-  data.tar.gz: b839a219cb3f6e5b34d9aa02ff7f3ced77ff039816b6b2a3d9ef79076d32c302
+  metadata.gz: 41c2c4e145f3bc6789b0de6e444a7d71352abd3bdf9e97ad487539271db28584
+  data.tar.gz: 8057296778317494811114aa4692755ab45038dbccf06f14e47d43d7afe8b3d6
 SHA512:
-  metadata.gz: 7df6fe4bbf0fd2ce33b9827390340d3cf1f8fd5ad5b4f52ceb343769dee6d699ab87c5dd2686472cd480bf5ac1e20b612f5c69e70b72eab68735560601423d7c
-  data.tar.gz: 9fa99822319f6e3ff213f62ba0c26121c045e4fa96035596df46b4d723df72cecb19c1abb98f760e6b435941a3cc9b869f6401da771452c3d226930b4f4ea104
+  metadata.gz: e76f4aff3f181f6bcb710241fae8f41b0642bb2250f0dafd8fcf54220d044f01a9698288bbdbae5ac0c62b8b4834b262ed55b25e11dfa09252118f0be4cc2664
+  data.tar.gz: 0c61d610e09ae223ea8694fc18700cb5aa73a14a098a2038f9ab8f2e8c8879f50b210c516f0e4cc53309de1e7f272f85646550296a31830585b749935c26a687
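
The checksums above can be verified locally. A minimal sketch, assuming a downloaded aia-0.9.18.gem in the current directory (the standard filename produced by `gem fetch aia -v 0.9.18`); a .gem file is a tar archive whose entries include metadata.gz and data.tar.gz:

    require 'digest'
    require 'rubygems/package'

    # Hash the metadata.gz and data.tar.gz entries inside the downloaded .gem
    # and compare the output against the SHA256 values listed above.
    File.open('aia-0.9.18.gem', 'rb') do |io|
      Gem::Package::TarReader.new(io).each do |entry|
        next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)
        puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
      end
    end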
data/.version CHANGED
@@ -1 +1 @@
-0.9.17
+0.9.18
data/lib/aia/ruby_llm_adapter.rb CHANGED
@@ -243,10 +243,6 @@ module AIA
 
 
     def chat(prompt)
-      puts "[DEBUG RubyLLMAdapter.chat] Received prompt class: #{prompt.class}" if AIA.config.debug
-      puts "[DEBUG RubyLLMAdapter.chat] Prompt inspect: #{prompt.inspect[0..500]}..." if AIA.config.debug
-      puts "[DEBUG RubyLLMAdapter.chat] Models: #{@models.inspect}" if AIA.config.debug
-
       result = if @models.size == 1
                  # Single model - use the original behavior
                  single_model_chat(prompt, @models.first)
@@ -255,42 +251,29 @@ module AIA
                  multi_model_chat(prompt)
                end
 
-      puts "[DEBUG RubyLLMAdapter.chat] Returning result class: #{result.class}" if AIA.config.debug
-      puts "[DEBUG RubyLLMAdapter.chat] Result inspect: #{result.inspect[0..500]}..." if AIA.config.debug
       result
     end
 
     def single_model_chat(prompt, model_name)
-      puts "[DEBUG single_model_chat] Model name: #{model_name}" if AIA.config.debug
       chat_instance = @chats[model_name]
-      puts "[DEBUG single_model_chat] Chat instance: #{chat_instance.class}" if AIA.config.debug
-
       modes = chat_instance.model.modalities
-      puts "[DEBUG single_model_chat] Modalities: #{modes.inspect}" if AIA.config.debug
 
       # TODO: Need to consider how to handle multi-mode models
       result = if modes.text_to_text?
-                 puts "[DEBUG single_model_chat] Using text_to_text_single" if AIA.config.debug
                  text_to_text_single(prompt, model_name)
               elsif modes.image_to_text?
-                 puts "[DEBUG single_model_chat] Using image_to_text_single" if AIA.config.debug
                  image_to_text_single(prompt, model_name)
               elsif modes.text_to_image?
-                 puts "[DEBUG single_model_chat] Using text_to_image_single" if AIA.config.debug
                  text_to_image_single(prompt, model_name)
               elsif modes.text_to_audio?
-                 puts "[DEBUG single_model_chat] Using text_to_audio_single" if AIA.config.debug
                  text_to_audio_single(prompt, model_name)
               elsif modes.audio_to_text?
-                 puts "[DEBUG single_model_chat] Using audio_to_text_single" if AIA.config.debug
                  audio_to_text_single(prompt, model_name)
               else
-                 puts "[DEBUG single_model_chat] No matching modality!" if AIA.config.debug
                  # TODO: what else can be done?
                  "Error: No matching modality for model #{model_name}"
               end
 
-      puts "[DEBUG single_model_chat] Result class: #{result.class}" if AIA.config.debug
       result
     end
 
@@ -672,29 +655,15 @@ module AIA
       chat_instance = @chats[model_name]
       text_prompt = extract_text_prompt(prompt)
 
-      puts "[DEBUG RubyLLMAdapter] Sending to model #{model_name}: #{text_prompt[0..100]}..." if AIA.config.debug
-
       response = if AIA.config.context_files.empty?
                    chat_instance.ask(text_prompt)
                  else
                    chat_instance.ask(text_prompt, with: AIA.config.context_files)
                  end
 
-      # Debug output to understand the response structure
-      puts "[DEBUG RubyLLMAdapter] Response class: #{response.class}" if AIA.config.debug
-      puts "[DEBUG RubyLLMAdapter] Response inspect: #{response.inspect[0..500]}..." if AIA.config.debug
-
-      if response.respond_to?(:content)
-        puts "[DEBUG RubyLLMAdapter] Response content: #{response.content[0..200]}..." if AIA.config.debug
-      else
-        puts "[DEBUG RubyLLMAdapter] Response (no content method): #{response.to_s[0..200]}..." if AIA.config.debug
-      end
-
       # Return the full response object to preserve token information
       response
     rescue StandardError => e
-      puts "[DEBUG RubyLLMAdapter] Error in text_to_text_single: #{e.class} - #{e.message}" if AIA.config.debug
-      puts "[DEBUG RubyLLMAdapter] Backtrace: #{e.backtrace[0..5].join("\n")}" if AIA.config.debug
       e.message
     end
 
data/lib/extensions/ruby_llm/provider_fix.rb CHANGED
@@ -1,34 +1,79 @@
 # lib/extensions/ruby_llm/provider_fix.rb
 #
-# Monkey patch to fix LM Studio compatibility with RubyLLM Provider
+# Monkey patch to fix LM Studio compatibility with RubyLLM
 # LM Studio sometimes returns response.body as a String that fails JSON parsing
 # This causes "String does not have #dig method" errors in parse_error
 
+# Load RubyLLM first to ensure Provider class exists
+require 'ruby_llm'
+
 module RubyLLM
-  class Provider
+  module ProviderErrorFix
     # Override the parse_error method to handle String responses from LM Studio
+    # Parses error response from provider API.
+    #
+    # Supports two error formats:
+    # 1. OpenAI standard: {"error": {"message": "...", "type": "...", "code": "..."}}
+    # 2. Simple format: {"error": "error message"}
+    #
+    # @param response [Faraday::Response] The HTTP response
+    # @return [String, nil] The error message or nil if parsing fails
+    #
+    # @example OpenAI format
+    #   response = double(body: '{"error": {"message": "Rate limit exceeded"}}')
+    #   parse_error(response) #=> "Rate limit exceeded"
+    #
+    # @example Simple format (LM Studio, some local providers)
+    #   response = double(body: '{"error": "Token limit exceeded"}')
+    #   parse_error(response) #=> "Token limit exceeded"
     def parse_error(response)
       return if response.body.empty?
 
       body = try_parse_json(response.body)
-
-      # Be more explicit about type checking to prevent String#dig errors
       case body
       when Hash
-        # Only call dig if we're certain it's a Hash
-        body.dig('error', 'message')
+        # Handle both formats:
+        # - {"error": "message"} (LM Studio, some providers)
+        # - {"error": {"message": "..."}} (OpenAI standard)
+        error_value = body['error']
+        return nil unless error_value
+
+        case error_value
+        when Hash
+          error_value['message']
+        when String
+          error_value
+        else
+          error_value.to_s if error_value
+        end
       when Array
-        # Only call dig on array elements if they're Hashes
         body.filter_map do |part|
-          part.is_a?(Hash) ? part.dig('error', 'message') : part.to_s
+          next unless part.is_a?(Hash)
+
+          error_value = part['error']
+          next unless error_value
+
+          case error_value
+          when Hash then error_value['message']
+          when String then error_value
+          else error_value.to_s if error_value
+          end
         end.join('. ')
       else
-        # For Strings or any other type, convert to string
        body.to_s
       end
     rescue StandardError => e
-      # Fallback in case anything goes wrong
-      "Error parsing response: #{e.message}"
+      RubyLLM.logger.debug "Error parsing response: #{e.message}"
+      nil
     end
   end
-end
+end
+
+# Apply the prepend to all Provider subclasses
+# LM Studio uses the OpenAI provider, so we need to prepend to all provider classes
+RubyLLM::Provider.prepend(RubyLLM::ProviderErrorFix)
+
+# Also prepend to all registered provider classes
+RubyLLM::Provider.providers.each do |slug, provider_class|
+  provider_class.prepend(RubyLLM::ProviderErrorFix)
+end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: aia
 version: !ruby/object:Gem::Version
-  version: 0.9.17
+  version: 0.9.18
 platform: ruby
 authors:
 - Dewayne VanHoozer