ruby_llm 1.12.0 → 1.14.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (141)
  1. checksums.yaml +4 -4
  2. data/README.md +11 -5
  3. data/lib/generators/ruby_llm/agent/agent_generator.rb +36 -0
  4. data/lib/generators/ruby_llm/agent/templates/agent.rb.tt +6 -0
  5. data/lib/generators/ruby_llm/agent/templates/instructions.txt.erb.tt +0 -0
  6. data/lib/generators/ruby_llm/chat_ui/chat_ui_generator.rb +110 -41
  7. data/lib/generators/ruby_llm/chat_ui/templates/controllers/chats_controller.rb.tt +14 -15
  8. data/lib/generators/ruby_llm/chat_ui/templates/controllers/messages_controller.rb.tt +8 -11
  9. data/lib/generators/ruby_llm/chat_ui/templates/controllers/models_controller.rb.tt +2 -2
  10. data/lib/generators/ruby_llm/chat_ui/templates/helpers/messages_helper.rb.tt +25 -0
  11. data/lib/generators/ruby_llm/chat_ui/templates/jobs/chat_response_job.rb.tt +2 -2
  12. data/lib/generators/ruby_llm/chat_ui/templates/tailwind/views/chats/_chat.html.erb.tt +16 -0
  13. data/lib/generators/ruby_llm/chat_ui/templates/tailwind/views/chats/_form.html.erb.tt +31 -0
  14. data/lib/generators/ruby_llm/chat_ui/templates/tailwind/views/chats/index.html.erb.tt +31 -0
  15. data/lib/generators/ruby_llm/chat_ui/templates/tailwind/views/chats/new.html.erb.tt +9 -0
  16. data/lib/generators/ruby_llm/chat_ui/templates/tailwind/views/chats/show.html.erb.tt +27 -0
  17. data/lib/generators/ruby_llm/chat_ui/templates/tailwind/views/messages/_assistant.html.erb.tt +14 -0
  18. data/lib/generators/ruby_llm/chat_ui/templates/tailwind/views/messages/_content.html.erb.tt +1 -0
  19. data/lib/generators/ruby_llm/chat_ui/templates/tailwind/views/messages/_error.html.erb.tt +13 -0
  20. data/lib/generators/ruby_llm/chat_ui/templates/tailwind/views/messages/_form.html.erb.tt +23 -0
  21. data/lib/generators/ruby_llm/chat_ui/templates/tailwind/views/messages/_system.html.erb.tt +10 -0
  22. data/lib/generators/ruby_llm/chat_ui/templates/tailwind/views/messages/_tool.html.erb.tt +2 -0
  23. data/lib/generators/ruby_llm/chat_ui/templates/tailwind/views/messages/_tool_calls.html.erb.tt +4 -0
  24. data/lib/generators/ruby_llm/chat_ui/templates/tailwind/views/messages/_user.html.erb.tt +14 -0
  25. data/lib/generators/ruby_llm/chat_ui/templates/tailwind/views/messages/tool_calls/_default.html.erb.tt +13 -0
  26. data/lib/generators/ruby_llm/chat_ui/templates/tailwind/views/messages/tool_results/_default.html.erb.tt +21 -0
  27. data/lib/generators/ruby_llm/chat_ui/templates/tailwind/views/models/_model.html.erb.tt +17 -0
  28. data/lib/generators/ruby_llm/chat_ui/templates/tailwind/views/models/index.html.erb.tt +40 -0
  29. data/lib/generators/ruby_llm/chat_ui/templates/tailwind/views/models/show.html.erb.tt +27 -0
  30. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/_chat.html.erb.tt +2 -2
  31. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/_form.html.erb.tt +2 -2
  32. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/index.html.erb.tt +19 -7
  33. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/new.html.erb.tt +1 -1
  34. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/show.html.erb.tt +5 -3
  35. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_assistant.html.erb.tt +9 -0
  36. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_content.html.erb.tt +1 -1
  37. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_error.html.erb.tt +8 -0
  38. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_form.html.erb.tt +1 -1
  39. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_system.html.erb.tt +6 -0
  40. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_tool.html.erb.tt +2 -0
  41. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_tool_calls.html.erb.tt +4 -7
  42. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_user.html.erb.tt +9 -0
  43. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/create.turbo_stream.erb.tt +5 -7
  44. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/tool_calls/_default.html.erb.tt +8 -0
  45. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/tool_results/_default.html.erb.tt +16 -0
  46. data/lib/generators/ruby_llm/chat_ui/templates/views/models/_model.html.erb.tt +11 -12
  47. data/lib/generators/ruby_llm/chat_ui/templates/views/models/index.html.erb.tt +27 -17
  48. data/lib/generators/ruby_llm/chat_ui/templates/views/models/show.html.erb.tt +3 -4
  49. data/lib/generators/ruby_llm/generator_helpers.rb +37 -17
  50. data/lib/generators/ruby_llm/install/install_generator.rb +22 -18
  51. data/lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt +1 -1
  52. data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +1 -1
  53. data/lib/generators/ruby_llm/install/templates/create_models_migration.rb.tt +4 -10
  54. data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +2 -2
  55. data/lib/generators/ruby_llm/install/templates/initializer.rb.tt +2 -2
  56. data/lib/generators/ruby_llm/schema/schema_generator.rb +26 -0
  57. data/lib/generators/ruby_llm/schema/templates/schema.rb.tt +2 -0
  58. data/lib/generators/ruby_llm/tool/templates/tool.rb.tt +9 -0
  59. data/lib/generators/ruby_llm/tool/templates/tool_call.html.erb.tt +13 -0
  60. data/lib/generators/ruby_llm/tool/templates/tool_result.html.erb.tt +13 -0
  61. data/lib/generators/ruby_llm/tool/tool_generator.rb +96 -0
  62. data/lib/generators/ruby_llm/upgrade_to_v1_10/upgrade_to_v1_10_generator.rb +1 -1
  63. data/lib/generators/ruby_llm/upgrade_to_v1_14/templates/add_v1_14_tool_call_columns.rb.tt +7 -0
  64. data/lib/generators/ruby_llm/upgrade_to_v1_14/upgrade_to_v1_14_generator.rb +49 -0
  65. data/lib/generators/ruby_llm/upgrade_to_v1_7/upgrade_to_v1_7_generator.rb +2 -4
  66. data/lib/generators/ruby_llm/upgrade_to_v1_9/upgrade_to_v1_9_generator.rb +1 -1
  67. data/lib/ruby_llm/active_record/acts_as.rb +10 -4
  68. data/lib/ruby_llm/active_record/acts_as_legacy.rb +87 -20
  69. data/lib/ruby_llm/active_record/chat_methods.rb +80 -22
  70. data/lib/ruby_llm/active_record/message_methods.rb +17 -0
  71. data/lib/ruby_llm/active_record/model_methods.rb +1 -1
  72. data/lib/ruby_llm/active_record/payload_helpers.rb +26 -0
  73. data/lib/ruby_llm/active_record/tool_call_methods.rb +15 -0
  74. data/lib/ruby_llm/agent.rb +50 -8
  75. data/lib/ruby_llm/aliases.json +60 -21
  76. data/lib/ruby_llm/attachment.rb +4 -1
  77. data/lib/ruby_llm/chat.rb +113 -12
  78. data/lib/ruby_llm/configuration.rb +65 -66
  79. data/lib/ruby_llm/connection.rb +11 -7
  80. data/lib/ruby_llm/content.rb +6 -2
  81. data/lib/ruby_llm/error.rb +37 -1
  82. data/lib/ruby_llm/message.rb +5 -3
  83. data/lib/ruby_llm/model/info.rb +15 -13
  84. data/lib/ruby_llm/models.json +12279 -13517
  85. data/lib/ruby_llm/models.rb +16 -6
  86. data/lib/ruby_llm/provider.rb +10 -1
  87. data/lib/ruby_llm/providers/anthropic/capabilities.rb +5 -119
  88. data/lib/ruby_llm/providers/anthropic/chat.rb +22 -5
  89. data/lib/ruby_llm/providers/anthropic/models.rb +3 -9
  90. data/lib/ruby_llm/providers/anthropic/tools.rb +20 -0
  91. data/lib/ruby_llm/providers/anthropic.rb +5 -1
  92. data/lib/ruby_llm/providers/azure/chat.rb +1 -1
  93. data/lib/ruby_llm/providers/azure/embeddings.rb +1 -1
  94. data/lib/ruby_llm/providers/azure/models.rb +1 -1
  95. data/lib/ruby_llm/providers/azure.rb +92 -0
  96. data/lib/ruby_llm/providers/bedrock/chat.rb +50 -5
  97. data/lib/ruby_llm/providers/bedrock/models.rb +17 -1
  98. data/lib/ruby_llm/providers/bedrock/streaming.rb +8 -4
  99. data/lib/ruby_llm/providers/bedrock.rb +9 -1
  100. data/lib/ruby_llm/providers/deepseek/capabilities.rb +4 -114
  101. data/lib/ruby_llm/providers/deepseek.rb +5 -1
  102. data/lib/ruby_llm/providers/gemini/capabilities.rb +45 -207
  103. data/lib/ruby_llm/providers/gemini/chat.rb +20 -4
  104. data/lib/ruby_llm/providers/gemini/images.rb +1 -1
  105. data/lib/ruby_llm/providers/gemini/models.rb +2 -4
  106. data/lib/ruby_llm/providers/gemini/streaming.rb +2 -1
  107. data/lib/ruby_llm/providers/gemini/tools.rb +19 -0
  108. data/lib/ruby_llm/providers/gemini.rb +4 -0
  109. data/lib/ruby_llm/providers/gpustack/capabilities.rb +20 -0
  110. data/lib/ruby_llm/providers/gpustack.rb +8 -0
  111. data/lib/ruby_llm/providers/mistral/capabilities.rb +8 -0
  112. data/lib/ruby_llm/providers/mistral/chat.rb +2 -1
  113. data/lib/ruby_llm/providers/mistral.rb +4 -0
  114. data/lib/ruby_llm/providers/ollama/capabilities.rb +20 -0
  115. data/lib/ruby_llm/providers/ollama.rb +11 -1
  116. data/lib/ruby_llm/providers/openai/capabilities.rb +95 -195
  117. data/lib/ruby_llm/providers/openai/chat.rb +15 -5
  118. data/lib/ruby_llm/providers/openai/media.rb +4 -1
  119. data/lib/ruby_llm/providers/openai/models.rb +2 -4
  120. data/lib/ruby_llm/providers/openai/temperature.rb +2 -2
  121. data/lib/ruby_llm/providers/openai/tools.rb +27 -2
  122. data/lib/ruby_llm/providers/openai.rb +10 -0
  123. data/lib/ruby_llm/providers/openrouter/chat.rb +19 -5
  124. data/lib/ruby_llm/providers/openrouter/images.rb +69 -0
  125. data/lib/ruby_llm/providers/openrouter.rb +35 -1
  126. data/lib/ruby_llm/providers/perplexity/capabilities.rb +34 -99
  127. data/lib/ruby_llm/providers/perplexity/models.rb +12 -14
  128. data/lib/ruby_llm/providers/perplexity.rb +4 -0
  129. data/lib/ruby_llm/providers/vertexai/models.rb +1 -1
  130. data/lib/ruby_llm/providers/vertexai.rb +18 -6
  131. data/lib/ruby_llm/providers/xai.rb +4 -0
  132. data/lib/ruby_llm/stream_accumulator.rb +10 -5
  133. data/lib/ruby_llm/streaming.rb +7 -7
  134. data/lib/ruby_llm/tool.rb +48 -3
  135. data/lib/ruby_llm/version.rb +1 -1
  136. data/lib/tasks/models.rake +33 -7
  137. data/lib/tasks/release.rake +1 -1
  138. data/lib/tasks/ruby_llm.rake +9 -1
  139. data/lib/tasks/vcr.rake +1 -1
  140. metadata +56 -15
  141. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_message.html.erb.tt +0 -13
@@ -7,7 +7,10 @@ module RubyLLM
7
7
  module Chat
8
8
  module_function
9
9
 
10
- def render_payload(messages, tools:, temperature:, model:, stream: false, schema: nil, thinking: nil) # rubocop:disable Metrics/ParameterLists
10
+ # rubocop:disable Metrics/ParameterLists,Metrics/PerceivedComplexity
11
+ def render_payload(messages, tools:, temperature:, model:, stream: false, schema: nil,
12
+ thinking: nil, tool_prefs: nil)
13
+ tool_prefs ||= {}
11
14
  payload = {
12
15
  model: model.id,
13
16
  messages: format_messages(messages),
@@ -15,15 +18,25 @@ module RubyLLM
15
18
  }
16
19
 
17
20
  payload[:temperature] = temperature unless temperature.nil?
18
- payload[:tools] = tools.map { |_, tool| OpenAI::Tools.tool_for(tool) } if tools.any?
21
+ if tools.any?
22
+ payload[:tools] = tools.map { |_, tool| OpenAI::Tools.tool_for(tool) }
23
+ payload[:tool_choice] = OpenAI::Tools.build_tool_choice(tool_prefs[:choice]) unless tool_prefs[:choice].nil?
24
+ payload[:parallel_tool_calls] = tool_prefs[:calls] == :many unless tool_prefs[:calls].nil?
25
+ end
19
26
 
20
27
  if schema
21
- strict = schema[:strict] != false
28
+ schema_name = schema[:name]
29
+ schema_def = RubyLLM::Utils.deep_dup(schema[:schema])
30
+ if schema_def.is_a?(Hash)
31
+ schema_def.delete(:strict)
32
+ schema_def.delete('strict')
33
+ end
34
+ strict = schema[:strict]
22
35
  payload[:response_format] = {
23
36
  type: 'json_schema',
24
37
  json_schema: {
25
- name: 'response',
26
- schema: schema,
38
+ name: schema_name,
39
+ schema: schema_def,
27
40
  strict: strict
28
41
  }
29
42
  }
@@ -35,6 +48,7 @@ module RubyLLM
35
48
  payload[:stream_options] = { include_usage: true } if stream
36
49
  payload
37
50
  end
51
+ # rubocop:enable Metrics/ParameterLists,Metrics/PerceivedComplexity
38
52
 
39
53
  def parse_completion_response(response)
40
54
  data = response.body
@@ -0,0 +1,69 @@
1
+ # frozen_string_literal: true
2
+
3
+ module RubyLLM
4
+ module Providers
5
+ class OpenRouter
6
+ # Image generation methods for the OpenRouter API integration.
7
+ # OpenRouter uses the chat completions endpoint for image generation
8
+ # instead of a dedicated images endpoint.
9
+ module Images
10
+ module_function
11
+
12
+ def images_url
13
+ 'chat/completions'
14
+ end
15
+
16
+ def render_image_payload(prompt, model:, size:)
17
+ RubyLLM.logger.debug { "Ignoring size #{size}. OpenRouter image generation does not support size parameter." }
18
+ {
19
+ model: model,
20
+ messages: [
21
+ {
22
+ role: 'user',
23
+ content: prompt
24
+ }
25
+ ],
26
+ modalities: %w[image text]
27
+ }
28
+ end
29
+
30
+ def parse_image_response(response, model:)
31
+ data = response.body
32
+ message = data.dig('choices', 0, 'message')
33
+
34
+ unless message&.key?('images') && message['images']&.any?
35
+ raise Error.new(nil, 'Unexpected response format from OpenRouter image generation API')
36
+ end
37
+
38
+ image_data = message['images'].first
39
+ image_url = image_data.dig('image_url', 'url') || image_data['url']
40
+
41
+ raise Error.new(nil, 'No image URL found in OpenRouter response') unless image_url
42
+
43
+ build_image_from_url(image_url, model)
44
+ end
45
+
46
+ def build_image_from_url(image_url, model)
47
+ if image_url.start_with?('data:')
48
+ # Parse data URL format: data:image/png;base64,<data>
49
+ match = image_url.match(/^data:([^;]+);base64,(.+)$/)
50
+ raise Error.new(nil, 'Invalid data URL format from OpenRouter') unless match
51
+
52
+ Image.new(
53
+ data: match[2],
54
+ mime_type: match[1],
55
+ model_id: model
56
+ )
57
+ else
58
+ # Regular URL
59
+ Image.new(
60
+ url: image_url,
61
+ mime_type: 'image/png',
62
+ model_id: model
63
+ )
64
+ end
65
+ end
66
+ end
67
+ end
68
+ end
69
+ end
@@ -7,9 +7,10 @@ module RubyLLM
7
7
  include OpenRouter::Chat
8
8
  include OpenRouter::Models
9
9
  include OpenRouter::Streaming
10
+ include OpenRouter::Images
10
11
 
11
12
  def api_base
12
- 'https://openrouter.ai/api/v1'
13
+ @config.openrouter_api_base || 'https://openrouter.ai/api/v1'
13
14
  end
14
15
 
15
16
  def headers
@@ -18,7 +19,40 @@ module RubyLLM
18
19
  }
19
20
  end
20
21
 
22
+ def parse_error(response)
23
+ return if response.body.empty?
24
+
25
+ body = try_parse_json(response.body)
26
+ case body
27
+ when Hash
28
+ parse_error_part_message body
29
+ when Array
30
+ body.map do |part|
31
+ parse_error_part_message part
32
+ end.join('. ')
33
+ else
34
+ body
35
+ end
36
+ end
37
+
38
+ private
39
+
40
+ def parse_error_part_message(part)
41
+ message = part.dig('error', 'message')
42
+ raw = try_parse_json(part.dig('error', 'metadata', 'raw'))
43
+ return message unless raw.is_a?(Hash)
44
+
45
+ raw_message = raw.dig('error', 'message')
46
+ return [message, raw_message].compact.join(' - ') if raw_message
47
+
48
+ message
49
+ end
50
+
21
51
  class << self
52
+ def configuration_options
53
+ %i[openrouter_api_key openrouter_api_base]
54
+ end
55
+
22
56
  def configuration_requirements
23
57
  %i[openrouter_api_key]
24
58
  end
@@ -3,63 +3,55 @@
3
3
  module RubyLLM
4
4
  module Providers
5
5
  class Perplexity
6
- # Determines capabilities and pricing for Perplexity models
6
+ # Provider-level capability checks and narrow registry fallbacks.
7
7
  module Capabilities
8
8
  module_function
9
9
 
10
- def context_window_for(model_id)
11
- case model_id
12
- when /sonar-pro/ then 200_000
13
- else 128_000
14
- end
15
- end
10
+ PRICES = {
11
+ sonar: { input: 1.0, output: 1.0 },
12
+ sonar_pro: { input: 3.0, output: 15.0 },
13
+ sonar_reasoning: { input: 1.0, output: 5.0 },
14
+ sonar_reasoning_pro: { input: 2.0, output: 8.0 },
15
+ sonar_deep_research: {
16
+ input: 2.0,
17
+ output: 8.0,
18
+ reasoning_output: 3.0
19
+ }
20
+ }.freeze
16
21
 
17
- def max_tokens_for(model_id)
18
- case model_id
19
- when /sonar-(?:pro|reasoning-pro)/ then 8_192
20
- else 4_096
21
- end
22
+ def supports_tool_choice?(_model_id)
23
+ false
22
24
  end
23
25
 
24
- def input_price_for(model_id)
25
- PRICES.dig(model_family(model_id), :input) || 1.0
26
+ def supports_tool_parallel_control?(_model_id)
27
+ false
26
28
  end
27
29
 
28
- def output_price_for(model_id)
29
- PRICES.dig(model_family(model_id), :output) || 1.0
30
+ def context_window_for(model_id)
31
+ model_id.match?(/sonar-pro/) ? 200_000 : 128_000
30
32
  end
31
33
 
32
- def supports_vision?(model_id)
33
- case model_id
34
- when /sonar-reasoning-pro/, /sonar-reasoning/, /sonar-pro/, /sonar/ then true
35
- else false
36
- end
34
+ def max_tokens_for(model_id)
35
+ model_id.match?(/sonar-(?:pro|reasoning-pro)/) ? 8_192 : 4_096
37
36
  end
38
37
 
39
- def supports_functions?(_model_id)
40
- false
38
+ def critical_capabilities_for(model_id)
39
+ capabilities = []
40
+ capabilities << 'vision' if model_id.match?(/sonar(?:-pro|-reasoning(?:-pro)?)?$/)
41
+ capabilities << 'reasoning' if model_id.match?(/reasoning|deep-research/)
42
+ capabilities
41
43
  end
42
44
 
43
- def supports_json_mode?(_model_id)
44
- true
45
- end
45
+ def pricing_for(model_id)
46
+ prices = PRICES.fetch(model_family(model_id), { input: 1.0, output: 1.0 })
46
47
 
47
- def format_display_name(model_id)
48
- case model_id
49
- when 'sonar' then 'Sonar'
50
- when 'sonar-pro' then 'Sonar Pro'
51
- when 'sonar-reasoning' then 'Sonar Reasoning'
52
- when 'sonar-reasoning-pro' then 'Sonar Reasoning Pro'
53
- when 'sonar-deep-research' then 'Sonar Deep Research'
54
- else
55
- model_id.split('-')
56
- .map(&:capitalize)
57
- .join(' ')
58
- end
59
- end
48
+ standard = {
49
+ input_per_million: prices[:input],
50
+ output_per_million: prices[:output]
51
+ }
52
+ standard[:reasoning_output_per_million] = prices[:reasoning_output] if prices[:reasoning_output]
60
53
 
61
- def model_type(_model_id)
62
- 'chat'
54
+ { text_tokens: { standard: standard } }
63
55
  end
64
56
 
65
57
  def model_family(model_id)
@@ -73,64 +65,7 @@ module RubyLLM
73
65
  end
74
66
  end
75
67
 
76
- def modalities_for(_model_id)
77
- {
78
- input: ['text'],
79
- output: ['text']
80
- }
81
- end
82
-
83
- def capabilities_for(model_id)
84
- capabilities = %w[streaming json_mode]
85
- capabilities << 'vision' if supports_vision?(model_id)
86
- capabilities
87
- end
88
-
89
- def pricing_for(model_id)
90
- family = model_family(model_id)
91
- prices = PRICES.fetch(family, { input: 1.0, output: 1.0 })
92
-
93
- standard_pricing = {
94
- input_per_million: prices[:input],
95
- output_per_million: prices[:output]
96
- }
97
-
98
- standard_pricing[:citation_per_million] = prices[:citation] if prices[:citation]
99
- standard_pricing[:reasoning_per_million] = prices[:reasoning] if prices[:reasoning]
100
- standard_pricing[:search_per_thousand] = prices[:search_queries] if prices[:search_queries]
101
-
102
- {
103
- text_tokens: {
104
- standard: standard_pricing
105
- }
106
- }
107
- end
108
-
109
- PRICES = {
110
- sonar: {
111
- input: 1.0,
112
- output: 1.0
113
- },
114
- sonar_pro: {
115
- input: 3.0,
116
- output: 15.0
117
- },
118
- sonar_reasoning: {
119
- input: 1.0,
120
- output: 5.0
121
- },
122
- sonar_reasoning_pro: {
123
- input: 2.0,
124
- output: 8.0
125
- },
126
- sonar_deep_research: {
127
- input: 2.0,
128
- output: 8.0,
129
- citation: 2.0,
130
- reasoning: 3.0,
131
- search_queries: 5.0
132
- }
133
- }.freeze
68
+ module_function :context_window_for, :max_tokens_for, :critical_capabilities_for, :pricing_for, :model_family
134
69
  end
135
70
  end
136
71
  end
@@ -5,33 +5,31 @@ module RubyLLM
5
5
  class Perplexity
6
6
  # Models methods of the Perplexity API integration
7
7
  module Models
8
+ MODEL_IDS = %w[
9
+ sonar
10
+ sonar-pro
11
+ sonar-reasoning
12
+ sonar-reasoning-pro
13
+ sonar-deep-research
14
+ ].freeze
15
+
8
16
  def list_models(**)
9
17
  slug = 'perplexity'
10
- capabilities = Perplexity::Capabilities
11
- parse_list_models_response(nil, slug, capabilities)
18
+ parse_list_models_response(nil, slug, Perplexity::Capabilities)
12
19
  end
13
20
 
14
21
  def parse_list_models_response(_response, slug, capabilities)
15
- [
16
- create_model_info('sonar', slug, capabilities),
17
- create_model_info('sonar-pro', slug, capabilities),
18
- create_model_info('sonar-reasoning', slug, capabilities),
19
- create_model_info('sonar-reasoning-pro', slug, capabilities),
20
- create_model_info('sonar-deep-research', slug, capabilities)
21
- ]
22
+ MODEL_IDS.map { |id| create_model_info(id, slug, capabilities) }
22
23
  end
23
24
 
24
25
  def create_model_info(id, slug, capabilities)
25
26
  Model::Info.new(
26
27
  id: id,
27
- name: capabilities.format_display_name(id),
28
+ name: id,
28
29
  provider: slug,
29
- family: capabilities.model_family(id).to_s,
30
- created_at: Time.now,
31
30
  context_window: capabilities.context_window_for(id),
32
31
  max_output_tokens: capabilities.max_tokens_for(id),
33
- modalities: capabilities.modalities_for(id),
34
- capabilities: capabilities.capabilities_for(id),
32
+ capabilities: capabilities.critical_capabilities_for(id),
35
33
  pricing: capabilities.pricing_for(id),
36
34
  metadata: {}
37
35
  )
@@ -23,6 +23,10 @@ module RubyLLM
23
23
  Perplexity::Capabilities
24
24
  end
25
25
 
26
+ def configuration_options
27
+ %i[perplexity_api_key]
28
+ end
29
+
26
30
  def configuration_requirements
27
31
  %i[perplexity_api_key]
28
32
  end
@@ -56,7 +56,7 @@ module RubyLLM
56
56
 
57
57
  all_models
58
58
  rescue StandardError => e
59
- RubyLLM.logger.debug "Error fetching Vertex AI models: #{e.message}"
59
+ RubyLLM.logger.debug { "Error fetching Vertex AI models: #{e.message}" }
60
60
  build_known_models
61
61
  end
62
62
 
@@ -10,6 +10,11 @@ module RubyLLM
10
10
  include VertexAI::Models
11
11
  include VertexAI::Transcription
12
12
 
13
+ SCOPES = [
14
+ 'https://www.googleapis.com/auth/cloud-platform',
15
+ 'https://www.googleapis.com/auth/generative-language.retriever'
16
+ ].freeze
17
+
13
18
  def initialize(config)
14
19
  super
15
20
  @authorizer = nil
@@ -35,6 +40,10 @@ module RubyLLM
35
40
  end
36
41
 
37
42
  class << self
43
+ def configuration_options
44
+ %i[vertexai_project_id vertexai_location vertexai_service_account_key]
45
+ end
46
+
38
47
  def configuration_requirements
39
48
  %i[vertexai_project_id vertexai_location]
40
49
  end
@@ -44,12 +53,15 @@ module RubyLLM
44
53
 
45
54
  def initialize_authorizer
46
55
  require 'googleauth'
47
- @authorizer = ::Google::Auth.get_application_default(
48
- scope: [
49
- 'https://www.googleapis.com/auth/cloud-platform',
50
- 'https://www.googleapis.com/auth/generative-language.retriever'
51
- ]
52
- )
56
+ @authorizer =
57
+ if @config.vertexai_service_account_key
58
+ ::Google::Auth::ServiceAccountCredentials.make_creds(
59
+ json_key_io: StringIO.new(@config.vertexai_service_account_key),
60
+ scope: SCOPES
61
+ )
62
+ else
63
+ ::Google::Auth.get_application_default(SCOPES)
64
+ end
53
65
  rescue LoadError
54
66
  raise Error,
55
67
  'The googleauth gem ~> 1.15 is required for Vertex AI. Please add it to your Gemfile: gem "googleauth"'
@@ -19,6 +19,10 @@ module RubyLLM
19
19
  end
20
20
 
21
21
  class << self
22
+ def configuration_options
23
+ %i[xai_api_key]
24
+ end
25
+
22
26
  def configuration_requirements
23
27
  %i[xai_api_key]
24
28
  end
@@ -21,13 +21,13 @@ module RubyLLM
21
21
  end
22
22
 
23
23
  def add(chunk)
24
- RubyLLM.logger.debug chunk.inspect if RubyLLM.config.log_stream_debug
24
+ RubyLLM.logger.debug { chunk.inspect } if RubyLLM.config.log_stream_debug
25
25
  @model_id ||= chunk.model_id
26
26
 
27
27
  handle_chunk_content(chunk)
28
28
  append_thinking_from_chunk(chunk)
29
29
  count_tokens chunk
30
- RubyLLM.logger.debug inspect if RubyLLM.config.log_stream_debug
30
+ RubyLLM.logger.debug { inspect } if RubyLLM.config.log_stream_debug
31
31
  end
32
32
 
33
33
  def to_message(response)
@@ -73,11 +73,14 @@ module RubyLLM
73
73
  end
74
74
 
75
75
  def accumulate_tool_calls(new_tool_calls) # rubocop:disable Metrics/PerceivedComplexity
76
- RubyLLM.logger.debug "Accumulating tool calls: #{new_tool_calls}" if RubyLLM.config.log_stream_debug
76
+ RubyLLM.logger.debug { "Accumulating tool calls: #{new_tool_calls}" } if RubyLLM.config.log_stream_debug
77
77
  new_tool_calls.each_value do |tool_call|
78
78
  if tool_call.id
79
79
  tool_call_id = tool_call.id.empty? ? SecureRandom.uuid : tool_call.id
80
- tool_call_arguments = tool_call.arguments.empty? ? +'' : tool_call.arguments
80
+ tool_call_arguments = tool_call.arguments
81
+ if tool_call_arguments.nil? || (tool_call_arguments.respond_to?(:empty?) && tool_call_arguments.empty?)
82
+ tool_call_arguments = +''
83
+ end
81
84
  @tool_calls[tool_call.id] = ToolCall.new(
82
85
  id: tool_call_id,
83
86
  name: tool_call.name,
@@ -88,7 +91,9 @@ module RubyLLM
88
91
  else
89
92
  existing = @tool_calls[@latest_tool_call_id]
90
93
  if existing
91
- existing.arguments << tool_call.arguments
94
+ fragment = tool_call.arguments
95
+ fragment = '' if fragment.nil?
96
+ existing.arguments << fragment
92
97
  if tool_call.thought_signature && existing.thought_signature.nil?
93
98
  existing.thought_signature = tool_call.thought_signature
94
99
  end
@@ -24,13 +24,13 @@ module RubyLLM
24
24
  end
25
25
 
26
26
  message = accumulator.to_message(response)
27
- RubyLLM.logger.debug "Stream completed: #{message.content}"
27
+ RubyLLM.logger.debug { "Stream completed: #{message.content}" }
28
28
  message
29
29
  end
30
30
 
31
31
  def handle_stream(&block)
32
32
  build_on_data_handler do |data|
33
- block.call(build_chunk(data)) if data
33
+ block.call(build_chunk(data)) if data.is_a?(Hash)
34
34
  end
35
35
  end
36
36
 
@@ -52,7 +52,7 @@ module RubyLLM
52
52
  end
53
53
 
54
54
  def process_stream_chunk(chunk, parser, env, &)
55
- RubyLLM.logger.debug "Received chunk: #{chunk}" if RubyLLM.config.log_stream_debug
55
+ RubyLLM.logger.debug { "Received chunk: #{chunk}" } if RubyLLM.config.log_stream_debug
56
56
 
57
57
  if error_chunk?(chunk)
58
58
  handle_error_chunk(chunk, env)
@@ -85,7 +85,7 @@ module RubyLLM
85
85
  error_data = JSON.parse(buffer)
86
86
  handle_parsed_error(error_data, env)
87
87
  rescue JSON::ParserError
88
- RubyLLM.logger.debug "Accumulating error chunk: #{chunk}"
88
+ RubyLLM.logger.debug { "Accumulating error chunk: #{chunk}" }
89
89
  end
90
90
 
91
91
  def handle_sse(chunk, parser, env, &block)
@@ -105,7 +105,7 @@ module RubyLLM
105
105
 
106
106
  handle_parsed_error(parsed, env)
107
107
  rescue JSON::ParserError => e
108
- RubyLLM.logger.debug "Failed to parse data chunk: #{e.message}"
108
+ RubyLLM.logger.debug { "Failed to parse data chunk: #{e.message}" }
109
109
  end
110
110
 
111
111
  def handle_error_event(data, env)
@@ -116,7 +116,7 @@ module RubyLLM
116
116
  error_data = JSON.parse(data)
117
117
  [500, error_data['message'] || 'Unknown streaming error']
118
118
  rescue JSON::ParserError => e
119
- RubyLLM.logger.debug "Failed to parse streaming error: #{e.message}"
119
+ RubyLLM.logger.debug { "Failed to parse streaming error: #{e.message}" }
120
120
  [500, "Failed to parse error: #{data}"]
121
121
  end
122
122
 
@@ -130,7 +130,7 @@ module RubyLLM
130
130
  parsed_data = JSON.parse(data)
131
131
  handle_parsed_error(parsed_data, env)
132
132
  rescue JSON::ParserError => e
133
- RubyLLM.logger.debug "#{error_message}: #{e.message}"
133
+ RubyLLM.logger.debug { "#{error_message}: #{e.message}" }
134
134
  end
135
135
 
136
136
  def build_stream_error_response(parsed_data, env, status)
data/lib/ruby_llm/tool.rb CHANGED
@@ -99,9 +99,13 @@ module RubyLLM
99
99
  end
100
100
 
101
101
  def call(args)
102
- RubyLLM.logger.debug "Tool #{name} called with: #{args.inspect}"
103
- result = execute(**args.transform_keys(&:to_sym))
104
- RubyLLM.logger.debug "Tool #{name} returned: #{result.inspect}"
102
+ normalized_args = normalize_args(args)
103
+ validation_error = validate_keyword_arguments(normalized_args)
104
+ return { error: "Invalid tool arguments: #{validation_error}" } if validation_error
105
+
106
+ RubyLLM.logger.debug { "Tool #{name} called with: #{normalized_args.inspect}" }
107
+ result = execute(**normalized_args)
108
+ RubyLLM.logger.debug { "Tool #{name} returned: #{result.inspect}" }
105
109
  result
106
110
  end
107
111
 
@@ -115,6 +119,47 @@ module RubyLLM
115
119
  Halt.new(message)
116
120
  end
117
121
 
122
+ def normalize_args(args)
123
+ return {} if args.nil?
124
+ return args.transform_keys(&:to_sym) if args.respond_to?(:transform_keys)
125
+
126
+ {}
127
+ end
128
+
129
+ def validate_keyword_arguments(arguments)
130
+ required_keywords, optional_keywords, accepts_extra_keywords = execute_keyword_signature
131
+
132
+ return nil if required_keywords.empty? && optional_keywords.empty?
133
+
134
+ argument_keys = arguments.keys
135
+ missing_keyword = first_missing_keyword(required_keywords, argument_keys)
136
+ return "missing keyword: #{missing_keyword}" if missing_keyword
137
+ return nil if accepts_extra_keywords
138
+
139
+ allowed_keywords = required_keywords + optional_keywords
140
+ unknown_keyword = first_unknown_keyword(argument_keys, allowed_keywords)
141
+ return "unknown keyword: #{unknown_keyword}" if unknown_keyword
142
+
143
+ nil
144
+ end
145
+
146
+ def execute_keyword_signature
147
+ keyword_signature = method(:execute).parameters
148
+ required_keywords = keyword_signature.filter_map { |kind, name| name if kind == :keyreq }
149
+ optional_keywords = keyword_signature.filter_map { |kind, name| name if kind == :key }
150
+ accepts_extra_keywords = keyword_signature.any? { |kind, _| kind == :keyrest }
151
+
152
+ [required_keywords, optional_keywords, accepts_extra_keywords]
153
+ end
154
+
155
+ def first_missing_keyword(required_keywords, argument_keys)
156
+ (required_keywords - argument_keys).first
157
+ end
158
+
159
+ def first_unknown_keyword(argument_keys, allowed_keywords)
160
+ (argument_keys - allowed_keywords).first
161
+ end
162
+
118
163
  # Wraps schema handling for tool parameters, supporting JSON Schema hashes,
119
164
  # RubyLLM::Schema instances/classes, and DSL blocks.
120
165
  class SchemaDefinition
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module RubyLLM
4
- VERSION = '1.12.0'
4
+ VERSION = '1.14.1'
5
5
  end