ruby_llm_swarm 1.9.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (154)
  1. checksums.yaml +7 -0
  2. data/LICENSE +21 -0
  3. data/README.md +175 -0
  4. data/lib/generators/ruby_llm/chat_ui/chat_ui_generator.rb +187 -0
  5. data/lib/generators/ruby_llm/chat_ui/templates/controllers/chats_controller.rb.tt +39 -0
  6. data/lib/generators/ruby_llm/chat_ui/templates/controllers/messages_controller.rb.tt +24 -0
  7. data/lib/generators/ruby_llm/chat_ui/templates/controllers/models_controller.rb.tt +14 -0
  8. data/lib/generators/ruby_llm/chat_ui/templates/jobs/chat_response_job.rb.tt +12 -0
  9. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/_chat.html.erb.tt +16 -0
  10. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/_form.html.erb.tt +29 -0
  11. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/index.html.erb.tt +16 -0
  12. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/new.html.erb.tt +11 -0
  13. data/lib/generators/ruby_llm/chat_ui/templates/views/chats/show.html.erb.tt +23 -0
  14. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_content.html.erb.tt +1 -0
  15. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_form.html.erb.tt +21 -0
  16. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_message.html.erb.tt +13 -0
  17. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/_tool_calls.html.erb.tt +7 -0
  18. data/lib/generators/ruby_llm/chat_ui/templates/views/messages/create.turbo_stream.erb.tt +9 -0
  19. data/lib/generators/ruby_llm/chat_ui/templates/views/models/_model.html.erb.tt +16 -0
  20. data/lib/generators/ruby_llm/chat_ui/templates/views/models/index.html.erb.tt +28 -0
  21. data/lib/generators/ruby_llm/chat_ui/templates/views/models/show.html.erb.tt +18 -0
  22. data/lib/generators/ruby_llm/generator_helpers.rb +194 -0
  23. data/lib/generators/ruby_llm/install/install_generator.rb +106 -0
  24. data/lib/generators/ruby_llm/install/templates/add_references_to_chats_tool_calls_and_messages_migration.rb.tt +9 -0
  25. data/lib/generators/ruby_llm/install/templates/chat_model.rb.tt +3 -0
  26. data/lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt +7 -0
  27. data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +16 -0
  28. data/lib/generators/ruby_llm/install/templates/create_models_migration.rb.tt +45 -0
  29. data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +20 -0
  30. data/lib/generators/ruby_llm/install/templates/initializer.rb.tt +12 -0
  31. data/lib/generators/ruby_llm/install/templates/message_model.rb.tt +4 -0
  32. data/lib/generators/ruby_llm/install/templates/model_model.rb.tt +3 -0
  33. data/lib/generators/ruby_llm/install/templates/tool_call_model.rb.tt +3 -0
  34. data/lib/generators/ruby_llm/upgrade_to_v1_7/templates/migration.rb.tt +145 -0
  35. data/lib/generators/ruby_llm/upgrade_to_v1_7/upgrade_to_v1_7_generator.rb +124 -0
  36. data/lib/generators/ruby_llm/upgrade_to_v1_9/templates/add_v1_9_message_columns.rb.tt +15 -0
  37. data/lib/generators/ruby_llm/upgrade_to_v1_9/upgrade_to_v1_9_generator.rb +49 -0
  38. data/lib/ruby_llm/active_record/acts_as.rb +174 -0
  39. data/lib/ruby_llm/active_record/acts_as_legacy.rb +384 -0
  40. data/lib/ruby_llm/active_record/chat_methods.rb +350 -0
  41. data/lib/ruby_llm/active_record/message_methods.rb +81 -0
  42. data/lib/ruby_llm/active_record/model_methods.rb +84 -0
  43. data/lib/ruby_llm/aliases.json +295 -0
  44. data/lib/ruby_llm/aliases.rb +38 -0
  45. data/lib/ruby_llm/attachment.rb +220 -0
  46. data/lib/ruby_llm/chat.rb +816 -0
  47. data/lib/ruby_llm/chunk.rb +6 -0
  48. data/lib/ruby_llm/configuration.rb +78 -0
  49. data/lib/ruby_llm/connection.rb +126 -0
  50. data/lib/ruby_llm/content.rb +73 -0
  51. data/lib/ruby_llm/context.rb +29 -0
  52. data/lib/ruby_llm/embedding.rb +29 -0
  53. data/lib/ruby_llm/error.rb +84 -0
  54. data/lib/ruby_llm/image.rb +49 -0
  55. data/lib/ruby_llm/message.rb +86 -0
  56. data/lib/ruby_llm/mime_type.rb +71 -0
  57. data/lib/ruby_llm/model/info.rb +111 -0
  58. data/lib/ruby_llm/model/modalities.rb +22 -0
  59. data/lib/ruby_llm/model/pricing.rb +48 -0
  60. data/lib/ruby_llm/model/pricing_category.rb +46 -0
  61. data/lib/ruby_llm/model/pricing_tier.rb +33 -0
  62. data/lib/ruby_llm/model.rb +7 -0
  63. data/lib/ruby_llm/models.json +33198 -0
  64. data/lib/ruby_llm/models.rb +231 -0
  65. data/lib/ruby_llm/models_schema.json +168 -0
  66. data/lib/ruby_llm/moderation.rb +56 -0
  67. data/lib/ruby_llm/provider.rb +243 -0
  68. data/lib/ruby_llm/providers/anthropic/capabilities.rb +134 -0
  69. data/lib/ruby_llm/providers/anthropic/chat.rb +125 -0
  70. data/lib/ruby_llm/providers/anthropic/content.rb +44 -0
  71. data/lib/ruby_llm/providers/anthropic/embeddings.rb +20 -0
  72. data/lib/ruby_llm/providers/anthropic/media.rb +92 -0
  73. data/lib/ruby_llm/providers/anthropic/models.rb +63 -0
  74. data/lib/ruby_llm/providers/anthropic/streaming.rb +45 -0
  75. data/lib/ruby_llm/providers/anthropic/tools.rb +109 -0
  76. data/lib/ruby_llm/providers/anthropic.rb +36 -0
  77. data/lib/ruby_llm/providers/bedrock/capabilities.rb +167 -0
  78. data/lib/ruby_llm/providers/bedrock/chat.rb +63 -0
  79. data/lib/ruby_llm/providers/bedrock/media.rb +61 -0
  80. data/lib/ruby_llm/providers/bedrock/models.rb +98 -0
  81. data/lib/ruby_llm/providers/bedrock/signing.rb +831 -0
  82. data/lib/ruby_llm/providers/bedrock/streaming/base.rb +51 -0
  83. data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +71 -0
  84. data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb +67 -0
  85. data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +80 -0
  86. data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb +78 -0
  87. data/lib/ruby_llm/providers/bedrock/streaming.rb +18 -0
  88. data/lib/ruby_llm/providers/bedrock.rb +82 -0
  89. data/lib/ruby_llm/providers/deepseek/capabilities.rb +130 -0
  90. data/lib/ruby_llm/providers/deepseek/chat.rb +16 -0
  91. data/lib/ruby_llm/providers/deepseek.rb +30 -0
  92. data/lib/ruby_llm/providers/gemini/capabilities.rb +281 -0
  93. data/lib/ruby_llm/providers/gemini/chat.rb +454 -0
  94. data/lib/ruby_llm/providers/gemini/embeddings.rb +37 -0
  95. data/lib/ruby_llm/providers/gemini/images.rb +47 -0
  96. data/lib/ruby_llm/providers/gemini/media.rb +112 -0
  97. data/lib/ruby_llm/providers/gemini/models.rb +40 -0
  98. data/lib/ruby_llm/providers/gemini/streaming.rb +61 -0
  99. data/lib/ruby_llm/providers/gemini/tools.rb +198 -0
  100. data/lib/ruby_llm/providers/gemini/transcription.rb +116 -0
  101. data/lib/ruby_llm/providers/gemini.rb +37 -0
  102. data/lib/ruby_llm/providers/gpustack/chat.rb +27 -0
  103. data/lib/ruby_llm/providers/gpustack/media.rb +46 -0
  104. data/lib/ruby_llm/providers/gpustack/models.rb +90 -0
  105. data/lib/ruby_llm/providers/gpustack.rb +34 -0
  106. data/lib/ruby_llm/providers/mistral/capabilities.rb +155 -0
  107. data/lib/ruby_llm/providers/mistral/chat.rb +24 -0
  108. data/lib/ruby_llm/providers/mistral/embeddings.rb +33 -0
  109. data/lib/ruby_llm/providers/mistral/models.rb +48 -0
  110. data/lib/ruby_llm/providers/mistral.rb +32 -0
  111. data/lib/ruby_llm/providers/ollama/chat.rb +27 -0
  112. data/lib/ruby_llm/providers/ollama/media.rb +46 -0
  113. data/lib/ruby_llm/providers/ollama/models.rb +36 -0
  114. data/lib/ruby_llm/providers/ollama.rb +30 -0
  115. data/lib/ruby_llm/providers/openai/capabilities.rb +299 -0
  116. data/lib/ruby_llm/providers/openai/chat.rb +88 -0
  117. data/lib/ruby_llm/providers/openai/embeddings.rb +33 -0
  118. data/lib/ruby_llm/providers/openai/images.rb +38 -0
  119. data/lib/ruby_llm/providers/openai/media.rb +81 -0
  120. data/lib/ruby_llm/providers/openai/models.rb +39 -0
  121. data/lib/ruby_llm/providers/openai/moderation.rb +34 -0
  122. data/lib/ruby_llm/providers/openai/streaming.rb +46 -0
  123. data/lib/ruby_llm/providers/openai/tools.rb +98 -0
  124. data/lib/ruby_llm/providers/openai/transcription.rb +70 -0
  125. data/lib/ruby_llm/providers/openai.rb +44 -0
  126. data/lib/ruby_llm/providers/openai_responses.rb +395 -0
  127. data/lib/ruby_llm/providers/openrouter/models.rb +73 -0
  128. data/lib/ruby_llm/providers/openrouter.rb +26 -0
  129. data/lib/ruby_llm/providers/perplexity/capabilities.rb +137 -0
  130. data/lib/ruby_llm/providers/perplexity/chat.rb +16 -0
  131. data/lib/ruby_llm/providers/perplexity/models.rb +42 -0
  132. data/lib/ruby_llm/providers/perplexity.rb +48 -0
  133. data/lib/ruby_llm/providers/vertexai/chat.rb +14 -0
  134. data/lib/ruby_llm/providers/vertexai/embeddings.rb +32 -0
  135. data/lib/ruby_llm/providers/vertexai/models.rb +130 -0
  136. data/lib/ruby_llm/providers/vertexai/streaming.rb +14 -0
  137. data/lib/ruby_llm/providers/vertexai/transcription.rb +16 -0
  138. data/lib/ruby_llm/providers/vertexai.rb +55 -0
  139. data/lib/ruby_llm/railtie.rb +35 -0
  140. data/lib/ruby_llm/responses_session.rb +77 -0
  141. data/lib/ruby_llm/stream_accumulator.rb +101 -0
  142. data/lib/ruby_llm/streaming.rb +153 -0
  143. data/lib/ruby_llm/tool.rb +209 -0
  144. data/lib/ruby_llm/tool_call.rb +22 -0
  145. data/lib/ruby_llm/tool_executors.rb +125 -0
  146. data/lib/ruby_llm/transcription.rb +35 -0
  147. data/lib/ruby_llm/utils.rb +91 -0
  148. data/lib/ruby_llm/version.rb +5 -0
  149. data/lib/ruby_llm.rb +140 -0
  150. data/lib/tasks/models.rake +525 -0
  151. data/lib/tasks/release.rake +67 -0
  152. data/lib/tasks/ruby_llm.rake +15 -0
  153. data/lib/tasks/vcr.rake +92 -0
  154. metadata +346 -0
@@ -0,0 +1,101 @@
1
# frozen_string_literal: true

module RubyLLM
  # Assembles streaming responses from LLMs into complete messages.
  #
  # Chunks are fed in via #add: text content is concatenated, tool-call
  # fragments are merged by id, and token counts are tracked. Call
  # #to_message once the stream ends to obtain a single assistant Message.
  class StreamAccumulator
    attr_reader :content, :model_id, :tool_calls

    def initialize
      @content = +''
      @tool_calls = {}
      @input_tokens = nil
      @output_tokens = nil
      @cached_tokens = nil
      @cache_creation_tokens = nil
      @latest_tool_call_id = nil
    end

    # Folds one streaming chunk into the accumulator.
    def add(chunk)
      RubyLLM.logger.debug chunk.inspect if RubyLLM.config.log_stream_debug
      @model_id ||= chunk.model_id

      if chunk.tool_call?
        accumulate_tool_calls chunk.tool_calls
      else
        @content << (chunk.content || '')
      end

      count_tokens chunk
      RubyLLM.logger.debug inspect if RubyLLM.config.log_stream_debug
    end

    # Builds the final assistant Message; `response` is attached as the raw
    # provider response. Empty accumulated text becomes a nil content.
    def to_message(response)
      Message.new(
        role: :assistant,
        content: content.empty? ? nil : content,
        model_id: model_id,
        tool_calls: tool_calls_from_stream,
        input_tokens: @input_tokens,
        output_tokens: @output_tokens,
        cached_tokens: @cached_tokens,
        cache_creation_tokens: @cache_creation_tokens,
        raw: response
      )
    end

    private

    # Converts accumulated tool calls into final ToolCall objects, parsing
    # any JSON argument strings collected during streaming.
    def tool_calls_from_stream
      tool_calls.transform_values do |tc|
        arguments = if tc.arguments.is_a?(String) && !tc.arguments.empty?
                      JSON.parse(tc.arguments)
                    elsif tc.arguments.is_a?(String)
                      {} # empty argument string means "no arguments"
                    else
                      tc.arguments
                    end

        ToolCall.new(
          id: tc.id,
          name: tc.name,
          arguments: arguments
        )
      end
    end

    # A chunk carrying an id starts a new tool call; chunks without an id
    # are argument continuations of the most recently started call.
    def accumulate_tool_calls(new_tool_calls)
      RubyLLM.logger.debug "Accumulating tool calls: #{new_tool_calls}" if RubyLLM.config.log_stream_debug
      new_tool_calls.each_value do |tool_call|
        if tool_call.id
          tool_call_id = tool_call.id.empty? ? SecureRandom.uuid : tool_call.id
          tool_call_arguments = tool_call.arguments.empty? ? +'' : tool_call.arguments
          @tool_calls[tool_call.id] = ToolCall.new(
            id: tool_call_id,
            name: tool_call.name,
            arguments: tool_call_arguments
          )
          @latest_tool_call_id = tool_call.id
        else
          existing = @tool_calls[@latest_tool_call_id]
          existing.arguments << tool_call.arguments if existing
        end
      end
    end

    # Looks up a tool call by id, falling back to the most recent one when
    # the chunk carries no id.
    def find_tool_call(tool_call_id)
      if tool_call_id.nil?
        # Fix: previously read @latest_tool_call, an ivar that is never
        # assigned (accumulate_tool_calls tracks @latest_tool_call_id), so
        # this branch always returned nil.
        @tool_calls[@latest_tool_call_id]
      else
        @latest_tool_call_id = tool_call_id
        @tool_calls[tool_call_id]
      end
    end

    # Token counts may arrive on any chunk; keep the latest non-nil values.
    def count_tokens(chunk)
      @input_tokens = chunk.input_tokens if chunk.input_tokens
      @output_tokens = chunk.output_tokens if chunk.output_tokens
      @cached_tokens = chunk.cached_tokens if chunk.cached_tokens
      @cache_creation_tokens = chunk.cache_creation_tokens if chunk.cache_creation_tokens
    end
  end
end
@@ -0,0 +1,153 @@
1
# frozen_string_literal: true

module RubyLLM
  # Handles streaming responses from AI providers.
  #
  # Designed to be mixed into provider modules: `stream_url`, `build_chunk`
  # and `ErrorMiddleware` are expected to come from the including provider's
  # scope. Supports both Faraday 1.x and 2.x on_data callback styles.
  module Streaming
    module_function

    # POSTs the payload to the provider's stream URL; every parsed chunk is
    # added to a StreamAccumulator and yielded to the caller's block.
    # Returns the fully accumulated Message.
    def stream_response(connection, payload, additional_headers = {}, &block)
      accumulator = StreamAccumulator.new

      response = connection.post stream_url, payload do |req|
        # NOTE(review): merge order means headers already on the request win
        # over additional_headers — confirm that precedence is intended.
        req.headers = additional_headers.merge(req.headers) unless additional_headers.empty?
        if faraday_1?
          # Faraday 1.x exposes on_data via the options hash.
          req.options[:on_data] = handle_stream do |chunk|
            accumulator.add chunk
            block.call chunk
          end
        else
          # Faraday 2.x exposes on_data as an options attribute.
          req.options.on_data = handle_stream do |chunk|
            accumulator.add chunk
            block.call chunk
          end
        end
      end

      message = accumulator.to_message(response)
      RubyLLM.logger.debug "Stream completed: #{message.content}"
      message
    end

    # Wraps the SSE JSON stream: each parsed data payload is converted into
    # a provider-specific chunk (build_chunk) and passed to the block.
    def handle_stream(&block)
      to_json_stream do |data|
        block.call(build_chunk(data)) if data
      end
    end

    private

    def faraday_1?
      Faraday::VERSION.start_with?('1')
    end

    # Returns a proc suitable for Faraday's on_data hook that parses the
    # SSE byte stream into JSON payloads and yields them.
    def to_json_stream(&)
      buffer = +''
      parser = EventStreamParser::Parser.new

      create_stream_processor(parser, buffer, &)
    end

    def create_stream_processor(parser, buffer, &)
      if faraday_1?
        legacy_stream_processor(parser, &)
      else
        stream_processor(parser, buffer, &)
      end
    end

    # Routes one raw network chunk: explicit SSE error chunks raise via
    # ErrorMiddleware; everything else goes through the SSE parser.
    def process_stream_chunk(chunk, parser, env, &)
      RubyLLM.logger.debug "Received chunk: #{chunk}" if RubyLLM.config.log_stream_debug

      if error_chunk?(chunk)
        handle_error_chunk(chunk, env)
      else
        yield handle_sse(chunk, parser, env, &)
      end
    end

    # Faraday 1.x yields (chunk, size) — no response env mid-stream.
    def legacy_stream_processor(parser, &block)
      proc do |chunk, _size|
        process_stream_chunk(chunk, parser, nil, &block)
      end
    end

    # Faraday 2.x yields (chunk, bytes, env); non-200 bodies are buffered
    # until they form parseable JSON, then surfaced as provider errors.
    def stream_processor(parser, buffer, &block)
      proc do |chunk, _bytes, env|
        if env&.status == 200
          process_stream_chunk(chunk, parser, env, &block)
        else
          handle_failed_response(chunk, buffer, env)
        end
      end
    end

    def error_chunk?(chunk)
      chunk.start_with?('event: error')
    end

    # Parses an SSE error event that arrived as a raw chunk. Assumes the
    # second line is "data: {...}" — a malformed chunk lands in the
    # JSON::ParserError rescue (or a NoMethodError if line 2 is missing;
    # NOTE(review): confirm providers always send two lines here).
    def handle_error_chunk(chunk, env)
      error_data = chunk.split("\n")[1].delete_prefix('data: ')
      status, _message = parse_streaming_error(error_data)
      parsed_data = JSON.parse(error_data)

      # Faraday 1.x has no env here, so fake a minimal response object.
      error_response = if faraday_1?
                         Struct.new(:body, :status).new(parsed_data, status)
                       else
                         env.merge(body: parsed_data, status: status)
                       end

      ErrorMiddleware.parse_error(provider: self, response: error_response)
    rescue JSON::ParserError => e
      RubyLLM.logger.debug "Failed to parse error chunk: #{e.message}"
    end

    # Accumulates body chunks of a non-200 response; once the buffer parses
    # as JSON, raises through ErrorMiddleware. Until then, each partial
    # chunk is logged and swallowed.
    def handle_failed_response(chunk, buffer, env)
      buffer << chunk
      error_data = JSON.parse(buffer)
      error_response = env.merge(body: error_data)
      ErrorMiddleware.parse_error(provider: self, response: error_response)
    rescue JSON::ParserError
      RubyLLM.logger.debug "Accumulating error chunk: #{chunk}"
    end

    # Feeds a chunk to the SSE parser: error events raise, data events are
    # parsed and yielded, and the '[DONE]' sentinel is skipped.
    def handle_sse(chunk, parser, env, &block)
      parser.feed(chunk) do |type, data|
        case type.to_sym
        when :error
          handle_error_event(data, env)
        else
          yield handle_data(data, &block) unless data == '[DONE]'
        end
      end
    end

    # Returns the parsed JSON payload, or nil (logged) on parse failure.
    def handle_data(data)
      JSON.parse(data)
    rescue JSON::ParserError => e
      RubyLLM.logger.debug "Failed to parse data chunk: #{e.message}"
    end

    # Same as handle_error_chunk but for errors delivered as parsed SSE
    # events rather than raw chunks.
    def handle_error_event(data, env)
      status, _message = parse_streaming_error(data)
      parsed_data = JSON.parse(data)

      error_response = if faraday_1?
                         Struct.new(:body, :status).new(parsed_data, status)
                       else
                         env.merge(body: parsed_data, status: status)
                       end

      ErrorMiddleware.parse_error(provider: self, response: error_response)
    rescue JSON::ParserError => e
      RubyLLM.logger.debug "Failed to parse error event: #{e.message}"
    end

    # Extracts a [status, message] pair from an error payload; falls back
    # to 500 with a descriptive message when the payload isn't JSON.
    def parse_streaming_error(data)
      error_data = JSON.parse(data)
      [500, error_data['message'] || 'Unknown streaming error']
    rescue JSON::ParserError => e
      RubyLLM.logger.debug "Failed to parse streaming error: #{e.message}"
      [500, "Failed to parse error: #{data}"]
    end
  end
end
@@ -0,0 +1,209 @@
1
# frozen_string_literal: true

require 'ruby_llm/schema'

module RubyLLM
  # Parameter definition for Tool methods.
  class Parameter
    attr_reader :name, :type, :description, :required

    # @param name [Symbol] parameter name
    # @param type [String] loose JSON-Schema type name (default 'string')
    # @param desc [String, nil] human-readable description shown to the model
    # @param required [Boolean] whether the model must supply this parameter
    def initialize(name, type: 'string', desc: nil, required: true)
      @name = name
      @type = type
      @description = desc
      @required = required
    end
  end

  # Base class for creating tools that AI models can use
  class Tool
    # Stops conversation continuation after tool execution
    class Halt
      attr_reader :content

      def initialize(content)
        @content = content
      end

      def to_s
        @content.to_s
      end
    end

    class << self
      attr_reader :params_schema_definition

      # DSL: sets the tool description when given text, reads it otherwise.
      def description(text = nil)
        return @description unless text

        @description = text
      end

      # DSL: declares a single parameter (see Parameter for options).
      def param(name, **options)
        parameters[name] = Parameter.new(name, **options)
      end

      def parameters
        @parameters ||= {}
      end

      # DSL: declares the full parameter schema at once — a JSON Schema
      # hash, a RubyLLM::Schema object/class, or a schema DSL block.
      # Takes precedence over individual `param` declarations.
      def params(schema = nil, &block)
        @params_schema_definition = SchemaDefinition.new(schema:, block:)
        self
      end

      # DSL: provider-specific options attached to the tool definition.
      def with_params(**params)
        @provider_params = params
        self
      end

      def provider_params
        @provider_params ||= {}
      end
    end

    # Derives the tool name from the class name: Unicode-folded to ASCII,
    # CamelCase split into snake_case, downcased, with a trailing '_tool'
    # suffix removed.
    # NOTE(review): '::' namespace separators are substituted ('-') rather
    # than stripped, so namespaced classes keep a mangled module prefix —
    # confirm this is the intended naming for namespaced tools.
    def name
      klass_name = self.class.name
      normalized = klass_name.to_s.dup.force_encoding('UTF-8').unicode_normalize(:nfkd)
      normalized.encode('ASCII', replace: '')
                .gsub(/[^a-zA-Z0-9_-]/, '-')
                .gsub(/([A-Z]+)([A-Z][a-z])/, '\1_\2')
                .gsub(/([a-z\d])([A-Z])/, '\1_\2')
                .downcase
                .delete_suffix('_tool')
    end

    def description
      self.class.description
    end

    def parameters
      self.class.parameters
    end

    def provider_params
      self.class.provider_params
    end

    # Resolved JSON schema for this tool's parameters. Memoized with
    # defined?() so a legitimately-nil result is also cached. A `params`
    # declaration wins over individual `param` declarations.
    def params_schema
      return @params_schema if defined?(@params_schema)

      @params_schema = begin
        definition = self.class.params_schema_definition
        if definition&.present?
          definition.json_schema
        elsif parameters.any?
          SchemaDefinition.from_parameters(parameters)&.json_schema
        end
      end
    end

    # Framework entry point: symbolizes argument keys and dispatches to the
    # subclass's #execute implementation, logging both directions.
    def call(args)
      RubyLLM.logger.debug "Tool #{name} called with: #{args.inspect}"
      result = execute(**args.transform_keys(&:to_sym))
      RubyLLM.logger.debug "Tool #{name} returned: #{result.inspect}"
      result
    end

    def execute(...)
      raise NotImplementedError, 'Subclasses must implement #execute'
    end

    protected

    # Wraps a result so the conversation stops after this tool call.
    def halt(message)
      Halt.new(message)
    end

    # Wraps schema handling for tool parameters, supporting JSON Schema hashes,
    # RubyLLM::Schema instances/classes, and DSL blocks.
    class SchemaDefinition
      # Builds a strict object schema from a hash of Parameter objects.
      # Returns nil when there are no parameters.
      def self.from_parameters(parameters)
        return nil if parameters.nil? || parameters.empty?

        properties = parameters.to_h do |name, param|
          schema = {
            type: map_type(param.type),
            description: param.description
          }.compact

          # Array types need an items schema; default to string items.
          schema[:items] = default_items_schema if schema[:type] == 'array'

          [name.to_s, schema]
        end

        required = parameters.select { |_, param| param.required }.keys.map(&:to_s)

        json_schema = {
          type: 'object',
          properties: properties,
          required: required,
          additionalProperties: false,
          strict: true
        }

        new(schema: json_schema)
      end

      # Maps loose type aliases to JSON Schema type names ('string' fallback).
      def self.map_type(type)
        case type.to_s
        when 'integer', 'int' then 'integer'
        when 'number', 'float', 'double' then 'number'
        when 'boolean' then 'boolean'
        when 'array' then 'array'
        when 'object' then 'object'
        else
          'string'
        end
      end

      def self.default_items_schema
        { type: 'string' }
      end

      def initialize(schema: nil, block: nil)
        @schema = schema
        @block = block
      end

      def present?
        @schema || @block
      end

      # Memoized, string-keyed JSON schema resolved from schema or block.
      def json_schema
        @json_schema ||= RubyLLM::Utils.deep_stringify_keys(resolve_schema)
      end

      private

      def resolve_schema
        return resolve_direct_schema(@schema) if @schema
        return build_from_block(&@block) if @block

        nil
      end

      # Accepts anything responding to #to_json_schema, a plain Hash, or a
      # class whose instances respond to #to_json_schema.
      def resolve_direct_schema(schema)
        return extract_schema(schema.to_json_schema) if schema.respond_to?(:to_json_schema)
        return RubyLLM::Utils.deep_dup(schema) if schema.is_a?(Hash)
        if schema.is_a?(Class) && schema.instance_methods.include?(:to_json_schema)
          return extract_schema(schema.new.to_json_schema)
        end

        nil
      end

      def build_from_block(&)
        schema_class = RubyLLM::Schema.create(&)
        extract_schema(schema_class.new.to_json_schema)
      end

      # Unwraps a { schema: ... } envelope when present and deep-dups so
      # callers cannot mutate shared schema structures.
      def extract_schema(schema_hash)
        return nil unless schema_hash.is_a?(Hash)

        schema = schema_hash[:schema] || schema_hash['schema'] || schema_hash
        RubyLLM::Utils.deep_dup(schema)
      end
    end
  end
end
@@ -0,0 +1,22 @@
1
# frozen_string_literal: true

module RubyLLM
  # Represents a function call from an AI model to a Tool.
  class ToolCall
    attr_reader :id, :name, :arguments

    # @param id [String] provider-assigned identifier for this call
    # @param name [String] name of the tool being invoked
    # @param arguments [Hash, String] parsed or raw argument payload
    def initialize(id:, name:, arguments: {})
      @id = id
      @name = name
      @arguments = arguments
    end

    # Serializes the call into a plain Hash with symbol keys.
    def to_h
      { id: id, name: name, arguments: arguments }
    end
  end
end
@@ -0,0 +1,125 @@
1
# frozen_string_literal: true

module RubyLLM
  # Built-in tool executors for concurrent tool execution.
  # These are registered automatically when RubyLLM is loaded.
  module ToolExecutors
    class << self
      # Registers the built-in executors.
      # Called automatically when RubyLLM is loaded.
      def register_defaults
        register_threads_executor
        register_async_executor
      end

      private

      # Thread-based executor using Ruby's native threads.
      # Uses only stdlib - no external dependencies.
      # Good for broad compatibility and CPU-bound operations.
      #
      # Returns a Hash of tool_call.id => result; failures are converted
      # into "Error: ..." string results so the LLM sees them.
      def register_threads_executor
        RubyLLM.register_tool_executor(:threads) do |tool_calls, max_concurrency:, &execute|
          results = {}
          mutex = Mutex.new
          # SizedQueue used as a counting semaphore; nil means unbounded.
          semaphore = max_concurrency ? Thread::SizedQueue.new(max_concurrency) : nil

          # Fill semaphore with permits
          max_concurrency&.times { semaphore << :permit }

          threads = tool_calls.map do |tool_call|
            Thread.new do
              # Acquire permit (blocks if none available)
              permit = semaphore&.pop

              begin
                result = execute.call(tool_call)
                # Mutex guards the shared results hash across threads.
                mutex.synchronize { results[tool_call.id] = result }
              rescue StandardError => e
                # Store error as result so LLM sees it
                error_result = "Error: #{e.class}: #{e.message}"
                mutex.synchronize { results[tool_call.id] = error_result }
                RubyLLM.logger.warn "[RubyLLM] Tool #{tool_call.id} failed: #{e.message}"
              ensure
                # Release permit
                semaphore&.push(permit) if permit
              end
            end
          end

          threads.each(&:join)
          results
        end
      end

      # Async-based executor using the async gem.
      # Uses lightweight fibers for I/O-bound operations.
      # Requires the async gem to be installed.
      def register_async_executor
        RubyLLM.register_tool_executor(:async) do |tool_calls, max_concurrency:, &execute|
          AsyncExecutor.execute(tool_calls, max_concurrency: max_concurrency, &execute)
        end
      end
    end

    # Internal implementation for async executor.
    # Separated to keep block size manageable.
    module AsyncExecutor
      class << self
        # Runs all tool calls inside a Sync/Async reactor and returns the
        # Hash of tool_call.id => result.
        def execute(tool_calls, max_concurrency:, &block)
          load_async_gem
          run_with_sync { execute_tools(tool_calls, max_concurrency, &block) }
        end

        private

        # Lazily requires the async gem, re-raising with setup guidance.
        def load_async_gem
          require 'async'
          require 'async/barrier'
          require 'async/semaphore'
        rescue LoadError => e
          raise LoadError,
                'The async gem is required for :async tool executor. ' \
                "Add `gem 'async'` to your Gemfile. Original error: #{e.message}"
        end

        def run_with_sync(&)
          # Use Kernel#Sync if available (async 2.x), otherwise Async{}.wait
          # NOTE(review): defined?(Sync) on a bare capitalized name is a
          # constant check in some Ruby versions even when Kernel#Sync is a
          # method — verify this branch selects correctly under async 2.x.
          if defined?(Sync)
            Sync(&)
          else
            Async(&).wait
          end
        end

        # Fans tool calls out as fibers under a Barrier, optionally gated by
        # a Semaphore; failures become "Error: ..." string results.
        def execute_tools(tool_calls, max_concurrency)
          semaphore = max_concurrency ? Async::Semaphore.new(max_concurrency) : nil
          barrier = Async::Barrier.new
          results = {}

          tool_calls.each do |tool_call|
            barrier.async do
              results[tool_call.id] = execute_single_tool(tool_call, semaphore) { yield tool_call }
            rescue StandardError => e
              results[tool_call.id] = "Error: #{e.class}: #{e.message}"
              RubyLLM.logger.warn "[RubyLLM] Tool #{tool_call.id} failed: #{e.message}"
            end
          end

          barrier.wait
          results
        end

        # Runs the block inside the semaphore when one exists.
        def execute_single_tool(_tool_call, semaphore, &)
          if semaphore
            semaphore.acquire(&)
          else
            yield
          end
        end
      end
    end
  end
end

# Register built-in executors when this file is loaded
RubyLLM::ToolExecutors.register_defaults
@@ -0,0 +1,35 @@
1
# frozen_string_literal: true

module RubyLLM
  # Represents a transcription of audio content.
  class Transcription
    attr_reader :text, :model, :language, :duration, :segments, :input_tokens, :output_tokens

    # @param text [String] the transcribed text
    # @param model [String] identifier of the model that produced it
    # @param attributes [Hash] optional metadata (:language, :duration,
    #   :segments, :input_tokens, :output_tokens)
    def initialize(text:, model:, **attributes)
      @text = text
      @model = model
      @language, @duration, @segments, @input_tokens, @output_tokens =
        attributes.values_at(:language, :duration, :segments, :input_tokens, :output_tokens)
    end

    # Transcribes an audio file, resolving the model (or the configured
    # default) and delegating to the resolved provider. Unrecognized
    # keywords are forwarded to the provider as transcription options.
    def self.transcribe(audio_file, model: nil, language: nil, provider: nil,
                        assume_model_exists: false, context: nil, **options)
      config = context&.config || RubyLLM.config
      model ||= config.default_transcription_model
      resolved_model, provider_instance = Models.resolve(
        model, provider: provider, assume_exists: assume_model_exists, config: config
      )

      provider_instance.transcribe(audio_file, model: resolved_model.id, language:, **options)
    end
  end
end
+ end