ruby_llm_community 0.0.1 → 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112):
  1. checksums.yaml +4 -4
  2. data/LICENSE +22 -0
  3. data/README.md +172 -0
  4. data/lib/generators/ruby_llm/install/templates/INSTALL_INFO.md.tt +108 -0
  5. data/lib/generators/ruby_llm/install/templates/chat_model.rb.tt +3 -0
  6. data/lib/generators/ruby_llm/install/templates/create_chats_migration.rb.tt +8 -0
  7. data/lib/generators/ruby_llm/install/templates/create_messages_migration.rb.tt +15 -0
  8. data/lib/generators/ruby_llm/install/templates/create_tool_calls_migration.rb.tt +14 -0
  9. data/lib/generators/ruby_llm/install/templates/initializer.rb.tt +6 -0
  10. data/lib/generators/ruby_llm/install/templates/message_model.rb.tt +3 -0
  11. data/lib/generators/ruby_llm/install/templates/tool_call_model.rb.tt +3 -0
  12. data/lib/generators/ruby_llm/install_generator.rb +121 -0
  13. data/lib/ruby_llm/active_record/acts_as.rb +382 -0
  14. data/lib/ruby_llm/aliases.json +217 -0
  15. data/lib/ruby_llm/aliases.rb +56 -0
  16. data/lib/ruby_llm/attachment.rb +164 -0
  17. data/lib/ruby_llm/chat.rb +219 -0
  18. data/lib/ruby_llm/chunk.rb +6 -0
  19. data/lib/ruby_llm/configuration.rb +75 -0
  20. data/lib/ruby_llm/connection.rb +126 -0
  21. data/lib/ruby_llm/content.rb +52 -0
  22. data/lib/ruby_llm/context.rb +29 -0
  23. data/lib/ruby_llm/embedding.rb +30 -0
  24. data/lib/ruby_llm/error.rb +84 -0
  25. data/lib/ruby_llm/image.rb +53 -0
  26. data/lib/ruby_llm/message.rb +76 -0
  27. data/lib/ruby_llm/mime_type.rb +67 -0
  28. data/lib/ruby_llm/model/info.rb +101 -0
  29. data/lib/ruby_llm/model/modalities.rb +22 -0
  30. data/lib/ruby_llm/model/pricing.rb +51 -0
  31. data/lib/ruby_llm/model/pricing_category.rb +48 -0
  32. data/lib/ruby_llm/model/pricing_tier.rb +34 -0
  33. data/lib/ruby_llm/model.rb +7 -0
  34. data/lib/ruby_llm/models.json +29924 -0
  35. data/lib/ruby_llm/models.rb +218 -0
  36. data/lib/ruby_llm/models_schema.json +168 -0
  37. data/lib/ruby_llm/provider.rb +219 -0
  38. data/lib/ruby_llm/providers/anthropic/capabilities.rb +179 -0
  39. data/lib/ruby_llm/providers/anthropic/chat.rb +106 -0
  40. data/lib/ruby_llm/providers/anthropic/embeddings.rb +20 -0
  41. data/lib/ruby_llm/providers/anthropic/media.rb +92 -0
  42. data/lib/ruby_llm/providers/anthropic/models.rb +48 -0
  43. data/lib/ruby_llm/providers/anthropic/streaming.rb +43 -0
  44. data/lib/ruby_llm/providers/anthropic/tools.rb +108 -0
  45. data/lib/ruby_llm/providers/anthropic.rb +37 -0
  46. data/lib/ruby_llm/providers/bedrock/capabilities.rb +167 -0
  47. data/lib/ruby_llm/providers/bedrock/chat.rb +65 -0
  48. data/lib/ruby_llm/providers/bedrock/media.rb +61 -0
  49. data/lib/ruby_llm/providers/bedrock/models.rb +82 -0
  50. data/lib/ruby_llm/providers/bedrock/signing.rb +831 -0
  51. data/lib/ruby_llm/providers/bedrock/streaming/base.rb +63 -0
  52. data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +63 -0
  53. data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb +79 -0
  54. data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +90 -0
  55. data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb +91 -0
  56. data/lib/ruby_llm/providers/bedrock/streaming.rb +36 -0
  57. data/lib/ruby_llm/providers/bedrock.rb +83 -0
  58. data/lib/ruby_llm/providers/deepseek/capabilities.rb +131 -0
  59. data/lib/ruby_llm/providers/deepseek/chat.rb +17 -0
  60. data/lib/ruby_llm/providers/deepseek.rb +30 -0
  61. data/lib/ruby_llm/providers/gemini/capabilities.rb +351 -0
  62. data/lib/ruby_llm/providers/gemini/chat.rb +139 -0
  63. data/lib/ruby_llm/providers/gemini/embeddings.rb +39 -0
  64. data/lib/ruby_llm/providers/gemini/images.rb +48 -0
  65. data/lib/ruby_llm/providers/gemini/media.rb +55 -0
  66. data/lib/ruby_llm/providers/gemini/models.rb +41 -0
  67. data/lib/ruby_llm/providers/gemini/streaming.rb +58 -0
  68. data/lib/ruby_llm/providers/gemini/tools.rb +82 -0
  69. data/lib/ruby_llm/providers/gemini.rb +36 -0
  70. data/lib/ruby_llm/providers/gpustack/chat.rb +17 -0
  71. data/lib/ruby_llm/providers/gpustack/models.rb +55 -0
  72. data/lib/ruby_llm/providers/gpustack.rb +33 -0
  73. data/lib/ruby_llm/providers/mistral/capabilities.rb +163 -0
  74. data/lib/ruby_llm/providers/mistral/chat.rb +26 -0
  75. data/lib/ruby_llm/providers/mistral/embeddings.rb +36 -0
  76. data/lib/ruby_llm/providers/mistral/models.rb +49 -0
  77. data/lib/ruby_llm/providers/mistral.rb +32 -0
  78. data/lib/ruby_llm/providers/ollama/chat.rb +28 -0
  79. data/lib/ruby_llm/providers/ollama/media.rb +50 -0
  80. data/lib/ruby_llm/providers/ollama.rb +29 -0
  81. data/lib/ruby_llm/providers/openai/capabilities.rb +306 -0
  82. data/lib/ruby_llm/providers/openai/chat.rb +86 -0
  83. data/lib/ruby_llm/providers/openai/embeddings.rb +36 -0
  84. data/lib/ruby_llm/providers/openai/images.rb +38 -0
  85. data/lib/ruby_llm/providers/openai/media.rb +81 -0
  86. data/lib/ruby_llm/providers/openai/models.rb +39 -0
  87. data/lib/ruby_llm/providers/openai/response.rb +115 -0
  88. data/lib/ruby_llm/providers/openai/response_media.rb +76 -0
  89. data/lib/ruby_llm/providers/openai/streaming.rb +190 -0
  90. data/lib/ruby_llm/providers/openai/tools.rb +100 -0
  91. data/lib/ruby_llm/providers/openai.rb +44 -0
  92. data/lib/ruby_llm/providers/openai_base.rb +44 -0
  93. data/lib/ruby_llm/providers/openrouter/models.rb +88 -0
  94. data/lib/ruby_llm/providers/openrouter.rb +26 -0
  95. data/lib/ruby_llm/providers/perplexity/capabilities.rb +138 -0
  96. data/lib/ruby_llm/providers/perplexity/chat.rb +17 -0
  97. data/lib/ruby_llm/providers/perplexity/models.rb +42 -0
  98. data/lib/ruby_llm/providers/perplexity.rb +52 -0
  99. data/lib/ruby_llm/railtie.rb +17 -0
  100. data/lib/ruby_llm/stream_accumulator.rb +97 -0
  101. data/lib/ruby_llm/streaming.rb +162 -0
  102. data/lib/ruby_llm/tool.rb +100 -0
  103. data/lib/ruby_llm/tool_call.rb +31 -0
  104. data/lib/ruby_llm/utils.rb +49 -0
  105. data/lib/ruby_llm/version.rb +5 -0
  106. data/lib/ruby_llm.rb +98 -0
  107. data/lib/tasks/aliases.rake +235 -0
  108. data/lib/tasks/models_docs.rake +224 -0
  109. data/lib/tasks/models_update.rake +108 -0
  110. data/lib/tasks/release.rake +32 -0
  111. data/lib/tasks/vcr.rake +99 -0
  112. metadata +128 -7
# frozen_string_literal: true

module RubyLLM
  module Providers
    class Perplexity
      # Determines capabilities and pricing for Perplexity models.
      module Capabilities
        module_function

        # Pricing information for Perplexity models (USD per 1M tokens).
        PRICES = {
          sonar: {
            input: 1.0,
            output: 1.0
          },
          sonar_pro: {
            input: 3.0,
            output: 15.0
          },
          sonar_reasoning: {
            input: 1.0,
            output: 5.0
          },
          sonar_reasoning_pro: {
            input: 2.0,
            output: 8.0
          },
          sonar_deep_research: {
            input: 2.0,
            output: 8.0,
            citation: 2.0,
            reasoning: 3.0,
            search_queries: 5.0
          }
        }.freeze

        # Human-friendly display names for the known model ids.
        DISPLAY_NAMES = {
          'sonar' => 'Sonar',
          'sonar-pro' => 'Sonar Pro',
          'sonar-reasoning' => 'Sonar Reasoning',
          'sonar-reasoning-pro' => 'Sonar Reasoning Pro',
          'sonar-deep-research' => 'Sonar Deep Research'
        }.freeze

        # Pricing-family symbol for each known model id.
        MODEL_FAMILIES = {
          'sonar' => :sonar,
          'sonar-pro' => :sonar_pro,
          'sonar-reasoning' => :sonar_reasoning,
          'sonar-reasoning-pro' => :sonar_reasoning_pro,
          'sonar-deep-research' => :sonar_deep_research
        }.freeze

        # Context window size (tokens) for the given model id.
        def context_window_for(model_id)
          model_id.match?(/sonar-pro/) ? 200_000 : 128_000
        end

        # Maximum output tokens for the given model id.
        def max_tokens_for(model_id)
          model_id.match?(/sonar-(?:pro|reasoning-pro)/) ? 8_192 : 4_096
        end

        # Input price per million tokens; 1.0 for unknown families.
        def input_price_for(model_id)
          PRICES.dig(model_family(model_id), :input) || 1.0
        end

        # Output price per million tokens; 1.0 for unknown families.
        def output_price_for(model_id)
          PRICES.dig(model_family(model_id), :output) || 1.0
        end

        # True for any Sonar-family model id (every listed model matched).
        def supports_vision?(model_id)
          model_id.match?(/sonar/)
        end

        # Perplexity models do not expose function/tool calling here.
        def supports_functions?(_model_id)
          false
        end

        # JSON mode is reported as supported for every model.
        def supports_json_mode?(_model_id)
          true
        end

        # Display name for a model id; unknown ids are title-cased from
        # their dash-separated parts (e.g. "foo-bar" => "Foo Bar").
        def format_display_name(model_id)
          DISPLAY_NAMES.fetch(model_id) do
            model_id.split('-').map(&:capitalize).join(' ')
          end
        end

        # All Perplexity models are chat models.
        def model_type(_model_id)
          'chat'
        end

        # Pricing-family symbol for a model id; :unknown when unrecognized.
        def model_family(model_id)
          MODEL_FAMILIES.fetch(model_id, :unknown)
        end

        # Input/output modalities; text-only on both sides.
        def modalities_for(_model_id)
          {
            input: ['text'],
            output: ['text']
          }
        end

        # Capability tags for the model, adding 'vision' where supported.
        def capabilities_for(model_id)
          ['streaming', 'json_mode', ('vision' if supports_vision?(model_id))].compact
        end

        # Builds the nested pricing hash for a model, including the extra
        # per-million citation/reasoning and per-thousand search-query rates
        # that only some families define.
        def pricing_for(model_id)
          prices = PRICES.fetch(model_family(model_id), { input: 1.0, output: 1.0 })

          standard = {
            input_per_million: prices[:input],
            output_per_million: prices[:output]
          }
          standard[:citation_per_million] = prices[:citation] if prices[:citation]
          standard[:reasoning_per_million] = prices[:reasoning] if prices[:reasoning]
          standard[:search_per_thousand] = prices[:search_queries] if prices[:search_queries]

          {
            text_tokens: {
              standard: standard
            }
          }
        end
      end
    end
  end
end
# frozen_string_literal: true

module RubyLLM
  module Providers
    class Perplexity
      # Chat formatting for the Perplexity provider.
      module Chat
        module_function

        # Converts a role (symbol or string) to its plain string form.
        # Perplexity doesn't use the new OpenAI convention for system
        # prompts, so no role remapping is applied.
        def format_role(role)
          role.to_s
        end
      end
    end
  end
end
# frozen_string_literal: true

module RubyLLM
  module Providers
    class Perplexity
      # Models methods of the Perplexity API integration. Perplexity is not
      # queried for its catalogue here; a fixed list of model ids is
      # described via the Capabilities module instead.
      module Models
        # The model ids this integration knows about, in listing order.
        KNOWN_MODELS = %w[
          sonar
          sonar-pro
          sonar-reasoning
          sonar-reasoning-pro
          sonar-deep-research
        ].freeze

        # Returns Model::Info entries for every known Perplexity model.
        def list_models(**)
          parse_list_models_response(nil, 'perplexity', Perplexity::Capabilities)
        end

        # Builds the static model list; `_response` is unused because no
        # HTTP call backs this provider's listing.
        def parse_list_models_response(_response, slug, capabilities)
          KNOWN_MODELS.map { |model_id| create_model_info(model_id, slug, capabilities) }
        end

        # Assembles a Model::Info for one model id, sourcing every attribute
        # from the capabilities module.
        def create_model_info(id, slug, capabilities)
          Model::Info.new(
            id: id,
            name: capabilities.format_display_name(id),
            provider: slug,
            family: capabilities.model_family(id).to_s,
            created_at: Time.now,
            context_window: capabilities.context_window_for(id),
            max_output_tokens: capabilities.max_tokens_for(id),
            modalities: capabilities.modalities_for(id),
            capabilities: capabilities.capabilities_for(id),
            pricing: capabilities.pricing_for(id),
            metadata: {}
          )
        end
      end
    end
  end
end
# frozen_string_literal: true

module RubyLLM
  module Providers
    # Perplexity API integration.
    class Perplexity < OpenAIBase
      include Perplexity::Chat
      include Perplexity::Models

      # Base URL for the Perplexity API.
      def api_base
        'https://api.perplexity.ai'
      end

      # Request headers; authenticates with the configured API key.
      def headers
        {
          'Authorization' => "Bearer #{@config.perplexity_api_key}",
          'Content-Type' => 'application/json'
        }
      end

      class << self
        # Capabilities module consulted for model metadata.
        def capabilities
          Perplexity::Capabilities
        end

        # Configuration keys that must be set to use this provider.
        def configuration_requirements
          %i[perplexity_api_key]
        end
      end

      # Extracts an error message from a response body. Perplexity returns
      # HTML pages for auth errors, so when the body looks like HTML the
      # <title> text (minus any leading status code) is used; otherwise the
      # OpenAI-style parsing in the superclass applies.
      def parse_error(response)
        body = response.body
        return if body.empty?

        if body.include?('<html>') && body.include?('<title>')
          title = body.match(%r{<title>(.+?)</title>})
          # Drop a leading "<digits> " (e.g. "401 Unauthorized" -> "Unauthorized").
          return title[1].sub(/^\d+\s+/, '') if title
        end

        super
      end
    end
  end
end
# frozen_string_literal: true

module RubyLLM
  # Rails integration for RubyLLM
  class Railtie < Rails::Railtie
    # Include RubyLLM::ActiveRecord::ActsAs into ActiveRecord once it has
    # loaded, making the acts_as integration available on all models.
    initializer 'ruby_llm.active_record' do
      ActiveSupport.on_load :active_record do
        include RubyLLM::ActiveRecord::ActsAs
      end
    end

    # Register generators
    # Loads the install generator so `rails generate ruby_llm:install` works.
    generators do
      require 'generators/ruby_llm/install_generator'
    end
  end
end
# frozen_string_literal: true

module RubyLLM
  # Assembles streaming responses from LLMs into complete messages.
  # Handles the complexities of accumulating content and tool calls
  # from partial chunks while tracking token usage.
  class StreamAccumulator
    attr_reader :content, :model_id, :tool_calls

    def initialize
      @content = +''
      @tool_calls = {}
      @input_tokens = 0
      @output_tokens = 0
      @latest_tool_call_id = nil
    end

    # Folds one streamed chunk into the accumulator: tool-call chunks are
    # merged into @tool_calls, all other chunks append to @content.
    def add(chunk)
      RubyLLM.logger.debug chunk.inspect if RubyLLM.config.log_stream_debug
      @model_id ||= chunk.model_id

      if chunk.tool_call?
        accumulate_tool_calls chunk.tool_calls
      else
        @content << (chunk.content || '')
      end

      count_tokens chunk
      RubyLLM.logger.debug inspect if RubyLLM.config.log_stream_debug
    end

    # Builds the final assistant Message once the stream completes.
    # `response` is attached verbatim as the raw provider response.
    def to_message(response)
      Message.new(
        role: :assistant,
        content: content.empty? ? nil : content,
        model_id: model_id,
        tool_calls: tool_calls_from_stream,
        input_tokens: @input_tokens.positive? ? @input_tokens : nil,
        output_tokens: @output_tokens.positive? ? @output_tokens : nil,
        raw: response
      )
    end

    private

    # Finalizes accumulated tool calls, parsing the JSON argument strings
    # that arrived in fragments. Empty argument strings become {}.
    def tool_calls_from_stream
      tool_calls.transform_values do |tc|
        arguments = if tc.arguments.is_a?(String) && !tc.arguments.empty?
                      JSON.parse(tc.arguments)
                    elsif tc.arguments.is_a?(String)
                      {} # Return empty hash for empty string arguments
                    else
                      tc.arguments
                    end

        ToolCall.new(
          id: tc.id,
          name: tc.name,
          arguments: arguments
        )
      end
    end

    # A chunk with an id starts a new tool call; chunks without an id carry
    # argument fragments for the most recently started call.
    def accumulate_tool_calls(new_tool_calls)
      RubyLLM.logger.debug "Accumulating tool calls: #{new_tool_calls}" if RubyLLM.config.log_stream_debug
      new_tool_calls.each_value do |tool_call|
        if tool_call.id
          # Some providers send an empty-string id; substitute a UUID so the
          # resulting ToolCall always carries a usable identifier.
          tool_call_id = tool_call.id.empty? ? SecureRandom.uuid : tool_call.id
          tool_call_arguments = tool_call.arguments.empty? ? +'' : tool_call.arguments
          @tool_calls[tool_call.id] = ToolCall.new(
            id: tool_call_id,
            name: tool_call.name,
            arguments: tool_call_arguments
          )
          @latest_tool_call_id = tool_call.id
        else
          existing = @tool_calls[@latest_tool_call_id]
          existing.arguments << tool_call.arguments if existing
        end
      end
    end

    # Looks up a tool call by id; a nil id returns the most recent call.
    # BUG FIX: the nil branch previously read @latest_tool_call, an ivar
    # that is never assigned anywhere in this class, so it always returned
    # nil. It now reads @latest_tool_call_id like the rest of the class.
    def find_tool_call(tool_call_id)
      if tool_call_id.nil?
        @tool_calls[@latest_tool_call_id]
      else
        @latest_tool_call_id = tool_call_id
        @tool_calls[tool_call_id]
      end
    end

    # Records the most recent token counts reported by the provider;
    # overwrites rather than sums (later chunks win).
    def count_tokens(chunk)
      @input_tokens = chunk.input_tokens if chunk.input_tokens
      @output_tokens = chunk.output_tokens if chunk.output_tokens
    end
  end
end
# frozen_string_literal: true

module RubyLLM
  # Handles streaming responses from AI providers. Provides a unified way to process
  # chunked responses, accumulate content, and handle provider-specific streaming formats.
  # Each provider implements provider-specific parsing while sharing common stream handling
  # patterns.
  #
  # NOTE(review): this module relies on the including provider to supply
  # #stream_url and #build_chunk, and expects providers to override
  # #parse_streaming_error for provider-specific error payloads.
  module Streaming
    module_function

    # POSTs `payload` to the provider's stream URL, yielding each parsed
    # chunk to `block` while folding them into a StreamAccumulator.
    # Returns the accumulated Message built from the completed stream.
    def stream_response(connection, payload, additional_headers = {}, &block)
      accumulator = StreamAccumulator.new

      response = connection.post stream_url, payload do |req|
        # Merge additional headers, with existing headers taking precedence
        req.headers = additional_headers.merge(req.headers) unless additional_headers.empty?
        if req.options.respond_to?(:on_data)
          # Handle Faraday 2.x streaming with on_data method
          req.options.on_data = handle_stream do |chunk|
            accumulator.add chunk
            block.call chunk
          end
        else
          # Handle Faraday 1.x streaming with :on_data key
          req.options[:on_data] = handle_stream do |chunk|
            accumulator.add chunk
            block.call chunk
          end
        end
      end

      message = accumulator.to_message(response)
      RubyLLM.logger.debug "Stream completed: #{message.inspect}"
      message
    end

    # Wraps the JSON stream processor so each parsed SSE payload is turned
    # into a provider-specific chunk (via #build_chunk) before yielding.
    # nil payloads (e.g. unparseable data) are skipped.
    def handle_stream(&block)
      to_json_stream do |data|
        block.call(build_chunk(data)) if data
      end
    end

    private

    # Builds the proc handed to Faraday's on_data. The buffer is used only
    # by the Faraday 2.x path to accumulate failed-response bodies.
    def to_json_stream(&)
      buffer = +''
      parser = EventStreamParser::Parser.new

      create_stream_processor(parser, buffer, &)
    end

    # Picks the processor matching the installed Faraday major version,
    # since the on_data callback arity differs between 1.x and 2.x.
    def create_stream_processor(parser, buffer, &)
      if Faraday::VERSION.start_with?('1')
        # Faraday 1.x: on_data receives (chunk, size)
        legacy_stream_processor(parser, &)
      else
        # Faraday 2.x: on_data receives (chunk, bytes, env)
        stream_processor(parser, buffer, &)
      end
    end

    # Routes one raw network chunk: explicit "event: error" frames go to
    # the error path, everything else is fed through the SSE parser.
    def process_stream_chunk(chunk, parser, env, &)
      RubyLLM.logger.debug "Received chunk: #{chunk}" if RubyLLM.config.log_stream_debug

      if error_chunk?(chunk)
        handle_error_chunk(chunk, env)
      else
        yield handle_sse(chunk, parser, env, &)
      end
    end

    # Faraday 1.x adapter: on_data yields (chunk, size); no env available.
    def legacy_stream_processor(parser, &block)
      proc do |chunk, _size|
        process_stream_chunk(chunk, parser, nil, &block)
      end
    end

    # Faraday 2.x adapter: on_data yields (chunk, bytes, env). Non-200
    # responses are buffered until their body parses as JSON.
    def stream_processor(parser, buffer, &block)
      proc do |chunk, _bytes, env|
        if env&.status == 200
          process_stream_chunk(chunk, parser, env, &block)
        else
          handle_failed_response(chunk, buffer, env)
        end
      end
    end

    # True when the raw chunk is an SSE error event frame.
    def error_chunk?(chunk)
      chunk.start_with?('event: error')
    end

    # Extracts the JSON payload from an "event: error" frame and hands a
    # response-like object to ErrorMiddleware for provider error mapping.
    # JSON parse failures are logged at debug level and swallowed.
    def handle_error_chunk(chunk, env)
      error_data = chunk.split("\n")[1].delete_prefix('data: ')
      status, _message = parse_streaming_error(error_data)
      parsed_data = JSON.parse(error_data)

      # Create a response-like object that works for both Faraday v1 and v2
      error_response = if env
                         env.merge(body: parsed_data, status: status)
                       else
                         # For Faraday v1, create a simple object that responds to .status and .body
                         Struct.new(:body, :status).new(parsed_data, status)
                       end

      ErrorMiddleware.parse_error(provider: self, response: error_response)
    rescue JSON::ParserError => e
      RubyLLM.logger.debug "Failed to parse error chunk: #{e.message}"
    end

    # Accumulates chunks of a failed (non-200) response in `buffer` until
    # the combined body forms valid JSON, then maps it to an error.
    def handle_failed_response(chunk, buffer, env)
      buffer << chunk
      error_data = JSON.parse(buffer)
      error_response = env.merge(body: error_data)
      ErrorMiddleware.parse_error(provider: self, response: error_response)
    rescue JSON::ParserError
      RubyLLM.logger.debug "Accumulating error chunk: #{chunk}"
    end

    # Feeds one chunk through the SSE parser, dispatching error events and
    # yielding parsed data payloads (skipping the terminal "[DONE]" marker).
    def handle_sse(chunk, parser, env, &block)
      parser.feed(chunk) do |type, data|
        case type.to_sym
        when :error
          handle_error_event(data, env)
        else
          yield handle_data(data, &block) unless data == '[DONE]'
        end
      end
    end

    # Parses one SSE data payload; returns nil (after logging) on bad JSON.
    def handle_data(data)
      JSON.parse(data)
    rescue JSON::ParserError => e
      RubyLLM.logger.debug "Failed to parse data chunk: #{e.message}"
    end

    # Maps an SSE error event to a provider error via ErrorMiddleware,
    # using the same dual Faraday v1/v2 response shim as handle_error_chunk.
    def handle_error_event(data, env)
      status, _message = parse_streaming_error(data)
      parsed_data = JSON.parse(data)

      # Create a response-like object that works for both Faraday v1 and v2
      error_response = if env
                         env.merge(body: parsed_data, status: status)
                       else
                         # For Faraday v1, create a simple object that responds to .status and .body
                         Struct.new(:body, :status).new(parsed_data, status)
                       end

      ErrorMiddleware.parse_error(provider: self, response: error_response)
    rescue JSON::ParserError => e
      RubyLLM.logger.debug "Failed to parse error event: #{e.message}"
    end

    # Default implementation - providers should override this method
    # Returns [status, message]; falls back to 500 when data is not JSON.
    def parse_streaming_error(data)
      error_data = JSON.parse(data)
      [500, error_data['message'] || 'Unknown streaming error']
    rescue JSON::ParserError => e
      RubyLLM.logger.debug "Failed to parse streaming error: #{e.message}"
      [500, "Failed to parse error: #{data}"]
    end
  end
end
# frozen_string_literal: true

module RubyLLM
  # Parameter definition for Tool methods. Specifies type constraints,
  # descriptions, and whether parameters are required.
  class Parameter
    attr_reader :name, :type, :description, :required

    # @param name [Symbol] the parameter's name
    # @param type [String] schema type name (defaults to 'string')
    # @param desc [String, nil] human-readable description
    # @param required [Boolean] whether the model must supply a value
    def initialize(name, type: 'string', desc: nil, required: true)
      @name = name
      @type = type
      @description = desc
      @required = required
    end
  end

  # Base class for creating tools that AI models can use. Provides a simple
  # interface for defining parameters and implementing tool behavior.
  #
  # Example:
  #   require 'tzinfo'
  #
  #   class TimeInfo < RubyLLM::Tool
  #     description 'Gets the current time in various timezones'
  #     param :timezone, desc: "Timezone name (e.g., 'UTC', 'America/New_York')"
  #
  #     def execute(timezone:)
  #       time = TZInfo::Timezone.get(timezone).now.strftime('%Y-%m-%d %H:%M:%S')
  #       "Current time in #{timezone}: #{time}"
  #     rescue StandardError => e
  #       { error: e.message }
  #     end
  #   end
  class Tool
    # Stops conversation continuation after tool execution
    class Halt
      attr_reader :content

      def initialize(content)
        @content = content
      end

      def to_s = @content.to_s
    end

    class << self
      # DSL: sets the tool description, or returns it when called bare.
      def description(text = nil)
        return @description unless text

        @description = text
      end

      # DSL: declares a parameter for this tool.
      def param(name, **options)
        parameters[name] = Parameter.new(name, **options)
      end

      # Declared parameters, keyed by name.
      def parameters
        @parameters ||= {}
      end
    end

    # Derives the wire-format tool name from the class name: folded to
    # ASCII, snake_cased, with any trailing "_tool" suffix removed.
    def name
      ascii = self.class.name.to_s.dup
                  .force_encoding('UTF-8')
                  .unicode_normalize(:nfkd)
                  .encode('ASCII', replace: '')
      ascii.gsub(/[^a-zA-Z0-9_-]/, '-')
           .gsub(/([A-Z]+)([A-Z][a-z])/, '\1_\2')
           .gsub(/([a-z\d])([A-Z])/, '\1_\2')
           .downcase
           .delete_suffix('_tool')
    end

    # The description declared on the class.
    def description = self.class.description

    # The parameters declared on the class.
    def parameters = self.class.parameters

    # Entry point used by the chat loop: symbolizes argument keys and
    # delegates to #execute, logging the call and its result.
    def call(args)
      RubyLLM.logger.debug "Tool #{name} called with: #{args.inspect}"
      result = execute(**args.transform_keys(&:to_sym))
      RubyLLM.logger.debug "Tool #{name} returned: #{result.inspect}"
      result
    end

    # Subclasses implement the tool's behavior here.
    def execute(...)
      raise NotImplementedError, 'Subclasses must implement #execute'
    end

    protected

    # Wraps a message so the chat loop stops after this tool call.
    def halt(message)
      Halt.new(message)
    end
  end
end
# frozen_string_literal: true

module RubyLLM
  # Represents a function call from an AI model to a Tool.
  # Encapsulates the function name, arguments, and execution results
  # in a clean Ruby interface.
  #
  # Example:
  #   tool_call = ToolCall.new(
  #     id: "call_123",
  #     name: "calculator",
  #     arguments: { expression: "2 + 2" }
  #   )
  class ToolCall
    attr_reader :id, :name, :arguments

    # @param id [String] provider-assigned call identifier
    # @param name [String] name of the tool being invoked
    # @param arguments [Hash] arguments for the call (defaults to empty)
    def initialize(id:, name:, arguments: {})
      @id = id
      @name = name
      @arguments = arguments
    end

    # Serializes the call to a Hash with symbol keys.
    def to_h
      { id: id, name: name, arguments: arguments }
    end
  end
end