ruby_llm_community 0.0.6 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97)
  1. checksums.yaml +4 -4
  2. data/README.md +3 -3
  3. data/lib/generators/ruby_llm/install/templates/create_models_migration.rb.tt +34 -0
  4. data/lib/generators/ruby_llm/install/templates/initializer.rb.tt +5 -0
  5. data/lib/generators/ruby_llm/install/templates/model_model.rb.tt +6 -0
  6. data/lib/generators/ruby_llm/install_generator.rb +27 -2
  7. data/lib/ruby_llm/active_record/acts_as.rb +163 -24
  8. data/lib/ruby_llm/aliases.json +58 -5
  9. data/lib/ruby_llm/aliases.rb +7 -25
  10. data/lib/ruby_llm/chat.rb +10 -17
  11. data/lib/ruby_llm/configuration.rb +5 -12
  12. data/lib/ruby_llm/connection.rb +4 -4
  13. data/lib/ruby_llm/connection_multipart.rb +19 -0
  14. data/lib/ruby_llm/content.rb +5 -2
  15. data/lib/ruby_llm/embedding.rb +1 -2
  16. data/lib/ruby_llm/error.rb +0 -8
  17. data/lib/ruby_llm/image.rb +23 -8
  18. data/lib/ruby_llm/image_attachment.rb +21 -0
  19. data/lib/ruby_llm/message.rb +6 -6
  20. data/lib/ruby_llm/model/info.rb +12 -10
  21. data/lib/ruby_llm/model/pricing.rb +0 -3
  22. data/lib/ruby_llm/model/pricing_category.rb +0 -2
  23. data/lib/ruby_llm/model/pricing_tier.rb +0 -1
  24. data/lib/ruby_llm/models.json +2147 -470
  25. data/lib/ruby_llm/models.rb +65 -34
  26. data/lib/ruby_llm/provider.rb +8 -8
  27. data/lib/ruby_llm/providers/anthropic/capabilities.rb +1 -46
  28. data/lib/ruby_llm/providers/anthropic/chat.rb +2 -2
  29. data/lib/ruby_llm/providers/anthropic/media.rb +0 -1
  30. data/lib/ruby_llm/providers/anthropic/tools.rb +1 -2
  31. data/lib/ruby_llm/providers/anthropic.rb +1 -2
  32. data/lib/ruby_llm/providers/bedrock/chat.rb +2 -4
  33. data/lib/ruby_llm/providers/bedrock/media.rb +0 -1
  34. data/lib/ruby_llm/providers/bedrock/models.rb +0 -2
  35. data/lib/ruby_llm/providers/bedrock/streaming/base.rb +0 -12
  36. data/lib/ruby_llm/providers/bedrock/streaming/content_extraction.rb +0 -7
  37. data/lib/ruby_llm/providers/bedrock/streaming/message_processing.rb +0 -12
  38. data/lib/ruby_llm/providers/bedrock/streaming/payload_processing.rb +0 -12
  39. data/lib/ruby_llm/providers/bedrock/streaming/prelude_handling.rb +0 -13
  40. data/lib/ruby_llm/providers/bedrock/streaming.rb +0 -18
  41. data/lib/ruby_llm/providers/bedrock.rb +1 -2
  42. data/lib/ruby_llm/providers/deepseek/capabilities.rb +1 -2
  43. data/lib/ruby_llm/providers/deepseek/chat.rb +0 -1
  44. data/lib/ruby_llm/providers/gemini/capabilities.rb +28 -100
  45. data/lib/ruby_llm/providers/gemini/chat.rb +57 -29
  46. data/lib/ruby_llm/providers/gemini/embeddings.rb +0 -2
  47. data/lib/ruby_llm/providers/gemini/images.rb +1 -2
  48. data/lib/ruby_llm/providers/gemini/media.rb +0 -1
  49. data/lib/ruby_llm/providers/gemini/models.rb +1 -2
  50. data/lib/ruby_llm/providers/gemini/streaming.rb +15 -1
  51. data/lib/ruby_llm/providers/gemini/tools.rb +0 -5
  52. data/lib/ruby_llm/providers/gpustack/chat.rb +11 -1
  53. data/lib/ruby_llm/providers/gpustack/media.rb +45 -0
  54. data/lib/ruby_llm/providers/gpustack/models.rb +44 -9
  55. data/lib/ruby_llm/providers/gpustack.rb +1 -0
  56. data/lib/ruby_llm/providers/mistral/capabilities.rb +2 -10
  57. data/lib/ruby_llm/providers/mistral/chat.rb +0 -2
  58. data/lib/ruby_llm/providers/mistral/embeddings.rb +0 -3
  59. data/lib/ruby_llm/providers/mistral/models.rb +0 -1
  60. data/lib/ruby_llm/providers/ollama/chat.rb +0 -1
  61. data/lib/ruby_llm/providers/ollama/media.rb +1 -6
  62. data/lib/ruby_llm/providers/ollama/models.rb +36 -0
  63. data/lib/ruby_llm/providers/ollama.rb +1 -0
  64. data/lib/ruby_llm/providers/openai/capabilities.rb +3 -16
  65. data/lib/ruby_llm/providers/openai/chat.rb +1 -3
  66. data/lib/ruby_llm/providers/openai/embeddings.rb +0 -3
  67. data/lib/ruby_llm/providers/openai/images.rb +73 -3
  68. data/lib/ruby_llm/providers/openai/media.rb +0 -1
  69. data/lib/ruby_llm/providers/openai/response.rb +120 -29
  70. data/lib/ruby_llm/providers/openai/response_media.rb +2 -2
  71. data/lib/ruby_llm/providers/openai/streaming.rb +107 -47
  72. data/lib/ruby_llm/providers/openai/tools.rb +1 -1
  73. data/lib/ruby_llm/providers/openai.rb +1 -3
  74. data/lib/ruby_llm/providers/openai_base.rb +2 -2
  75. data/lib/ruby_llm/providers/openrouter/models.rb +1 -16
  76. data/lib/ruby_llm/providers/perplexity/capabilities.rb +0 -1
  77. data/lib/ruby_llm/providers/perplexity/chat.rb +0 -1
  78. data/lib/ruby_llm/providers/perplexity.rb +1 -5
  79. data/lib/ruby_llm/providers/vertexai/chat.rb +14 -0
  80. data/lib/ruby_llm/providers/vertexai/embeddings.rb +32 -0
  81. data/lib/ruby_llm/providers/vertexai/models.rb +130 -0
  82. data/lib/ruby_llm/providers/vertexai/streaming.rb +14 -0
  83. data/lib/ruby_llm/providers/vertexai.rb +55 -0
  84. data/lib/ruby_llm/railtie.rb +0 -1
  85. data/lib/ruby_llm/stream_accumulator.rb +72 -10
  86. data/lib/ruby_llm/streaming.rb +16 -25
  87. data/lib/ruby_llm/tool.rb +2 -19
  88. data/lib/ruby_llm/tool_call.rb +0 -9
  89. data/lib/ruby_llm/version.rb +1 -1
  90. data/lib/ruby_llm_community.rb +5 -3
  91. data/lib/tasks/models.rake +525 -0
  92. data/lib/tasks/release.rake +37 -2
  93. data/lib/tasks/vcr.rake +0 -7
  94. metadata +13 -4
  95. data/lib/tasks/aliases.rake +0 -235
  96. data/lib/tasks/models_docs.rake +0 -224
  97. data/lib/tasks/models_update.rake +0 -108
data/lib/ruby_llm/providers/vertexai/models.rb ADDED
@@ -0,0 +1,130 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    class VertexAI
+      # Models methods for the Vertex AI integration
+      module Models
+        # Gemini and other Google models that aren't returned by the API
+        KNOWN_GOOGLE_MODELS = %w[
+          gemini-2.5-flash-lite
+          gemini-2.5-pro
+          gemini-2.5-flash
+          gemini-2.0-flash-lite-001
+          gemini-2.0-flash-001
+          gemini-2.0-flash
+          gemini-2.0-flash-exp
+          gemini-1.5-pro-002
+          gemini-1.5-pro
+          gemini-1.5-flash-002
+          gemini-1.5-flash
+          gemini-1.5-flash-8b
+          gemini-pro
+          gemini-pro-vision
+          gemini-exp-1206
+          gemini-exp-1121
+          gemini-embedding-001
+          text-embedding-005
+          text-embedding-004
+          text-multilingual-embedding-002
+        ].freeze
+
+        def list_models
+          all_models = []
+          page_token = nil
+
+          all_models.concat(build_known_models)
+
+          loop do
+            response = @connection.get('publishers/google/models') do |req|
+              req.headers['x-goog-user-project'] = @config.vertexai_project_id
+              req.params = { pageSize: 100 }
+              req.params[:pageToken] = page_token if page_token
+            end
+
+            publisher_models = response.body['publisherModels'] || []
+            publisher_models.each do |model_data|
+              next if model_data['launchStage'] == 'DEPRECATED'
+
+              model_id = extract_model_id_from_path(model_data['name'])
+              all_models << build_model_from_api_data(model_data, model_id)
+            end
+
+            page_token = response.body['nextPageToken']
+            break unless page_token
+          end
+
+          all_models
+        rescue StandardError => e
+          RubyLLM.logger.debug "Error fetching Vertex AI models: #{e.message}"
+          build_known_models
+        end
+
+        private
+
+        def build_known_models
+          KNOWN_GOOGLE_MODELS.map do |model_id|
+            Model::Info.new(
+              id: model_id,
+              name: model_id,
+              provider: slug,
+              family: determine_model_family(model_id),
+              created_at: nil,
+              context_window: nil,
+              max_output_tokens: nil,
+              modalities: nil,
+              capabilities: %w[streaming function_calling],
+              pricing: nil,
+              metadata: {
+                source: 'known_models'
+              }
+            )
+          end
+        end
+
+        def build_model_from_api_data(model_data, model_id)
+          Model::Info.new(
+            id: model_id,
+            name: model_id,
+            provider: slug,
+            family: determine_model_family(model_id),
+            created_at: nil,
+            context_window: nil,
+            max_output_tokens: nil,
+            modalities: nil,
+            capabilities: extract_capabilities(model_data),
+            pricing: nil,
+            metadata: {
+              version_id: model_data['versionId'],
+              open_source_category: model_data['openSourceCategory'],
+              launch_stage: model_data['launchStage'],
+              supported_actions: model_data['supportedActions'],
+              publisher_model_template: model_data['publisherModelTemplate']
+            }
+          )
+        end
+
+        def extract_model_id_from_path(path)
+          path.split('/').last
+        end
+
+        def determine_model_family(model_id)
+          case model_id
+          when /^gemini-2\.\d+/ then 'gemini-2'
+          when /^gemini-1\.\d+/ then 'gemini-1.5'
+          when /^text-embedding/ then 'text-embedding'
+          when /bison/ then 'palm'
+          else 'gemini'
+          end
+        end
+
+        def extract_capabilities(model_data)
+          capabilities = ['streaming']
+          model_name = model_data['name']
+          capabilities << 'function_calling' if model_name.include?('gemini')
+          capabilities.uniq
+        end
+      end
+    end
+  end
+end
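The family mapping in determine_model_family is a first-match case over the model ID, falling back to 'gemini'. A standalone sketch of the same rules in plain Ruby (regexes copied from the diff; sample IDs are illustrative):

# First match wins; 'gemini' is the fallback family, as in determine_model_family.
FAMILY_RULES = [
  [/^gemini-2\.\d+/,  'gemini-2'],
  [/^gemini-1\.\d+/,  'gemini-1.5'],
  [/^text-embedding/, 'text-embedding'],
  [/bison/,           'palm']
].freeze

def family_for(model_id)
  rule = FAMILY_RULES.find { |pattern, _| model_id.match?(pattern) }
  rule ? rule.last : 'gemini'
end

puts family_for('gemini-2.5-pro')     # => gemini-2
puts family_for('text-embedding-005') # => text-embedding
puts family_for('gemini-pro-vision')  # => gemini (fallback)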
data/lib/ruby_llm/providers/vertexai/streaming.rb ADDED
@@ -0,0 +1,14 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    class VertexAI
+      # Streaming methods for the Vertex AI implementation
+      module Streaming
+        def stream_url
+          "projects/#{@config.vertexai_project_id}/locations/#{@config.vertexai_location}/publishers/google/models/#{@model}:streamGenerateContent?alt=sse" # rubocop:disable Layout/LineLength
+        end
+      end
+    end
+  end
+end
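Given project and location settings, the interpolation above yields a path relative to the provider's regional api_base. A quick illustration with made-up values:

# Illustrative only: the real method interpolates @config and @model.
project_id = 'my-gcp-project' # hypothetical
location   = 'us-central1'    # hypothetical
model      = 'gemini-2.0-flash'

puts "projects/#{project_id}/locations/#{location}/publishers/google/models/#{model}:streamGenerateContent?alt=sse"
# Relative to api_base, i.e. https://us-central1-aiplatform.googleapis.com/v1beta1/...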
data/lib/ruby_llm/providers/vertexai.rb ADDED
@@ -0,0 +1,55 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    # Google Vertex AI implementation
+    class VertexAI < Gemini
+      include VertexAI::Chat
+      include VertexAI::Streaming
+      include VertexAI::Embeddings
+      include VertexAI::Models
+
+      def initialize(config)
+        super
+        @authorizer = nil
+      end
+
+      def api_base
+        "https://#{@config.vertexai_location}-aiplatform.googleapis.com/v1beta1"
+      end
+
+      def headers
+        {
+          'Authorization' => "Bearer #{access_token}"
+        }
+      end
+
+      class << self
+        def configuration_requirements
+          %i[vertexai_project_id vertexai_location]
+        end
+      end
+
+      private
+
+      def access_token
+        return 'test-token' if defined?(VCR) && VCR.current_cassette
+
+        initialize_authorizer unless @authorizer
+        @authorizer.fetch_access_token!['access_token']
+      end
+
+      def initialize_authorizer
+        require 'googleauth'
+        @authorizer = ::Google::Auth.get_application_default(
+          scope: [
+            'https://www.googleapis.com/auth/cloud-platform',
+            'https://www.googleapis.com/auth/generative-language.retriever'
+          ]
+        )
+      rescue LoadError
+        raise Error, 'The googleauth gem is required for Vertex AI. Please add it to your Gemfile: gem "googleauth"'
+      end
+    end
+  end
+end
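Per configuration_requirements, callers must supply a project ID and location, and the googleauth gem must be in the bundle. A plausible setup sketch, assuming the gem's standard RubyLLM.configure block (values are hypothetical):

# Gemfile — the provider raises a clear error if this is missing:
# gem 'googleauth'

RubyLLM.configure do |config|
  config.vertexai_project_id = 'my-gcp-project' # hypothetical
  config.vertexai_location   = 'us-central1'    # hypothetical
end

Credentials are resolved through Application Default Credentials (Google::Auth.get_application_default), so the environment needs GOOGLE_APPLICATION_CREDENTIALS or equivalent workload identity.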
data/lib/ruby_llm/railtie.rb CHANGED
@@ -9,7 +9,6 @@ module RubyLLM
       end
     end
 
-    # Register generators
     generators do
      require 'generators/ruby_llm/install_generator'
    end
data/lib/ruby_llm/stream_accumulator.rb CHANGED
@@ -2,29 +2,29 @@
 
 module RubyLLM
   # Assembles streaming responses from LLMs into complete messages.
-  # Handles the complexities of accumulating content and tool calls
-  # from partial chunks while tracking token usage.
   class StreamAccumulator
     attr_reader :content, :model_id, :tool_calls
 
     def initialize
-      @content = +''
+      @content = nil
       @tool_calls = {}
       @input_tokens = 0
       @output_tokens = 0
       @cached_tokens = 0
       @cache_creation_tokens = 0
       @latest_tool_call_id = nil
+      @reasoning_id = nil
     end
 
     def add(chunk)
       RubyLLM.logger.debug chunk.inspect if RubyLLM.config.log_stream_debug
       @model_id ||= chunk.model_id
+      @reasoning_id ||= chunk.reasoning_id
 
       if chunk.tool_call?
         accumulate_tool_calls chunk.tool_calls
       else
-        @content << (chunk.content || '')
+        accumulate_content(chunk.content)
       end
 
       count_tokens chunk
@@ -32,27 +32,89 @@ module RubyLLM
     end
 
     def to_message(response)
+      content = final_content
+      associate_reasoning_with_images(content)
+
       Message.new(
         role: :assistant,
-        content: content.empty? ? nil : content,
+        content: content,
         model_id: model_id,
         tool_calls: tool_calls_from_stream,
-        input_tokens: @input_tokens.positive? ? @input_tokens : nil,
-        output_tokens: @output_tokens.positive? ? @output_tokens : nil,
-        cached_tokens: @cached_tokens.positive? ? @cached_tokens : nil,
-        cache_creation_tokens: @cache_creation_tokens.positive? ? @cache_creation_tokens : nil,
+        input_tokens: positive_or_nil(@input_tokens),
+        output_tokens: positive_or_nil(@output_tokens),
+        cached_tokens: positive_or_nil(@cached_tokens),
+        cache_creation_tokens: positive_or_nil(@cache_creation_tokens),
         raw: response
       )
     end
 
     private
 
+    def associate_reasoning_with_images(content)
+      return unless @reasoning_id && content.is_a?(Content) && content.attachments.any?
+
+      content.attachments.each do |attachment|
+        attachment.instance_variable_set(:@reasoning_id, @reasoning_id) if attachment.is_a?(ImageAttachment)
+      end
+    end
+
+    def positive_or_nil(value)
+      value.positive? ? value : nil
+    end
+
+    def accumulate_content(new_content)
+      return unless new_content
+
+      if @content.nil?
+        @content = new_content.is_a?(String) ? +new_content : new_content
+      else
+        case [@content.class, new_content.class]
+        when [String, String]
+          @content << new_content
+        when [String, Content]
+          # Convert accumulated string to Content and merge
+          @content = Content.new(@content)
+          merge_content(new_content)
+        when [Content, String]
+          # Append string to existing Content's text
+          @content.instance_variable_set(:@text, (@content.text || '') + new_content)
+        when [Content, Content]
+          merge_content(new_content)
+        end
+      end
+    end
+
+    def merge_content(new_content)
+      # Merge text
+      current_text = @content.text || ''
+      new_text = new_content.text || ''
+      @content.instance_variable_set(:@text, current_text + new_text)
+
+      # Merge attachments
+      new_content.attachments.each do |attachment|
+        @content.attach(attachment)
+      end
+    end
+
+    def final_content
+      case @content
+      when nil
+        nil
+      when String
+        @content.empty? ? nil : @content
+      when Content
+        @content.text.nil? && @content.attachments.empty? ? nil : @content
+      else
+        @content
+      end
+    end
+
     def tool_calls_from_stream
       tool_calls.transform_values do |tc|
         arguments = if tc.arguments.is_a?(String) && !tc.arguments.empty?
                       JSON.parse(tc.arguments)
                     elsif tc.arguments.is_a?(String)
-                      {} # Return empty hash for empty string arguments
+                      {}
                     else
                       tc.arguments
                     end
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module RubyLLM
4
- # Handles streaming responses from AI providers. Provides a unified way to process
5
- # chunked responses, accumulate content, and handle provider-specific streaming formats.
6
- # Each provider implements provider-specific parsing while sharing common stream handling
7
- # patterns.
4
+ # Handles streaming responses from AI providers.
8
5
  module Streaming
9
6
  module_function
10
7
 
@@ -12,17 +9,14 @@ module RubyLLM
12
9
  accumulator = StreamAccumulator.new
13
10
 
14
11
  response = connection.post stream_url, payload do |req|
15
- # Merge additional headers, with existing headers taking precedence
16
12
  req.headers = additional_headers.merge(req.headers) unless additional_headers.empty?
17
- if req.options.respond_to?(:on_data)
18
- # Handle Faraday 2.x streaming with on_data method
19
- req.options.on_data = handle_stream do |chunk|
13
+ if faraday_1?
14
+ req.options[:on_data] = handle_stream do |chunk|
20
15
  accumulator.add chunk
21
16
  block.call chunk
22
17
  end
23
18
  else
24
- # Handle Faraday 1.x streaming with :on_data key
25
- req.options[:on_data] = handle_stream do |chunk|
19
+ req.options.on_data = handle_stream do |chunk|
26
20
  accumulator.add chunk
27
21
  block.call chunk
28
22
  end
@@ -30,7 +24,7 @@ module RubyLLM
30
24
  end
31
25
 
32
26
  message = accumulator.to_message(response)
33
- RubyLLM.logger.debug "Stream completed: #{message.inspect}"
27
+ RubyLLM.logger.debug "Stream completed: #{message.content}"
34
28
  message
35
29
  end
36
30
 
@@ -42,6 +36,10 @@ module RubyLLM
42
36
 
43
37
  private
44
38
 
39
+ def faraday_1?
40
+ Faraday::VERSION.start_with?('1')
41
+ end
42
+
45
43
  def to_json_stream(&)
46
44
  buffer = +''
47
45
  parser = EventStreamParser::Parser.new
@@ -50,11 +48,9 @@ module RubyLLM
50
48
  end
51
49
 
52
50
  def create_stream_processor(parser, buffer, &)
53
- if Faraday::VERSION.start_with?('1')
54
- # Faraday 1.x: on_data receives (chunk, size)
51
+ if faraday_1?
55
52
  legacy_stream_processor(parser, &)
56
53
  else
57
- # Faraday 2.x: on_data receives (chunk, bytes, env)
58
54
  stream_processor(parser, buffer, &)
59
55
  end
60
56
  end
@@ -94,12 +90,10 @@ module RubyLLM
94
90
  status, _message = parse_streaming_error(error_data)
95
91
  parsed_data = JSON.parse(error_data)
96
92
 
97
- # Create a response-like object that works for both Faraday v1 and v2
98
- error_response = if env
99
- env.merge(body: parsed_data, status: status)
100
- else
101
- # For Faraday v1, create a simple object that responds to .status and .body
93
+ error_response = if faraday_1?
102
94
  Struct.new(:body, :status).new(parsed_data, status)
95
+ else
96
+ env.merge(body: parsed_data, status: status)
103
97
  end
104
98
 
105
99
  ErrorMiddleware.parse_error(provider: self, response: error_response)
@@ -137,12 +131,10 @@ module RubyLLM
137
131
  status, _message = parse_streaming_error(data)
138
132
  parsed_data = JSON.parse(data)
139
133
 
140
- # Create a response-like object that works for both Faraday v1 and v2
141
- error_response = if env
142
- env.merge(body: parsed_data, status: status)
143
- else
144
- # For Faraday v1, create a simple object that responds to .status and .body
134
+ error_response = if faraday_1?
145
135
  Struct.new(:body, :status).new(parsed_data, status)
136
+ else
137
+ env.merge(body: parsed_data, status: status)
146
138
  end
147
139
 
148
140
  ErrorMiddleware.parse_error(provider: self, response: error_response)
@@ -150,7 +142,6 @@ module RubyLLM
150
142
  RubyLLM.logger.debug "Failed to parse error event: #{e.message}"
151
143
  end
152
144
 
153
- # Default implementation - providers should override this method
154
145
  def parse_streaming_error(data)
155
146
  error_data = JSON.parse(data)
156
147
  [500, error_data['message'] || 'Unknown streaming error']
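The new faraday_1? helper replaces scattered respond_to? and env checks with one explicit version gate. It covers two real API differences noted in the removed comments: Faraday 1.x takes a :on_data hash key with a (chunk, size) callback, while Faraday 2.x exposes an options.on_data= writer with a (chunk, bytes, env) callback. A standalone sketch of the same check:

require 'faraday'

# Same check as the new helper: Faraday::VERSION is a plain version
# string such as "1.10.3" or "2.9.0".
def faraday_1?
  Faraday::VERSION.start_with?('1')
end

puts "Faraday #{Faraday::VERSION}, v1 streaming API: #{faraday_1?}"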
data/lib/ruby_llm/tool.rb CHANGED
@@ -1,8 +1,7 @@
 # frozen_string_literal: true
 
 module RubyLLM
-  # Parameter definition for Tool methods. Specifies type constraints,
-  # descriptions, and whether parameters are required.
+  # Parameter definition for Tool methods.
   class Parameter
     attr_reader :name, :type, :description, :required
 
@@ -14,23 +13,7 @@ module RubyLLM
     end
   end
 
-  # Base class for creating tools that AI models can use. Provides a simple
-  # interface for defining parameters and implementing tool behavior.
-  #
-  # Example:
-  #   require 'tzinfo'
-  #
-  #   class TimeInfo < RubyLLM::Tool
-  #     description 'Gets the current time in various timezones'
-  #     param :timezone, desc: "Timezone name (e.g., 'UTC', 'America/New_York')"
-  #
-  #     def execute(timezone:)
-  #       time = TZInfo::Timezone.get(timezone).now.strftime('%Y-%m-%d %H:%M:%S')
-  #       "Current time in #{timezone}: #{time}"
-  #     rescue StandardError => e
-  #       { error: e.message }
-  #     end
-  #   end
+  # Base class for creating tools that AI models can use
   class Tool
     # Stops conversation continuation after tool execution
     class Halt
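The usage example removed from the docstring above remains the clearest illustration of the Tool API; reproduced here for reference (requires the tzinfo gem and a loaded RubyLLM):

require 'tzinfo'

class TimeInfo < RubyLLM::Tool
  description 'Gets the current time in various timezones'
  param :timezone, desc: "Timezone name (e.g., 'UTC', 'America/New_York')"

  def execute(timezone:)
    time = TZInfo::Timezone.get(timezone).now.strftime('%Y-%m-%d %H:%M:%S')
    "Current time in #{timezone}: #{time}"
  rescue StandardError => e
    { error: e.message }
  end
end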
data/lib/ruby_llm/tool_call.rb CHANGED
@@ -2,15 +2,6 @@
 
 module RubyLLM
   # Represents a function call from an AI model to a Tool.
-  # Encapsulates the function name, arguments, and execution results
-  # in a clean Ruby interface.
-  #
-  # Example:
-  #   tool_call = ToolCall.new(
-  #     id: "call_123",
-  #     name: "calculator",
-  #     arguments: { expression: "2 + 2" }
-  #   )
   class ToolCall
     attr_reader :id, :name, :arguments
 
data/lib/ruby_llm/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module RubyLLM
-  VERSION = '0.0.6'
+  VERSION = '1.0.0'
 end
data/lib/ruby_llm_community.rb CHANGED
@@ -4,6 +4,8 @@ require 'base64'
 require 'event_stream_parser'
 require 'faraday'
 require 'faraday/retry'
+require 'faraday/multipart'
+
 require 'json'
 require 'logger'
 require 'securerandom'
@@ -23,7 +25,8 @@ loader.inflector.inflect(
   'gpustack' => 'GPUStack',
   'mistral' => 'Mistral',
   'pdf' => 'PDF',
-  'version' => 'VERSION'
+  'version' => 'VERSION',
+  'vertexai' => 'VertexAI'
 )
 loader.ignore("#{__dir__}/shims")
 loader.ignore("#{__dir__}/tasks")
@@ -37,8 +40,6 @@ module RubyLlmCommunity
 end
 
 # A delightful Ruby interface to modern AI language models.
-# Provides a unified way to interact with models from OpenAI, Anthropic and others
-# with a focus on developer happiness and convention over configuration.
 module RubyLLM
   class Error < StandardError; end
 
@@ -97,6 +98,7 @@ RubyLLM::Provider.register :ollama, RubyLLM::Providers::Ollama
 RubyLLM::Provider.register :openai, RubyLLM::Providers::OpenAI
 RubyLLM::Provider.register :openrouter, RubyLLM::Providers::OpenRouter
 RubyLLM::Provider.register :perplexity, RubyLLM::Providers::Perplexity
+RubyLLM::Provider.register :vertexai, RubyLLM::Providers::VertexAI
 
 if defined?(Rails::Railtie)
   require 'ruby_llm/railtie'
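With :vertexai registered, the new provider should be selectable like any other. A hedged usage sketch — RubyLLM.chat with a provider: override follows the gem's existing API; the model name and streaming block are illustrative:

# Assumes vertexai_project_id / vertexai_location are configured as shown earlier.
chat = RubyLLM.chat(model: 'gemini-2.0-flash', provider: :vertexai)
chat.ask('Say hello from Vertex AI') do |chunk|
  print chunk.content # streamed chunks; StreamAccumulator assembles the final Message
end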