ruby_llm 0.1.0.pre7 → 0.1.0.pre8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 8eb02dfff845a6208bc5adfdf8521b631d9d55f426f97055c896e25c225d3951
-  data.tar.gz: 82406d843c66ad41a7a4cdc1b7ea756b6deb120e5e30a77ef983b8cc9e7ca935
+  metadata.gz: 38dc88557f16216a881c8cd0be702de77e8d0d332ab927f8c4fbfb804063a193
+  data.tar.gz: b5cf788ab7d948081e6fdf7649197dcecaa3d596b5afa992fe1b2288cb7bb935
 SHA512:
-  metadata.gz: fe88fbc67f5fd50951fb16b407279a507094b793c674e5cadab99ed2de58aff44aecdc7fcc1f36bb9a22ebb8faf9cc990c8682f7728c4665d08b20bfdcbbfb46
-  data.tar.gz: 62ca5d2399b8ffee6cc247ba36eec99d52488e7cbead4b20e7b7f17284ec65c4113ff3b2dad1b6c1169aab9a8a1bc1365c606a0f172e0d9d205f545c75eef1aa
+  metadata.gz: 7058ba5bad5baf2fabab9c01a8dd076d2b5b408d6538c87b6d8940464fd575f7da1c3c95294794d8bbc5f5875abc8321f0dba8d3085ad449d3720a82ca4c9d30
+  data.tar.gz: 1fe4831f21f43aa924cb86b4871d6318d3d98d910988b21339a327a2ddc818094e251586c37d48981d9ef8bdbd31ca91fd6a6cd55fbc7e47b2ad7d774abdc8a6
data/README.md CHANGED
@@ -30,13 +30,11 @@ require 'ruby_llm'
 RubyLLM.configure do |config|
   config.openai_api_key = ENV['OPENAI_API_KEY']
   config.anthropic_api_key = ENV['ANTHROPIC_API_KEY']
-  config.default_model = 'gpt-4o-mini' # OpenAI's efficient model
 end
 
 # Start a conversation
 chat = RubyLLM.chat
-response = chat.ask "What's the best way to learn Ruby?"
-puts response.content
+chat.ask "What's the best way to learn Ruby?"
 ```
 
 ## Available Models
@@ -45,12 +43,7 @@ RubyLLM gives you access to the latest models from multiple providers. Check wha
 
 ```ruby
 # List all available models
-RubyLLM.models.all.each do |model|
-  puts "#{model.display_name} (#{model.provider})"
-  puts "  Context window: #{model.context_window}"
-  puts "  Price: $#{model.input_price_per_million}/M tokens (input)"
-  puts "         $#{model.output_price_per_million}/M tokens (output)"
-end
+RubyLLM.models.all
 
 # Get models by type
 chat_models = RubyLLM.models.chat_models
@@ -64,7 +57,7 @@ image_models = RubyLLM.models.image_models
 Conversations are simple and natural, with automatic token counting built right in:
 
 ```ruby
-chat = RubyLLM.chat model: 'claude-3-opus-20240229'
+chat = RubyLLM.chat model: 'claude-3-5-sonnet-20241022'
 
 # Single messages with token tracking
 response = chat.ask "What's your favorite Ruby feature?"
@@ -84,23 +77,54 @@ last_message = chat.messages.last
 puts "Conversation used #{last_message.input_tokens} input tokens and #{last_message.output_tokens} output tokens"
 ```
 
+## Using Tools
+
+Give your AI superpowers by letting it use Ruby tools. This opens up a world of possibilities - from performing calculations to fetching data:
+
+```ruby
+# Define a calculator tool
+calculator = RubyLLM::Tool.define "calculate" do
+  description "Performs basic arithmetic calculations"
+  param :expression, type: "string"
+  handler do |args|
+    eval(args[:expression]).to_s
+  rescue => e
+    { error: "Invalid expression: #{e.message}" }
+  end
+end
+
+# Use the tool in a conversation
+chat = RubyLLM.chat.with_tool calculator
+
+# The model will automatically use the tool when needed
+chat.ask "What's 2+2?"
+# => "The result of 2 + 2 is 4."
+
+chat.ask "and what's 2+100000000000?"
+# => "The result of 2 + 100,000,000,000 is 100,000,000,002."
+
+# Add multiple tools
+chat.with_tools calculator, other_tool, another_tool
+```
+
+Tools let you seamlessly integrate Ruby code with AI capabilities. Define tools for anything - database queries, API calls, custom business logic - and let the AI use them naturally in conversation.
+
 ## Choosing the Right Model
 
 RubyLLM gives you easy access to model capabilities:
 
 ```ruby
-model = RubyLLM.models.find 'claude-3-opus-20240229'
+model = RubyLLM.models.find 'claude-3-5-sonnet-20241022'
 
-model.context_window      # => 200000
-model.max_tokens          # => 4096
-model.supports_vision?    # => true
-model.supports_json_mode? # => true
+model.context_window     # => 200000
+model.max_tokens         # => 8192
+model.supports_vision    # => true
+model.supports_json_mode # => true
 ```
 
 ## Coming Soon
 
 - Rails integration for seamless database and Active Record support
-- Function calling / tool use capabilities
 - Automatic retries and error handling
 - Much more!
 
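The README's new Using Tools section stops at a calculator. As a hedged illustration of its closing claim about database queries and API calls, here is a minimal sketch of a lookup-style tool built with the same `Tool.define` DSL the README introduces; the `find_user` tool and its in-memory `USERS` hash are hypothetical stand-ins, not part of the gem:

```ruby
# Sketch only: assumes the Tool.define DSL shown in the README above.
# USERS stands in for a real database or API client.
USERS = { '1' => 'Ada', '2' => 'Grace' }.freeze

user_lookup = RubyLLM::Tool.define 'find_user' do
  description 'Returns the name of the user with the given id'
  param :id, type: 'string'
  handler do |args|
    USERS.fetch(args[:id]) { { error: "No user with id #{args[:id]}" } }
  end
end

chat = RubyLLM.chat.with_tool user_lookup
chat.ask 'Who is user 2?' # the model can now call find_user with id: "2"
```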
data/lib/ruby_llm/chat.rb CHANGED
@@ -8,10 +8,12 @@ module RubyLLM
 
     def initialize(model: nil)
       model_id = model || RubyLLM.config.default_model
-      @model = Models.find(model_id)
-      @provider = Models.provider_for(model_id)
+      @model = Models.find model_id
+      @provider = Models.provider_for model_id
       @messages = []
-      @tools = []
+      @tools = {}
+
+      ensure_valid_tools
     end
 
     def ask(message, &block)
@@ -19,22 +21,20 @@ module RubyLLM
       complete(&block)
     end
 
-    def tool(tool)
+    alias say ask
+
+    def with_tool(tool)
       raise Error, "Model #{@model.id} doesn't support function calling" unless @model.supports_functions
 
-      @tools << tool
+      @tools[tool.name] = tool
       self
     end
 
-    alias with_tool tool
-
-    def tools(*tools)
-      tools.each { |tool| self.tool(tool) }
+    def with_tools(*tools)
+      tools.each { |tool| with_tool tool }
       self
     end
 
-    alias with_tools tools
-
     def each(&block)
       messages.each(&block)
     end
@@ -45,33 +45,27 @@ module RubyLLM
       response = @provider.complete(messages, tools: @tools, model: @model.id, &block)
 
       if response.tool_call?
-        handle_tool_calls(response)
+        handle_tool_calls response
       else
-        add_message(response)
+        add_message response
+        response
       end
-
-      response
     end
 
     def handle_tool_calls(response)
-      add_message(response)
+      add_message response
 
-      response.tool_calls.each do |tool_call|
-        result = execute_tool(tool_call)
-        add_tool_result(tool_call[:id], result) if result
+      response.tool_calls.each_value do |tool_call|
+        result = execute_tool tool_call
+        add_tool_result tool_call.id, result if result
       end
 
-      # Get final response after tool calls
-      final_response = complete
-      add_message(final_response)
-      final_response
+      complete
    end
 
     def execute_tool(tool_call)
-      tool = @tools.find { |t| t.name == tool_call[:name] }
-      return unless tool
-
-      args = JSON.parse(tool_call[:arguments], symbolize_names: true)
+      tool = tools[tool_call.name]
+      args = tool_call.arguments
       tool.call(args)
     end
 
@@ -83,13 +77,18 @@ module RubyLLM
 
     def add_tool_result(tool_use_id, result)
       add_message(
-        role: :user,
-        tool_results: {
-          tool_use_id: tool_use_id,
-          content: result.is_a?(Hash) && result[:error] ? result[:error] : result.to_s,
-          is_error: result.is_a?(Hash) && result[:error]
-        }
+        role: :tool,
+        content: result.is_a?(Hash) && result[:error] ? result[:error] : result.to_s,
+        tool_call_id: tool_use_id
       )
     end
+
+    def ensure_valid_tools
+      tools.each_key do |name|
+        unless name.is_a?(String) && tools[name].is_a?(RubyLLM::Tool)
+          raise Error, 'Tools should be of the format {<name>: <RubyLLM::Tool>}'
+        end
+      end
+    end
   end
 end
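The reworked `Chat#handle_tool_calls` above records the assistant's tool-call message, appends one `role: :tool` message per executed call, then recurses into `complete` until the model replies with plain text. A hedged sketch of the message history after a single round trip (roles follow the code above; the contents are invented, and `calculator` is the tool from the README example):

```ruby
# Illustrative only.
chat = RubyLLM.chat.with_tool calculator
chat.ask "What's 2+2?"

chat.messages.map(&:role)
# => [:user, :assistant, :tool, :assistant]
#    question, tool-call request, tool result, final answer
```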
data/lib/ruby_llm/message.rb CHANGED
@@ -4,16 +4,16 @@ module RubyLLM
   class Message
     ROLES = %i[system user assistant tool].freeze
 
-    attr_reader :role, :content, :tool_calls, :tool_results, :input_tokens, :output_tokens, :model_id
+    attr_reader :role, :content, :tool_calls, :tool_call_id, :input_tokens, :output_tokens, :model_id
 
     def initialize(options = {})
       @role = options[:role].to_sym
       @content = options[:content]
       @tool_calls = options[:tool_calls]
-      @tool_results = options[:tool_results]
       @input_tokens = options[:input_tokens]
       @output_tokens = options[:output_tokens]
       @model_id = options[:model_id]
+      @tool_call_id = options[:tool_call_id]
 
       ensure_valid_role
     end
@@ -23,7 +23,11 @@ module RubyLLM
     end
 
     def tool_result?
-      !tool_results.nil? && !tool_results.empty?
+      !tool_call_id.nil? && !tool_call_id.empty?
+    end
+
+    def tool_result
+      content if tool_result?
     end
 
     def to_h
@@ -31,7 +35,7 @@ module RubyLLM
         role: role,
         content: content,
         tool_calls: tool_calls,
-        tool_results: tool_results,
+        tool_call_id: tool_call_id,
         input_tokens: input_tokens,
         output_tokens: output_tokens,
         model_id: model_id
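`Message` now carries a flat `tool_call_id` in place of the nested `tool_results` hash, matching the `role: :tool` messages that `Chat#add_tool_result` builds. A quick sketch of the new shape (the id string is made up):

```ruby
msg = RubyLLM::Message.new(
  role: :tool,              # one of ROLES: %i[system user assistant tool]
  content: '4',             # the tool's output
  tool_call_id: 'call_123'  # links the result back to the originating call
)

msg.tool_result?        # => true
msg.tool_result         # => "4"
msg.to_h[:tool_call_id] # => "call_123"
```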
data/lib/ruby_llm/provider.rb CHANGED
@@ -8,49 +8,50 @@ module RubyLLM
 
    module InstanceMethods
      def complete(messages, tools: [], model: nil, &block)
-        # TODO: refactor
-        payload = build_payload(messages, tools, model: model, stream: block_given?)
-
-        content = String.new
-        model_id = nil
-        input_tokens = 0
-        output_tokens = 0
-        response = connection.post(completion_url, payload) do |req|
-          req.headers.merge! headers
-          if block_given?
-            req.options.on_data = handle_stream do |chunk|
-              model_id ||= chunk.model_id
-              content << (chunk.content || '')
-              input_tokens += chunk.input_tokens if chunk.input_tokens
-              output_tokens += chunk.output_tokens if chunk.output_tokens
-              block.call(chunk)
-            end
-          end
-        end
+        payload = build_payload messages, tools, model: model, stream: block_given?
 
        if block_given?
-          Message.new(
-            role: :assistant,
-            content: content,
-            model_id: model_id,
-            input_tokens: input_tokens.positive? ? input_tokens : nil,
-            output_tokens: output_tokens.positive? ? output_tokens : nil
-          )
+          stream_response payload, &block
        else
-          parse_completion_response(response)
+          sync_response payload
        end
      end
 
      def list_models
        response = connection.get(models_url) do |req|
-          req.headers.merge!(headers)
+          req.headers.merge! headers
        end
 
-        parse_list_models_response(response)
+        parse_list_models_response response
      end
 
      private
 
+      def sync_response(payload)
+        response = post payload
+        parse_completion_response response
+      end
+
+      def stream_response(payload, &block)
+        accumulator = StreamAccumulator.new
+
+        post payload do |req|
+          req.options.on_data = handle_stream do |chunk|
+            accumulator.add chunk
+            block.call chunk
+          end
+        end
+
+        accumulator.to_message
+      end
+
+      def post(payload)
+        connection.post completion_url, payload do |req|
+          req.headers.merge! headers
+          yield req if block_given?
+        end
+      end
+
      def connection
        @connection ||= Faraday.new(api_base) do |f|
          f.options.timeout = RubyLLM.config.request_timeout
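The streaming bookkeeping that used to live inline in `complete` now sits in `StreamAccumulator`, so `complete` only dispatches on `block_given?`. Caller-visible behavior should be unchanged; a sketch of both paths (the prompts are illustrative):

```ruby
chat = RubyLLM.chat

# Sync path: complete -> sync_response -> parse_completion_response
response = chat.ask 'Explain Ruby blocks in one sentence.'

# Streaming path: each chunk hits the block while the accumulator
# rebuilds the full Message that ask ultimately returns.
chat.ask 'Explain procs in one sentence.' do |chunk|
  print chunk.content
end
```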
data/lib/ruby_llm/providers/openai.rb CHANGED
@@ -33,7 +33,7 @@ module RubyLLM
          stream: stream
        }.tap do |payload|
          if tools.any?
-            payload[:tools] = tools.map { |t| tool_for(t) }
+            payload[:tools] = tools.map { |_, tool| tool_for(tool) }
            payload[:tool_choice] = 'auto'
          end
        end
@@ -44,21 +44,22 @@ module RubyLLM
          {
            role: msg.role.to_s,
            content: msg.content,
-            tool_calls: format_tool_calls(msg.tool_calls)
+            tool_calls: format_tool_calls(msg.tool_calls),
+            tool_call_id: msg.tool_call_id
          }.compact
        end
      end
 
      def format_tool_calls(tool_calls)
-        return nil unless tool_calls
+        return nil unless tool_calls&.any?
 
-        tool_calls.map do |tc|
+        tool_calls.map do |_, tc|
          {
-            id: tc[:id],
+            id: tc.id,
            type: 'function',
            function: {
-              name: tc[:name],
-              arguments: tc[:arguments]
+              name: tc.name,
+              arguments: JSON.generate(tc.arguments)
            }
          }
        end
@@ -103,15 +104,18 @@ module RubyLLM
        )
      end
 
-      def parse_tool_calls(tool_calls)
+      def parse_tool_calls(tool_calls, parse_arguments: true)
        return nil unless tool_calls&.any?
 
-        tool_calls.map do |tc|
-          {
-            id: tc['id'],
-            name: tc.dig('function', 'name'),
-            arguments: tc.dig('function', 'arguments')
-          }
+        tool_calls.to_h do |tc|
+          [
+            tc['id'],
+            ToolCall.new(
+              id: tc['id'],
+              name: tc.dig('function', 'name'),
+              arguments: parse_arguments ? JSON.parse(tc.dig('function', 'arguments')) : tc.dig('function', 'arguments')
+            )
+          ]
        end
      end
 
@@ -139,7 +143,8 @@ module RubyLLM
          Chunk.new(
            role: :assistant,
            model_id: data['model'],
-            content: data.dig('choices', 0, 'delta', 'content')
+            content: data.dig('choices', 0, 'delta', 'content'),
+            tool_calls: parse_tool_calls(data.dig('choices', 0, 'delta', 'tool_calls'), parse_arguments: false)
          )
        )
      end
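`parse_tool_calls` now returns `ToolCall` objects keyed by id rather than an array of hashes; `parse_arguments: false` is used on streaming deltas, where arguments arrive as JSON fragments that the accumulator concatenates before parsing. A hedged sketch of the transformation on a typical OpenAI-style payload (the id is invented, and the private method is called directly only for illustration):

```ruby
raw = [
  { 'id' => 'call_123',
    'function' => { 'name' => 'calculate', 'arguments' => '{"expression":"2+2"}' } }
]

parsed = parse_tool_calls(raw)
parsed.keys                  # => ["call_123"]
parsed['call_123'].name      # => "calculate"
parsed['call_123'].arguments # => {"expression"=>"2+2"} (JSON already parsed)
```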
data/lib/ruby_llm/stream_accumulator.rb ADDED
@@ -0,0 +1,82 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  class StreamAccumulator
+    attr_reader :content, :model_id, :tool_calls
+
+    def initialize
+      @content = String.new
+      @tool_calls = {}
+      @input_tokens = 0
+      @output_tokens = 0
+      @latest_tool_call_id = nil
+    end
+
+    def add(chunk)
+      RubyLLM.logger.debug chunk.inspect
+      @model_id ||= chunk.model_id
+
+      if chunk.tool_call?
+        accumulate_tool_calls chunk.tool_calls
+      else
+        @content << (chunk.content || '')
+      end
+
+      count_tokens chunk
+      RubyLLM.logger.debug inspect
+    end
+
+    def to_message
+      Message.new(
+        role: :assistant,
+        content: content,
+        model_id: model_id,
+        tool_calls: tool_calls_from_stream,
+        input_tokens: @input_tokens.positive? ? @input_tokens : nil,
+        output_tokens: @output_tokens.positive? ? @output_tokens : nil
+      )
+    end
+
+    private
+
+    def tool_calls_from_stream
+      tool_calls.transform_values do |tc|
+        ToolCall.new(
+          id: tc.id,
+          name: tc.name,
+          arguments: JSON.parse(tc.arguments)
+        )
+      end
+    end
+
+    def accumulate_tool_calls(new_tool_calls)
+      new_tool_calls.each_value do |tool_call|
+        if tool_call.id
+          @tool_calls[tool_call.id] = ToolCall.new(
+            id: tool_call.id,
+            name: tool_call.name,
+            arguments: String.new
+          )
+          @latest_tool_call_id = tool_call.id
+        else
+          existing = @tool_calls[@latest_tool_call_id]
+          existing.arguments << tool_call.arguments if existing
+        end
+      end
+    end
+
+    def find_tool_call(tool_call_id)
+      if tool_call_id.nil?
+        @tool_calls[@latest_tool_call_id]
+      else
+        @latest_tool_call_id = tool_call_id
+        @tool_calls[tool_call_id]
+      end
+    end
+
+    def count_tokens(chunk)
+      @input_tokens += chunk.input_tokens if chunk.input_tokens
+      @output_tokens += chunk.output_tokens if chunk.output_tokens
+    end
+  end
+end
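A sketch of how `stream_response` drives the accumulator: content chunks are concatenated, a tool-call delta with an id opens a new `ToolCall` whose argument string grows with each id-less delta, and `to_message` JSON-parses the buffered arguments. The chunk construction below is illustrative and assumes `Chunk` accepts the same keyword arguments as `Message`, as the provider code above suggests:

```ruby
acc = RubyLLM::StreamAccumulator.new

# Shaped like the chunks handle_stream yields:
acc.add RubyLLM::Chunk.new(role: :assistant, model_id: 'gpt-4o-mini', content: 'Hel')
acc.add RubyLLM::Chunk.new(role: :assistant, content: 'lo')

acc.to_message.content # => "Hello"
```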
data/lib/ruby_llm/tool.rb CHANGED
@@ -60,9 +60,13 @@ module RubyLLM
      raise Error, "No handler defined for tool #{name}" unless @handler
 
      begin
+        RubyLLM.logger.debug "Calling tool #{name}(#{args.inspect})"
        args = symbolize_keys(args)
-        @handler.call(args)
+        result = @handler.call(args)
+        RubyLLM.logger.debug "Tool #{name}(#{args.inspect}) returned: #{result.inspect}"
+        result
      rescue StandardError => e
+        RubyLLM.logger.error "Tool #{name}(#{args.inspect}) failed with error #{e.message}"
        { error: e.message }
      end
    end
data/lib/ruby_llm/tool_call.rb ADDED
@@ -0,0 +1,13 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  class ToolCall
+    attr_reader :id, :name, :arguments
+
+    def initialize(id:, name:, arguments: {})
+      @id = id
+      @name = name
+      @arguments = arguments
+    end
+  end
+end
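The new `ToolCall` value object replaces the ad-hoc hashes pre8 removes from `chat.rb` and `openai.rb`. Note that `arguments` holds a parsed hash on complete responses but a raw JSON string while a stream is still accumulating. A small sketch (values invented):

```ruby
tc = RubyLLM::ToolCall.new(
  id: 'call_123',
  name: 'calculate',
  arguments: { 'expression' => '2+2' }
)

tc.name      # => "calculate"
tc.arguments # => {"expression"=>"2+2"}
```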
data/lib/ruby_llm/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module RubyLLM
-  VERSION = '0.1.0.pre7'
+  VERSION = '0.1.0.pre8'
 end
data/lib/ruby_llm.rb CHANGED
@@ -28,7 +28,11 @@ module RubyLLM
    end
 
    def logger
-      @logger ||= Logger.new($stdout, level: ENV['RUBY_LLM_DEBUG'] == 'true' ? Logger::DEBUG : Logger::INFO)
+      @logger ||= Logger.new(
+        $stdout,
+        progname: 'RubyLLM',
+        level: ENV['RUBY_LLM_DEBUG'] == 'true' ? Logger::DEBUG : Logger::INFO
+      )
    end
  end
 end
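The logger now tags every line with a `RubyLLM` progname, which pairs with the new debug logging in `Tool#call` and `StreamAccumulator#add`. Turning it on (the output line is only roughly what Ruby's default Logger formatter prints):

```ruby
ENV['RUBY_LLM_DEBUG'] = 'true' # must be set before the logger is first built

RubyLLM.logger.debug 'hello'
# D, [2025-02-03T12:00:00.000000 #123] DEBUG -- RubyLLM: hello
```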
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby_llm
 version: !ruby/object:Gem::Version
-  version: 0.1.0.pre7
+  version: 0.1.0.pre8
 platform: ruby
 authors:
 - Carmine Paolino
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2025-02-01 00:00:00.000000000 Z
+date: 2025-02-03 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: event_stream_parser
@@ -357,7 +357,9 @@ files:
 - lib/ruby_llm/providers/anthropic.rb
 - lib/ruby_llm/providers/openai.rb
 - lib/ruby_llm/railtie.rb
+- lib/ruby_llm/stream_accumulator.rb
 - lib/ruby_llm/tool.rb
+- lib/ruby_llm/tool_call.rb
 - lib/ruby_llm/version.rb
 - lib/tasks/models.rake
 - ruby_llm.gemspec