ruby_llm 0.1.0.pre3 → 0.1.0.pre4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: d1e5d243e58cd5fc884ef6a8ed5078df18e7659523296f175c55dff24c116079
-  data.tar.gz: 9a0cebfd7392a92e05b4f98974735ffab608e96a9af2caa2c5886af92584e0e7
+  metadata.gz: 33195e0b579713ebc168acdf2d1b6e67e1ee4a0d5db68cacaf1648cd4d1e744e
+  data.tar.gz: ee20d7adaa4f8ef22853894885dc6dfab2e19aab1526fd5022cd7413653e4736
 SHA512:
-  metadata.gz: 3df37307db5c36064502f476c714c893717a38fb73136e57c9e395386e8354cce62b47ad12da7cd6fe1c666a8c6e55faf333cd2808cfc280ae775bb96c181d23
-  data.tar.gz: df0f1a5cff449264a3868ac463ada43a94eddc712ed1b3de23802b5188ae217e86b8b48314cb99b24468bbdf2cb9970886908156e3a72642d7967b9a4088db40
+  metadata.gz: bef854e5f925aa246b40ef1f12e4a9cde1c085c97a996ecd4c266810b97f9ac324f6cd19dede1526215e92249cbb9aee7017a512fd309baf834aee06570be6c0
+  data.tar.gz: 3dc20b4b9093cca54bb1d5e033e2270a8e33146c3b3c8dcc85997ca6b01eb9e2b07e95d833938df5ceca561d4a2da75a45473dbc878ce744c93d7816fde82009
@@ -14,7 +14,7 @@ module RubyLLM
       end
 
       provider = provider_for(model)
-      provider.chat(
+      response_messages = provider.chat(
         formatted_messages,
         model: model,
         temperature: temperature,
@@ -22,6 +22,9 @@ module RubyLLM
         tools: tools,
         &block
       )
+
+      # Always return an array of messages, even for single responses
+      response_messages.is_a?(Array) ? response_messages : [response_messages]
     end
 
     def list_models(provider = nil)
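
The hunk above changes the chat path to always return an array of messages, even when only one reply comes back. A minimal sketch of that contract, using a Struct stand-in for the gem's Message class (the names below are illustrative, not the gem's API):

# Stand-in for RubyLLM::Message, for illustration only.
FakeMessage = Struct.new(:role, :content)

# The normalization the hunk adds: wrap single responses so callers
# can iterate unconditionally.
def normalize(response_messages)
  response_messages.is_a?(Array) ? response_messages : [response_messages]
end

single = FakeMessage.new(:assistant, 'Hello!')
multi  = [FakeMessage.new(:assistant, ''), FakeMessage.new(:tool, '42')]

normalize(single).map(&:content) # => ["Hello!"]
normalize(multi).length          # => 2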
@@ -30,12 +30,10 @@ module RubyLLM
     private
 
     def validate!
-      unless VALID_ROLES.include?(role)
-        raise ArgumentError,
-              "Invalid role: #{role}. Must be one of: #{VALID_ROLES.join(', ')}"
-      end
-      raise ArgumentError, 'Content cannot be nil' if content.nil?
-      raise ArgumentError, 'Content cannot be empty' if content.empty?
+      return if VALID_ROLES.include?(role)
+
+      raise ArgumentError,
+            "Invalid role: #{role}. Must be one of: #{VALID_ROLES.join(', ')}"
     end
   end
 end
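
Note the behavior change here: besides switching to a guard clause, pre4 drops the nil/empty content checks, so a message with empty content now passes validation. A self-contained sketch of the new check (the role list is assumed from usage elsewhere in this diff):

VALID_ROLES = %i[system user assistant tool].freeze

def validate_role!(role)
  # Guard clause: bail out early on the happy path.
  return if VALID_ROLES.include?(role)

  raise ArgumentError,
        "Invalid role: #{role}. Must be one of: #{VALID_ROLES.join(', ')}"
end

validate_role!(:assistant) # passes silently
validate_role!(:wizard)    # raises ArgumentError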
@@ -102,7 +102,7 @@ module RubyLLM
       name = model_id.tr('-', ' ')
 
       # Capitalize each word
-      name = name.split(' ').map { |word| word.capitalize }.join(' ')
+      name = name.split(' ').map(&:capitalize).join(' ')
 
       # Apply specific formatting rules
       name.gsub(/(\d{4}) (\d{2}) (\d{2})/, '\1\2\3') # Convert dates to YYYYMMDD
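
The refactor is purely cosmetic: &:capitalize is Symbol#to_proc shorthand for the explicit block. A quick illustration with a made-up model id:

name = 'claude 3 5 sonnet 2024 10 22'
name.split(' ').map { |word| word.capitalize }.join(' ') # => "Claude 3 5 Sonnet 2024 10 22"
name.split(' ').map(&:capitalize).join(' ')              # => same result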
@@ -62,13 +62,24 @@ module RubyLLM
     private
 
     def tool_to_anthropic(tool)
+      # Get required fields and clean properties
+      required_fields = []
+      cleaned_properties = {}
+
+      tool.parameters.each do |name, props|
+        required_fields << name.to_s if props[:required]
+        cleaned_props = props.dup
+        cleaned_props.delete(:required)
+        cleaned_properties[name] = cleaned_props
+      end
+
       {
         name: tool.name,
         description: tool.description,
         input_schema: {
           type: 'object',
-          properties: tool.parameters,
-          required: tool.parameters.select { |_, v| v[:required] }.keys
+          properties: cleaned_properties,
+          required: required_fields
         }
       }
     end
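
The effect of this rewrite: :required is removed from each property hash and hoisted into the top-level required array, matching Anthropic's input_schema shape (previously the per-property :required keys leaked into properties). A runnable sketch with a Struct standing in for the gem's Tool class (the weather tool is invented for illustration):

Tool = Struct.new(:name, :description, :parameters)

def tool_to_anthropic(tool)
  required_fields = []
  cleaned_properties = {}

  tool.parameters.each do |name, props|
    required_fields << name.to_s if props[:required]
    cleaned = props.dup
    cleaned.delete(:required)
    cleaned_properties[name] = cleaned
  end

  { name: tool.name,
    description: tool.description,
    input_schema: { type: 'object',
                    properties: cleaned_properties,
                    required: required_fields } }
end

weather = Tool.new('get_weather', 'Look up current weather',
                   { location: { type: 'string', required: true },
                     unit:     { type: 'string' } })

tool_to_anthropic(weather)[:input_schema]
# => type: 'object',
#    properties: { location: { type: 'string' }, unit: { type: 'string' } },
#    required: ['location']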
@@ -94,7 +105,7 @@ module RubyLLM
       end
     end
 
-    def create_chat_completion(payload, tools = nil)
+    def create_chat_completion(payload, tools = nil, &block)
       response = @connection.post('/v1/messages') do |req|
         req.headers['x-api-key'] = RubyLLM.configuration.anthropic_api_key
         req.headers['anthropic-version'] = '2023-06-01'
@@ -105,7 +116,10 @@ module RubyLLM
       puts 'Response from Anthropic:' if ENV['RUBY_LLM_DEBUG']
       puts JSON.pretty_generate(response.body) if ENV['RUBY_LLM_DEBUG']
 
-      handle_response(response, tools, payload)
+      # Check for API errors first
+      check_for_api_error(response)
+
+      handle_response(response, tools, payload, &block)
     rescue Faraday::Error => e
       handle_error(e)
     end
@@ -117,6 +131,7 @@ module RubyLLM
         req.body = payload
       end
 
+      messages = []
       response.body.each_line do |line|
         next if line.strip.empty?
         next if line == 'data: [DONE]'
@@ -124,32 +139,48 @@ module RubyLLM
         begin
           data = JSON.parse(line.sub(/^data: /, ''))
 
-          if data['type'] == 'content_block_delta'
-            content = data['delta']['text']
-            yield Message.new(role: :assistant, content: content) if content
-          elsif data['type'] == 'tool_call'
-            handle_tool_calls(data['tool_calls'], tools) do |result|
-              yield Message.new(role: :assistant, content: result)
-            end
+          message = case data['type']
+                    when 'content_block_delta'
+                      Message.new(role: :assistant, content: data['delta']['text']) if data['delta']['text']
+                    when 'tool_call'
+                      handle_tool_calls(data['tool_calls'], tools) do |result|
+                        Message.new(role: :assistant, content: result)
+                      end
+                    end
+
+          if message
+            messages << message
+            yield message if block_given?
           end
         rescue JSON::ParserError
           next
         end
       end
+
+      messages
     rescue Faraday::Error => e
       handle_error(e)
     end
 
-    def handle_response(response, tools, payload)
+    def handle_response(response, tools, payload, &block)
       data = response.body
-      return Message.new(role: :assistant, content: '') if data['type'] == 'error'
 
-      # Extract text content and tool use from response
       content_parts = data['content'] || []
       text_content = content_parts.find { |c| c['type'] == 'text' }&.fetch('text', '')
       tool_use = content_parts.find { |c| c['type'] == 'tool_use' }
 
       if tool_use && tools
+        # Tool call handling code...
+        tool_message = Message.new(
+          role: :assistant,
+          content: text_content,
+          tool_calls: [{
+            name: tool_use['name'],
+            arguments: JSON.generate(tool_use['input'] || {})
+          }]
+        )
+        yield tool_message if block_given?
+
         tool = tools.find { |t| t.name == tool_use['name'] }
         result = if tool
                    begin
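
The streaming change above switches from yield-only to accumulate-and-yield: every parsed chunk is collected into messages (returned at the end) and still yielded to the caller's block. A self-contained sketch of that pattern over canned SSE lines (the sample payloads are invented):

require 'json'

Chunk = Struct.new(:role, :content)

def collect_stream(lines)
  messages = []
  lines.each do |line|
    next if line.strip.empty? || line == 'data: [DONE]'

    begin
      data = JSON.parse(line.sub(/^data: /, ''))
    rescue JSON::ParserError
      next # skip malformed lines, as the gem does
    end

    text = data.dig('delta', 'text')
    next unless data['type'] == 'content_block_delta' && text

    message = Chunk.new(:assistant, text)
    messages << message           # accumulate for the final return value
    yield message if block_given? # and still stream to the caller
  end
  messages
end

lines = [
  'data: {"type":"content_block_delta","delta":{"text":"Hel"}}',
  'data: {"type":"content_block_delta","delta":{"text":"lo"}}',
  'data: [DONE]'
]

collected = collect_stream(lines) { |m| print m.content } # prints "Hello"
collected.map(&:content).join                             # => "Hello"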
@@ -167,7 +198,13 @@ module RubyLLM
                    end
                  end
 
-        # Create a new message with the tool result
+        result_message = Message.new(
+          role: :tool,
+          content: result[:content],
+          tool_results: result
+        )
+        yield result_message if block_given?
+
         new_messages = payload[:messages] + [
           { role: 'assistant', content: data['content'] },
           {
@@ -183,24 +220,29 @@ module RubyLLM
           }
         ]
 
-        return create_chat_completion(payload.merge(messages: new_messages), tools)
-      end
-
-      # Extract token usage from response
-      token_usage = if data['usage']
-                      {
-                        input_tokens: data['usage']['input_tokens'],
-                        output_tokens: data['usage']['output_tokens'],
-                        total_tokens: data['usage']['input_tokens'] + data['usage']['output_tokens']
-                      }
-                    end
+        final_response = create_chat_completion(
+          payload.merge(messages: new_messages),
+          tools,
+          &block
+        )
 
-      Message.new(
-        role: :assistant,
-        content: text_content,
-        token_usage: token_usage,
-        model_id: data['model']
-      )
+        [tool_message, result_message] + final_response
+      else
+        token_usage = if data['usage']
+                        {
+                          input_tokens: data['usage']['input_tokens'],
+                          output_tokens: data['usage']['output_tokens'],
+                          total_tokens: data['usage']['input_tokens'] + data['usage']['output_tokens']
+                        }
+                      end
+
+        [Message.new(
+          role: :assistant,
+          content: text_content,
+          token_usage: token_usage,
+          model_id: data['model']
+        )]
+      end
     end
 
     def handle_tool_calls(tool_calls, tools)
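
End to end, the Anthropic tool-use branch now returns the whole exchange rather than just the final reply: the assistant's tool_use message, the :tool result message, then whatever the recursive completion returns (itself an array). Sketched with plain hashes; the values are illustrative:

tool_message   = { role: :assistant, tool_calls: [{ name: 'get_weather',
                                                    arguments: '{"city":"Berlin"}' }] }
result_message = { role: :tool, content: '{"temp":21}' }
final_response = [{ role: :assistant, content: "It's 21°C in Berlin." }]

# Mirrors `[tool_message, result_message] + final_response` in the hunk above.
([tool_message, result_message] + final_response).map { |m| m[:role] }
# => [:assistant, :tool, :assistant]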
@@ -239,13 +281,26 @@ module RubyLLM
        rescue JSON::ParserError
          raise RubyLLM::Error, "API error: #{error.response[:status]}"
        end
-      elsif response_body.dig('error', 'type') == 'invalid_request_error'
+      elsif response_body['error']
        raise RubyLLM::Error, "API error: #{response_body['error']['message']}"
      else
        raise RubyLLM::Error, "API error: #{error.response[:status]}"
      end
    end
 
+    def handle_error(error)
+      case error
+      when Faraday::TimeoutError
+        raise RubyLLM::Error, 'Request timed out'
+      when Faraday::ConnectionFailed
+        raise RubyLLM::Error, 'Connection failed'
+      when Faraday::ClientError
+        handle_api_error(error)
+      else
+        raise error
+      end
+    end
+
     def api_base
       'https://api.anthropic.com'
     end
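
Two error-handling changes land here: handle_api_error now surfaces the message for any body with an 'error' key (pre3 only did so for invalid_request_error), and a new handle_error routes Faraday exceptions. A small sketch of the broadened check, with invented response bodies:

def error_message(response_body, status)
  if response_body['error']
    "API error: #{response_body['error']['message']}"
  else
    "API error: #{status}"
  end
end

# pre3 would have fallen through to the bare status for this error type:
error_message({ 'error' => { 'type' => 'overloaded_error',
                             'message' => 'Overloaded' } }, 529)
# => "API error: Overloaded"

error_message({}, 500) # => "API error: 500"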
@@ -16,6 +16,13 @@ module RubyLLM
 
     protected
 
+    def check_for_api_error(response)
+      return unless response.body.is_a?(Hash) && response.body['type'] == 'error'
+
+      error_msg = response.body.dig('error', 'message') || 'Unknown API error'
+      raise RubyLLM::Error, "API error: #{error_msg}"
+    end
+
     def build_connection
       Faraday.new(url: api_base) do |f|
         f.options.timeout = RubyLLM.configuration.request_timeout
@@ -22,7 +22,7 @@ module RubyLLM
       if stream && block_given?
         stream_chat_completion(payload, tools, &block)
       else
-        create_chat_completion(payload, tools)
+        create_chat_completion(payload, tools, &block)
       end
     rescue Faraday::TimeoutError
       raise RubyLLM::Error, 'Request timed out'
@@ -80,7 +80,7 @@ module RubyLLM
       }
     end
 
-    def create_chat_completion(payload, tools = nil)
+    def create_chat_completion(payload, tools = nil, &block)
       response = connection.post('/v1/chat/completions') do |req|
         req.headers['Authorization'] = "Bearer #{RubyLLM.configuration.openai_api_key}"
         req.headers['Content-Type'] = 'application/json'
@@ -90,47 +90,74 @@ module RubyLLM
       puts 'Response from OpenAI:' if ENV['RUBY_LLM_DEBUG']
       puts JSON.pretty_generate(response.body) if ENV['RUBY_LLM_DEBUG']
 
+      # Check for API errors
+      check_for_api_error(response)
+
+      # Check for HTTP errors
       if response.status >= 400
         error_msg = response.body['error']&.fetch('message', nil) || "HTTP #{response.status}"
         raise RubyLLM::Error, "API error: #{error_msg}"
       end
 
-      handle_response(response, tools, payload)
+      handle_response(response, tools, payload, &block)
     end
 
-    def handle_response(response, tools, payload)
+    def handle_response(response, tools, payload, &block)
       data = response.body
       message_data = data.dig('choices', 0, 'message')
-      return Message.new(role: :assistant, content: '') unless message_data
+      return [] unless message_data
 
       if message_data['function_call'] && tools
+        # Create function call message
+        function_message = Message.new(
+          role: :assistant,
+          content: message_data['content'],
+          tool_calls: [message_data['function_call']]
+        )
+        yield function_message if block_given?
+
+        # Execute function and create result message
         result = handle_function_call(message_data['function_call'], tools)
-        puts "Function result: #{result}" if ENV['RUBY_LLM_DEBUG']
+        result_message = Message.new(
+          role: :tool,
+          content: result,
+          tool_results: {
+            name: message_data['function_call']['name'],
+            content: result
+          }
+        )
+        yield result_message if block_given?
 
-        # Create a new chat completion with the function results
+        # Get final response with function results
         new_messages = payload[:messages] + [
          { role: 'assistant', content: message_data['content'], function_call: message_data['function_call'] },
          { role: 'function', name: message_data['function_call']['name'], content: result }
        ]
 
-        return create_chat_completion(payload.merge(messages: new_messages), tools)
-      end
+        final_response = create_chat_completion(
+          payload.merge(messages: new_messages),
+          tools,
+          &block
+        )
 
-      # Extract token usage from response
-      token_usage = if data['usage']
-                      {
-                        input_tokens: data['usage']['prompt_tokens'],
-                        output_tokens: data['usage']['completion_tokens'],
-                        total_tokens: data['usage']['total_tokens']
-                      }
-                    end
-
-      Message.new(
-        role: :assistant,
-        content: message_data['content'],
-        token_usage: token_usage,
-        model_id: data['model']
-      )
+        # Return all messages in sequence
+        [function_message, result_message] + final_response
+      else
+        token_usage = if data['usage']
+                        {
+                          input_tokens: data['usage']['prompt_tokens'],
+                          output_tokens: data['usage']['completion_tokens'],
+                          total_tokens: data['usage']['total_tokens']
+                        }
+                      end
+
+        [Message.new(
+          role: :assistant,
+          content: message_data['content'],
+          token_usage: token_usage,
+          model_id: data['model']
+        )]
+      end
     end
 
     def handle_function_call(function_call, tools)
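
From the caller's perspective, the OpenAI function-call path now yields and returns the full message sequence instead of a single final Message. A hedged sketch of consuming that shape (the Struct and contents are stand-ins, not the gem's API):

Msg = Struct.new(:role, :content, keyword_init: true)

# Shape of what handle_response now returns on a function call:
sequence = [
  Msg.new(role: :assistant, content: nil),                   # function_call turn
  Msg.new(role: :tool,      content: '{"temp":21}'),         # executed result
  Msg.new(role: :assistant, content: "It's 21°C right now.") # follow-up completion
]

sequence.each { |m| puts "#{m.role}: #{m.content}" }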
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module RubyLLM
-  VERSION = '0.1.0.pre3'
+  VERSION = '0.1.0.pre4'
 end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: ruby_llm
 version: !ruby/object:Gem::Version
-  version: 0.1.0.pre3
+  version: 0.1.0.pre4
 platform: ruby
 authors:
 - Carmine Paolino