ruby_llm 0.1.0.pre2 → 0.1.0.pre3

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 210659964d16b741d71f9abf6eed197ed2caea0a76a036542cdac736ea1c433b
-  data.tar.gz: 7836e9182e7a978492e8060222d490e7a7be2cbf2a266668fd69c6068e4d103e
+  metadata.gz: d1e5d243e58cd5fc884ef6a8ed5078df18e7659523296f175c55dff24c116079
+  data.tar.gz: 9a0cebfd7392a92e05b4f98974735ffab608e96a9af2caa2c5886af92584e0e7
 SHA512:
-  metadata.gz: 901b4c602deff87cfaa3a1fe2b03b01e00165c68e7b15a5ebe942855f7d8c136f4a6e1e27c78d7584a07fd036c0c014f8f9a28c3555a516de68c565f405448c5
-  data.tar.gz: 534807760c3dfdadc18df7c4cfc435831352961550782b897aa99c63b911f5e2eef50b5bd1ecf32e591fbc77eb1c5fe2d7d6820b9250b58f5ccff93b84e16070
+  metadata.gz: 3df37307db5c36064502f476c714c893717a38fb73136e57c9e395386e8354cce62b47ad12da7cd6fe1c666a8c6e55faf333cd2808cfc280ae775bb96c181d23
+  data.tar.gz: df0f1a5cff449264a3868ac463ada43a94eddc712ed1b3de23802b5188ae217e86b8b48314cb99b24468bbdf2cb9970886908156e3a72642d7967b9a4088db40
lib/ruby_llm/message.rb CHANGED
@@ -1,17 +1,18 @@
 # frozen_string_literal: true
 
 module RubyLLM
-  # Represents a message in an LLM conversation
   class Message
     VALID_ROLES = %i[system user assistant tool].freeze
 
-    attr_reader :role, :content, :tool_calls, :tool_results
+    attr_reader :role, :content, :tool_calls, :tool_results, :token_usage, :model_id
 
-    def initialize(role:, content: nil, tool_calls: nil, tool_results: nil)
+    def initialize(role:, content: nil, tool_calls: nil, tool_results: nil, token_usage: nil, model_id: nil)
       @role = role.to_sym
       @content = content
       @tool_calls = tool_calls
       @tool_results = tool_results
+      @token_usage = token_usage
+      @model_id = model_id
       validate!
     end
 
@@ -20,16 +21,21 @@ module RubyLLM
         role: role,
         content: content,
         tool_calls: tool_calls,
-        tool_results: tool_results
+        tool_results: tool_results,
+        token_usage: token_usage,
+        model_id: model_id
       }.compact
     end
 
     private
 
     def validate!
-      return if VALID_ROLES.include?(role)
-
-      raise ArgumentError, "Invalid role: #{role}. Must be one of: #{VALID_ROLES.join(', ')}"
+      unless VALID_ROLES.include?(role)
+        raise ArgumentError,
+              "Invalid role: #{role}. Must be one of: #{VALID_ROLES.join(', ')}"
+      end
+      raise ArgumentError, 'Content cannot be nil' if content.nil?
+      raise ArgumentError, 'Content cannot be empty' if content.empty?
     end
   end
 end
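
Taken together, the Message changes look like this in use. A minimal sketch based only on the diff above; the model id and token counts are illustrative values, not ones the gem produces:

    msg = RubyLLM::Message.new(
      role: :assistant,
      content: 'Hello!',
      token_usage: { input_tokens: 12, output_tokens: 5, total_tokens: 17 },  # illustrative
      model_id: 'claude-3-opus-20240229'                                      # illustrative
    )

    msg.token_usage[:total_tokens]  # => 17
    msg.to_h                        # now includes token_usage and model_id; nil fields are compacted away

    # validate! is stricter in pre3: nil or empty content now raises
    RubyLLM::Message.new(role: :user, content: '')
    # => ArgumentError: Content cannot be empty

Note that content: keeps its nil default in the signature even though validate! now rejects nil, so omitting content fails at construction time rather than later.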
lib/ruby_llm/providers/anthropic.rb CHANGED
@@ -186,9 +186,20 @@ module RubyLLM
           return create_chat_completion(payload.merge(messages: new_messages), tools)
         end
 
+        # Extract token usage from response
+        token_usage = if data['usage']
+                        {
+                          input_tokens: data['usage']['input_tokens'],
+                          output_tokens: data['usage']['output_tokens'],
+                          total_tokens: data['usage']['input_tokens'] + data['usage']['output_tokens']
+                        }
+                      end
+
         Message.new(
           role: :assistant,
-          content: text_content
+          content: text_content,
+          token_usage: token_usage,
+          model_id: data['model']
         )
       end
 
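For context: Anthropic's Messages API reports usage as separate input_tokens and output_tokens with no total, which is why the hunk above sums the two itself. A worked example with a hypothetical response body:

    data = {
      'model' => 'claude-3-opus-20240229',                        # hypothetical response
      'usage' => { 'input_tokens' => 10, 'output_tokens' => 25 }
    }
    # The extraction above then yields:
    # token_usage => { input_tokens: 10, output_tokens: 25, total_tokens: 35 }
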
@@ -218,6 +229,23 @@ module RubyLLM
         end.compact
       end
 
+      def handle_api_error(error)
+        response_body = error.response[:body]
+        if response_body.is_a?(String)
+          begin
+            error_data = JSON.parse(response_body)
+            message = error_data.dig('error', 'message')
+            raise RubyLLM::Error, "API error: #{message}" if message
+          rescue JSON::ParserError
+            raise RubyLLM::Error, "API error: #{error.response[:status]}"
+          end
+        elsif response_body.dig('error', 'type') == 'invalid_request_error'
+          raise RubyLLM::Error, "API error: #{response_body['error']['message']}"
+        else
+          raise RubyLLM::Error, "API error: #{error.response[:status]}"
+        end
+      end
+
       def api_base
         'https://api.anthropic.com'
       end
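
The diff does not show where handle_api_error is invoked. A plausible sketch, assuming the provider issues requests through Faraday (a dependency of the gem) and rescues its client errors, whose #response hash carries :status and :body in exactly the shape the method reads:

    begin
      response = connection.post('/v1/messages', payload)  # hypothetical call site
    rescue Faraday::ClientError => e
      handle_api_error(e)  # re-raised as RubyLLM::Error, preferring the API's own message
    end

One quirk worth noting: if the body is a String that parses as valid JSON but contains no error.message, the String branch raises nothing and the method returns normally.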
lib/ruby_llm/providers/openai.rb CHANGED
@@ -116,9 +116,20 @@ module RubyLLM
           return create_chat_completion(payload.merge(messages: new_messages), tools)
         end
 
+        # Extract token usage from response
+        token_usage = if data['usage']
+                        {
+                          input_tokens: data['usage']['prompt_tokens'],
+                          output_tokens: data['usage']['completion_tokens'],
+                          total_tokens: data['usage']['total_tokens']
+                        }
+                      end
+
         Message.new(
           role: :assistant,
-          content: message_data['content']
+          content: message_data['content'],
+          token_usage: token_usage,
+          model_id: data['model']
         )
       end
 
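OpenAI names the same counters differently (prompt_tokens, completion_tokens, and a precomputed total_tokens). The hunk above maps them onto the keys the Anthropic provider emits, so Message#token_usage has one shape regardless of provider:

    data = {
      'model' => 'gpt-4',   # hypothetical response
      'usage' => { 'prompt_tokens' => 10, 'completion_tokens' => 25, 'total_tokens' => 35 }
    }
    # token_usage => { input_tokens: 10, output_tokens: 25, total_tokens: 35 }
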
@@ -153,6 +164,23 @@ module RubyLLM
         end
       end
 
+      def handle_api_error(error)
+        response_body = error.response[:body]
+        if response_body.is_a?(String)
+          begin
+            error_data = JSON.parse(response_body)
+            message = error_data.dig('error', 'message')
+            raise RubyLLM::Error, "API error: #{message}" if message
+          rescue JSON::ParserError
+            raise RubyLLM::Error, "API error: #{error.response[:status]}"
+          end
+        elsif response_body['error']
+          raise RubyLLM::Error, "API error: #{response_body['error']['message']}"
+        else
+          raise RubyLLM::Error, "API error: #{error.response[:status]}"
+        end
+      end
+
       def api_base
         'https://api.openai.com'
       end
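
This handle_api_error is identical to the Anthropic one except for the structured-body branch: the OpenAI version raises for any response_body['error'], while the Anthropic version only special-cases error.type == 'invalid_request_error' and otherwise falls back to the status-code message.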
lib/ruby_llm/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module RubyLLM
-  VERSION = '0.1.0.pre2'
+  VERSION = '0.1.0.pre3'
 end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: ruby_llm
 version: !ruby/object:Gem::Version
-  version: 0.1.0.pre2
+  version: 0.1.0.pre3
 platform: ruby
 authors:
 - Carmine Paolino