rasti-ai 2.0.1 → 2.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/rasti/ai/assistant.rb +2 -14
- data/lib/rasti/ai/client.rb +16 -3
- data/lib/rasti/ai/gemini/assistant.rb +0 -13
- data/lib/rasti/ai/gemini/client.rb +14 -0
- data/lib/rasti/ai/open_ai/assistant.rb +0 -13
- data/lib/rasti/ai/open_ai/client.rb +14 -0
- data/lib/rasti/ai/usage.rb +2 -1
- data/lib/rasti/ai/version.rb +1 -1
- data/spec/gemini/assistant_spec.rb +0 -66
- data/spec/gemini/client_spec.rb +50 -0
- data/spec/open_ai/assistant_spec.rb +0 -66
- data/spec/open_ai/client_spec.rb +53 -0
- data/spec/resources/gemini/basic_response.json +10 -3
- metadata +2 -2
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA256:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: 47f94217b69770cc1a2238f28995a4b8cda405554134d0ba7a9dc8a6418080c7
|
|
4
|
+
data.tar.gz: fc5522ee0c3ba8bc03f71c50f2c0853be9c2f7c992db7c6a65856ea555b79107
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: 5e6ecfcb985c408abc48c12e89ec3b106056d0e8b03958a02fe3a0ee18418a0260cc6a8d736f25e82d15f9c454aa3983b322bafec69122e24149b7d4d90c2f08
|
|
7
|
+
data.tar.gz: 966afa955522d3aa598a047abef9457e0dac1e9636c80a3c1df2c0509c8343436ed4d59b585a9587cda9bce96848d6e06adf57550259525cc37f0a92e4470baf
|
data/lib/rasti/ai/assistant.rb
CHANGED
|
@@ -4,7 +4,7 @@ module Rasti
|
|
|
4
4
|
|
|
5
5
|
attr_reader :state
|
|
6
6
|
|
|
7
|
-
def initialize(client:nil, json_schema:nil, state:nil, model:nil, tools:[], mcp_servers:{}, logger:nil, usage_tracker:nil)
|
|
7
|
+
def initialize(client:nil, json_schema:nil, state:nil, model:nil, tools:[], mcp_servers:{}, logger:nil)
|
|
8
8
|
@client = client || build_default_client
|
|
9
9
|
@json_schema = json_schema
|
|
10
10
|
@state = state || AssistantState.new
|
|
@@ -12,7 +12,6 @@ module Rasti
|
|
|
12
12
|
@tools = {}
|
|
13
13
|
@serialized_tools = []
|
|
14
14
|
@logger = logger || Rasti::AI.logger
|
|
15
|
-
@usage_tracker = usage_tracker || Rasti::AI.usage_tracker
|
|
16
15
|
|
|
17
16
|
register_tools(tools)
|
|
18
17
|
register_mcp_servers(mcp_servers)
|
|
@@ -23,7 +22,6 @@ module Rasti
|
|
|
23
22
|
|
|
24
23
|
loop do
|
|
25
24
|
response = request_completion
|
|
26
|
-
track_usage response
|
|
27
25
|
|
|
28
26
|
tool_calls = parse_tool_calls(response)
|
|
29
27
|
|
|
@@ -47,18 +45,12 @@ module Rasti
|
|
|
47
45
|
|
|
48
46
|
private
|
|
49
47
|
|
|
50
|
-
attr_reader :client, :json_schema, :model, :tools, :serialized_tools, :logger, :usage_tracker
|
|
48
|
+
attr_reader :client, :json_schema, :model, :tools, :serialized_tools, :logger
|
|
51
49
|
|
|
52
50
|
def messages
|
|
53
51
|
state.messages
|
|
54
52
|
end
|
|
55
53
|
|
|
56
|
-
def track_usage(response)
|
|
57
|
-
return unless usage_tracker
|
|
58
|
-
usage = parse_usage response
|
|
59
|
-
usage_tracker.call usage if usage
|
|
60
|
-
end
|
|
61
|
-
|
|
62
54
|
# --- Shared behavior ---
|
|
63
55
|
|
|
64
56
|
def register_tools(tools)
|
|
@@ -140,10 +132,6 @@ module Rasti
|
|
|
140
132
|
raise NotImplementedError
|
|
141
133
|
end
|
|
142
134
|
|
|
143
|
-
def parse_usage(response)
|
|
144
|
-
raise NotImplementedError
|
|
145
|
-
end
|
|
146
|
-
|
|
147
135
|
def extract_tool_call_info(tool_call)
|
|
148
136
|
raise NotImplementedError
|
|
149
137
|
end
|
data/lib/rasti/ai/client.rb
CHANGED
|
@@ -4,17 +4,28 @@ module Rasti
|
|
|
4
4
|
|
|
5
5
|
RETRYABLE_STATUS_CODES = [502, 503, 504].freeze
|
|
6
6
|
|
|
7
|
-
def initialize(api_key:nil, logger:nil, http_connect_timeout:nil, http_read_timeout:nil, http_max_retries:nil)
|
|
7
|
+
def initialize(api_key:nil, logger:nil, http_connect_timeout:nil, http_read_timeout:nil, http_max_retries:nil, usage_tracker:nil)
|
|
8
8
|
@api_key = api_key || default_api_key
|
|
9
9
|
@logger = logger || Rasti::AI.logger
|
|
10
10
|
@http_connect_timeout = http_connect_timeout || Rasti::AI.http_connect_timeout
|
|
11
11
|
@http_read_timeout = http_read_timeout || Rasti::AI.http_read_timeout
|
|
12
12
|
@http_max_retries = http_max_retries || Rasti::AI.http_max_retries
|
|
13
|
+
@usage_tracker = usage_tracker || Rasti::AI.usage_tracker
|
|
13
14
|
end
|
|
14
15
|
|
|
15
16
|
private
|
|
16
17
|
|
|
17
|
-
attr_reader :api_key, :logger, :http_connect_timeout, :http_read_timeout, :http_max_retries
|
|
18
|
+
attr_reader :api_key, :logger, :http_connect_timeout, :http_read_timeout, :http_max_retries, :usage_tracker
|
|
19
|
+
|
|
20
|
+
def track_usage(response)
|
|
21
|
+
return unless usage_tracker
|
|
22
|
+
usage = parse_usage response
|
|
23
|
+
usage_tracker.call usage if usage
|
|
24
|
+
end
|
|
25
|
+
|
|
26
|
+
def parse_usage(response)
|
|
27
|
+
raise NotImplementedError
|
|
28
|
+
end
|
|
18
29
|
|
|
19
30
|
def default_api_key
|
|
20
31
|
raise NotImplementedError
|
|
@@ -63,7 +74,9 @@ module Rasti
|
|
|
63
74
|
raise Errors::RequestFail.new(url, body, response)
|
|
64
75
|
end
|
|
65
76
|
|
|
66
|
-
JSON.parse response.body
|
|
77
|
+
parsed_response = JSON.parse response.body
|
|
78
|
+
track_usage parsed_response
|
|
79
|
+
parsed_response
|
|
67
80
|
|
|
68
81
|
rescue SocketError, Net::OpenTimeout, Net::ReadTimeout, Errors::RequestFail => e
|
|
69
82
|
if retry_count < max_retries
|
|
@@ -60,19 +60,6 @@ module Rasti
|
|
|
60
60
|
!response.dig('candidates', 0, 'finishReason').nil?
|
|
61
61
|
end
|
|
62
62
|
|
|
63
|
-
def parse_usage(response)
|
|
64
|
-
usage = response['usageMetadata']
|
|
65
|
-
return unless usage
|
|
66
|
-
Usage.new(
|
|
67
|
-
provider: :gemini,
|
|
68
|
-
model: response['modelVersion'],
|
|
69
|
-
input_tokens: usage['promptTokenCount'],
|
|
70
|
-
output_tokens: usage['candidatesTokenCount'],
|
|
71
|
-
cached_tokens: usage['cachedContentTokenCount'] || 0,
|
|
72
|
-
reasoning_tokens: usage['thoughtsTokenCount'] || 0
|
|
73
|
-
)
|
|
74
|
-
end
|
|
75
|
-
|
|
76
63
|
def extract_tool_call_info(tool_call)
|
|
77
64
|
fc = tool_call['functionCall']
|
|
78
65
|
[fc['name'], fc['args'] || {}]
|
|
@@ -17,6 +17,20 @@ module Rasti
|
|
|
17
17
|
|
|
18
18
|
private
|
|
19
19
|
|
|
20
|
+
def parse_usage(response)
|
|
21
|
+
usage = response['usageMetadata']
|
|
22
|
+
return unless usage
|
|
23
|
+
Usage.new(
|
|
24
|
+
provider: 'gemini',
|
|
25
|
+
model: response['modelVersion'],
|
|
26
|
+
input_tokens: usage['promptTokenCount'],
|
|
27
|
+
output_tokens: usage['candidatesTokenCount'],
|
|
28
|
+
cached_tokens: usage['cachedContentTokenCount'] || 0,
|
|
29
|
+
reasoning_tokens: usage['thoughtsTokenCount'] || 0,
|
|
30
|
+
raw: usage
|
|
31
|
+
)
|
|
32
|
+
end
|
|
33
|
+
|
|
20
34
|
def default_api_key
|
|
21
35
|
Rasti::AI.gemini_api_key
|
|
22
36
|
end
|
|
@@ -51,19 +51,6 @@ module Rasti
|
|
|
51
51
|
!response.dig('choices', 0, 'finish_reason').nil?
|
|
52
52
|
end
|
|
53
53
|
|
|
54
|
-
def parse_usage(response)
|
|
55
|
-
usage = response['usage']
|
|
56
|
-
return unless usage
|
|
57
|
-
Usage.new(
|
|
58
|
-
provider: :open_ai,
|
|
59
|
-
model: response['model'],
|
|
60
|
-
input_tokens: usage['prompt_tokens'],
|
|
61
|
-
output_tokens: usage['completion_tokens'],
|
|
62
|
-
cached_tokens: usage.dig('prompt_tokens_details', 'cached_tokens') || 0,
|
|
63
|
-
reasoning_tokens: usage.dig('completion_tokens_details', 'reasoning_tokens') || 0
|
|
64
|
-
)
|
|
65
|
-
end
|
|
66
|
-
|
|
67
54
|
def extract_tool_call_info(tool_call)
|
|
68
55
|
name = tool_call['function']['name']
|
|
69
56
|
args = JSON.parse(tool_call['function']['arguments'])
|
|
@@ -18,6 +18,20 @@ module Rasti
|
|
|
18
18
|
|
|
19
19
|
private
|
|
20
20
|
|
|
21
|
+
def parse_usage(response)
|
|
22
|
+
usage = response['usage']
|
|
23
|
+
return unless usage
|
|
24
|
+
Usage.new(
|
|
25
|
+
provider: 'open_ai',
|
|
26
|
+
model: response['model'],
|
|
27
|
+
input_tokens: usage['prompt_tokens'],
|
|
28
|
+
output_tokens: usage['completion_tokens'],
|
|
29
|
+
cached_tokens: usage.dig('prompt_tokens_details', 'cached_tokens') || 0,
|
|
30
|
+
reasoning_tokens: usage.dig('completion_tokens_details', 'reasoning_tokens') || 0,
|
|
31
|
+
raw: usage
|
|
32
|
+
)
|
|
33
|
+
end
|
|
34
|
+
|
|
21
35
|
def default_api_key
|
|
22
36
|
Rasti::AI.openai_api_key
|
|
23
37
|
end
|
data/lib/rasti/ai/usage.rb
CHANGED
|
@@ -2,12 +2,13 @@ module Rasti
|
|
|
2
2
|
module AI
|
|
3
3
|
class Usage < Rasti::Model
|
|
4
4
|
|
|
5
|
-
attribute :provider, Rasti::Types::Symbol
|
|
5
|
+
attribute :provider, Rasti::Types::String
|
|
6
6
|
attribute :model, Rasti::Types::String
|
|
7
7
|
attribute :input_tokens, Rasti::Types::Integer
|
|
8
8
|
attribute :output_tokens, Rasti::Types::Integer
|
|
9
9
|
attribute :cached_tokens, Rasti::Types::Integer
|
|
10
10
|
attribute :reasoning_tokens, Rasti::Types::Integer
|
|
11
|
+
attribute :raw
|
|
11
12
|
|
|
12
13
|
end
|
|
13
14
|
end
|
data/lib/rasti/ai/version.rb
CHANGED
|
@@ -132,73 +132,7 @@ describe Rasti::AI::Gemini::Assistant do
|
|
|
132
132
|
|
|
133
133
|
end
|
|
134
134
|
|
|
135
|
-
describe 'Usage tracker' do
|
|
136
135
|
|
|
137
|
-
it 'Track usage' do
|
|
138
|
-
stub_gemini_generate_content question: question, answer: answer
|
|
139
|
-
|
|
140
|
-
tracked = []
|
|
141
|
-
tracker = ->(usage) { tracked << usage }
|
|
142
|
-
|
|
143
|
-
assistant = Rasti::AI::Gemini::Assistant.new usage_tracker: tracker
|
|
144
|
-
|
|
145
|
-
assistant.call question
|
|
146
|
-
|
|
147
|
-
assert_equal 1, tracked.count
|
|
148
|
-
|
|
149
|
-
usage = tracked[0]
|
|
150
|
-
assert_instance_of Rasti::AI::Usage, usage
|
|
151
|
-
assert_equal :gemini, usage.provider
|
|
152
|
-
assert_equal 'gemini-test', usage.model
|
|
153
|
-
assert_equal 10, usage.input_tokens
|
|
154
|
-
assert_equal 50, usage.output_tokens
|
|
155
|
-
assert_equal 0, usage.cached_tokens
|
|
156
|
-
assert_equal 0, usage.reasoning_tokens
|
|
157
|
-
end
|
|
158
|
-
|
|
159
|
-
it 'Track usage with tool calls' do
|
|
160
|
-
client = Minitest::Mock.new
|
|
161
|
-
|
|
162
|
-
tool_response = read_json_resource(
|
|
163
|
-
'gemini/tool_response.json',
|
|
164
|
-
name: 'goals_by_player',
|
|
165
|
-
arguments: {player: 'Lionel Messi', team: 'Barcelona'}
|
|
166
|
-
)
|
|
167
|
-
|
|
168
|
-
basic_resp = read_json_resource('gemini/basic_response.json', content: answer)
|
|
169
|
-
|
|
170
|
-
client.expect :generate_content, tool_response do |params| true end
|
|
171
|
-
client.expect :generate_content, basic_resp do |params| true end
|
|
172
|
-
|
|
173
|
-
tool = GoalsByPlayer.new
|
|
174
|
-
|
|
175
|
-
tracked = []
|
|
176
|
-
tracker = ->(usage) { tracked << usage }
|
|
177
|
-
|
|
178
|
-
assistant = Rasti::AI::Gemini::Assistant.new client: client, tools: [tool], usage_tracker: tracker
|
|
179
|
-
|
|
180
|
-
assistant.call question
|
|
181
|
-
|
|
182
|
-
assert_equal 2, tracked.count
|
|
183
|
-
assert_equal 20, tracked[0].input_tokens
|
|
184
|
-
assert_equal 10, tracked[0].output_tokens
|
|
185
|
-
assert_equal 10, tracked[1].input_tokens
|
|
186
|
-
assert_equal 50, tracked[1].output_tokens
|
|
187
|
-
|
|
188
|
-
client.verify
|
|
189
|
-
end
|
|
190
|
-
|
|
191
|
-
it 'Without tracker' do
|
|
192
|
-
stub_gemini_generate_content question: question, answer: answer
|
|
193
|
-
|
|
194
|
-
assistant = Rasti::AI::Gemini::Assistant.new
|
|
195
|
-
|
|
196
|
-
response = assistant.call question
|
|
197
|
-
|
|
198
|
-
assert_equal answer, response
|
|
199
|
-
end
|
|
200
|
-
|
|
201
|
-
end
|
|
202
136
|
|
|
203
137
|
describe 'Tools' do
|
|
204
138
|
|
data/spec/gemini/client_spec.rb
CHANGED
|
@@ -83,6 +83,56 @@ describe Rasti::AI::Gemini::Client do
|
|
|
83
83
|
refute_empty log_output.string
|
|
84
84
|
end
|
|
85
85
|
|
|
86
|
+
describe 'Usage tracker' do
|
|
87
|
+
|
|
88
|
+
it 'Track usage' do
|
|
89
|
+
stub_gemini_generate_content
|
|
90
|
+
|
|
91
|
+
tracked = []
|
|
92
|
+
tracker = ->(usage) { tracked << usage }
|
|
93
|
+
|
|
94
|
+
client = Rasti::AI::Gemini::Client.new usage_tracker: tracker
|
|
95
|
+
|
|
96
|
+
client.generate_content contents: [user_content(question)]
|
|
97
|
+
|
|
98
|
+
assert_equal 1, tracked.count
|
|
99
|
+
|
|
100
|
+
expected_raw = {
|
|
101
|
+
'promptTokenCount' => 4,
|
|
102
|
+
'candidatesTokenCount' => 18,
|
|
103
|
+
'totalTokenCount' => 275,
|
|
104
|
+
'promptTokensDetails' => [
|
|
105
|
+
{
|
|
106
|
+
'modality' => 'TEXT',
|
|
107
|
+
'tokenCount' => 4
|
|
108
|
+
}
|
|
109
|
+
],
|
|
110
|
+
'thoughtsTokenCount' => 253
|
|
111
|
+
}
|
|
112
|
+
|
|
113
|
+
usage = tracked[0]
|
|
114
|
+
assert_instance_of Rasti::AI::Usage, usage
|
|
115
|
+
assert_equal 'gemini', usage.provider
|
|
116
|
+
assert_equal 'gemini-test', usage.model
|
|
117
|
+
assert_equal 4, usage.input_tokens
|
|
118
|
+
assert_equal 18, usage.output_tokens
|
|
119
|
+
assert_equal 0, usage.cached_tokens
|
|
120
|
+
assert_equal 253, usage.reasoning_tokens
|
|
121
|
+
assert_equal expected_raw, usage.raw
|
|
122
|
+
end
|
|
123
|
+
|
|
124
|
+
it 'Without tracker' do
|
|
125
|
+
stub_gemini_generate_content
|
|
126
|
+
|
|
127
|
+
client = Rasti::AI::Gemini::Client.new
|
|
128
|
+
|
|
129
|
+
response = client.generate_content contents: [user_content(question)]
|
|
130
|
+
|
|
131
|
+
assert_response_content response, answer
|
|
132
|
+
end
|
|
133
|
+
|
|
134
|
+
end
|
|
135
|
+
|
|
86
136
|
end
|
|
87
137
|
|
|
88
138
|
it 'Request error' do
|
|
@@ -126,73 +126,7 @@ describe Rasti::AI::OpenAI::Assistant do
|
|
|
126
126
|
|
|
127
127
|
end
|
|
128
128
|
|
|
129
|
-
describe 'Usage tracker' do
|
|
130
129
|
|
|
131
|
-
it 'Track usage' do
|
|
132
|
-
stub_open_ai_chat_completions question: question, answer: answer
|
|
133
|
-
|
|
134
|
-
tracked = []
|
|
135
|
-
tracker = ->(usage) { tracked << usage }
|
|
136
|
-
|
|
137
|
-
assistant = Rasti::AI::OpenAI::Assistant.new usage_tracker: tracker
|
|
138
|
-
|
|
139
|
-
assistant.call question
|
|
140
|
-
|
|
141
|
-
assert_equal 1, tracked.count
|
|
142
|
-
|
|
143
|
-
usage = tracked[0]
|
|
144
|
-
assert_instance_of Rasti::AI::Usage, usage
|
|
145
|
-
assert_equal :open_ai, usage.provider
|
|
146
|
-
assert_equal 'gpt-4o-mini-2024-07-18', usage.model
|
|
147
|
-
assert_equal 27, usage.input_tokens
|
|
148
|
-
assert_equal 229, usage.output_tokens
|
|
149
|
-
assert_equal 0, usage.cached_tokens
|
|
150
|
-
assert_equal 0, usage.reasoning_tokens
|
|
151
|
-
end
|
|
152
|
-
|
|
153
|
-
it 'Track usage with tool calls' do
|
|
154
|
-
client = Minitest::Mock.new
|
|
155
|
-
|
|
156
|
-
tool_response = read_json_resource(
|
|
157
|
-
'open_ai/tool_response.json',
|
|
158
|
-
name: 'goals_by_player',
|
|
159
|
-
arguments: {player: 'Lionel Messi', team: 'Barcelona'}
|
|
160
|
-
)
|
|
161
|
-
|
|
162
|
-
basic_resp = read_json_resource('open_ai/basic_response.json', content: answer)
|
|
163
|
-
|
|
164
|
-
client.expect :chat_completions, tool_response do |params| true end
|
|
165
|
-
client.expect :chat_completions, basic_resp do |params| true end
|
|
166
|
-
|
|
167
|
-
tool = GoalsByPlayer.new
|
|
168
|
-
|
|
169
|
-
tracked = []
|
|
170
|
-
tracker = ->(usage) { tracked << usage }
|
|
171
|
-
|
|
172
|
-
assistant = Rasti::AI::OpenAI::Assistant.new client: client, tools: [tool], usage_tracker: tracker
|
|
173
|
-
|
|
174
|
-
assistant.call question
|
|
175
|
-
|
|
176
|
-
assert_equal 2, tracked.count
|
|
177
|
-
assert_equal 83, tracked[0].input_tokens
|
|
178
|
-
assert_equal 23, tracked[0].output_tokens
|
|
179
|
-
assert_equal 27, tracked[1].input_tokens
|
|
180
|
-
assert_equal 229, tracked[1].output_tokens
|
|
181
|
-
|
|
182
|
-
client.verify
|
|
183
|
-
end
|
|
184
|
-
|
|
185
|
-
it 'Without tracker' do
|
|
186
|
-
stub_open_ai_chat_completions question: question, answer: answer
|
|
187
|
-
|
|
188
|
-
assistant = Rasti::AI::OpenAI::Assistant.new
|
|
189
|
-
|
|
190
|
-
response = assistant.call question
|
|
191
|
-
|
|
192
|
-
assert_equal answer, response
|
|
193
|
-
end
|
|
194
|
-
|
|
195
|
-
end
|
|
196
130
|
|
|
197
131
|
describe 'Tools' do
|
|
198
132
|
|
data/spec/open_ai/client_spec.rb
CHANGED
|
@@ -83,6 +83,59 @@ describe Rasti::AI::OpenAI::Client do
|
|
|
83
83
|
refute_empty log_output.string
|
|
84
84
|
end
|
|
85
85
|
|
|
86
|
+
describe 'Usage tracker' do
|
|
87
|
+
|
|
88
|
+
it 'Track usage' do
|
|
89
|
+
stub_open_ai_chat_completions
|
|
90
|
+
|
|
91
|
+
tracked = []
|
|
92
|
+
tracker = ->(usage) { tracked << usage }
|
|
93
|
+
|
|
94
|
+
client = Rasti::AI::OpenAI::Client.new usage_tracker: tracker
|
|
95
|
+
|
|
96
|
+
client.chat_completions messages: [user_message(question)]
|
|
97
|
+
|
|
98
|
+
assert_equal 1, tracked.count
|
|
99
|
+
|
|
100
|
+
expected_raw = {
|
|
101
|
+
'prompt_tokens' => 27,
|
|
102
|
+
'completion_tokens' => 229,
|
|
103
|
+
'total_tokens' => 256,
|
|
104
|
+
'prompt_tokens_details' => {
|
|
105
|
+
'cached_tokens' => 0,
|
|
106
|
+
'audio_tokens' => 0
|
|
107
|
+
},
|
|
108
|
+
'completion_tokens_details' => {
|
|
109
|
+
'reasoning_tokens' => 0,
|
|
110
|
+
'audio_tokens' => 0,
|
|
111
|
+
'accepted_prediction_tokens' => 0,
|
|
112
|
+
'rejected_prediction_tokens' => 0
|
|
113
|
+
}
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
usage = tracked[0]
|
|
117
|
+
assert_instance_of Rasti::AI::Usage, usage
|
|
118
|
+
assert_equal 'open_ai', usage.provider
|
|
119
|
+
assert_equal 'gpt-4o-mini-2024-07-18', usage.model
|
|
120
|
+
assert_equal 27, usage.input_tokens
|
|
121
|
+
assert_equal 229, usage.output_tokens
|
|
122
|
+
assert_equal 0, usage.cached_tokens
|
|
123
|
+
assert_equal 0, usage.reasoning_tokens
|
|
124
|
+
assert_equal expected_raw, usage.raw
|
|
125
|
+
end
|
|
126
|
+
|
|
127
|
+
it 'Without tracker' do
|
|
128
|
+
stub_open_ai_chat_completions
|
|
129
|
+
|
|
130
|
+
client = Rasti::AI::OpenAI::Client.new
|
|
131
|
+
|
|
132
|
+
response = client.chat_completions messages: [user_message(question)]
|
|
133
|
+
|
|
134
|
+
assert_response_content response, answer
|
|
135
|
+
end
|
|
136
|
+
|
|
137
|
+
end
|
|
138
|
+
|
|
86
139
|
end
|
|
87
140
|
|
|
88
141
|
it 'Request error' do
|
|
@@ -14,9 +14,16 @@
|
|
|
14
14
|
}
|
|
15
15
|
],
|
|
16
16
|
"usageMetadata": {
|
|
17
|
-
"promptTokenCount":
|
|
18
|
-
"candidatesTokenCount":
|
|
19
|
-
"totalTokenCount":
|
|
17
|
+
"promptTokenCount": 4,
|
|
18
|
+
"candidatesTokenCount": 18,
|
|
19
|
+
"totalTokenCount": 275,
|
|
20
|
+
"promptTokensDetails": [
|
|
21
|
+
{
|
|
22
|
+
"modality": "TEXT",
|
|
23
|
+
"tokenCount": 4
|
|
24
|
+
}
|
|
25
|
+
],
|
|
26
|
+
"thoughtsTokenCount": 253
|
|
20
27
|
},
|
|
21
28
|
"modelVersion": "gemini-test"
|
|
22
29
|
}
|
metadata
CHANGED
|
@@ -1,14 +1,14 @@
|
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
|
2
2
|
name: rasti-ai
|
|
3
3
|
version: !ruby/object:Gem::Version
|
|
4
|
-
version: 2.0.1
|
|
4
|
+
version: 2.0.2
|
|
5
5
|
platform: ruby
|
|
6
6
|
authors:
|
|
7
7
|
- Gabriel Naiman
|
|
8
8
|
autorequire:
|
|
9
9
|
bindir: bin
|
|
10
10
|
cert_chain: []
|
|
11
|
-
date: 2026-03-
|
|
11
|
+
date: 2026-03-16 00:00:00.000000000 Z
|
|
12
12
|
dependencies:
|
|
13
13
|
- !ruby/object:Gem::Dependency
|
|
14
14
|
name: multi_require
|