fantasy-cli 1.2.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE +21 -0
- data/README.md +456 -0
- data/bin/gsd +8 -0
- data/bin/gsd-core-darwin-amd64 +0 -0
- data/bin/gsd-core-darwin-arm64 +0 -0
- data/bin/gsd-core-linux-amd64 +0 -0
- data/bin/gsd-core-linux-arm64 +0 -0
- data/bin/gsd-core-windows-amd64.exe +0 -0
- data/bin/gsd-core-windows-arm64.exe +0 -0
- data/bin/gsd-core.exe +0 -0
- data/lib/gsd/agents/coordinator.rb +195 -0
- data/lib/gsd/agents/task_manager.rb +158 -0
- data/lib/gsd/agents/worker.rb +162 -0
- data/lib/gsd/agents.rb +30 -0
- data/lib/gsd/ai/chat.rb +486 -0
- data/lib/gsd/ai/cli.rb +248 -0
- data/lib/gsd/ai/command_parser.rb +97 -0
- data/lib/gsd/ai/commands/base.rb +42 -0
- data/lib/gsd/ai/commands/clear.rb +20 -0
- data/lib/gsd/ai/commands/context.rb +30 -0
- data/lib/gsd/ai/commands/cost.rb +30 -0
- data/lib/gsd/ai/commands/export.rb +42 -0
- data/lib/gsd/ai/commands/help.rb +61 -0
- data/lib/gsd/ai/commands/model.rb +67 -0
- data/lib/gsd/ai/commands/reset.rb +22 -0
- data/lib/gsd/ai/config.rb +256 -0
- data/lib/gsd/ai/context.rb +324 -0
- data/lib/gsd/ai/cost_tracker.rb +361 -0
- data/lib/gsd/ai/git_context.rb +169 -0
- data/lib/gsd/ai/history.rb +384 -0
- data/lib/gsd/ai/providers/anthropic.rb +429 -0
- data/lib/gsd/ai/providers/base.rb +282 -0
- data/lib/gsd/ai/providers/lmstudio.rb +279 -0
- data/lib/gsd/ai/providers/ollama.rb +336 -0
- data/lib/gsd/ai/providers/openai.rb +396 -0
- data/lib/gsd/ai/providers/openrouter.rb +429 -0
- data/lib/gsd/ai/reference_resolver.rb +225 -0
- data/lib/gsd/ai/repl.rb +349 -0
- data/lib/gsd/ai/streaming.rb +438 -0
- data/lib/gsd/ai/ui.rb +429 -0
- data/lib/gsd/buddy/cli.rb +284 -0
- data/lib/gsd/buddy/gacha.rb +148 -0
- data/lib/gsd/buddy/renderer.rb +108 -0
- data/lib/gsd/buddy/species.rb +190 -0
- data/lib/gsd/buddy/stats.rb +156 -0
- data/lib/gsd/buddy.rb +28 -0
- data/lib/gsd/cli.rb +455 -0
- data/lib/gsd/commands.rb +198 -0
- data/lib/gsd/config.rb +183 -0
- data/lib/gsd/error.rb +188 -0
- data/lib/gsd/frontmatter.rb +123 -0
- data/lib/gsd/go/bridge.rb +173 -0
- data/lib/gsd/history.rb +76 -0
- data/lib/gsd/milestone.rb +75 -0
- data/lib/gsd/output.rb +184 -0
- data/lib/gsd/phase.rb +102 -0
- data/lib/gsd/plugins/base.rb +92 -0
- data/lib/gsd/plugins/cli.rb +330 -0
- data/lib/gsd/plugins/config.rb +164 -0
- data/lib/gsd/plugins/hooks.rb +132 -0
- data/lib/gsd/plugins/installer.rb +158 -0
- data/lib/gsd/plugins/loader.rb +122 -0
- data/lib/gsd/plugins/manager.rb +187 -0
- data/lib/gsd/plugins/marketplace.rb +142 -0
- data/lib/gsd/plugins/sandbox.rb +114 -0
- data/lib/gsd/plugins/search.rb +131 -0
- data/lib/gsd/plugins/validator.rb +157 -0
- data/lib/gsd/plugins.rb +48 -0
- data/lib/gsd/profile.rb +127 -0
- data/lib/gsd/research.rb +85 -0
- data/lib/gsd/roadmap.rb +90 -0
- data/lib/gsd/skills/bundled/commit.md +58 -0
- data/lib/gsd/skills/bundled/debug.md +28 -0
- data/lib/gsd/skills/bundled/explain.md +41 -0
- data/lib/gsd/skills/bundled/plan.md +42 -0
- data/lib/gsd/skills/bundled/verify.md +26 -0
- data/lib/gsd/skills/loader.rb +189 -0
- data/lib/gsd/state.rb +102 -0
- data/lib/gsd/template.rb +106 -0
- data/lib/gsd/tools/ask_user_question.rb +179 -0
- data/lib/gsd/tools/base.rb +204 -0
- data/lib/gsd/tools/bash.rb +246 -0
- data/lib/gsd/tools/file_edit.rb +297 -0
- data/lib/gsd/tools/file_read.rb +199 -0
- data/lib/gsd/tools/file_write.rb +153 -0
- data/lib/gsd/tools/glob.rb +202 -0
- data/lib/gsd/tools/grep.rb +227 -0
- data/lib/gsd/tools/gsd_frontmatter.rb +165 -0
- data/lib/gsd/tools/gsd_phase.rb +140 -0
- data/lib/gsd/tools/gsd_roadmap.rb +108 -0
- data/lib/gsd/tools/gsd_state.rb +143 -0
- data/lib/gsd/tools/gsd_template.rb +157 -0
- data/lib/gsd/tools/gsd_verify.rb +159 -0
- data/lib/gsd/tools/registry.rb +103 -0
- data/lib/gsd/tools/task.rb +235 -0
- data/lib/gsd/tools/todo_write.rb +290 -0
- data/lib/gsd/tools/web.rb +260 -0
- data/lib/gsd/tui/app.rb +366 -0
- data/lib/gsd/tui/auto_complete.rb +79 -0
- data/lib/gsd/tui/colors.rb +111 -0
- data/lib/gsd/tui/command_palette.rb +126 -0
- data/lib/gsd/tui/header.rb +38 -0
- data/lib/gsd/tui/input_box.rb +199 -0
- data/lib/gsd/tui/spinner.rb +40 -0
- data/lib/gsd/tui/status_bar.rb +51 -0
- data/lib/gsd/tui.rb +17 -0
- data/lib/gsd/validator.rb +216 -0
- data/lib/gsd/verify.rb +175 -0
- data/lib/gsd/version.rb +5 -0
- data/lib/gsd/workstream.rb +91 -0
- metadata +231 -0
# frozen_string_literal: true

require 'net/http'
require 'json'
require 'ostruct'

module Gsd
  module AI
    module Providers
      # OpenAI provider - integration with the OpenAI Chat Completions API.
      #
      # Supports:
      # - GPT-4 Turbo
      # - GPT-4
      # - GPT-3.5 Turbo
      # - GPT-4 Vision
      # - Streaming responses (SSE)
      # - Function calling (tools)
      # - Token counting (estimated, ~4 chars/token)
      class OpenAI < Base
        PROVIDER_NAME = 'OpenAI'
        DEFAULT_MODEL = 'gpt-4-turbo-preview'
        API_URL = 'https://api.openai.com/v1/chat/completions'
        BASE_URL = 'https://api.openai.com'

        # Available models with prices (USD per token) and capabilities.
        MODELS = {
          'gpt-4-turbo-preview' => {
            input_price: 0.000_01,
            output_price: 0.000_03,
            context_window: 128_000,
            max_output: 4096,
            vision: false,
            caching: false
          },
          'gpt-4-vision-preview' => {
            input_price: 0.000_01,
            output_price: 0.000_03,
            context_window: 128_000,
            max_output: 4096,
            vision: true,
            caching: false
          },
          'gpt-4' => {
            input_price: 0.000_03,
            output_price: 0.000_06,
            context_window: 8192,
            max_output: 4096,
            vision: false,
            caching: false
          },
          'gpt-3.5-turbo' => {
            input_price: 0.000_000_5,
            output_price: 0.000_001_5,
            context_window: 16_385,
            max_output: 4096,
            vision: false,
            caching: false
          }
        }.freeze

        # Initializes the OpenAI provider.
        #
        # @param model [String] GPT model name
        # @param api_key [String] API key (falls back to env OPENAI_API_KEY)
        # @param debug [Boolean] enable debug logging
        # @param vision [Boolean] enable vision support
        def initialize(model: nil, api_key: nil, debug: false, vision: false)
          super(model: model, debug: debug)
          @api_key = api_key || ENV['OPENAI_API_KEY']
          @max_tokens = 4096
          @vision = vision
          @token_cache = {}
        end

        # Calls the OpenAI API.
        #
        # @param system_prompt [String] system prompt
        # @param messages [Array] message history
        # @param tools [Array] available tools
        # @param stream [Boolean] enable streaming
        # @yield [Hash] raw parsed SSE event (string keys) when streaming
        # @return [OpenStruct] response with content, tool_calls, usage, raw
        # @raise [OpenAIError] on any API or transport failure
        def call(system_prompt:, messages:, tools:, stream: false, &block)
          validate_api_key!

          payload = build_payload(system_prompt, messages, tools, stream)

          log_debug("Calling OpenAI API with model: #{@model}")
          log_debug("Messages count: #{messages.count}")
          log_debug("Tools count: #{tools.count}") if tools.any?

          if stream
            stream_request(payload, &block)
          else
            sync_request(payload)
          end
        rescue OpenAIError
          # Already wrapped (e.g. non-200 response) - do not double-wrap.
          raise
        rescue => e
          log_debug("Error: #{e.message}")
          raise OpenAIError, "OpenAI API error: #{e.message}"
        end

        # @return [Boolean] true when an API key is present and non-empty
        def configured?
          !@api_key.nil? && !@api_key.empty?
        end

        # @return [Hash] input/output pricing for the current model
        #   (zeroed when the model is unknown)
        def pricing
          MODELS[@model] || { input_price: 0, output_price: 0 }
        end

        private

        # @raise [ArgumentError] when no API key is configured
        def validate_api_key!
          raise ArgumentError, 'OPENAI_API_KEY not set' unless configured?
        end

        # Builds the request payload: system prompt prepended to the
        # conversation, plus tools (function-calling) when present.
        #
        # @return [Hash] request payload
        def build_payload(system_prompt, messages, tools, stream)
          formatted_messages = [{ role: 'system', content: system_prompt }] +
                               messages.map { |m| { role: m[:role], content: m[:content] } }

          payload = {
            model: @model,
            max_tokens: @max_tokens,
            messages: formatted_messages,
            stream: stream
          }

          if tools.any?
            payload[:tools] = format_tools(tools)
            payload[:tool_choice] = 'auto'
          end

          payload
        end

        # Formats tools into the OpenAI function-calling schema.
        #
        # @param tools [Array] tools responding to #name, #description,
        #   #input_schema
        # @return [Array<Hash>] formatted tool definitions
        def format_tools(tools)
          tools.map do |tool|
            {
              type: 'function',
              function: {
                name: tool.name,
                description: tool.description,
                parameters: tool.input_schema || { type: 'object', properties: {} }
              }
            }
          end
        end

        # Builds the HTTPS client and signed POST request (shared by the
        # sync and streaming paths).
        #
        # @return [Array(Net::HTTP, Net::HTTP::Post)]
        def build_http(payload)
          uri = URI(API_URL)
          http = Net::HTTP.new(uri.host, uri.port)
          http.use_ssl = true

          request = Net::HTTP::Post.new(uri.path)
          request['Content-Type'] = 'application/json'
          request['Authorization'] = "Bearer #{@api_key}"
          request.body = JSON.generate(payload)

          [http, request]
        end

        # Performs a synchronous (non-streaming) request.
        #
        # @return [OpenStruct] parsed response
        # @raise [OpenAIError] on non-200 responses
        def sync_request(payload)
          http, request = build_http(payload)

          log_debug("Sending request to #{API_URL}")

          response = http.request(request)
          log_debug("Response status: #{response.code}")

          if response.code == '200'
            parse_response(JSON.parse(response.body))
          else
            raise OpenAIError, "API error: #{response.code} - #{response.body}"
          end
        end

        # Performs a streaming request over Server-Sent Events, accumulating
        # content deltas, tool calls and usage from the event stream.
        #
        # @yield [Hash] each parsed SSE event (string keys)
        # @return [OpenStruct] accumulated response
        # @raise [OpenAIError] on non-200 responses
        def stream_request(payload, &block)
          http, request = build_http(payload)

          log_debug("Sending streaming request to #{API_URL}")

          content = +''
          tool_calls = []
          usage = {}

          http.request(request) do |response|
            raise OpenAIError, "API error: #{response.code}" unless response.code == '200'

            response.read_body do |chunk|
              chunk.each_line do |line|
                line = line.strip
                next if line.empty?
                next unless line.start_with?('data: ')

                data = line[6..]
                next if data == '[DONE]'

                begin
                  event = JSON.parse(data)
                  yield(event) if block_given?

                  # JSON.parse yields string keys - the accumulation below
                  # must use string keys too (the original read symbol keys
                  # and therefore never captured any content or tool calls).
                  choice = event.dig('choices', 0)
                  next unless choice

                  delta = choice['delta']
                  if delta
                    content << delta['content'].to_s if delta['content']

                    Array(delta['tool_calls']).each do |tc|
                      fn = tc['function']
                      next unless fn

                      tool_calls << {
                        id: tc['id'],
                        name: fn['name'],
                        arguments: fn['arguments'].to_s
                      }
                    end
                  end

                  # Usage arrives with the final chunk (finish_reason set).
                  usage = extract_usage(event) if choice['finish_reason']
                rescue JSON::ParserError
                  # Ignore malformed SSE lines.
                end
              end
            end
          end

          OpenStruct.new(
            content: content,
            tool_calls: tool_calls,
            usage: usage,
            raw: {}
          )
        end

        # Parses a complete (non-streaming) API response.
        #
        # @param data [Hash] parsed response body (string keys)
        # @return [OpenStruct] parsed response
        def parse_response(data)
          OpenStruct.new(
            content: extract_text(data),
            tool_calls: extract_tool_calls(data),
            usage: extract_usage(data),
            raw: data
          )
        end

        # Estimates the token count of a text (~4 characters per token for
        # GPT models), memoized per text.
        #
        # @param text [String] text to count
        # @return [Integer] estimated token count
        def count_tokens(text)
          @token_cache[text] ||= text.length / 4
        end

        # Clears the token-count cache.
        #
        # @return [void]
        def clear_token_cache
          @token_cache = {}
        end

        # Formats an image reference for the OpenAI Vision API.
        #
        # @param image_url [String] image URL or base64 data URL
        # @param detail [String] detail level (low, high, auto)
        # @return [Hash] formatted image content part
        def format_image(image_url, detail: 'auto')
          {
            type: 'image_url',
            image_url: {
              url: image_url,
              detail: detail
            }
          }
        end

        # Formats a message combining text and images (Vision).
        #
        # @param text [String] message text
        # @param images [Array<String>] image URLs or base64 data URLs
        # @return [Array<Hash>] formatted content parts
        def format_message_with_images(text, images: [])
          [{ type: 'text', text: text }] + images.map { |img| format_image(img) }
        end

        # Extracts the assistant text from a response body.
        #
        # @param data [Hash] parsed response body (string keys)
        # @return [String] message content, or '' when absent
        def extract_text(data)
          choices = data['choices'] || []
          return '' if choices.empty?

          message = choices[0]['message'] || {}
          message['content'] || ''
        end

        # Extracts tool calls from a response body.
        #
        # @param data [Hash] parsed response body (string keys)
        # @return [Array<OpenStruct>] tool calls with parsed JSON arguments
        def extract_tool_calls(data)
          choices = data['choices'] || []
          return [] if choices.empty?

          message = choices[0]['message'] || {}
          tool_calls = message['tool_calls'] || []

          tool_calls.map do |tc|
            function = tc['function'] || {}
            OpenStruct.new(
              id: tc['id'],
              name: function['name'],
              arguments: JSON.parse(function['arguments'] || '{}')
            )
          end
        rescue => e
          log_debug("Error extracting tool calls: #{e.message}")
          []
        end

        # Extracts token usage from a response body.
        #
        # @param data [Hash] parsed response body (string keys)
        # @return [Hash] usage with :input_tokens, :output_tokens, :total_tokens
        def extract_usage(data)
          usage = data['usage'] || {}
          {
            input_tokens: usage['prompt_tokens'] || 0,
            output_tokens: usage['completion_tokens'] || 0,
            total_tokens: usage['total_tokens'] || 0
          }
        end
      end

      # Custom error for OpenAI failures; callers rescue this class.
      class OpenAIError < StandardError; end
    end
  end
end