gpt 0.0.1 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +10 -0
- data/README.md +130 -18
- data/lib/gpt/client.rb +6 -5
- data/lib/gpt/error.rb +1 -1
- data/lib/gpt/response_extender.rb +104 -0
- data/lib/gpt/responses.rb +54 -7
- data/lib/gpt/version.rb +5 -0
- data/lib/gpt.rb +282 -0
- metadata +6 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b725ab7bc465dae82acf06dff54ba12d1199ddd6b21c90d8107839c709da7ba9
+  data.tar.gz: 0b6ef69bac8fcaf22565aedbb3fff199a65957ce08514d7f85aff5fc2f3341ed
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 9c68b407adf9d726b1bfc215c7523663be86cc0749337618c66d52679b628128a22f7d4ecff9cefd403e023a8b7cd65ad4db849898ff9a367ffe40e28f8a68b0
+  data.tar.gz: 213b43fb66a3ddd8e6d237a01f7aaefdff14a25100332db368c3801bf1967ebf4c3acd6d074fa7762d3326bc062303e831809b49d6065f50b6245c45d8813c66
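These digests can be reproduced locally; a minimal sketch using Ruby's standard Digest library (file paths are illustrative — extract the downloaded .gem archive first):

```ruby
require 'digest'

# A .gem file is a tar archive containing metadata.gz and data.tar.gz;
# their SHA256 digests should match the values in checksums.yaml.
%w[metadata.gz data.tar.gz].each do |name|
  puts "#{name}: #{Digest::SHA256.file(name).hexdigest}"
end
```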
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,15 @@
 # Changelog
 
+## 0.1.0
+- Support and examples for GPT-5 (minimal reasoning, text verbosity, custom tools, and allowed_tools)
+- `OPENAI_API_KEY` validation and User-Agent fix
+- README updated for GPT-5
+
+## 0.1.1
+- Adds `GPT.ask` for simplified use, with streaming support
+- Adds `GPT::ResponseExtender` with helpers (`content`, `usage`, `total_tokens`, `to_h`)
+- `Responses#create/get` now extend the response with the helpers
+
 ## 0.0.1
 - First version with a client for the Responses API (create/get/delete/cancel/input_items) and SSE streaming.
 
data/README.md
CHANGED
@@ -1,39 +1,93 @@
 # gpt
 
-Simple Ruby client for the API
+Simple Ruby client for the Responses API, focused on GPT-5, with a high-level API inspired by OpenAIExt.
 
-Installation
-```
-gem
+## Installation
+```bash
+gem install gpt
 ```
 
-
+## Configuration
+- Set `OPENAI_API_KEY` or `OPENAI_ACCESS_TOKEN` in the environment.
+- Optional: `OPENAI_ORG_ID` or `OPENAI_ORGANIZATION_ID`, `OPENAI_PROJECT_ID`.
+- Optional: `OPENAI_REQUEST_TIMEOUT` (seconds, default 120).
+
+## Basic usage (GPT-5)
 ```ruby
 require 'gpt'
 
-
+res = GPT.ask('Say hello in one sentence.', model: 'gpt-5')
+puts res.content
+```
+
+## Minimal reasoning
+```ruby
+res = GPT.responses.create({
+  'model' => 'gpt-5',
+  'input' => 'How much gold would be needed to coat the Statue of Liberty in 1mm?',
+  'reasoning' => { 'effort' => 'minimal' }
+})
+```
+
+## Low verbosity
+```ruby
+res = GPT.responses.create({
+  'model' => 'gpt-5',
+  'input' => 'What is the answer to life, the universe, and everything?',
+  'text' => { 'verbosity' => 'low' }
+})
+```
+
+## Custom tools
+```ruby
+res = GPT.responses.create({
+  'model' => 'gpt-5',
+  'input' => 'Use the code_exec tool to compute the area of a circle with radius equal to the number of r letters in blueberry',
+  'tools' => [
+    { 'type' => 'custom', 'name' => 'code_exec', 'description' => 'Executes arbitrary Python code' }
+  ]
+})
+```
+
+## Restricting tools (allowed_tools)
+```ruby
+res = GPT.responses.create({
+  'model' => 'gpt-5',
+  'input' => 'What is the weather like in São Paulo?',
+  'tools' => [ { 'type' => 'function', 'name' => 'get_weather' } ],
+  'tool_choice' => {
+    'type' => 'allowed_tools',
+    'mode' => 'auto',
+    'tools' => [ { 'type' => 'function', 'name' => 'get_weather' } ]
+  }
+})
+```
 
-
-
-
+## Passing prior reasoning (previous_response_id)
+```ruby
+first = GPT.responses.create({
+  'model' => 'gpt-5',
+  'input' => 'Plan steps to solve X.'
 })
 
-
+followup = GPT.responses.create({
+  'model' => 'gpt-5',
+  'input' => 'Now execute the first step.',
+  'previous_response_id' => first['id']
+})
 ```
 
-SSE streaming
+## SSE streaming
 ```ruby
 require 'gpt'
 
-GPT.
-
-
-
-  print chunk
-end
+GPT.ask('Tell a short story.', model: 'gpt-5', stream: true) { |chunk| print chunk }
+
+# Direct text streaming
+GPT.ask('Tell a short story.', model: 'gpt-5', text_stream: true) { |text| print text }
 ```
 
-Other operations
+## Other operations
 ```ruby
 id = res['id']
 GPT.responses.get(id)
@@ -41,3 +95,61 @@ GPT.responses.input_items(id)
 GPT.responses.cancel(id)
 GPT.responses.delete(id)
 ```
+
+## Response helpers
+```ruby
+res = GPT.ask('What is the capital of France?', model: 'gpt-5')
+res.content
+res.model
+res.total_tokens
+res.to_h
+```
+
+## Function calling
+```ruby
+require 'gpt'
+
+# Pass tools directly to GPT.ask
+res = GPT.ask(
+  'What is the weather like in São Paulo?',
+  model: 'gpt-5',
+  tools: [
+    {
+      'type' => 'function',
+      'name' => 'get_weather',
+      'description' => 'Get the current weather',
+      'parameters' => {
+        'type' => 'object',
+        'properties' => {
+          'location' => { 'type' => 'string' },
+          'unit' => { 'type' => 'string', 'enum' => ['celsius', 'fahrenheit'] }
+        },
+        'required' => ['location']
+      }
+    }
+  ]
+)
+
+puts res.content
+```
+
+### Running functions called by the model
+```ruby
+# If the model decides to call a function, you can inspect and run it:
+if res.functions?
+  # Example context with a method matching the function name
+  class WeatherContext
+    def get_weather(location:, unit: 'celsius')
+      { location: location, unit: unit, temp: 26 }
+    end
+  end
+
+  tool_messages = res.functions_run_all(context: WeatherContext.new)
+
+  # tool_messages is a list of hashes with:
+  # :tool_call_id, :role=>:tool, :name, :content (string/json)
+  # In Chat Completions you can pass these objects directly in messages.
+  # In the Responses API, convert them to compatible input items (e.g. tool_result).
+  p tool_messages
+end
+```
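The last comment in the README leaves the Responses API conversion to the reader. A minimal sketch of that mapping, mirroring the tool_result shape used elsewhere in this release (the helper name and exact item shape are assumptions, not part of the gem):

```ruby
# Hypothetical helper: convert the hashes returned by functions_run_all
# into Responses API tool_result input items (shape assumed).
def to_input_items(tool_messages)
  tool_messages.map do |m|
    {
      'type' => 'tool_result',
      'tool_call_id' => m[:tool_call_id],
      'name' => m[:name],
      'content' => [{ 'type' => 'output_text', 'text' => m[:content].to_s }]
    }
  end
end
```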
data/lib/gpt/client.rb
CHANGED
@@ -5,10 +5,10 @@ module GPT
 
     attr_reader :api_key, :base_url, :timeout, :organization, :project
 
-    def initialize(api_key: ENV['OPENAI_API_KEY'], base_url: nil, timeout: nil, organization: ENV['OPENAI_ORG_ID'], project: ENV['OPENAI_PROJECT_ID'])
+    def initialize(api_key: ENV['OPENAI_API_KEY'] || ENV['OPENAI_ACCESS_TOKEN'], base_url: nil, timeout: nil, organization: ENV['OPENAI_ORG_ID'] || ENV['OPENAI_ORGANIZATION_ID'], project: ENV['OPENAI_PROJECT_ID'])
       @api_key = api_key
       @base_url = base_url || DEFAULT_BASE_URL
-      @timeout = (timeout || DEFAULT_TIMEOUT).to_i
+      @timeout = (timeout || ENV['OPENAI_REQUEST_TIMEOUT'] || DEFAULT_TIMEOUT).to_i
       @organization = organization
       @project = project
     end
@@ -86,10 +86,13 @@ module GPT
     end
 
     def apply_headers(req)
+      if !api_key || api_key.empty?
+        raise GPT::Error.new('Set OPENAI_API_KEY or OPENAI_ACCESS_TOKEN')
+      end
       req['Authorization'] = "Bearer #{api_key}"
       req['OpenAI-Organization'] = organization if organization && !organization.empty?
       req['OpenAI-Project'] = project if project && !project.empty?
-      req['User-Agent'] =
+      req['User-Agent'] = "gpt-ruby/#{GPT::VERSION}"
     end
 
     def parse_response(res)
@@ -117,5 +120,3 @@ module GPT
     end
   end
 end
-
-
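The constructor changes above mean every setting can come from a keyword argument or fall back to the environment. A minimal sketch of explicit configuration (values are illustrative):

```ruby
require 'gpt'

# Keyword arguments win; otherwise the constructor falls back to
# OPENAI_API_KEY/OPENAI_ACCESS_TOKEN, OPENAI_ORG_ID/OPENAI_ORGANIZATION_ID,
# OPENAI_PROJECT_ID, and OPENAI_REQUEST_TIMEOUT.
client = GPT::Client.new(
  api_key: 'sk-example',
  organization: 'org-example',
  timeout: 30
)
```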
data/lib/gpt/response_extender.rb
CHANGED
@@ -0,0 +1,104 @@
+module GPT
+  module ResponseExtender
+    def message
+      if self['choices']
+        dig('choices', 0, 'message') || {}
+      else
+        output_message = if self['output'].is_a?(Array)
+          self['output'].find { |i| i['type'] == 'message' } || self['output'].first
+        end
+        output_message || {}
+      end
+    end
+
+    def content
+      if self['choices']
+        dig('choices', 0, 'message', 'content')
+      else
+        msg = message
+        contents = msg && msg['content']
+        if contents.is_a?(Array)
+          text_item = contents.find { |c| c['type'] == 'output_text' || c['type'] == 'text' }
+          return text_item['text'] if text_item && text_item['text'] && !text_item['text'].empty?
+        end
+        if self['output'].is_a?(Array)
+          text_item = self['output'].find { |i| i['type'] == 'output_text' || i['type'] == 'text' }
+          return text_item['text'] if text_item && text_item['text'] && !text_item['text'].empty?
+        end
+        if self['output_text'].is_a?(String) && !self['output_text'].empty?
+          return self['output_text']
+        end
+        if self['content'].is_a?(String) && !self['content'].empty?
+          return self['content']
+        end
+        nil
+      end
+    end
+
+    def content?
+      !content.nil? && !content.empty?
+    end
+
+    def usage
+      self['usage'] || {}
+    end
+
+    def prompt_tokens
+      usage['prompt_tokens'] || 0
+    end
+
+    def completion_tokens
+      usage['completion_tokens'] || 0
+    end
+
+    def total_tokens
+      usage['total_tokens'] || 0
+    end
+
+    def model
+      self['model']
+    end
+
+    def created_at
+      if self['created']
+        Time.at(self['created'])
+      elsif self['created_at']
+        Time.at(self['created_at'])
+      end
+    end
+
+    def functions
+      return [] unless tool_calls?
+
+      tool_functions = tool_calls.select { |tool| tool['type'] == 'function' }
+      return [] if tool_functions.empty?
+
+      tool_functions.map { |function| build_function_object(function) }
+    end
+
+    def functions?
+      functions.any?
+    end
+
+    def functions_run_all(context:)
+      raise OpenAIExt::FunctionExecutionError, 'No functions to execute' if functions.empty?
+      raise OpenAIExt::FunctionExecutionError, 'Context cannot be nil' if context.nil?
+
+      functions.map { |function| function.run(context: context) }
+    end
+
+    def to_h
+      {
+        content: content,
+        role: message['role'],
+        model: model,
+        usage: usage,
+        created_at: created_at
+      }.compact
+    end
+
+    def to_s
+      content || self['output_text'] || '[No content]'
+    end
+  end
+end
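Because the module is mixed into plain hashes via `extend`, the helpers can be exercised on a hand-built response. A minimal sketch (the hash shape follows the branches handled above):

```ruby
require 'gpt'

res = {
  'model'  => 'gpt-5',
  'output' => [{ 'type' => 'output_text', 'text' => 'Hello!' }],
  'usage'  => { 'total_tokens' => 12 }
}
res.extend(GPT::ResponseExtender)

res.content       # => "Hello!" (found via the output-array branch)
res.total_tokens  # => 12
res.to_h          # => { content: "Hello!", model: "gpt-5", usage: { "total_tokens" => 12 } }
```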
data/lib/gpt/responses.rb
CHANGED
@@ -5,16 +5,20 @@ module GPT
     end
 
     def create(payload)
-      @client.json_post('/v1/responses', body: payload)
+      res = @client.json_post('/v1/responses', body: payload)
+      res.extend(GPT::ResponseExtender) if res.is_a?(Hash)
+      res
     end
 
     def get(response_id, include: nil, include_obfuscation: nil, starting_after: nil, stream: nil)
-      query =
-      query['include[]']
-      query['include_obfuscation'
-      query['starting_after'
-      query['stream'
-      @client.json_get("/v1/responses/#{response_id}", query: query)
+      query = []
+      Array(include).each { |v| query << ['include[]', v] } if include
+      query << ['include_obfuscation', include_obfuscation] unless include_obfuscation.nil?
+      query << ['starting_after', starting_after] if starting_after
+      query << ['stream', stream] unless stream.nil?
+      res = @client.json_get("/v1/responses/#{response_id}", query: query)
+      res.extend(GPT::ResponseExtender) if res.is_a?(Hash)
+      res
     end
 
     def delete(response_id)
@@ -42,6 +46,49 @@ module GPT
         yield chunk if block_given?
       end
     end
+
+    def stream_text(payload)
+      buffer = ''.dup
+      stream(payload) do |chunk|
+        buffer << chunk
+        parts = buffer.split("\n\n", -1)
+        buffer = parts.pop || ''.dup
+        parts.each do |raw_event|
+          lines = raw_event.split("\n")
+          event_name = nil
+          data_lines = []
+          lines.each do |line|
+            if line.start_with?('event:')
+              event_name = line.sub('event:', '').strip
+            elsif line.start_with?('data:')
+              data_lines << line.sub('data:', '').strip
+            end
+          end
+          next if data_lines.empty?
+          data = data_lines.join("\n")
+          next if data == '[DONE]'
+          begin
+            json = Oj.load(data)
+          rescue Oj::ParseError
+            next
+          end
+          case event_name
+          when 'response.output_text.delta'
+            delta = json['delta']
+            yield delta if delta && !delta.empty?
+          when 'response.delta'
+            delta = json.dig('delta', 'content')
+            if delta.is_a?(Array)
+              text_piece = delta.find { |c| c['type'] == 'output_text' || c['type'] == 'text' }
+              yield(text_piece['text']) if text_piece && text_piece['text'] && !text_piece['text'].empty?
+            end
+          else
+            # ignore other events
+          end
+        end
+      end
+      true
+    end
   end
 end
 
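`stream_text` buffers SSE frames (blank-line-separated `event:`/`data:` blocks) and yields only text deltas. A minimal usage sketch (payload keys as in the README above):

```ruby
require 'gpt'

# Prints only the text of 'response.output_text.delta' (and compatible
# 'response.delta') events; other SSE events are silently skipped.
GPT.responses.stream_text({ 'model' => 'gpt-5', 'input' => 'Tell a short story.' }) do |text|
  print text
end
```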
data/lib/gpt.rb
CHANGED
@@ -1,10 +1,14 @@
+module GPT; end
+
 require 'oj'
 require 'net/http'
 require 'uri'
 
+require_relative 'gpt/version'
 require_relative 'gpt/error'
 require_relative 'gpt/client'
 require_relative 'gpt/responses'
+require_relative 'gpt/response_extender'
 
 module GPT
   def self.client
@@ -14,6 +18,284 @@ module GPT
   def self.responses
     @responses ||= Responses.new(client)
   end
+
+  def self.deep_stringify(value)
+    case value
+    when Hash
+      value.each_with_object({}) { |(k, v), acc| acc[k.to_s] = deep_stringify(v) }
+    when Array
+      value.map { |v| deep_stringify(v) }
+    when Symbol
+      value.to_s
+    else
+      value
+    end
+  end
+
+  def self.normalize_tools(tools)
+    return tools unless tools.is_a?(Array)
+    tools.map do |tool|
+      next tool unless tool.is_a?(Hash)
+      t = deep_stringify(tool)
+      if t['type'].to_s == 'function'
+        fn = t['function'] || {}
+        t['name'] = t['name'] || fn['name']
+        t['description'] = t['description'] || fn['description']
+        t['parameters'] = t['parameters'] || fn['parameters']
+        # Drop strict when it is nil to avoid problems
+        if t.key?('strict') || fn.key?('strict')
+          t['strict'] = t.key?('strict') ? t['strict'] : fn['strict']
+        end
+        t.delete('function')
+      end
+      t
+    end
+  end
+
+  def self.ask(prompt, model: 'gpt-5', stream: false, text_stream: false, **opts, &block)
+    internal_tools_context = opts.key?(:tools_context) ? opts.delete(:tools_context) : opts.delete('tools_context')
+    internal_max_iters = opts.key?(:max_tool_iterations) ? opts.delete(:max_tool_iterations) : opts.delete('max_tool_iterations')
+    # If tools_context was provided, enable automatic tool execution
+    internal_auto = !!internal_tools_context
+
+    payload = { 'model' => model, 'input' => prompt }
+    opts.each { |k, v| payload[k.to_s] = v }
+    payload = deep_stringify(payload)
+    if payload['tools']
+      payload['tools'] = normalize_tools(payload['tools'])
+    end
+
+    if internal_auto
+      return ask_with_auto_tools(payload, prompt: prompt, tools_context: internal_tools_context, max_tool_iterations: internal_max_iters)
+    end
+
+    if stream
+      responses.stream(payload) { |chunk| yield chunk if block_given? }
+    elsif text_stream
+      responses.stream_text(payload) { |text| yield text if block_given? }
+    else
+      res = responses.create(payload)
+      res.extend(ResponseExtender)
+      normalize_response_output!(res)
+      wait_for_response_if_needed(res)
+    end
+  end
+
+  def self.ask_with_auto_tools(base_payload, prompt:, tools_context:, max_tool_iterations: 5)
+    raise GPT::Error.new('auto_tools requires tools') unless base_payload['tools']
+    raise GPT::Error.new('auto_tools requires tools_context') unless tools_context
+
+    res = responses.create(base_payload)
+    res.extend(ResponseExtender)
+    normalize_response_output!(res)
+    res = wait_for_response_if_needed(res)
+
+    iterations = 0
+    last_results = []
+    loop do
+      iterations += 1
+      break if iterations > (max_tool_iterations || 5)
+
+      tool_calls = extract_tool_calls(res)
+      break if tool_calls.empty?
+
+      results = tool_calls.map do |c|
+        args = c['arguments'].is_a?(String) ? safe_parse_json(c['arguments']) : c['arguments']
+        args = args.is_a?(Hash) ? symbolize_keys(args) : {}
+        output = execute_tool(tools_context, c['name'], args)
+        build_tool_result(c['id'], c['name'], output)
+      end
+      last_results = results
+
+      # Build a complete message with the full context
+      tool_calls_description = tool_calls.map do |tc|
+        args = tc['arguments'].is_a?(String) ? tc['arguments'] : Oj.dump(tc['arguments'])
+        "Tool call: #{tc['name']} with arguments: #{args}"
+      end.join("\n")
+
+      tool_results_text = results.map do |r|
+        text = r.dig('content', 0, 'text') || ''
+        "Result of #{r['name']}: #{text}"
+      end.join("\n")
+
+      # Create a complete prompt with the full context
+      full_context = [
+        "The user requested: #{prompt}",
+        "",
+        "You called the following tools:",
+        tool_calls_description,
+        "",
+        "The tools returned the following results:",
+        tool_results_text,
+        "",
+        "Now provide a complete, helpful answer to the user based on the results of the executed tools."
+      ].join("\n")
+
+      # Keep the tools defined for context, but send as plain input
+      payload = base_payload.dup
+      payload['input'] = full_context
+      res = responses.create(payload)
+      res.extend(ResponseExtender)
+      normalize_response_output!(res)
+      res = wait_for_response_if_needed(res)
+    end
+
+    if (!res['output_text'] || res['output_text'].empty?) && last_results.is_a?(Array) && last_results.any?
+      texts = last_results.map { |r| r.dig('content', 0, 'text') }.compact.join
+      res['output_text'] = texts unless texts.empty?
+    end
+    res
+  end
+
+  def self.normalize_tool_call(call)
+    return call unless call.is_a?(Hash)
+    if call['type'] == 'function_call'
+      {
+        'id' => call['call_id'] || call['id'],
+        'type' => 'tool_call',
+        'name' => call['name'],
+        'arguments' => call['arguments']
+      }
+    else
+      call
+    end
+  end
+
+  def self.extract_tool_calls(res)
+    if res.is_a?(Hash)
+      output = res['output']
+      if output.is_a?(Array)
+        calls = output.select { |i| i.is_a?(Hash) && (i['type'] == 'tool_call' || i['type'] == 'function_call') }
+        return calls.map { |c| normalize_tool_call(c) } if calls.any?
+      end
+      if res['choices'].is_a?(Array)
+        msg = res.dig('choices', 0, 'message') || {}
+        calls = msg['tool_calls']
+        if calls.is_a?(Array)
+          return calls.map do |c|
+            {
+              'id' => c['id'],
+              'type' => 'tool_call',
+              'name' => c.dig('function', 'name') || c['name'],
+              'arguments' => c.dig('function', 'arguments') || c['arguments']
+            }
+          end
+        end
+      end
+    end
+    []
+  end
+
+  def self.build_tool_result(tool_call_id, name, output)
+    {
+      'type' => 'tool_result',
+      'tool_call_id' => tool_call_id,
+      'name' => name,
+      'content' => [{ 'type' => 'output_text', 'text' => output.to_s }]
+    }
+  end
+
+  def self.safe_parse_json(str)
+    return {} unless str.is_a?(String)
+    Oj.load(str)
+  rescue Oj::ParseError
+    {}
+  end
+
+  def self.symbolize_keys(h)
+    h.each_with_object({}) { |(k, v), acc| acc[(k.to_sym rescue k)] = v }
+  end
+
+  def self.execute_tool(ctx, name, args)
+    if ctx.respond_to?(name)
+      ctx.public_send(name, **args)
+    else
+      raise GPT::Error.new("Tool not found: #{name}")
+    end
+  end
+
+  def self.normalize_response_output!(res)
+    return unless res.is_a?(Hash)
+    return if res['output_text'].is_a?(String) && !res['output_text'].empty?
+    # 1) Responses API: output array
+    if res['output'].is_a?(Array)
+      texts = res['output'].map do |i|
+        if i.is_a?(Hash)
+          if i['type'] == 'output_text' || i['type'] == 'text'
+            i['text']
+          elsif i['type'] == 'message'
+            content = i['content']
+            if content.is_a?(Array)
+              item = content.find { |c| c['type'] == 'output_text' || c['type'] == 'text' }
+              item && item['text']
+            end
+          end
+        end
+      end.compact
+      combined = texts.join
+      res['output_text'] = combined unless combined.empty?
+    end
+    # 2) Chat style: choices[0].message.content can be string or array
+    if (!res['output_text'] || res['output_text'].empty?) && res['choices'].is_a?(Array)
+      msg = res.dig('choices', 0, 'message') || {}
+      if msg['content'].is_a?(String)
+        res['output_text'] = msg['content'] unless msg['content'].empty?
+      elsif msg['content'].is_a?(Array)
+        item = msg['content'].find { |c| c['type'] == 'text' || c['type'] == 'output_text' }
+        res['output_text'] = item['text'] if item && item['text'] && !item['text'].empty?
+      end
+    end
+  end
+
+  def self.wait_for_response_if_needed(res, poll_interval: 0.5, timeout_s: 60)
+    start_time = Time.now
+    loop do
+      if response_has_content_or_calls?(res)
+        return res
+      end
+      id = res['id']
+      break unless id
+      if Time.now - start_time > timeout_s
+        puts "DEBUG wait: timeout reached" if ENV['DEBUG_WAIT']
+        break
+      end
+      sleep poll_interval
+      refreshed = responses.get(id, include: ['output'])
+      refreshed.extend(ResponseExtender) if refreshed.is_a?(Hash)
+      normalize_response_output!(refreshed)
+      res = refreshed
+    end
+    res
+  end
+
+  def self.response_has_content_or_calls?(res)
+    return false unless res.is_a?(Hash)
+    ot = res['output_text']
+    return true if ot.is_a?(String) && !ot.empty?
+    out = res['output']
+    if out.is_a?(Array)
+      has_text = out.any? { |i| i.is_a?(Hash) && ((i['type'] == 'output_text' || i['type'] == 'text') && i['text'] && !i['text'].empty?) }
+      return true if has_text
+      has_message_text = out.any? do |i|
+        i.is_a?(Hash) && i['type'] == 'message' && i['content'].is_a?(Array) && i['content'].any? { |c| (c['type'] == 'text' || c['type'] == 'output_text') && c['text'] && !c['text'].empty? }
+      end
+      return true if has_message_text
+      has_calls = out.any? { |i| i.is_a?(Hash) && (i['type'] == 'tool_call' || i['type'] == 'function_call') }
+      return true if has_calls
+    end
+    if res['choices'].is_a?(Array)
+      msg = res.dig('choices', 0, 'message') || {}
+      cont = msg['content']
+      return true if cont.is_a?(String) && !cont.empty?
+      if cont.is_a?(Array)
+        item = cont.find { |c| (c['type'] == 'text' || c['type'] == 'output_text') && c['text'] && !c['text'].empty? }
+        return true if item
+      end
+    end
+    status = res['status']
+    return true if %w[completed failed cancelled error errored].include?(status.to_s)
+    false
+  end
 end
 
 
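The `tools_context` branch of `GPT.ask` is the quickest way to see the auto-execution loop end to end. A minimal sketch reusing the README's weather example (the schema and context class are illustrative):

```ruby
require 'gpt'

class WeatherContext
  def get_weather(location:)
    "#{location}: 26°C, clear"
  end
end

# Passing tools_context makes GPT.ask run each tool call against the
# context object, feed the results back, and return the final answer.
res = GPT.ask(
  'What is the weather like in São Paulo?',
  model: 'gpt-5',
  tools: [{
    'type' => 'function',
    'name' => 'get_weather',
    'parameters' => {
      'type' => 'object',
      'properties' => { 'location' => { 'type' => 'string' } },
      'required' => ['location']
    }
  }],
  tools_context: WeatherContext.new
)
puts res.content
```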
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: gpt
 version: !ruby/object:Gem::Version
-  version: 0.0
+  version: 0.2.0
 platform: ruby
 authors:
 - Gedean Dias
@@ -93,8 +93,8 @@ dependencies:
   - - "~>"
     - !ruby/object:Gem::Version
       version: '0.9'
-description:
-
+description: Simple Ruby client for the Responses API with support for reasoning, tools
+  (including autoexec) and chat.
 email: gedean.dias@gmail.com
 executables: []
 extensions: []
@@ -106,7 +106,9 @@ files:
 - lib/gpt.rb
 - lib/gpt/client.rb
 - lib/gpt/error.rb
+- lib/gpt/response_extender.rb
 - lib/gpt/responses.rb
+- lib/gpt/version.rb
 homepage: https://github.com/gedean/openaiext
 licenses:
 - MIT
@@ -127,5 +129,5 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 requirements: []
 rubygems_version: 3.7.1
 specification_version: 4
-summary: GPT
+summary: Ruby client for GPT-5 (Responses API)
 test_files: []