gpt 0.1.1 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 840186b5978b9ff54b0aa11256b238bfe03dba6d50020e70ace54dc9ad69e95c
4
- data.tar.gz: 63ea299d5a2d5f0c119e99605cde359294bc60de72fe84eadfd704288c03eb06
3
+ metadata.gz: b725ab7bc465dae82acf06dff54ba12d1199ddd6b21c90d8107839c709da7ba9
4
+ data.tar.gz: 0b6ef69bac8fcaf22565aedbb3fff199a65957ce08514d7f85aff5fc2f3341ed
5
5
  SHA512:
6
- metadata.gz: 7aa46b3f49131432ee7e33303d9581a391d7eea15e75b9ff7e5e1ef78ed33a5c720f337cdafb9641d62a8a11bac427b222fcaa8e9d473f4040559cd7c6e8769c
7
- data.tar.gz: f8a4865378bd4981362d751e9c4140618bb2451b90c408a92a3ba95ee391138168c3fe26dcbe73211b867d7547b3ac6803f57b5a989d19e4021488e4c5365e74
6
+ metadata.gz: 9c68b407adf9d726b1bfc215c7523663be86cc0749337618c66d52679b628128a22f7d4ecff9cefd403e023a8b7cd65ad4db849898ff9a367ffe40e28f8a68b0
7
+ data.tar.gz: 213b43fb66a3ddd8e6d237a01f7aaefdff14a25100332db368c3801bf1967ebf4c3acd6d074fa7762d3326bc062303e831809b49d6065f50b6245c45d8813c66
data/README.md CHANGED
@@ -3,8 +3,8 @@
3
3
  Cliente Ruby simples para a Responses API, com foco no GPT-5, com uma API de alto nível inspirada no OpenAIExt.
4
4
 
5
5
  ## Instalação
6
- ```
7
- bash build_and_install.sh
6
+ ```bash
7
+ gem install gpt
8
8
  ```
9
9
 
10
10
  ## Configuração
@@ -104,3 +104,52 @@ res.model
104
104
  res.total_tokens
105
105
  res.to_h
106
106
  ```
107
+
108
+ ## Function calling
109
+ ```ruby
110
+ require 'gpt'
111
+
112
+ # Passe ferramentas diretamente para GPT.ask
113
+ res = GPT.ask(
114
+ 'Como está o tempo em São Paulo?',
115
+ model: 'gpt-5',
116
+ tools: [
117
+ {
118
+ 'type' => 'function',
119
+ 'name' => 'get_weather',
120
+ 'description' => 'Obter clima atual',
121
+ 'parameters' => {
122
+ 'type' => 'object',
123
+ 'properties' => {
124
+ 'location' => { 'type' => 'string' },
125
+ 'unit' => { 'type' => 'string', 'enum' => ['celsius', 'fahrenheit'] }
126
+ },
127
+ 'required' => ['location']
128
+ }
129
+ }
130
+ ]
131
+ )
132
+
133
+ puts res.content
134
+ ```
135
+
136
+ ### Executando funções chamadas pelo modelo
137
+ ```ruby
138
+ # Se o modelo decidir acionar uma função, você pode inspecionar e executar:
139
+ if res.functions?
140
+ # Exemplo de contexto com um método compatível com o nome da função
141
+ class WeatherContext
142
+ def get_weather(location:, unit: 'celsius')
143
+ { location: location, unit: unit, temp: 26 }
144
+ end
145
+ end
146
+
147
+ tool_messages = res.functions_run_all(context: WeatherContext.new)
148
+
149
+ # tool_messages é uma lista de hashes com:
150
+ # :tool_call_id, :role=>:tool, :name, :content (string/json)
151
+ # Em Chat Completions, você pode passar estes objetos diretamente em messages.
152
+ # Na Responses API, converta-os para input items compatíveis (ex.: tool_result).
153
+ p tool_messages
154
+ end
155
+ ```
data/lib/gpt/client.rb CHANGED
@@ -120,5 +120,3 @@ module GPT
120
120
  end
121
121
  end
122
122
  end
123
-
124
-
data/lib/gpt/error.rb CHANGED
@@ -9,4 +9,4 @@ class GPT::Error < StandardError
9
9
  end
10
10
  end
11
11
 
12
-
12
+ class GPT::FunctionExecutionError < GPT::Error; end
@@ -17,9 +17,21 @@ module GPT
17
17
  else
18
18
  msg = message
19
19
  contents = msg && msg['content']
20
- return nil unless contents.is_a?(Array)
21
- text_item = contents.find { |c| c['type'] == 'output_text' || c['type'] == 'text' }
22
- text_item && text_item['text']
20
+ if contents.is_a?(Array)
21
+ text_item = contents.find { |c| c['type'] == 'output_text' || c['type'] == 'text' }
22
+ return text_item['text'] if text_item && text_item['text'] && !text_item['text'].empty?
23
+ end
24
+ if self['output'].is_a?(Array)
25
+ text_item = self['output'].find { |i| i['type'] == 'output_text' || i['type'] == 'text' }
26
+ return text_item['text'] if text_item && text_item['text'] && !text_item['text'].empty?
27
+ end
28
+ if self['output_text'].is_a?(String) && !self['output_text'].empty?
29
+ return self['output_text']
30
+ end
31
+ if self['content'].is_a?(String) && !self['content'].empty?
32
+ return self['content']
33
+ end
34
+ nil
23
35
  end
24
36
  end
25
37
 
@@ -54,6 +66,26 @@ module GPT
54
66
  Time.at(self['created_at'])
55
67
  end
56
68
  end
69
+
70
+ def functions
71
+ return [] unless tool_calls?
72
+
73
+ tool_functions = tool_calls.select { |tool| tool['type'] == 'function' }
74
+ return [] if tool_functions.empty?
75
+
76
+ tool_functions.map { |function| build_function_object(function) }
77
+ end
78
+
79
+ def functions?
80
+ functions.any?
81
+ end
82
+
83
+ def functions_run_all(context:)
84
+ raise GPT::FunctionExecutionError, 'No functions to execute' if functions.empty?
85
+ raise GPT::FunctionExecutionError, 'Context cannot be nil' if context.nil?
86
+
87
+ functions.map { |function| function.run(context: context) }
88
+ end
57
89
 
58
90
  def to_h
59
91
  {
@@ -66,9 +98,7 @@ module GPT
66
98
  end
67
99
 
68
100
  def to_s
69
- content || '[No content]'
101
+ content || self['output_text'] || '[No content]'
70
102
  end
71
103
  end
72
104
  end
73
-
74
-
data/lib/gpt/responses.rb CHANGED
@@ -11,11 +11,11 @@ module GPT
11
11
  end
12
12
 
13
13
  def get(response_id, include: nil, include_obfuscation: nil, starting_after: nil, stream: nil)
14
- query = {}
15
- query['include[]'] = include if include
16
- query['include_obfuscation'] = include_obfuscation unless include_obfuscation.nil?
17
- query['starting_after'] = starting_after if starting_after
18
- query['stream'] = stream unless stream.nil?
14
+ query = []
15
+ Array(include).each { |v| query << ['include[]', v] } if include
16
+ query << ['include_obfuscation', include_obfuscation] unless include_obfuscation.nil?
17
+ query << ['starting_after', starting_after] if starting_after
18
+ query << ['stream', stream] unless stream.nil?
19
19
  res = @client.json_get("/v1/responses/#{response_id}", query: query)
20
20
  res.extend(GPT::ResponseExtender) if res.is_a?(Hash)
21
21
  res
data/lib/gpt/version.rb CHANGED
@@ -1,5 +1,5 @@
1
1
  module GPT
2
- VERSION = '0.1.1'
2
+ VERSION = '0.2.0'
3
3
  end
4
4
 
5
5
 
data/lib/gpt.rb CHANGED
@@ -19,22 +19,282 @@ module GPT
19
19
  @responses ||= Responses.new(client)
20
20
  end
21
21
 
22
+ def self.deep_stringify(value)
23
+ case value
24
+ when Hash
25
+ value.each_with_object({}) { |(k, v), acc| acc[k.to_s] = deep_stringify(v) }
26
+ when Array
27
+ value.map { |v| deep_stringify(v) }
28
+ when Symbol
29
+ value.to_s
30
+ else
31
+ value
32
+ end
33
+ end
34
+
35
+ def self.normalize_tools(tools)
36
+ return tools unless tools.is_a?(Array)
37
+ tools.map do |tool|
38
+ next tool unless tool.is_a?(Hash)
39
+ t = deep_stringify(tool)
40
+ if t['type'].to_s == 'function'
41
+ fn = t['function'] || {}
42
+ t['name'] = t['name'] || fn['name']
43
+ t['description'] = t['description'] || fn['description']
44
+ t['parameters'] = t['parameters'] || fn['parameters']
45
+ # Remover strict se for nil para evitar problemas
46
+ if t.key?('strict') || fn.key?('strict')
47
+ t['strict'] = t.key?('strict') ? t['strict'] : fn['strict']
48
+ end
49
+ t.delete('function')
50
+ end
51
+ t
52
+ end
53
+ end
54
+
22
55
  def self.ask(prompt, model: 'gpt-5', stream: false, text_stream: false, **opts, &block)
56
+ internal_tools_context = opts.key?(:tools_context) ? opts.delete(:tools_context) : opts.delete('tools_context')
57
+ internal_max_iters = opts.key?(:max_tool_iterations) ? opts.delete(:max_tool_iterations) : opts.delete('max_tool_iterations')
58
+ # Se tools_context foi fornecido, ativa a execução automática de tools
59
+ internal_auto = !!internal_tools_context
60
+
23
61
  payload = { 'model' => model, 'input' => prompt }
24
62
  opts.each { |k, v| payload[k.to_s] = v }
63
+ payload = deep_stringify(payload)
64
+ if payload['tools']
65
+ payload['tools'] = normalize_tools(payload['tools'])
66
+ end
67
+
68
+ if internal_auto
69
+ return ask_with_auto_tools(payload, prompt: prompt, tools_context: internal_tools_context, max_tool_iterations: internal_max_iters)
70
+ end
71
+
25
72
  if stream
26
- responses.stream(payload) do |chunk|
27
- yield chunk if block_given?
28
- end
73
+ responses.stream(payload) { |chunk| yield chunk if block_given? }
29
74
  elsif text_stream
30
- responses.stream_text(payload) do |text|
31
- yield text if block_given?
32
- end
75
+ responses.stream_text(payload) { |text| yield text if block_given? }
33
76
  else
34
77
  res = responses.create(payload)
35
78
  res.extend(ResponseExtender)
36
- res
79
+ normalize_response_output!(res)
80
+ wait_for_response_if_needed(res)
81
+ end
82
+ end
83
+
84
+ def self.ask_with_auto_tools(base_payload, prompt:, tools_context:, max_tool_iterations: 5)
85
+ raise GPT::Error.new('auto_tools requer tools') unless base_payload['tools']
86
+ raise GPT::Error.new('auto_tools requer tools_context') unless tools_context
87
+
88
+ res = responses.create(base_payload)
89
+ res.extend(ResponseExtender)
90
+ normalize_response_output!(res)
91
+ res = wait_for_response_if_needed(res)
92
+
93
+ iterations = 0
94
+ last_results = []
95
+ loop do
96
+ iterations += 1
97
+ break if iterations > (max_tool_iterations || 5)
98
+
99
+ tool_calls = extract_tool_calls(res)
100
+ break if tool_calls.empty?
101
+
102
+ results = tool_calls.map do |c|
103
+ args = c['arguments'].is_a?(String) ? safe_parse_json(c['arguments']) : c['arguments']
104
+ args = args.is_a?(Hash) ? symbolize_keys(args) : {}
105
+ output = execute_tool(tools_context, c['name'], args)
106
+ build_tool_result(c['id'], c['name'], output)
107
+ end
108
+ last_results = results
109
+
110
+ # Construir uma mensagem completa com todo o contexto
111
+ tool_calls_description = tool_calls.map do |tc|
112
+ args = tc['arguments'].is_a?(String) ? tc['arguments'] : Oj.dump(tc['arguments'])
113
+ "Chamada de ferramenta: #{tc['name']} com argumentos: #{args}"
114
+ end.join("\n")
115
+
116
+ tool_results_text = results.map do |r|
117
+ text = r.dig('content', 0, 'text') || ''
118
+ "Resultado de #{r['name']}: #{text}"
119
+ end.join("\n")
120
+
121
+ # Criar um prompt completo com todo o contexto
122
+ full_context = [
123
+ "Usuário solicitou: #{prompt}",
124
+ "",
125
+ "Você chamou as seguintes ferramentas:",
126
+ tool_calls_description,
127
+ "",
128
+ "As ferramentas retornaram os seguintes resultados:",
129
+ tool_results_text,
130
+ "",
131
+ "Agora, forneça uma resposta completa e útil ao usuário baseada nos resultados das ferramentas executadas."
132
+ ].join("\n")
133
+
134
+ # Manter as tools definidas para contexto, mas enviar como input simples
135
+ payload = base_payload.dup
136
+ payload['input'] = full_context
137
+ res = responses.create(payload)
138
+ res.extend(ResponseExtender)
139
+ normalize_response_output!(res)
140
+ res = wait_for_response_if_needed(res)
141
+ end
142
+
143
+ if (!res['output_text'] || res['output_text'].empty?) && last_results.is_a?(Array) && last_results.any?
144
+ texts = last_results.map { |r| r.dig('content', 0, 'text') }.compact.join
145
+ res['output_text'] = texts unless texts.empty?
146
+ end
147
+ res
148
+ end
149
+
150
+ def self.normalize_tool_call(call)
151
+ return call unless call.is_a?(Hash)
152
+ if call['type'] == 'function_call'
153
+ {
154
+ 'id' => call['call_id'] || call['id'],
155
+ 'type' => 'tool_call',
156
+ 'name' => call['name'],
157
+ 'arguments' => call['arguments']
158
+ }
159
+ else
160
+ call
161
+ end
162
+ end
163
+
164
+ def self.extract_tool_calls(res)
165
+ if res.is_a?(Hash)
166
+ output = res['output']
167
+ if output.is_a?(Array)
168
+ calls = output.select { |i| i.is_a?(Hash) && (i['type'] == 'tool_call' || i['type'] == 'function_call') }
169
+ return calls.map { |c| normalize_tool_call(c) } if calls.any?
170
+ end
171
+ if res['choices'].is_a?(Array)
172
+ msg = res.dig('choices', 0, 'message') || {}
173
+ calls = msg['tool_calls']
174
+ if calls.is_a?(Array)
175
+ return calls.map do |c|
176
+ {
177
+ 'id' => c['id'],
178
+ 'type' => 'tool_call',
179
+ 'name' => c.dig('function', 'name') || c['name'],
180
+ 'arguments' => c.dig('function', 'arguments') || c['arguments']
181
+ }
182
+ end
183
+ end
184
+ end
185
+ end
186
+ []
187
+ end
188
+
189
+ def self.build_tool_result(tool_call_id, name, output)
190
+ {
191
+ 'type' => 'tool_result',
192
+ 'tool_call_id' => tool_call_id,
193
+ 'name' => name,
194
+ 'content' => [{ 'type' => 'output_text', 'text' => output.to_s }]
195
+ }
196
+ end
197
+
198
+ def self.safe_parse_json(str)
199
+ return {} unless str.is_a?(String)
200
+ Oj.load(str)
201
+ rescue Oj::ParseError
202
+ {}
203
+ end
204
+
205
+ def self.symbolize_keys(h)
206
+ h.each_with_object({}) { |(k, v), acc| acc[(k.to_sym rescue k)] = v }
207
+ end
208
+
209
+ def self.execute_tool(ctx, name, args)
210
+ if ctx.respond_to?(name)
211
+ ctx.public_send(name, **args)
212
+ else
213
+ raise GPT::Error.new("Ferramenta não encontrada: #{name}")
214
+ end
215
+ end
216
+
217
+ def self.normalize_response_output!(res)
218
+ return unless res.is_a?(Hash)
219
+ return if res['output_text'].is_a?(String) && !res['output_text'].empty?
220
+ # 1) Responses API: output array
221
+ if res['output'].is_a?(Array)
222
+ texts = res['output'].map do |i|
223
+ if i.is_a?(Hash)
224
+ if i['type'] == 'output_text' || i['type'] == 'text'
225
+ i['text']
226
+ elsif i['type'] == 'message'
227
+ content = i['content']
228
+ if content.is_a?(Array)
229
+ item = content.find { |c| c['type'] == 'output_text' || c['type'] == 'text' }
230
+ item && item['text']
231
+ end
232
+ end
233
+ end
234
+ end.compact
235
+ combined = texts.join
236
+ res['output_text'] = combined unless combined.empty?
237
+ end
238
+ # 2) Chat style: choices[0].message.content can be string or array
239
+ if (!res['output_text'] || res['output_text'].empty?) && res['choices'].is_a?(Array)
240
+ msg = res.dig('choices', 0, 'message') || {}
241
+ if msg['content'].is_a?(String)
242
+ res['output_text'] = msg['content'] unless msg['content'].empty?
243
+ elsif msg['content'].is_a?(Array)
244
+ item = msg['content'].find { |c| c['type'] == 'text' || c['type'] == 'output_text' }
245
+ res['output_text'] = item['text'] if item && item['text'] && !item['text'].empty?
246
+ end
247
+ end
248
+ end
249
+
250
+ def self.wait_for_response_if_needed(res, poll_interval: 0.5, timeout_s: 60)
251
+ start_time = Time.now
252
+ loop do
253
+ if response_has_content_or_calls?(res)
254
+ return res
255
+ end
256
+ id = res['id']
257
+ break unless id
258
+ if Time.now - start_time > timeout_s
259
+ puts "DEBUG wait: Timeout atingido" if ENV['DEBUG_WAIT']
260
+ break
261
+ end
262
+ sleep poll_interval
263
+ refreshed = responses.get(id, include: ['output'])
264
+ refreshed.extend(ResponseExtender) if refreshed.is_a?(Hash)
265
+ normalize_response_output!(refreshed)
266
+ res = refreshed
267
+ end
268
+ res
269
+ end
270
+
271
+ def self.response_has_content_or_calls?(res)
272
+ return false unless res.is_a?(Hash)
273
+ ot = res['output_text']
274
+ return true if ot.is_a?(String) && !ot.empty?
275
+ out = res['output']
276
+ if out.is_a?(Array)
277
+ has_text = out.any? { |i| i.is_a?(Hash) && ((i['type'] == 'output_text' || i['type'] == 'text') && i['text'] && !i['text'].empty?) }
278
+ return true if has_text
279
+ has_message_text = out.any? do |i|
280
+ i.is_a?(Hash) && i['type'] == 'message' && i['content'].is_a?(Array) && i['content'].any? { |c| (c['type'] == 'text' || c['type'] == 'output_text') && c['text'] && !c['text'].empty? }
281
+ end
282
+ return true if has_message_text
283
+ has_calls = out.any? { |i| i.is_a?(Hash) && (i['type'] == 'tool_call' || i['type'] == 'function_call') }
284
+ return true if has_calls
285
+ end
286
+ if res['choices'].is_a?(Array)
287
+ msg = res.dig('choices', 0, 'message') || {}
288
+ cont = msg['content']
289
+ return true if cont.is_a?(String) && !cont.empty?
290
+ if cont.is_a?(Array)
291
+ item = cont.find { |c| (c['type'] == 'text' || c['type'] == 'output_text') && c['text'] && !c['text'].empty? }
292
+ return true if item
293
+ end
37
294
  end
295
+ status = res['status']
296
+ return true if %w[completed failed cancelled error errored].include?(status.to_s)
297
+ false
38
298
  end
39
299
  end
40
300
 
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: gpt
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.1.1
4
+ version: 0.2.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Gedean Dias
@@ -93,8 +93,8 @@ dependencies:
93
93
  - - "~>"
94
94
  - !ruby/object:Gem::Version
95
95
  version: '0.9'
96
- description: Cliente Ruby simples para a Responses API com suporte aos recursos do
97
- GPT-5 (reasoning, verbosity, tools).
96
+ description: Cliente Ruby simples para a Responses API com suporte a reasoning, tools
97
+ (inclui autoexec) e chat.
98
98
  email: gedean.dias@gmail.com
99
99
  executables: []
100
100
  extensions: []