swarm_sdk 2.7.10 → 2.7.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/swarm_sdk/ruby_llm_patches/chat_callbacks_patch.rb +284 -0
- data/lib/swarm_sdk/ruby_llm_patches/configuration_patch.rb +41 -0
- data/lib/swarm_sdk/ruby_llm_patches/connection_patch.rb +47 -0
- data/lib/swarm_sdk/ruby_llm_patches/init.rb +41 -0
- data/lib/swarm_sdk/ruby_llm_patches/io_endpoint_patch.rb +40 -0
- data/lib/swarm_sdk/ruby_llm_patches/message_management_patch.rb +23 -0
- data/lib/swarm_sdk/ruby_llm_patches/responses_api_patch.rb +599 -0
- data/lib/swarm_sdk/ruby_llm_patches/tool_concurrency_patch.rb +218 -0
- data/lib/swarm_sdk/tools/delegate.rb +2 -2
- data/lib/swarm_sdk/version.rb +1 -1
- data/lib/swarm_sdk.rb +22 -2
- metadata +12 -18
data/lib/swarm_sdk/ruby_llm_patches/responses_api_patch.rb (new file)
@@ -0,0 +1,599 @@
# frozen_string_literal: true

# Adds OpenAI Responses API support to RubyLLM
# - ResponsesSession class for session management
# - OpenAIResponses provider (uses v1/responses endpoint)
# - Chat methods: with_responses_api, restore_responses_session, responses_api_enabled?
#
# Fork Reference: Commit 5f77903

module RubyLLM
  # Responses API specific errors
  class ResponsesApiError < Error; end
  class ResponseIdNotFoundError < ResponsesApiError; end
  class ResponseFailedError < ResponsesApiError; end
  class ResponseInProgressError < ResponsesApiError; end
  class ResponseCancelledError < ResponsesApiError; end
  class ResponseIncompleteError < ResponsesApiError; end

  # Manages state for OpenAI Responses API stateful conversations.
  # Tracks response IDs, session validity, and failure recovery.
  class ResponsesSession
    RESPONSE_ID_TTL = 300 # 5 minutes
    MAX_FAILURES = 2

    attr_reader :response_id, :last_activity, :failure_count

    def initialize(response_id: nil, last_activity: nil, failure_count: 0, disabled: false)
      @response_id = response_id
      @last_activity = last_activity
      @failure_count = failure_count
      @disabled = disabled
    end

    def reset!
      @response_id = nil
      @last_activity = nil
      @failure_count = 0
      @disabled = false
    end

    def update(new_response_id)
      @response_id = new_response_id
      @last_activity = Time.now
      @failure_count = 0
    end

    def valid?
      return false if @disabled
      return false unless @response_id
      return false unless @last_activity

      (Time.now - @last_activity) < RESPONSE_ID_TTL
    end

    def record_failure!
      @failure_count += 1

      if @failure_count >= MAX_FAILURES
        @disabled = true
      else
        @response_id = nil
        @last_activity = nil
      end
    end

    def disabled?
      @disabled
    end

    def to_h
      {
        response_id: @response_id,
        last_activity: @last_activity&.iso8601,
        failure_count: @failure_count,
        disabled: @disabled,
      }
    end

    def self.from_h(hash) # rubocop:disable Style/ClassMethodsDefinitions
      hash = hash.transform_keys(&:to_sym)
      last_activity = hash[:last_activity] ? Time.parse(hash[:last_activity]) : nil

      new(
        response_id: hash[:response_id],
        last_activity: last_activity,
        failure_count: hash[:failure_count] || 0,
        disabled: hash[:disabled] || false,
      )
    end
  end

  module Providers
    # OpenAI Responses API provider.
    # Uses v1/responses endpoint instead of v1/chat/completions.
    # Inherits from OpenAI and overrides only what differs.
    class OpenAIResponses < OpenAI
      attr_reader :responses_session, :responses_config

      def initialize(config, responses_session = nil, responses_config = {})
        @responses_session = responses_session || ResponsesSession.new
        @responses_config = {
          stateful: false,
          store: true,
          truncation: :disabled,
          include: [],
        }.merge(responses_config)

        super(config)
      end

      # Override endpoint URL
      def completion_url
        "responses"
      end

      # Override complete to handle response ID failures
      def complete(messages, tools:, temperature:, model:, params: {}, headers: {}, schema: nil, thinking: nil, &block)
        super
      rescue BadRequestError => e
        raise unless response_id_not_found_error?(e)

        handle_response_id_failure
        retry
      end

      # Override render_payload for Responses API format
      def render_payload(messages, tools:, temperature:, model:, stream: false, schema: nil, thinking: nil)
        system_msgs, other_msgs = partition_messages(messages)

        payload = build_base_payload(model, stream)
        add_instructions(payload, system_msgs)
        add_input(payload, other_msgs)
        add_temperature(payload, temperature)
        add_tools(payload, tools)
        add_schema(payload, schema)
        add_optional_parameters(payload)
        add_stream_options(payload, stream)

        payload
      end

      # Override parse_completion_response for Responses API format
      def parse_completion_response(response)
        data = response.body
        return if data.nil? || !data.is_a?(Hash) || data.empty?

        case data["status"]
        when "completed"
          parse_completed_response(data, response)
        when "failed"
          raise ResponseFailedError.new(response, data.dig("error", "message") || "Response failed")
        when "in_progress", "queued"
          raise ResponseInProgressError.new(response, "Response still processing: #{data["id"]}")
        when "cancelled"
          raise ResponseCancelledError.new(response, "Response was cancelled: #{data["id"]}")
        when "incomplete"
          parse_incomplete_response(data, response)
        else
          raise Error.new(response, data.dig("error", "message")) if data.dig("error", "message")

          parse_completed_response(data, response)
        end
      end

      # Override tool_for for flat format (not nested under 'function')
      def tool_for(tool)
        parameters_schema = parameters_schema_for(tool)

        definition = {
          type: "function",
          name: tool.name,
          description: tool.description,
          parameters: parameters_schema,
        }

        return definition if tool.provider_params.empty?

        RubyLLM::Utils.deep_merge(definition, tool.provider_params)
      end

      # Override build_chunk for Responses API streaming events
      def build_chunk(data)
        if responses_api_event?(data)
          build_responses_chunk(data)
        else
          super
        end
      end

      private

      def stateful_mode?
        @responses_config[:stateful] == true
      end

      def partition_messages(messages)
        system_msgs = messages.select { |m| m.role == :system }
        other_msgs = messages.reject { |m| m.role == :system }
        [system_msgs, other_msgs]
      end

      def build_base_payload(model, stream)
        {
          model: model.id,
          stream: stream,
          store: @responses_config[:store],
        }
      end

      def add_instructions(payload, system_msgs)
        payload[:instructions] = format_instructions(system_msgs) if system_msgs.any?
      end

      def add_input(payload, other_msgs)
        if stateful_mode? && @responses_session.valid?
          payload[:previous_response_id] = @responses_session.response_id
          payload[:input] = format_new_input_only(other_msgs)
        else
          payload[:input] = format_responses_input(other_msgs)
        end
      end

      def add_temperature(payload, temperature)
        payload[:temperature] = temperature unless temperature.nil?
      end

      def add_tools(payload, tools)
        payload[:tools] = tools.map { |_, tool| tool_for(tool) } if tools.any?
      end

      def add_schema(payload, schema)
        return unless schema

        payload[:text] = {
          format: {
            type: "json_schema",
            name: "response",
            schema: schema,
            strict: schema[:strict] != false,
          },
        }
      end

      # Responses API does not support stream_options (Chat Completions API only).
      # Usage is included automatically in the response.completed streaming event.
      def add_stream_options(payload, stream)
      end

      def response_id_not_found_error?(error)
        return false unless @responses_session.response_id

        error.message.include?("not found")
      end

      def handle_response_id_failure
        @responses_session.record_failure!

        if @responses_session.disabled?
          RubyLLM.logger.warn("Responses API: Disabling stateful mode after repeated failures")
        else
          RubyLLM.logger.debug("Responses API: Response ID not found, retrying fresh")
        end
      end

      def format_instructions(system_messages)
        system_messages.map { |m| m.content.to_s }.join("\n\n")
      end

      def format_responses_input(messages)
        messages.each_with_object([]) do |msg, items|
          case msg.role
          when :user
            items << {
              type: "message",
              role: "user",
              content: format_input_content(msg.content),
            }
          when :assistant
            format_assistant_input(msg, items)
          when :tool
            items << {
              type: "function_call_output",
              call_id: msg.tool_call_id,
              output: msg.content.to_s,
            }
          end
        end
      end

      def format_assistant_input(msg, items)
        # Emit function_call items for each tool call (required before function_call_output)
        if msg.tool_calls && !msg.tool_calls.empty?
          msg.tool_calls.each_value do |tc|
            items << {
              type: "function_call",
              call_id: tc.id,
              name: tc.name,
              arguments: tc.arguments.is_a?(String) ? tc.arguments : tc.arguments.to_json,
            }
          end
        end

        # Emit assistant message content if present
        return if msg.content.nil? || msg.content.to_s.strip.empty?

        items << {
          type: "message",
          role: "assistant",
          content: format_output_content(msg.content),
        }
      end

      def format_new_input_only(messages)
        formatted = []
        last_assistant_idx = messages.rindex { |msg| msg.role == :assistant }

        if last_assistant_idx
          new_messages = messages[(last_assistant_idx + 1)..]
          new_messages.each do |msg|
            case msg.role
            when :tool
              formatted << {
                type: "function_call_output",
                call_id: msg.tool_call_id,
                output: msg.content.to_s,
              }
            when :user
              formatted << {
                type: "message",
                role: "user",
                content: format_input_content(msg.content),
              }
            end
          end
        else
          messages.each do |msg|
            next unless msg.role == :user

            formatted << {
              type: "message",
              role: "user",
              content: format_input_content(msg.content),
            }
          end
        end

        formatted
      end

      def format_input_content(content)
        case content
        when String
          [{ type: "input_text", text: content }]
        when Content
          parts = []
          parts << { type: "input_text", text: content.text } if content.text && !content.text.empty?
          content.attachments.each do |attachment|
            parts << format_input_attachment(attachment)
          end
          parts
        when Content::Raw
          content.value
        else
          [{ type: "input_text", text: content.to_s }]
        end
      end

      def format_output_content(content)
        if content.is_a?(String)
          [{ type: "output_text", text: content }]
        elsif content.is_a?(Content)
          [{ type: "output_text", text: content.text || "" }]
        else
          [{ type: "output_text", text: content.to_s }]
        end
      end

      def format_input_attachment(attachment)
        case attachment.type
        when :image
          if attachment.url?
            { type: "input_image", image_url: attachment.source.to_s }
          else
            { type: "input_image", image_url: attachment.for_llm }
          end
        when :file, :pdf
          { type: "input_file", file_data: attachment.encoded, filename: attachment.filename }
        else
          { type: "input_text", text: "[Unsupported attachment: #{attachment.type}]" }
        end
      end

      def add_optional_parameters(payload)
        if @responses_config[:truncation] && @responses_config[:truncation] != :disabled
          payload[:truncation] = @responses_config[:truncation].to_s
        end

        if @responses_config[:include] && !@responses_config[:include].empty?
          payload[:include] = @responses_config[:include].map { |i| i.to_s.tr("_", ".") }
        end

        payload[:service_tier] = @responses_config[:service_tier].to_s if @responses_config[:service_tier]
        payload[:max_tool_calls] = @responses_config[:max_tool_calls] if @responses_config[:max_tool_calls]
      end

      def parse_completed_response(data, response)
        output = data["output"] || []
        content_parts = []
        tool_calls = {}

        output.each do |item|
          case item["type"]
          when "message"
            content_parts << extract_message_content(item)
          when "function_call"
            tool_calls[item["call_id"]] = ToolCall.new(
              id: item["call_id"],
              name: item["name"],
              arguments: parse_tool_arguments(item["arguments"]),
            )
          end
        end

        usage = data["usage"] || {}

        Message.new(
          role: :assistant,
          content: content_parts.join("\n"),
          tool_calls: tool_calls.empty? ? nil : tool_calls,
          response_id: data["id"],
          reasoning_summary: data.dig("reasoning", "summary"),
          reasoning_tokens: usage.dig("output_tokens_details", "reasoning_tokens"),
          input_tokens: usage["input_tokens"] || 0,
          output_tokens: usage["output_tokens"] || 0,
          cached_tokens: usage.dig("prompt_tokens_details", "cached_tokens"),
          cache_creation_tokens: 0,
          model_id: data["model"],
          raw: response,
        )
      end

      def parse_tool_arguments(arguments)
        if arguments.nil? || arguments.empty?
          {}
        elsif arguments.is_a?(String)
          JSON.parse(arguments)
        else
          arguments
        end
      rescue JSON::ParserError
        {}
      end

      def parse_incomplete_response(data, response)
        message = parse_completed_response(data, response)
        RubyLLM.logger.warn("Responses API: Incomplete response: #{data["incomplete_details"]}")
        message
      end

      def extract_message_content(item)
        return "" unless item["content"].is_a?(Array)

        item["content"].filter_map do |content_item|
          content_item["text"] if content_item["type"] == "output_text"
        end.join
      end

      def responses_api_event?(data)
        data.is_a?(Hash) && data["type"]&.start_with?("response.")
      end

      def build_responses_chunk(data)
        case data["type"]
        when "response.output_text.delta"
          # delta is a plain string in the Responses API
          Chunk.new(
            role: :assistant,
            content: data["delta"] || "",
            model_id: nil,
            input_tokens: nil,
            output_tokens: nil,
          )
        when "response.completed"
          usage = data.dig("response", "usage") || {}
          Chunk.new(
            role: :assistant,
            content: nil,
            model_id: data.dig("response", "model"),
            input_tokens: usage["input_tokens"],
            output_tokens: usage["output_tokens"],
            cached_tokens: usage.dig("prompt_tokens_details", "cached_tokens"),
            cache_creation_tokens: 0,
          )
        when "response.function_call_arguments.delta"
          # Tool call argument streaming - accumulate via tool_calls
          Chunk.new(
            role: :assistant,
            content: nil,
            model_id: nil,
            input_tokens: nil,
            output_tokens: nil,
            tool_calls: parse_function_call_delta(data),
          )
        when "response.output_item.done"
          build_output_item_done_chunk(data)
        else
          # No-op chunk for unrecognized events (response.created, response.in_progress, etc.)
          Chunk.new(role: :assistant, content: nil, model_id: nil, input_tokens: nil, output_tokens: nil)
        end
      end

      def parse_function_call_delta(data)
        call_id = data["call_id"] || data["item_id"]
        return {} unless call_id

        { call_id => ToolCall.new(id: nil, name: nil, arguments: data["delta"] || "") }
      end

      def build_output_item_done_chunk(data)
        item = data["item"] || {}
        if item["type"] == "function_call"
          tool_calls = {
            item["call_id"] => ToolCall.new(
              id: item["call_id"],
              name: item["name"],
              arguments: parse_tool_arguments(item["arguments"]),
            ),
          }
          Chunk.new(
            role: :assistant,
            content: nil,
            model_id: nil,
            input_tokens: nil,
            output_tokens: nil,
            tool_calls: tool_calls,
          )
        else
          Chunk.new(role: :assistant, content: nil, model_id: nil, input_tokens: nil, output_tokens: nil)
        end
      end
    end
  end

  class Chat
    # Enable OpenAI Responses API for this chat
    #
    # @param stateful [Boolean] Enable stateful mode (uses previous_response_id)
    # @param store [Boolean] Store responses for retrieval
    # @param truncation [Symbol] Truncation strategy (:disabled, :auto)
    # @param include [Array] Additional data to include in responses
    # @return [self] for chaining
    def with_responses_api(stateful: false, store: true, truncation: :disabled, include: [])
      responses_config = {
        stateful: stateful,
        store: store,
        truncation: truncation,
        include: include,
      }

      @responses_session ||= ResponsesSession.new
      @provider = Providers::OpenAIResponses.new(@config, @responses_session, responses_config)
      @connection = @provider.connection
      @responses_api_enabled = true
      self
    end

    # Restore a Responses API session from saved state
    #
    # @param session_hash [Hash] Session state from ResponsesSession#to_h
    # @return [self] for chaining
    def restore_responses_session(session_hash)
      @responses_session = ResponsesSession.from_h(session_hash)

      if @provider.is_a?(Providers::OpenAIResponses)
        # Re-create provider with restored session
        @provider = Providers::OpenAIResponses.new(
          @config,
          @responses_session,
          @provider.responses_config,
        )
        @connection = @provider.connection
      end

      self
    end

    # Check if Responses API is enabled
    #
    # @return [Boolean] true if Responses API is enabled
    def responses_api_enabled?
      @responses_api_enabled == true
    end

    # Get the current Responses API session
    #
    # @return [ResponsesSession, nil] The session if Responses API is enabled
    attr_reader :responses_session
  end
end
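
For context, a minimal usage sketch of the Chat methods this patch adds (with_responses_api, restore_responses_session, responses_api_enabled?). This is illustrative only and not part of the package: it assumes ruby_llm's usual RubyLLM.chat entry point and ask method, and the model name and prompt are placeholders.

```ruby
require "ruby_llm"

# Enable the Responses API in stateful mode (model name is a placeholder).
chat = RubyLLM.chat(model: "gpt-4o")
  .with_responses_api(stateful: true, store: true)

chat.responses_api_enabled? # => true

chat.ask("Summarize the last deploy log.")

# Persist the session state between processes via the plain-hash round trip.
saved = chat.responses_session.to_h

# Later, restore the saved Responses API session into a fresh chat.
restored = RubyLLM.chat(model: "gpt-4o")
  .with_responses_api(stateful: true)
  .restore_responses_session(saved)
```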
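
Also for orientation, render_payload builds a shape that differs from Chat Completions: system messages collapse into a top-level instructions string, conversation turns become typed input items, and tool definitions are flat rather than nested under "function". A hand-written sketch of roughly what the provider would send for one tool round-trip, with illustrative values only (field names follow the code above):

```ruby
{
  model: "gpt-4o",
  stream: false,
  store: true,
  instructions: "You are a helpful assistant.",
  input: [
    { type: "message", role: "user",
      content: [{ type: "input_text", text: "What's the weather in Paris?" }] },
    { type: "function_call", call_id: "call_123", name: "get_weather",
      arguments: "{\"city\":\"Paris\"}" },
    { type: "function_call_output", call_id: "call_123", output: "18°C, sunny" },
  ],
  tools: [
    { type: "function", name: "get_weather", description: "Look up current weather",
      parameters: { type: "object", properties: { city: { type: "string" } }, required: ["city"] } },
  ],
}
```

In stateful mode with a valid session, the input array would instead contain only the items since the last assistant turn, alongside previous_response_id.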