ruby_llm-responses_api 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +28 -0
- data/LICENSE.txt +21 -0
- data/README.md +108 -0
- data/lib/ruby_llm/providers/openai_responses/active_record_extension.rb +76 -0
- data/lib/ruby_llm/providers/openai_responses/background.rb +98 -0
- data/lib/ruby_llm/providers/openai_responses/base.rb +14 -0
- data/lib/ruby_llm/providers/openai_responses/built_in_tools.rb +184 -0
- data/lib/ruby_llm/providers/openai_responses/capabilities.rb +226 -0
- data/lib/ruby_llm/providers/openai_responses/chat.rb +265 -0
- data/lib/ruby_llm/providers/openai_responses/media.rb +114 -0
- data/lib/ruby_llm/providers/openai_responses/message_extension.rb +32 -0
- data/lib/ruby_llm/providers/openai_responses/model_registry.rb +257 -0
- data/lib/ruby_llm/providers/openai_responses/models.rb +48 -0
- data/lib/ruby_llm/providers/openai_responses/state.rb +56 -0
- data/lib/ruby_llm/providers/openai_responses/streaming.rb +128 -0
- data/lib/ruby_llm/providers/openai_responses/tools.rb +193 -0
- data/lib/ruby_llm/providers/openai_responses.rb +94 -0
- data/lib/ruby_llm-responses_api.rb +4 -0
- data/lib/rubyllm_responses_api.rb +44 -0
- metadata +177 -0
data/lib/ruby_llm/providers/openai_responses/model_registry.rb
@@ -0,0 +1,257 @@
# frozen_string_literal: true

module RubyLLM
  module Providers
    class OpenAIResponses
      # Registers OpenAI Responses API models with RubyLLM
      # Models updated January 2026 based on OpenAI documentation
      module ModelRegistry
        MODELS = [
          # ===================
          # GPT-5.2 Series (Latest flagship - December 2025)
          # ===================
          {
            id: 'gpt-5.2',
            name: 'GPT-5.2',
            provider: 'openai_responses',
            family: 'gpt-5.2',
            context_window: 400_000,
            max_output_tokens: 128_000,
            modalities: { input: %w[text image], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output vision reasoning web_search code_interpreter]
          },

          # ===================
          # GPT-5.1 Series (November 2025)
          # ===================
          {
            id: 'gpt-5.1',
            name: 'GPT-5.1',
            provider: 'openai_responses',
            family: 'gpt-5.1',
            context_window: 400_000,
            max_output_tokens: 128_000,
            modalities: { input: %w[text image], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output vision reasoning web_search code_interpreter]
          },
          {
            id: 'gpt-5.1-codex-max',
            name: 'GPT-5.1 Codex Max',
            provider: 'openai_responses',
            family: 'gpt-5.1',
            context_window: 400_000,
            max_output_tokens: 128_000,
            modalities: { input: %w[text image], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output reasoning web_search]
          },
          {
            id: 'gpt-5.1-codex',
            name: 'GPT-5.1 Codex',
            provider: 'openai_responses',
            family: 'gpt-5.1',
            context_window: 400_000,
            max_output_tokens: 128_000,
            modalities: { input: %w[text image], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output reasoning]
          },
          {
            id: 'gpt-5.1-codex-mini',
            name: 'GPT-5.1 Codex Mini',
            provider: 'openai_responses',
            family: 'gpt-5.1',
            context_window: 400_000,
            max_output_tokens: 128_000,
            modalities: { input: %w[text image], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output reasoning]
          },
          {
            id: 'gpt-5.1-chat',
            name: 'GPT-5.1 Chat',
            provider: 'openai_responses',
            family: 'gpt-5.1',
            context_window: 128_000,
            max_output_tokens: 16_384,
            modalities: { input: %w[text image], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output vision]
          },

          # ===================
          # GPT-5 Series (August 2025)
          # ===================
          {
            id: 'gpt-5',
            name: 'GPT-5',
            provider: 'openai_responses',
            family: 'gpt-5',
            context_window: 400_000,
            max_output_tokens: 128_000,
            modalities: { input: %w[text image], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output vision reasoning web_search code_interpreter]
          },
          {
            id: 'gpt-5-pro',
            name: 'GPT-5 Pro',
            provider: 'openai_responses',
            family: 'gpt-5',
            context_window: 400_000,
            max_output_tokens: 128_000,
            modalities: { input: %w[text image], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output vision reasoning web_search code_interpreter]
          },
          {
            id: 'gpt-5-mini',
            name: 'GPT-5 Mini',
            provider: 'openai_responses',
            family: 'gpt-5',
            context_window: 400_000,
            max_output_tokens: 128_000,
            modalities: { input: %w[text image], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output vision reasoning web_search code_interpreter]
          },
          {
            id: 'gpt-5-nano',
            name: 'GPT-5 Nano',
            provider: 'openai_responses',
            family: 'gpt-5',
            context_window: 400_000,
            max_output_tokens: 128_000,
            modalities: { input: %w[text image], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output vision reasoning]
          },

          # ===================
          # O-Series Reasoning Models
          # ===================
          {
            id: 'o4-mini',
            name: 'O4 Mini',
            provider: 'openai_responses',
            family: 'o4',
            context_window: 200_000,
            max_output_tokens: 100_000,
            modalities: { input: %w[text image], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output vision reasoning web_search code_interpreter]
          },
          {
            id: 'o3-pro',
            name: 'O3 Pro',
            provider: 'openai_responses',
            family: 'o3',
            context_window: 200_000,
            max_output_tokens: 100_000,
            modalities: { input: %w[text image], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output vision reasoning web_search code_interpreter]
          },
          {
            id: 'o3',
            name: 'O3',
            provider: 'openai_responses',
            family: 'o3',
            context_window: 200_000,
            max_output_tokens: 100_000,
            modalities: { input: %w[text image], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output vision reasoning web_search code_interpreter]
          },
          {
            id: 'o3-mini',
            name: 'O3 Mini',
            provider: 'openai_responses',
            family: 'o3',
            context_window: 200_000,
            max_output_tokens: 100_000,
            modalities: { input: ['text'], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output reasoning]
          },
          {
            id: 'o1',
            name: 'O1',
            provider: 'openai_responses',
            family: 'o1',
            context_window: 200_000,
            max_output_tokens: 100_000,
            modalities: { input: %w[text image], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output vision reasoning]
          },
          {
            id: 'o1-mini',
            name: 'O1 Mini',
            provider: 'openai_responses',
            family: 'o1',
            context_window: 128_000,
            max_output_tokens: 65_536,
            modalities: { input: ['text'], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output reasoning]
          },

          # ===================
          # GPT-4.1 Series (Legacy - still supported)
          # ===================
          {
            id: 'gpt-4.1',
            name: 'GPT-4.1',
            provider: 'openai_responses',
            family: 'gpt-4.1',
            context_window: 1_000_000,
            max_output_tokens: 32_768,
            modalities: { input: %w[text image], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output vision web_search code_interpreter]
          },
          {
            id: 'gpt-4.1-mini',
            name: 'GPT-4.1 Mini',
            provider: 'openai_responses',
            family: 'gpt-4.1',
            context_window: 1_000_000,
            max_output_tokens: 32_768,
            modalities: { input: %w[text image], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output vision web_search code_interpreter]
          },
          {
            id: 'gpt-4.1-nano',
            name: 'GPT-4.1 Nano',
            provider: 'openai_responses',
            family: 'gpt-4.1',
            context_window: 1_000_000,
            max_output_tokens: 32_768,
            modalities: { input: %w[text image], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output vision web_search code_interpreter]
          },

          # ===================
          # GPT-4o Series (Legacy - still widely used)
          # ===================
          {
            id: 'gpt-4o',
            name: 'GPT-4o',
            provider: 'openai_responses',
            family: 'gpt-4o',
            context_window: 128_000,
            max_output_tokens: 16_384,
            modalities: { input: %w[text image], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output vision web_search code_interpreter]
          },
          {
            id: 'gpt-4o-mini',
            name: 'GPT-4o Mini',
            provider: 'openai_responses',
            family: 'gpt-4o',
            context_window: 128_000,
            max_output_tokens: 16_384,
            modalities: { input: %w[text image], output: ['text'] },
            capabilities: %w[streaming function_calling structured_output vision web_search code_interpreter]
          }
        ].freeze

        module_function

        def register_all!
          MODELS.each do |model_data|
            model = RubyLLM::Model::Info.new(model_data)
            existing = RubyLLM::Models.instance.all.find { |m| m.id == model.id && m.provider == model.provider }
            RubyLLM::Models.instance.all << model unless existing
          end
        end
      end
    end
  end
end
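Usage sketch (not part of the packaged code): how the registry could be invoked once the gem is loaded. The require paths are assumed from the file list above, and the attribute readers on the returned model are assumed from how this file itself queries the registry.

# Hypothetical usage - assumes ruby_llm-responses_api.rb is the gem's entry point.
require 'ruby_llm'
require 'ruby_llm-responses_api'

# Fill RubyLLM's shared model list with the MODELS entries defined above.
RubyLLM::Providers::OpenAIResponses::ModelRegistry.register_all!

# Registered models are then discoverable through the same list register_all! writes to.
flagship = RubyLLM::Models.instance.all.find do |m|
  m.id == 'gpt-5.2' && m.provider == 'openai_responses'
end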
data/lib/ruby_llm/providers/openai_responses/models.rb
@@ -0,0 +1,48 @@
# frozen_string_literal: true

module RubyLLM
  module Providers
    class OpenAIResponses
      # Model listing methods for the OpenAI Responses API.
      module Models
        module_function

        def models_url
          'models'
        end

        def parse_list_models_response(response, slug, capabilities)
          models_data = response.body
          models_data = models_data['data'] if models_data.is_a?(Hash) && models_data['data']

          Array(models_data).filter_map do |model_data|
            model_id = model_data['id']

            # Only include models that support the Responses API
            next unless capabilities.supports_responses_api?(model_id)

            Model::Info.new(
              id: model_id,
              name: capabilities.format_display_name(model_id),
              provider: slug,
              family: capabilities.model_family(model_id),
              context_window: capabilities.context_window_for(model_id),
              max_output_tokens: capabilities.max_tokens_for(model_id),
              modalities: capabilities.modalities_for(model_id),
              capabilities: capabilities.capabilities_for(model_id),
              pricing: capabilities.pricing_for(model_id),
              metadata: {
                created_at: model_data['created'] ? Time.at(model_data['created']) : nil,
                owned_by: model_data['owned_by'],
                supports_responses_api: true,
                supports_web_search: capabilities.supports_web_search?(model_id),
                supports_code_interpreter: capabilities.supports_code_interpreter?(model_id),
                reasoning_model: capabilities.reasoning_model?(model_id)
              }.compact
            )
          end
        end
      end
    end
  end
end
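Data-flow sketch for parse_list_models_response (illustrative only): a Struct stands in for an HTTP response object exposing #body, and the Capabilities module (capabilities.rb above, whose internals are not shown in this hunk) is assumed to answer the predicate and lookup methods called by this file.

# Hand-rolled stub of an OpenAI /models response body.
FakeResponse = Struct.new(:body)
response = FakeResponse.new(
  { 'data' => [
    { 'id' => 'gpt-5.2', 'created' => 1_764_000_000, 'owned_by' => 'openai' },
    { 'id' => 'whisper-1', 'created' => 1_700_000_000, 'owned_by' => 'openai' }
  ] }
)

caps = RubyLLM::Providers::OpenAIResponses::Capabilities
models = RubyLLM::Providers::OpenAIResponses::Models.parse_list_models_response(
  response, 'openai_responses', caps
)
# Entries rejected by caps.supports_responses_api? (presumably whisper-1) are dropped by
# the filter_map; the rest come back as Model::Info records with Responses API metadata.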
data/lib/ruby_llm/providers/openai_responses/state.rb
@@ -0,0 +1,56 @@
# frozen_string_literal: true

module RubyLLM
  module Providers
    class OpenAIResponses
      # Statefulness support for the OpenAI Responses API.
      # Handles conversation state via previous_response_id and store options.
      module State
        module_function

        # Add state parameters to payload
        # @param payload [Hash] The request payload
        # @param params [Hash] Additional parameters that may contain state options
        # @return [Hash] Updated payload with state parameters
        def apply_state_params(payload, params)
          # Handle previous_response_id for conversation chaining
          payload[:previous_response_id] = params[:previous_response_id] if params[:previous_response_id]

          # Handle store option (defaults to true in Responses API)
          payload[:store] = params[:store] if params.key?(:store)

          # Handle metadata
          payload[:metadata] = params[:metadata] if params[:metadata]

          payload
        end

        # Extract response ID from a completed response for chaining
        # @param response [Hash] The API response
        # @return [String, nil] The response ID
        def extract_response_id(response)
          response['id']
        end

        # Check if a response was stored
        # @param response [Hash] The API response
        # @return [Boolean]
        def response_stored?(response)
          # Responses are stored by default unless store: false was set
          response['store'] != false
        end

        # Build parameters for continuing a conversation
        # @param previous_response_id [String] The ID of the previous response
        # @param store [Boolean] Whether to store this response (default: true)
        # @return [Hash] Parameters to pass to the next request
        def continuation_params(previous_response_id, store: true)
          {
            previous_response_id: previous_response_id,
            store: store
          }
        end
      end
    end
  end
end
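Round-trip sketch using only the helpers above; the response hashes are hand-written here, not real API output.

state = RubyLLM::Providers::OpenAIResponses::State

payload = state.apply_state_params({ model: 'gpt-5.1', input: 'Hello' }, { store: true })
# => { model: "gpt-5.1", input: "Hello", store: true }

first_response = { 'id' => 'resp_123', 'store' => true } # parsed API response (illustrative)
response_id = state.extract_response_id(first_response)  # => "resp_123"
state.response_stored?(first_response)                   # => true

# Chain the next turn onto the stored response.
next_params  = state.continuation_params(response_id)
next_payload = state.apply_state_params({ model: 'gpt-5.1', input: 'And then?' }, next_params)
# => { model: "gpt-5.1", input: "And then?", previous_response_id: "resp_123", store: true }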
data/lib/ruby_llm/providers/openai_responses/streaming.rb
@@ -0,0 +1,128 @@
# frozen_string_literal: true

module RubyLLM
  module Providers
    class OpenAIResponses
      # Streaming methods for the OpenAI Responses API.
      # Handles SSE events with typed event format.
      module Streaming
        module_function

        def stream_url
          'responses'
        end

        def build_chunk(data) # rubocop:disable Metrics/AbcSize,Metrics/CyclomaticComplexity,Metrics/MethodLength
          event_type = data['type']

          case event_type
          when 'response.output_text.delta'
            # Text content delta
            Chunk.new(
              role: :assistant,
              content: data['delta'],
              model_id: data.dig('response', 'model')
            )

          when 'response.function_call_arguments.delta'
            # Function call arguments streaming
            Chunk.new(
              role: :assistant,
              content: nil,
              tool_calls: build_streaming_tool_call(data),
              model_id: data.dig('response', 'model')
            )

          when 'response.completed'
            # Final response with usage stats
            response_data = data['response'] || {}
            usage = response_data['usage'] || {}
            cached_tokens = usage.dig('input_tokens_details', 'cached_tokens')

            Chunk.new(
              role: :assistant,
              content: nil,
              input_tokens: usage['input_tokens'],
              output_tokens: usage['output_tokens'],
              cached_tokens: cached_tokens,
              cache_creation_tokens: 0,
              model_id: response_data['model'],
              response_id: response_data['id']
            )

          when 'response.output_item.added'
            # New output item started (function call, message, etc.)
            item = data['item'] || {}
            if item['type'] == 'function_call'
              Chunk.new(
                role: :assistant,
                content: nil,
                tool_calls: {
                  item['call_id'] => ToolCall.new(
                    id: item['call_id'],
                    name: item['name'],
                    arguments: ''
                  )
                }
              )
            else
              # Other item types - return empty chunk
              Chunk.new(role: :assistant, content: nil)
            end

          when 'response.content_part.added', 'response.content_part.done',
               'response.output_item.done', 'response.output_text.done',
               'response.function_call_arguments.done', 'response.created',
               'response.in_progress'
            # Status events - return empty chunk
            Chunk.new(role: :assistant, content: nil)

          when 'error'
            # Error event
            error_data = data['error'] || {}
            raise RubyLLM::Error.new(nil, error_data['message'] || 'Unknown streaming error')

          else
            # Unknown event type - return empty chunk
            Chunk.new(role: :assistant, content: nil)
          end
        end

        def build_streaming_tool_call(data)
          call_id = data['call_id'] || data['item_id']
          return nil unless call_id

          {
            call_id => ToolCall.new(
              id: call_id,
              name: data['name'],
              arguments: data['delta'] || ''
            )
          }
        end

        def parse_streaming_error(data)
          error_data = JSON.parse(data)
          return unless error_data['error'] || error_data['type'] == 'error'

          error = error_data['error'] || error_data
          error_type = error['type'] || error['code']
          error_message = error['message']

          case error_type
          when 'server_error', 'internal_error'
            [500, error_message]
          when 'rate_limit_exceeded', 'insufficient_quota'
            [429, error_message]
          when 'invalid_request_error', 'invalid_api_key'
            [400, error_message]
          else
            [400, error_message]
          end
        rescue JSON::ParserError
          [500, data]
        end
      end
    end
  end
end
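Sketch of how build_chunk maps two typed SSE events. The event hashes below are hand-written to match the branches above (not captured API traffic), and RubyLLM's Chunk is assumed to expose readers for the attributes it is constructed with.

streaming = RubyLLM::Providers::OpenAIResponses::Streaming

# A text delta event becomes a content-bearing chunk.
delta_event = {
  'type' => 'response.output_text.delta',
  'delta' => 'Hel',
  'response' => { 'model' => 'gpt-5.1' }
}
streaming.build_chunk(delta_event).content # => "Hel"

# The terminal event carries usage; build_chunk surfaces token counts and the response id.
completed_event = {
  'type' => 'response.completed',
  'response' => {
    'id' => 'resp_123',
    'model' => 'gpt-5.1',
    'usage' => {
      'input_tokens' => 12,
      'output_tokens' => 34,
      'input_tokens_details' => { 'cached_tokens' => 0 }
    }
  }
}
final = streaming.build_chunk(completed_event)
# final carries input_tokens: 12, output_tokens: 34, response_id: "resp_123"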
data/lib/ruby_llm/providers/openai_responses/tools.rb
@@ -0,0 +1,193 @@
# frozen_string_literal: true

module RubyLLM
  module Providers
    class OpenAIResponses
      # Tools/function calling methods for the OpenAI Responses API.
      # Handles both custom function tools and built-in tools.
      module Tools
        module_function

        EMPTY_PARAMETERS_SCHEMA = {
          'type' => 'object',
          'properties' => {},
          'required' => [],
          'additionalProperties' => false
        }.freeze

        # Built-in tool type constants
        BUILT_IN_TOOLS = {
          web_search: { type: 'web_search_preview' },
          file_search: ->(vector_store_ids) { { type: 'file_search', vector_store_ids: vector_store_ids } },
          code_interpreter: { type: 'code_interpreter', container: { type: 'auto' } },
          image_generation: { type: 'image_generation' },
          computer_use: ->(opts) { { type: 'computer_use_preview', **opts } }
        }.freeze

        def tool_for(tool)
          # Check if it's a built-in tool specification
          return tool if tool.is_a?(Hash) && tool[:type]

          # Handle symbol references to built-in tools
          if tool.is_a?(Symbol) && BUILT_IN_TOOLS.key?(tool)
            built_in = BUILT_IN_TOOLS[tool]
            return built_in.is_a?(Proc) ? built_in.call([]) : built_in
          end

          # Standard function tool
          parameters_schema = parameters_schema_for(tool)

          definition = {
            type: 'function',
            name: tool.name,
            description: tool.description,
            parameters: parameters_schema
          }

          # Add strict mode if schema supports it
          definition[:strict] = true if parameters_schema['additionalProperties'] == false

          return definition if tool.respond_to?(:provider_params) && tool.provider_params.empty?

          if tool.respond_to?(:provider_params) && tool.provider_params.any?
            RubyLLM::Utils.deep_merge(definition, tool.provider_params)
          else
            definition
          end
        end

        def parameters_schema_for(tool)
          if tool.respond_to?(:params_schema) && tool.params_schema
            tool.params_schema
          elsif tool.respond_to?(:parameters)
            schema_from_parameters(tool.parameters)
          else
            EMPTY_PARAMETERS_SCHEMA
          end
        end

        def schema_from_parameters(parameters)
          return EMPTY_PARAMETERS_SCHEMA if parameters.nil? || parameters.empty?

          if defined?(RubyLLM::Tool::SchemaDefinition)
            schema_definition = RubyLLM::Tool::SchemaDefinition.from_parameters(parameters)
            schema_definition&.json_schema || EMPTY_PARAMETERS_SCHEMA
          else
            # Fallback for older RubyLLM versions
            build_schema_from_parameters(parameters)
          end
        end

        def build_schema_from_parameters(parameters)
          properties = {}
          required = []

          parameters.each do |name, param|
            properties[name.to_s] = {
              type: param.type || 'string',
              description: param.description
            }.compact

            required << name.to_s if param.required
          end

          {
            'type' => 'object',
            'properties' => properties,
            'required' => required,
            'additionalProperties' => false
          }
        end

        def format_tool_calls(tool_calls)
          return nil unless tool_calls&.any?

          tool_calls.map do |_, tc|
            {
              type: 'function_call',
              call_id: tc.id,
              name: tc.name,
              arguments: tc.arguments.is_a?(String) ? tc.arguments : JSON.generate(tc.arguments)
            }
          end
        end

        def parse_tool_calls(tool_calls, parse_arguments: true)
          return nil unless tool_calls&.any?

          tool_calls.to_h do |tc|
            call_id = tc['call_id'] || tc['id']
            [
              call_id,
              ToolCall.new(
                id: call_id,
                name: tc['name'],
                arguments: if parse_arguments
                             parse_tool_call_arguments(tc)
                           else
                             tc['arguments']
                           end
              )
            ]
          end
        end

        def parse_tool_call_arguments(tool_call)
          arguments = tool_call['arguments']

          if arguments.nil? || arguments.empty?
            {}
          elsif arguments.is_a?(Hash)
            arguments
          else
            JSON.parse(arguments)
          end
        rescue JSON::ParserError
          { raw: arguments }
        end

        # Helper to create built-in tool configurations
        def web_search_tool(search_context_size: nil)
          tool = { type: 'web_search_preview' }
          tool[:search_context_size] = search_context_size if search_context_size
          tool
        end

        def file_search_tool(vector_store_ids:, max_num_results: nil, ranking_options: nil)
          tool = {
            type: 'file_search',
            vector_store_ids: Array(vector_store_ids)
          }
          tool[:max_num_results] = max_num_results if max_num_results
          tool[:ranking_options] = ranking_options if ranking_options
          tool
        end

        def code_interpreter_tool(container_type: 'auto')
          {
            type: 'code_interpreter',
            container: { type: container_type }
          }
        end

        def image_generation_tool(partial_images: nil)
          tool = { type: 'image_generation' }
          tool[:partial_images] = partial_images if partial_images
          tool
        end

        def mcp_tool(server_label:, server_url:, require_approval: 'never', allowed_tools: nil, headers: nil)
          tool = {
            type: 'mcp',
            server_label: server_label,
            server_url: server_url,
            require_approval: require_approval
          }
          tool[:allowed_tools] = allowed_tools if allowed_tools
          tool[:headers] = headers if headers
          tool
        end
      end
    end
  end
end
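Sketch of assembling a tools array from the helpers above. Wiring the array into a request payload is handled by chat.rb / built_in_tools.rb (other files in this release, not shown in this hunk), so it is only indicated here; the 'medium' search context size and the vector store id are placeholder values.

tools_mod = RubyLLM::Providers::OpenAIResponses::Tools

# Built-in tools are plain Hashes in the Responses API tool format.
tools = [
  tools_mod.web_search_tool(search_context_size: 'medium'),
  tools_mod.code_interpreter_tool,
  tools_mod.file_search_tool(vector_store_ids: 'vs_abc123', max_num_results: 5)
]
# => [{ type: "web_search_preview", search_context_size: "medium" },
#     { type: "code_interpreter", container: { type: "auto" } },
#     { type: "file_search", vector_store_ids: ["vs_abc123"], max_num_results: 5 }]

# tool_for resolves symbols against BUILT_IN_TOOLS and passes typed Hashes through as-is.
tools_mod.tool_for(:web_search)                  # => { type: "web_search_preview" }
tools_mod.tool_for({ type: 'image_generation' }) # => { type: "image_generation" }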