ruby_llm-responses_api 0.2.0 → 0.3.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: ba5d18029ff9e07a0577fa0fc92b5946fe0b43768e495b5550dc6e3b396520b5
-   data.tar.gz: 67f47ef8f9ca80c54ad4765f0ade6f84a2dbdc04dfc9c9e42136c5c5aa8d1fe3
+   metadata.gz: '09bb1acfd81c2fac2bfecfbaae9780ca64a3d6e13f1692237df82372f42f9263'
+   data.tar.gz: bcfcbd5d83280b319748b58ae2533f8963d0a4eeb8c829bd645cfe5c08245ff0
  SHA512:
-   metadata.gz: d943d82abf6f1afe6c268fbdbc8b9c0179321f67f188c3c1c25058387591673e91356616b1fafdf868da79ee1084aca90ad0117d0835fcd3dd0a238fb96cb712
-   data.tar.gz: 6b5906f1a604543b7a7cd34e25eb21faf500b2dd14558b174c6165da60cad656576b12af5e72cc49cd6960e9ad5e1210a6836bcc706be92e0b9ac77fea45c3ca
+   metadata.gz: 1f1f210817358d55bb76ad1169e7ffcbb510b54d57111aee18c2aa748a5ad0a1c8b3bb10b0b7e54a4d767234bd388c6936b2ab15c0c1ae8fa7ae8370e91e57b6
+   data.tar.gz: 047b530a9923948c8c25813192566c27a94dad2954ff3fbf26dfa14944a52f6a5df4a6d8839ad94ca9447194f3a1eea7b8ca46fec07e036bf3b9776465d10596
data/CHANGELOG.md CHANGED
@@ -5,6 +5,31 @@ All notable changes to this project will be documented in this file.
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+ ## [0.3.0] - 2026-02-11
+
+ ### Added
+
+ - **Shell tool** support for executing commands in hosted or local terminal environments
+   - Auto-provisioned containers (`container_auto`), reusable containers (`container_reference`), and local execution (`local`)
+   - Container networking with domain allowlists and domain-scoped secrets
+   - Configurable memory limits (`1g`, `4g`, `16g`, `64g`)
+   - `BuiltInTools.shell` helper and `parse_shell_call_results` parser
+ - **Server-side compaction** for multi-hour agent runs without hitting context limits
+   - `Compaction.compaction_params(compact_threshold:)` helper
+   - Pass via `chat.with_params(context_management: [{ type: 'compaction', compact_threshold: 200_000 }])`
+ - **Containers API** for managing persistent execution environments
+   - `create_container`, `retrieve_container`, `delete_container`
+   - `list_container_files`, `retrieve_container_file`, `retrieve_container_file_content`
+ - **Apply Patch tool** for structured diff-based file editing
+   - `BuiltInTools.apply_patch` helper and `parse_apply_patch_results` parser
+
+ ## [0.2.0] - 2026-01-15
+
+ ### Added
+
+ - Legacy ActiveRecord support
+ - CI compatibility fixes
+
  ## [0.1.0] - 2025-01-03
 
  ### Added
data/README.md CHANGED
@@ -60,13 +60,17 @@ chat.ask("What's my name?") # => "Alice"
  ## Built-in Tools
 
- The Responses API provides built-in tools that don't require custom implementation.
+ The Responses API provides built-in tools that don't require custom implementation. Pass them as hashes via `with_params`, or use the `BuiltInTools` helper module.
 
  ### Web Search
 
  ```ruby
  chat.with_params(tools: [{ type: 'web_search_preview' }])
  chat.ask("Latest news about Ruby 3.4?")
+
+ # Or with the helper
+ tool = RubyLLM::ResponsesAPI::BuiltInTools.web_search(search_context_size: 'high')
+ chat.with_params(tools: [tool])
  ```
 
  ### Code Interpreter
@@ -87,21 +91,181 @@ chat.with_params(tools: [{ type: 'file_search', vector_store_ids: ['vs_abc123']
  chat.ask("What does the documentation say about authentication?")
  ```
 
+ ### Shell
+
+ Execute commands in hosted containers or local terminal environments. Requires GPT-5 family models.
+
+ ```ruby
+ # Auto-provisioned container (default)
+ chat = RubyLLM.chat(model: 'gpt-5.2', provider: :openai_responses)
+ chat.with_params(tools: [{ type: 'shell', environment: { type: 'container_auto' } }])
+ chat.ask("List all Python files in the project")
+
+ # Using helper
+ tool = RubyLLM::ResponsesAPI::BuiltInTools.shell
+ chat.with_params(tools: [tool])
+
+ # Reuse an existing container
+ tool = RubyLLM::ResponsesAPI::BuiltInTools.shell(container_id: 'cntr_abc123')
+
+ # With networking (allow specific domains)
+ tool = RubyLLM::ResponsesAPI::BuiltInTools.shell(
+   network_policy: {
+     type: 'allowlist',
+     allowed_domains: ['pypi.org', 'github.com'],
+     domain_secrets: [
+       { domain: 'github.com', name: 'GITHUB_TOKEN', value: ENV['GITHUB_TOKEN'] }
+     ]
+   }
+ )
+
+ # With memory limit
+ tool = RubyLLM::ResponsesAPI::BuiltInTools.shell(memory_limit: '4g')
+
+ # Local execution (you handle running commands yourself)
+ tool = RubyLLM::ResponsesAPI::BuiltInTools.shell(environment_type: 'local')
+ ```
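+
+ With local execution the model emits `shell_call` items and your application runs the commands itself. Here is a minimal, hypothetical sketch of that loop built on this gem's `parse_shell_call_results`; it assumes each parsed call's `action` hash carries a `commands` array (check the API reference for the exact shape), and it does not send outputs back to the model:
+
+ ```ruby
+ require 'open3'
+
+ response = chat.ask("Run the test suite")
+ output = response.raw.body['output']
+ calls = RubyLLM::ResponsesAPI::BuiltInTools.parse_shell_call_results(output)
+
+ calls.each do |call|
+   next unless call[:action] # assumed shape: { 'commands' => ['bundle exec rspec'] }
+   Array(call[:action]['commands']).each do |cmd|
+     stdout, _stderr, status = Open3.capture3(cmd)
+     puts "$ #{cmd} (exit #{status.exitstatus})"
+     puts stdout
+   end
+ end
+ ```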
+
+ ### Apply Patch
+
+ Structured diff-based file editing. Requires GPT-5 family models.
+
+ ```ruby
+ chat = RubyLLM.chat(model: 'gpt-5.2', provider: :openai_responses)
+ chat.with_params(tools: [{ type: 'apply_patch' }])
+ chat.ask("Add error handling to the User#save method")
+
+ # Using helper
+ tool = RubyLLM::ResponsesAPI::BuiltInTools.apply_patch
+ chat.with_params(tools: [tool])
+ ```
+
+ ### Image Generation
+
+ ```ruby
+ chat.with_params(tools: [{ type: 'image_generation' }])
+ chat.ask("Generate an image of a sunset over mountains")
+ ```
+
+ ### MCP (Model Context Protocol)
+
+ ```ruby
+ tool = RubyLLM::ResponsesAPI::BuiltInTools.mcp(
+   server_label: 'github',
+   server_url: 'https://api.github.com/mcp',
+   require_approval: 'never'
+ )
+ chat.with_params(tools: [tool])
+ ```
+
  ### Combining Tools
 
  ```ruby
  chat.with_params(tools: [
    { type: 'web_search_preview' },
-   { type: 'code_interpreter' }
+   { type: 'code_interpreter' },
+   { type: 'shell', environment: { type: 'container_auto' } }
  ])
- chat.ask("Find the latest Bitcoin price and plot a chart")
+ chat.ask("Research the latest sorting algorithms and benchmark them")
+ ```
+
+ ## Server-Side Compaction
+
+ For multi-hour agent runs, enable server-side compaction to automatically compress conversation context when it exceeds a token threshold:
+
+ ```ruby
+ chat = RubyLLM.chat(model: 'gpt-4o', provider: :openai_responses)
+
+ # Pass directly
+ chat.with_params(
+   context_management: [{ type: 'compaction', compact_threshold: 200_000 }]
+ )
+
+ # Or use the helper
+ chat.with_params(
+   **RubyLLM::ResponsesAPI::Compaction.compaction_params(compact_threshold: 150_000)
+ )
+
+ # Now run a long agent loop without worrying about context limits
+ loop do
+   response = chat.ask(next_prompt)
+   break if done?(response)
+ end
+ ```
+
+ When the token count crosses the threshold, the server automatically compacts the conversation. The compacted state is carried forward transparently via `previous_response_id`.
+
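+ To sanity-check that compaction is active on long runs, you can log each turn's token usage from the raw response (a minimal sketch; it assumes the raw body exposes the Responses API's standard `usage` object):
+
+ ```ruby
+ response = chat.ask(next_prompt)
+ usage = response.raw.body['usage']
+ puts "Tokens this turn: #{usage['total_tokens']}" if usage
+ ```
+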
+ ## Containers API
+
+ Manage persistent execution environments for the shell tool and code interpreter:
+
+ ```ruby
+ chat = RubyLLM.chat(model: 'gpt-5.2', provider: :openai_responses)
+ provider = chat.instance_variable_get(:@provider)
+
+ # Create a container
+ container = provider.create_container(
+   name: 'my-analysis-env',
+   expires_after: { anchor: 'last_active_at', minutes: 60 },
+   memory_limit: '4g'
+ )
+
+ # Use it with the shell tool
+ tool = RubyLLM::ResponsesAPI::BuiltInTools.shell(container_id: container['id'])
+ chat.with_params(tools: [tool])
+ chat.ask("Install pandas and run my analysis script")
+
+ # List files created in the container
+ files = provider.list_container_files(container['id'])
+
+ # Retrieve a specific file's content (file_id comes from the listing above)
+ content = provider.retrieve_container_file_content(container['id'], file_id)
+
+ # Clean up
+ provider.delete_container(container['id'])
+ ```
+
228
+ ## Background Mode
229
+
230
+ For long-running tasks:
231
+
232
+ ```ruby
233
+ chat = RubyLLM.chat(model: 'gpt-4o', provider: :openai_responses)
234
+ chat.with_params(background: true)
235
+ response = chat.ask("Analyze this large dataset...")
236
+
237
+ # Poll for completion
238
+ provider = chat.instance_variable_get(:@provider)
239
+ result = provider.poll_response(response.response_id, interval: 2.0) do |status|
240
+ puts "Status: #{status['status']}"
241
+ end
242
+ ```
243
+
244
+ ## Parsing Built-in Tool Results
245
+
246
+ When the API returns results from built-in tools, use the parsers to extract structured data:
247
+
248
+ ```ruby
249
+ # Access raw response output (available via response.raw)
250
+ output = response.raw.body['output']
251
+
252
+ # Parse results by tool type
253
+ web_results = RubyLLM::ResponsesAPI::BuiltInTools.parse_web_search_results(output)
254
+ code_results = RubyLLM::ResponsesAPI::BuiltInTools.parse_code_interpreter_results(output)
255
+ file_results = RubyLLM::ResponsesAPI::BuiltInTools.parse_file_search_results(output)
256
+ shell_results = RubyLLM::ResponsesAPI::BuiltInTools.parse_shell_call_results(output)
257
+ patch_results = RubyLLM::ResponsesAPI::BuiltInTools.parse_apply_patch_results(output)
258
+ image_results = RubyLLM::ResponsesAPI::BuiltInTools.parse_image_generation_results(output)
259
+ citations = RubyLLM::ResponsesAPI::BuiltInTools.extract_citations(message_content)
98
260
  ```
99
261
 
100
262
  ## Why Use the Responses API?
101
263
 
102
- - **Built-in tools** - Web search, code execution, file search without custom implementation
264
+ - **Built-in tools** - Web search, code execution, file search, shell, apply patch without custom implementation
103
265
  - **Stateful conversations** - OpenAI stores context server-side via `previous_response_id`
104
266
  - **Simpler multi-turn** - No need to send full message history on each request
267
+ - **Server-side compaction** - Run multi-hour agent sessions without hitting context limits
268
+ - **Containers** - Persistent execution environments with networking and file management
105
269
 
106
270
  ## License
107
271
 
data/lib/ruby_llm/providers/openai_responses/built_in_tools.rb CHANGED
@@ -85,6 +85,31 @@ module RubyLLM
          }
        end
 
+       # Shell tool configuration
+       # @param environment_type [String] 'container_auto', 'container_reference', or 'local'
+       # @param container_id [String, nil] Container ID for 'container_reference' type
+       # @param network_policy [Hash, nil] Network policy (e.g. { type: 'allowlist', allowed_domains: [...] })
+       # @param memory_limit [String, nil] Memory limit: '1g', '4g', '16g', '64g'
+       def shell(environment_type: 'container_auto', container_id: nil,
+                 network_policy: nil, memory_limit: nil)
+         env = if container_id
+                 { type: 'container_reference', container_id: container_id }
+               else
+                 { type: environment_type }
+               end
+
+         env[:network_policy] = network_policy if network_policy
+         env[:memory_limit] = memory_limit if memory_limit
+
+         { type: 'shell', environment: env }
+       end
+
+       # Apply Patch tool configuration
+       # Enables the model to create, update, and delete files using structured diffs.
+       def apply_patch
+         { type: 'apply_patch' }
+       end
+
        # Parse web search results from output
        # @param output [Array] Response output array
        # @return [Array<Hash>] Parsed search results with citations
@@ -146,6 +146,39 @@ module RubyLLM
          end
        end
 
+       # Parse apply_patch call results from output
+       # @param output [Array] Response output array
+       # @return [Array<Hash>] Parsed apply_patch call results
+       def parse_apply_patch_results(output)
+         output
+           .select { |item| item['type'] == 'apply_patch_call' }
+           .map do |item|
+             {
+               id: item['id'],
+               call_id: item['call_id'],
+               status: item['status'],
+               operation: item['operation']
+             }
+           end
+       end
+
+       # Parse shell call results from output
+       # @param output [Array] Response output array
+       # @return [Array<Hash>] Parsed shell call results
+       def parse_shell_call_results(output)
+         output
+           .select { |item| item['type'] == 'shell_call' }
+           .map do |item|
+             {
+               id: item['id'],
+               call_id: item['call_id'],
+               status: item['status'],
+               action: item['action'],
+               container_id: item['container_id']
+             }
+           end
+       end
+
        # Extract all citations from message content
        # @param content [Array] Message content array
        # @return [Array<Hash>] All citations/annotations
data/lib/ruby_llm/providers/openai_responses/compaction.rb ADDED
@@ -0,0 +1,40 @@
+ # frozen_string_literal: true
+
+ module RubyLLM
+   module Providers
+     class OpenAIResponses
+       # Server-side compaction support for long-running agent sessions.
+       # Automatically compacts conversation context when token count exceeds threshold.
+       module Compaction
+         module_function
+
+         # Build context_management parameter for compaction
+         # @param compact_threshold [Integer] Token count threshold to trigger compaction (minimum 1000)
+         # @return [Hash] Parameters to merge into request payload via with_params
+         def compaction_params(compact_threshold: 200_000)
+           {
+             context_management: [
+               { type: 'compaction', compact_threshold: compact_threshold }
+             ]
+           }
+         end
+
+         # Apply compaction settings to payload
+         # @param payload [Hash] The request payload
+         # @param params [Hash] Additional parameters that may contain compaction options
+         # @return [Hash] Updated payload with context_management
+         def apply_compaction(payload, params)
+           if params[:compact_threshold]
+             payload[:context_management] = [
+               { type: 'compaction', compact_threshold: params[:compact_threshold] }
+             ]
+           elsif params[:context_management]
+             payload[:context_management] = params[:context_management]
+           end
+
+           payload
+         end
+       end
+     end
+   end
+ end
data/lib/ruby_llm/providers/openai_responses/containers.rb ADDED
@@ -0,0 +1,49 @@
+ # frozen_string_literal: true
+
+ module RubyLLM
+   module Providers
+     class OpenAIResponses
+       # Containers API support for managing persistent execution environments.
+       # Containers can be used with the shell tool and code interpreter.
+       module Containers
+         module_function
+
+         # URL helpers
+         def containers_url
+           'containers'
+         end
+
+         def container_url(container_id)
+           "containers/#{container_id}"
+         end
+
+         def container_files_url(container_id)
+           "containers/#{container_id}/files"
+         end
+
+         def container_file_url(container_id, file_id)
+           "containers/#{container_id}/files/#{file_id}"
+         end
+
+         def container_file_content_url(container_id, file_id)
+           "containers/#{container_id}/files/#{file_id}/content"
+         end
+
+         # Build create container payload
+         # @param name [String, nil] Name for the container
+         # @param expires_after [Hash, nil] Expiry config, e.g. { anchor: 'last_active_at', minutes: 60 }
+         # @param file_ids [Array<String>, nil] Files to copy into the container
+         # @param memory_limit [String, nil] Memory limit: '1g', '4g', '16g', '64g'
+         # @return [Hash] Create container payload
+         def create_payload(name: nil, expires_after: nil, file_ids: nil, memory_limit: nil)
+           payload = {}
+           payload[:name] = name if name
+           payload[:expires_after] = expires_after if expires_after
+           payload[:file_ids] = file_ids if file_ids
+           payload[:memory_limit] = memory_limit if memory_limit
+           payload
+         end
+       end
+     end
+   end
+ end
@@ -21,7 +21,9 @@ module RubyLLM
        file_search: ->(vector_store_ids) { { type: 'file_search', vector_store_ids: vector_store_ids } },
        code_interpreter: { type: 'code_interpreter', container: { type: 'auto' } },
        image_generation: { type: 'image_generation' },
-       computer_use: ->(opts) { { type: 'computer_use_preview', **opts } }
+       computer_use: ->(opts) { { type: 'computer_use_preview', **opts } },
+       shell: { type: 'shell', environment: { type: 'container_auto' } },
+       apply_patch: { type: 'apply_patch' }
      }.freeze
 
      def tool_for(tool)
@@ -187,6 +189,24 @@ module RubyLLM
        tool[:headers] = headers if headers
        tool
      end
+
+     def shell_tool(environment_type: 'container_auto', container_id: nil,
+                    network_policy: nil, memory_limit: nil)
+       env = if container_id
+               { type: 'container_reference', container_id: container_id }
+             else
+               { type: environment_type }
+             end
+
+       env[:network_policy] = network_policy if network_policy
+       env[:memory_limit] = memory_limit if memory_limit
+
+       { type: 'shell', environment: env }
+     end
+
+     def apply_patch_tool
+       { type: 'apply_patch' }
+     end
    end
  end
end
@@ -44,7 +44,7 @@ module RubyLLM
      # @param response_id [String] The response ID to delete
      # @return [Hash] The deletion result
      def delete_response(response_id)
-       response = @connection.delete(Background.retrieve_url(response_id))
+       response = delete_request(Background.retrieve_url(response_id))
        response.body
      end
 
@@ -76,6 +76,77 @@ module RubyLLM
        end
      end
 
+     # --- Container Management ---
+
+     # Create a new container
+     # @param name [String, nil] Container name
+     # @param expires_after [Hash, nil] Expiry configuration
+     # @param file_ids [Array<String>, nil] File IDs to copy into container
+     # @param memory_limit [String, nil] Memory limit: '1g', '4g', '16g', '64g'
+     # @return [Hash] Created container data
+     def create_container(name: nil, expires_after: nil, file_ids: nil, memory_limit: nil)
+       payload = Containers.create_payload(
+         name: name, expires_after: expires_after,
+         file_ids: file_ids, memory_limit: memory_limit
+       )
+       response = @connection.post(Containers.containers_url, payload)
+       response.body
+     end
+
+     # Retrieve a container by ID
+     # @param container_id [String] The container ID
+     # @return [Hash] Container data
+     def retrieve_container(container_id)
+       response = @connection.get(Containers.container_url(container_id))
+       response.body
+     end
+
+     # Delete a container
+     # @param container_id [String] The container ID
+     # @return [Hash] Deletion result
+     def delete_container(container_id)
+       response = delete_request(Containers.container_url(container_id))
+       response.body
+     end
+
+     # List files in a container
+     # @param container_id [String] The container ID
+     # @return [Hash] File listing
+     def list_container_files(container_id)
+       response = @connection.get(Containers.container_files_url(container_id))
+       response.body
+     end
+
+     # Retrieve a specific file from a container
+     # @param container_id [String] The container ID
+     # @param file_id [String] The file ID
+     # @return [Hash] File metadata
+     def retrieve_container_file(container_id, file_id)
+       response = @connection.get(Containers.container_file_url(container_id, file_id))
+       response.body
+     end
+
+     # Get file content from a container
+     # @param container_id [String] The container ID
+     # @param file_id [String] The file ID
+     # @return [String] File content
+     def retrieve_container_file_content(container_id, file_id)
+       response = @connection.get(Containers.container_file_content_url(container_id, file_id))
+       response.body
+     end
+
+     private
+
+     # DELETE request via the underlying Faraday connection
+     # RubyLLM::Connection only exposes get/post, so we use Faraday directly
+     def delete_request(url)
+       @connection.connection.delete(url) do |req|
+         req.headers.merge!(headers)
+       end
+     end
+
+     public
+
      class << self
        def capabilities
          OpenAIResponses::Capabilities
@@ -17,6 +17,8 @@ require_relative 'ruby_llm/providers/openai_responses/chat'
  require_relative 'ruby_llm/providers/openai_responses/built_in_tools'
  require_relative 'ruby_llm/providers/openai_responses/state'
  require_relative 'ruby_llm/providers/openai_responses/background'
+ require_relative 'ruby_llm/providers/openai_responses/compaction'
+ require_relative 'ruby_llm/providers/openai_responses/containers'
  require_relative 'ruby_llm/providers/openai_responses/message_extension'
  require_relative 'ruby_llm/providers/openai_responses/model_registry'
  require_relative 'ruby_llm/providers/openai_responses/active_record_extension'
@@ -34,11 +36,13 @@ RubyLLM::Providers::OpenAIResponses::ModelRegistry.register_all!
  module RubyLLM
    # ResponsesAPI namespace for direct access to helpers and version
    module ResponsesAPI
-     VERSION = '0.1.0'
+     VERSION = '0.3.0'
 
      # Shorthand access to built-in tool helpers
      BuiltInTools = Providers::OpenAIResponses::BuiltInTools
      State = Providers::OpenAIResponses::State
      Background = Providers::OpenAIResponses::Background
+     Compaction = Providers::OpenAIResponses::Compaction
+     Containers = Providers::OpenAIResponses::Containers
    end
  end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: ruby_llm-responses_api
  version: !ruby/object:Gem::Version
-   version: 0.2.0
+   version: 0.3.0
  platform: ruby
  authors:
  - Chris Hasinski
@@ -122,8 +122,9 @@ dependencies:
  - !ruby/object:Gem::Version
    version: '3.0'
  description: A RubyLLM provider that implements OpenAI's Responses API, providing
-   access to built-in tools (web search, code interpreter, file search), stateful conversations,
-   background mode, and MCP support.
+   access to built-in tools (web search, code interpreter, file search, shell, apply
+   patch), stateful conversations, server-side compaction, containers API, background
+   mode, and MCP support.
  email:
  - krzysztof.hasinski@gmail.com
  executables: []
@@ -141,6 +142,8 @@ files:
  - lib/ruby_llm/providers/openai_responses/built_in_tools.rb
  - lib/ruby_llm/providers/openai_responses/capabilities.rb
  - lib/ruby_llm/providers/openai_responses/chat.rb
+ - lib/ruby_llm/providers/openai_responses/compaction.rb
+ - lib/ruby_llm/providers/openai_responses/containers.rb
  - lib/ruby_llm/providers/openai_responses/media.rb
  - lib/ruby_llm/providers/openai_responses/message_extension.rb
  - lib/ruby_llm/providers/openai_responses/model_registry.rb