claude_swarm 1.0.8 → 1.0.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: cb4d46c1724d7488d304465b03250af098e1db3c2f73005cdb48c1092920628f
- data.tar.gz: '031668b2d8abfcddf0fae98e14364259c7a8d66bbb201a67eba7a58063203a7a'
+ metadata.gz: 1b5d5d686096a33cf7a16511f728091f92e2bb33d9c5eeacdf430718a1a8548e
+ data.tar.gz: 63e3445e6279abc98b4202b582cfc48a4e17b610339241ba0872532a61174697
  SHA512:
- metadata.gz: b1187c0f7a3f73d65832e83236a4c17967835bd70764fd782111e4b80fe20ad61c9dd0ab7c4c3bec755df387e1221597695302a05c3044dca427b08c40da7a1a
- data.tar.gz: b5242fd046e72ced817feab59b628d8013c8b674ddee698b99f32a87936acc5a470d007a16dcdbea0a75656a238099380aa261022fa5e75be08b8c0b51e9d5a2
+ metadata.gz: 8521d7386607e3391915dfeb68f82ca9df8361566832724fe8e59357444137530a4b825e9c665f328a6157a0ee6a7202ad536409e59253d4f7fdb909482bc178
+ data.tar.gz: c34358c386fac205277cab662b3041330b5006878520f8c8ae265a316bc239e71d396e5ed4ea77c897bc7bf5b465eddf810cb227be13ba189cc27169496e6d78
data/CHANGELOG.md CHANGED
@@ -1,3 +1,11 @@
+ ## [1.0.9]
+
+ ### Added
+ - **Zero Data Retention (ZDR) support for OpenAI Responses API**: Added ZDR mode to disable conversation continuity for privacy-focused use cases
+ - New `zdr` configuration parameter (boolean) that can be set in YAML configuration files
+ - When enabled, sets `previous_response_id` to nil, ensuring each API call is independent
+ - Supported in Configuration, CLI (`--zdr` flag), and MCP generator
+
  ## [1.0.8]

  ### Changed
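
The changelog entry above notes that enabling ZDR clears `previous_response_id` so that each Responses API call stands alone. A minimal Ruby sketch of that pattern follows (the method and parameter names here are illustrative, not the gem's actual API; the gem applies the equivalent conditional inside its responses client, as the source diff further down shows):

```ruby
# Illustrative sketch only: mirrors the conditional this release introduces,
# not ClaudeSwarm's real class or method names.
def build_request_parameters(model:, input:, previous_response_id:, zdr: false)
  params = { model: model, input: input }
  # With ZDR enabled, never chain onto a prior response; every call is stateless.
  params[:previous_response_id] = zdr ? nil : previous_response_id
  params
end

build_request_parameters(model: "gpt-4o", input: "hi", previous_response_id: "resp_123", zdr: true)
# => {:model=>"gpt-4o", :input=>"hi", :previous_response_id=>nil}

build_request_parameters(model: "gpt-4o", input: "hi", previous_response_id: "resp_123")
# => {:model=>"gpt-4o", :input=>"hi", :previous_response_id=>"resp_123"}
```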
data/docs/v1/README.md CHANGED
@@ -372,6 +372,7 @@ When using `provider: openai`, the following additional fields are available:
  - **openai_token_env**: Environment variable name for OpenAI API key (default: "OPENAI_API_KEY")
  - **base_url**: Custom base URL for OpenAI API (optional)
  - **reasoning_effort**: Reasoning effort for O-series models only - "low", "medium", or "high"
+ - **zdr**: Zero Data Retention mode (boolean) - when set to `true`, disables conversation continuity by setting `previous_response_id` to nil (responses API only)

  **Important Notes:**

@@ -428,6 +429,15 @@ reasoning_instance:
  reasoning_effort: medium # Only for O-series models
  api_version: responses # Can use either API version
  prompt: "You are a reasoning assistant for complex problem solving"
+
+ # Instance with Zero Data Retention mode
+ zdr_instance:
+ description: "Privacy-focused assistant with no conversation memory"
+ provider: openai
+ model: gpt-4o
+ api_version: responses # ZDR only works with responses API
+ zdr: true # Disables conversation continuity
+ prompt: "You are a privacy-focused assistant that handles sensitive data"
  ```

  ### MCP Server Types
@@ -36,6 +36,7 @@ module ClaudeSwarm
  openai_token_env: instance_config[:openai_token_env],
  base_url: instance_config[:base_url],
  reasoning_effort: instance_config[:reasoning_effort],
+ zdr: instance_config[:zdr],
  )
  else
  # Default Claude behavior - always use SDK
@@ -172,6 +172,10 @@ module ClaudeSwarm
  method_option :reasoning_effort,
  type: :string,
  desc: "Reasoning effort for OpenAI models"
+ method_option :zdr,
+ type: :boolean,
+ default: false,
+ desc: "Enable ZDR for OpenAI models"
  def mcp_serve
  # Validate reasoning_effort if provided
  if options[:reasoning_effort]
@@ -208,6 +212,7 @@ module ClaudeSwarm
  openai_token_env: options[:openai_token_env],
  base_url: options[:base_url],
  reasoning_effort: options[:reasoning_effort],
+ zdr: options[:zdr],
  }

  begin
@@ -4,7 +4,7 @@ module ClaudeSwarm
  class Configuration
  # Frozen constants for validation
  VALID_PROVIDERS = ["claude", "openai"].freeze
- OPENAI_SPECIFIC_FIELDS = ["temperature", "api_version", "openai_token_env", "base_url", "reasoning_effort"].freeze
+ OPENAI_SPECIFIC_FIELDS = ["temperature", "api_version", "openai_token_env", "base_url", "reasoning_effort", "zdr"].freeze
  VALID_API_VERSIONS = ["chat_completion", "responses"].freeze
  VALID_REASONING_EFFORTS = ["low", "medium", "high"].freeze

@@ -232,6 +232,7 @@ module ClaudeSwarm
  instance_config[:openai_token_env] = config["openai_token_env"] || "OPENAI_API_KEY"
  instance_config[:base_url] = config["base_url"]
  instance_config[:reasoning_effort] = config["reasoning_effort"] if config["reasoning_effort"]
+ instance_config[:zdr] = config["zdr"] if config.key?("zdr")
  # Default vibe to true for OpenAI instances if not specified
  instance_config[:vibe] = true if config["vibe"].nil?
  elsif config["vibe"].nil?
@@ -174,6 +174,7 @@ module ClaudeSwarm
  args.push("--api-version", instance[:api_version]) if instance[:api_version]
  args.push("--openai-token-env", instance[:openai_token_env]) if instance[:openai_token_env]
  args.push("--base-url", instance[:base_url]) if instance[:base_url]
+ args.push("--zdr", instance[:zdr].to_s) if instance.key?(:zdr)
  end
  end

@@ -5,7 +5,7 @@ module ClaudeSwarm
  class ChatCompletion
  MAX_TURNS_WITH_TOOLS = 100_000 # virtually infinite

- def initialize(openai_client:, mcp_client:, available_tools:, executor:, instance_name:, model:, temperature: nil, reasoning_effort: nil)
+ def initialize(openai_client:, mcp_client:, available_tools:, executor:, instance_name:, model:, temperature: nil, reasoning_effort: nil, zdr: false)
  @openai_client = openai_client
  @mcp_client = mcp_client
  @available_tools = available_tools
@@ -14,6 +14,7 @@ module ClaudeSwarm
  @model = model
  @temperature = temperature
  @reasoning_effort = reasoning_effort
+ @zdr = zdr # Not used in chat_completion API, but kept for compatibility
  @conversation_messages = []
  end

@@ -31,7 +31,7 @@ module ClaudeSwarm
  instance_name: nil, instance_id: nil, calling_instance: nil, calling_instance_id: nil,
  claude_session_id: nil, additional_directories: [], debug: false,
  temperature: nil, api_version: "chat_completion", openai_token_env: "OPENAI_API_KEY",
- base_url: nil, reasoning_effort: nil)
+ base_url: nil, reasoning_effort: nil, zdr: false)
  # Call parent initializer for common attributes
  super(
  working_directory: working_directory,
@@ -52,6 +52,7 @@ module ClaudeSwarm
  @api_version = api_version
  @base_url = base_url
  @reasoning_effort = reasoning_effort
+ @zdr = zdr

  # Conversation state for maintaining context
  @conversation_messages = []
@@ -162,6 +163,7 @@ module ClaudeSwarm
  model: @model,
  temperature: @temperature,
  reasoning_effort: @reasoning_effort,
+ zdr: @zdr,
  }

  if @api_version == "responses"
@@ -5,7 +5,7 @@ module ClaudeSwarm
  class Responses
  MAX_TURNS_WITH_TOOLS = 100_000 # virtually infinite

- def initialize(openai_client:, mcp_client:, available_tools:, executor:, instance_name:, model:, temperature: nil, reasoning_effort: nil)
+ def initialize(openai_client:, mcp_client:, available_tools:, executor:, instance_name:, model:, temperature: nil, reasoning_effort: nil, zdr: false)
  @openai_client = openai_client
  @mcp_client = mcp_client
  @available_tools = available_tools
@@ -14,6 +14,7 @@ module ClaudeSwarm
  @model = model
  @temperature = temperature
  @reasoning_effort = reasoning_effort
+ @zdr = zdr
  @system_prompt = nil
  end

@@ -58,6 +59,7 @@ module ClaudeSwarm
  else
  input
  end
+ conversation_array << { role: "user", content: parameters[:input] }
  else
  # Follow-up call with conversation array (function calls + outputs)
  parameters[:input] = conversation_array
@@ -70,8 +72,8 @@ module ClaudeSwarm
  @executor.logger.info { "Conversation item IDs: #{conversation_ids.inspect}" }
  end

- # Add previous response ID for conversation continuity
- parameters[:previous_response_id] = previous_response_id if previous_response_id
+ # Add previous response ID for conversation continuity (unless zdr is enabled)
+ parameters[:previous_response_id] = @zdr ? nil : previous_response_id

  # Add tools if available
  if @available_tools&.any?
@@ -106,7 +108,7 @@ module ClaudeSwarm
  @executor.logger.error { "Request parameters: #{JsonHandler.pretty_generate!(parameters)}" }

  # Try to extract and log the response body for better debugging
- if e.respond_to?(:response)
+ if e.respond_to?(:response) && e.response
  begin
  error_body = e.response[:body]
  @executor.logger.error { "Error response body: #{error_body}" }
@@ -122,7 +124,7 @@ module ClaudeSwarm
  error: {
  class: e.class.to_s,
  message: e.message,
- response_body: e.respond_to?(:response) ? e.response[:body] : nil,
+ response_body: e.respond_to?(:response) && e.response ? e.response[:body] : nil,
  backtrace: e.backtrace.first(5),
  },
  })
@@ -146,33 +148,21 @@ module ClaudeSwarm

  # Handle response based on output structure
  output = response["output"]
-
  if output.nil?
  @executor.logger.error { "No output in response" }
  return "Error: No output in OpenAI response"
  end

  # Check if output is an array (as per documentation)
- if output.is_a?(Array) && !output.empty?
+ if output.is_a?(Array) && output.any?
+ new_conversation = conversation_array.dup
+ new_conversation.concat(output)
  # Check if there are function calls
  function_calls = output.select { |item| item["type"] == "function_call" }
-
  if function_calls.any?
- # Check if we already have a conversation going
- if conversation_array.empty?
- # First depth - build new conversation
- new_conversation = build_conversation_with_outputs(function_calls)
- else
- # Subsequent depth - append to existing conversation
- # Don't re-add function calls, just add the new ones and their outputs
- new_conversation = conversation_array.dup
- append_new_outputs(function_calls, new_conversation)
- end
-
- # Recursively process with updated conversation
+ append_new_outputs(function_calls, new_conversation)
  process_responses_api(nil, new_conversation, response_id, depth + 1)
  else
- # Look for text response
  extract_text_response(output)
  end
  else
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module ClaudeSwarm
- VERSION = "1.0.8"
+ VERSION = "1.0.9"
  end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: claude_swarm
  version: !ruby/object:Gem::Version
- version: 1.0.8
+ version: 1.0.9
  platform: ruby
  authors:
  - Paulo Arruda