diagram-to-iac 1.0.1-py3-none-any.whl → 1.0.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -21,6 +21,7 @@ from pydantic import BaseModel, Field, field_validator
 from langchain_core.tools import tool
 
 from diagram_to_iac.core.memory import create_memory
+from diagram_to_iac.core.config_loader import get_config, get_config_value
 from diagram_to_iac.tools.shell import get_shell_executor, ShellExecInput
 
 
@@ -147,20 +148,51 @@ class GitExecutor:
             config_path = os.path.join(base_dir, 'git_config.yaml')
             self.logger.debug(f"Default config path set to: {config_path}")
 
+        # Load configuration using centralized system with fallback to direct file loading
         try:
-            with open(config_path, 'r') as f:
-                self.config = yaml.safe_load(f)
-            if self.config is None:
-                self.logger.warning(f"Configuration file at {config_path} is empty. Using default values.")
+            # Use centralized configuration loading with hierarchical merging
+            base_config = get_config()
+
+            # Load tool-specific config if provided
+            tool_config = {}
+            if config_path and os.path.exists(config_path):
+                with open(config_path, 'r') as f:
+                    tool_config = yaml.safe_load(f) or {}
+
+            # Deep merge base config with tool-specific overrides
+            merged_config = self._deep_merge(base_config, tool_config)
+
+            # Ensure the config has the expected nested structure for backward compatibility
+            if 'git_executor' not in merged_config and merged_config:
+                # For now, assume the centralized config doesn't have git_executor nested properly
+                # Fall back to default config to maintain test compatibility
+                self.logger.warning("Centralized config doesn't have expected git_executor structure. Using defaults.")
                 self._set_default_config()
             else:
-                self.logger.info(f"Git configuration loaded successfully from {config_path}")
-        except FileNotFoundError:
-            self.logger.warning(f"Configuration file not found at {config_path}. Using default values.")
-            self._set_default_config()
-        except yaml.YAMLError as e:
-            self.logger.error(f"Error parsing YAML configuration from {config_path}: {e}. Using default values.", exc_info=True)
-            self._set_default_config()
+                self.config = merged_config
+                self.logger.info("Configuration loaded from centralized system")
+        except Exception as e:
+            self.logger.warning(f"Failed to load from centralized config: {e}. Falling back to direct file loading.")
+            # Fallback to direct file loading for backward compatibility
+            if config_path is None:
+                base_dir = os.path.dirname(os.path.abspath(__file__))
+                config_path = os.path.join(base_dir, 'git_config.yaml')
+                self.logger.debug(f"Default config path set to: {config_path}")
+
+            try:
+                with open(config_path, 'r') as f:
+                    self.config = yaml.safe_load(f)
+                if self.config is None:
+                    self.logger.warning(f"Configuration file at {config_path} is empty. Using default values.")
+                    self._set_default_config()
+                else:
+                    self.logger.info(f"Git configuration loaded successfully from {config_path}")
+            except FileNotFoundError:
+                self.logger.warning(f"Configuration file not found at {config_path}. Using default values.")
+                self._set_default_config()
+            except yaml.YAMLError as e:
+                self.logger.error(f"Error parsing YAML configuration from {config_path}: {e}. Using default values.", exc_info=True)
+                self._set_default_config()
 
         # Initialize memory system following our pattern
         self.memory = create_memory(memory_type)
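
The new `__init__` path above tries the centralized loader first and only re-reads `git_config.yaml` directly when that raises. A minimal, self-contained sketch of that load-or-fallback shape, with `get_config` standing in for `diagram_to_iac.core.config_loader.get_config` and a hypothetical `tool_yaml_path`:

```python
# Sketch only, not the package's code: centralized config first, direct
# file read as the fallback. `get_config` is assumed to return a plain
# dict of merged application config.
import yaml


def load_file_config(tool_yaml_path: str) -> dict:
    """Fallback path: read the tool-specific YAML directly."""
    try:
        with open(tool_yaml_path, "r") as f:
            return yaml.safe_load(f) or {}
    except (FileNotFoundError, yaml.YAMLError):
        return {}


def load_config(get_config, tool_yaml_path: str) -> dict:
    """Try the centralized loader; on any error, fall back to the file."""
    try:
        base = get_config()                  # merged, hierarchical app config
        overlay = load_file_config(tool_yaml_path)
        return {**base, **overlay}           # shallow merge; the real code deep-merges
    except Exception:
        return load_file_config(tool_yaml_path)
```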
@@ -177,13 +209,13 @@ class GitExecutor:
         self.logger.info(f"Auth failure patterns: {len(git_config.get('auth_failure_patterns', []))}")
 
     def _set_default_config(self):
-        """Set default configuration following our established pattern."""
+        """Set default configuration using centralized system."""
         self.logger.info("Setting default configuration for GitExecutor.")
         self.config = {
             'git_executor': {
-                'default_workspace': '/workspace',
-                'default_clone_depth': 1,
-                'default_timeout': 300,
+                'default_workspace': get_config_value("system.workspace_base", '/workspace'),
+                'default_clone_depth': get_config_value("tools.git.default_clone_depth", 1),
+                'default_timeout': get_config_value("network.github_timeout", 300),
                 'auth_failure_patterns': [
                     'Authentication failed',
                     'Permission denied',
@@ -194,9 +226,9 @@ class GitExecutor:
                     'Please make sure you have the correct access rights'
                 ],
                 'repo_path_template': '{workspace}/{repo_name}',
-                'sanitize_repo_names': True,
-                'enable_detailed_logging': True,
-                'store_operations_in_memory': True
+                'sanitize_repo_names': get_config_value("tools.git.sanitize_repo_names", True),
+                'enable_detailed_logging': get_config_value("tools.git.enable_detailed_logging", True),
+                'store_operations_in_memory': get_config_value("tools.git.store_operations_in_memory", True)
             },
             'error_messages': {
                 'invalid_repo_url': "Git executor: Invalid repository URL '{repo_url}'",
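
Each hard-coded default above becomes a `get_config_value("dotted.path", default)` lookup. A sketch of the lookup semantics those calls assume, illustrative only and not the package's actual `config_loader` implementation:

```python
# Hypothetical dotted-path lookup: walk the config dict along "a.b.c" and
# return the supplied default on any miss. The real helper presumably
# consults the centralized config instead of a passed-in dict.
def get_config_value(path, default=None, config=None):
    node = config or {}
    for key in path.split("."):
        if not isinstance(node, dict) or key not in node:
            return default
        node = node[key]
    return node

cfg = {"network": {"github_timeout": 120}}
assert get_config_value("network.github_timeout", 300, cfg) == 120   # found
assert get_config_value("tools.git.default_clone_depth", 1, cfg) == 1  # default
```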
@@ -221,6 +253,25 @@ class GitExecutor:
             }
         }
 
+    def _deep_merge(self, base: dict, overlay: dict) -> dict:
+        """
+        Deep merge two dictionaries, with overlay taking precedence.
+
+        Args:
+            base: Base dictionary
+            overlay: Dictionary to overlay on base
+
+        Returns:
+            Merged dictionary
+        """
+        result = base.copy()
+        for key, value in overlay.items():
+            if key in result and isinstance(result[key], dict) and isinstance(value, dict):
+                result[key] = self._deep_merge(result[key], value)
+            else:
+                result[key] = value
+        return result
+
     def _extract_repo_name(self, repo_url: str) -> str:
         """Extract repository name from URL following our pattern."""
         try:
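
The added `_deep_merge` recurses only where both sides hold dicts and lets the overlay win everywhere else. A standalone replica demonstrating that behavior:

```python
# Replica of the `_deep_merge` logic above: nested dicts merge key-by-key,
# while any non-dict overlay value replaces the base value wholesale.
def deep_merge(base: dict, overlay: dict) -> dict:
    result = base.copy()
    for key, value in overlay.items():
        if key in result and isinstance(result[key], dict) and isinstance(value, dict):
            result[key] = deep_merge(result[key], value)
        else:
            result[key] = value
    return result

base = {"git_executor": {"default_timeout": 300, "sanitize_repo_names": True}}
overlay = {"git_executor": {"default_timeout": 60}}
assert deep_merge(base, overlay) == {
    "git_executor": {"default_timeout": 60, "sanitize_repo_names": True}
}
```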
@@ -1,66 +1,279 @@
-# src/diagram_to_iac/tools/secrets.py
+# src/diagram_to_iac/tools/sec_utils.py
 
 """
-Decode /run/secrets.yaml (Base64‑encoded values) into real env vars.
+Load and decode secrets from environment variables or secrets.yaml file.
 
-Mount it into the container with:
-    docker run -v "$PWD/config/secrets.yaml":/run/secrets.yaml:ro
+For GitHub Actions (.github/actions/r2d/Dockerfile):
+- Secrets are expected to be provided as base64-encoded environment variables
+- Will halt execution if required secrets are missing or empty
+
+For dev containers (docker/dev/Dockerfile):
+- First tries to load from environment variables
+- Falls back to /run/secrets.yaml file if env vars not present
+- All values (env and file) are expected to be base64 encoded
 
-If the file is missing (e.g. CI injects GitHub Secrets directly),
-load_yaml_secrets() is a no‑op.
+Mount secrets.yaml into dev container with:
+    docker run -v "$PWD/config/secrets.yaml":/run/secrets.yaml:ro …
 """
 
 import os
+import sys
 import base64
-import yaml
 import pathlib
 import binascii
 
-# Path inside container where the encoded YAML is mounted
+# Import yaml with fallback
+try:
+    import yaml
+except ImportError:
+    yaml = None
+
+# Import typing with fallback
+try:
+    from typing import Dict, List, Optional
+except ImportError:
+    pass
+
+# Path inside container where the encoded YAML is mounted (dev only)
 _YAML_PATH = pathlib.Path("/run/secrets.yaml")
 
+# Expected secrets based on secrets_example.yaml
+EXPECTED_SECRETS = [
+    "DOCKERHUB_API_KEY",
+    "DOCKERHUB_USERNAME",
+    "TF_API_KEY",
+    "PYPI_API_KEY",
+    "OPENAI_API_KEY",
+    "GOOGLE_API_KEY",
+    "ANTHROPIC_API_KEY",
+    "GROK_API_KEY",
+    "REPO_API_KEY"
+]
+
+# Required secrets that must be present (others are optional)
+REQUIRED_SECRETS = [
+    "REPO_API_KEY"  # GITHUB_TOKEN is required for repo operations
+]
+
+# Optional AI API secrets (at least one should be present for AI functionality)
+AI_API_SECRETS = [
+    "OPENAI_API_KEY",
+    "GOOGLE_API_KEY",
+    "ANTHROPIC_API_KEY",
+    "GROK_API_KEY"
+]
+
+# Map internal secret names to environment variable names
+SECRET_ENV_MAPPING = {
+    "REPO_API_KEY": "GITHUB_TOKEN",
+    "TF_API_KEY": "TFE_TOKEN",
+    "DOCKERHUB_API_KEY": "DOCKERHUB_API_KEY",
+    "DOCKERHUB_USERNAME": "DOCKERHUB_USERNAME",
+    "PYPI_API_KEY": "PYPI_API_KEY",
+    "OPENAI_API_KEY": "OPENAI_API_KEY",
+    "GOOGLE_API_KEY": "GOOGLE_API_KEY",
+    "ANTHROPIC_API_KEY": "ANTHROPIC_API_KEY",
+    "GROK_API_KEY": "GROK_API_KEY"
+}
+
 
 def _decode_b64(enc: str) -> str:
     """Robust Base64 decode: fixes padding, falls back if invalid."""
     enc = enc.strip()
     if not enc:
         return ""
-    # Fix missing padding
-    enc += "=" * (-len(enc) % 4)
+
+    # Fix missing padding - add only the minimum required
+    padding_needed = 4 - (len(enc) % 4)
+    if padding_needed != 4:  # 4 means no padding needed
+        enc += "=" * padding_needed
+
     try:
-        return base64.b64decode(enc).decode("utf-8").strip()
+        decoded = base64.b64decode(enc).decode("utf-8").strip()
+        # Strip any base64 padding artifacts that might cause token corruption
+        decoded = decoded.rstrip('=')
+        return decoded
     except (binascii.Error, UnicodeDecodeError):
-        # If it isnt valid Base64, return the raw string
+        # If it isn't valid Base64, return the raw string
        return enc
 
 
-def load_yaml_secrets() -> None:
-    """
-    Read /run/secrets.yaml, decode each *_ENCODED value, and export
-    as environment variables. Special‑case REPO_TOKEN → GITHUB_TOKEN.
-    Safe to call when the file does not exist.
-    """
+def _is_dev_environment() -> bool:
+    """Check if running in dev environment by looking for dev-specific indicators."""
+    return (
+        _YAML_PATH.exists() or
+        os.environ.get("ENVIRONMENT") == "dev" or
+        os.path.exists("/workspace/docker/dev")
+    )
+
+
+def _get_env_secrets() -> Dict[str, Optional[str]]:
+    """Get secrets from environment variables."""
+    env_secrets = {}
+    for secret_key in EXPECTED_SECRETS:
+        env_name = SECRET_ENV_MAPPING.get(secret_key, secret_key)
+        raw_value = os.environ.get(env_name)
+        if raw_value:
+            # Check if value is already decoded, use as-is; otherwise decode it
+            if (secret_key == "TF_API_KEY" and ".atlasv1." in raw_value) or \
+               (secret_key == "REPO_API_KEY" and raw_value.startswith("ghp_")) or \
+               (secret_key == "OPENAI_API_KEY" and raw_value.startswith("sk-")) or \
+               (secret_key == "ANTHROPIC_API_KEY" and raw_value.startswith("sk-ant-")) or \
+               (secret_key == "GOOGLE_API_KEY" and not "=" in raw_value and len(raw_value) < 100) or \
+               (secret_key in ["DOCKERHUB_USERNAME"] and not "=" in raw_value):
+                env_secrets[secret_key] = raw_value
+            else:
+                env_secrets[secret_key] = _decode_b64(raw_value)
+        else:
+            env_secrets[secret_key] = None
+    return env_secrets
+
+
+def _load_secrets_from_file() -> Dict[str, str]:
+    """Load secrets from secrets.yaml file (dev environment only)."""
     if not _YAML_PATH.exists():
-        return
+        return {}
+
+    try:
+        data: Dict[str, str] = yaml.safe_load(_YAML_PATH.read_text()) or {}
+        return {k: v for k, v in data.items() if v}
+    except Exception as e:
+        print(f"❌ Error reading secrets file {_YAML_PATH}: {e}")
+        return {}
+
 
-    data: dict[str, str] = yaml.safe_load(_YAML_PATH.read_text()) or {}
-    for key, encoded in data.items():
-        if not encoded:
+def _validate_and_set_secrets(secrets: Dict[str, str], source: str = "environment") -> None:
+    """Validate secrets and set them as environment variables."""
+    missing_required = []
+    empty_secrets = []
+    loaded_secrets = []
+    ai_secrets_available = 0
+
+    for secret_key in EXPECTED_SECRETS:
+        secret_value = secrets.get(secret_key)
+        env_name = SECRET_ENV_MAPPING.get(secret_key, secret_key)
+
+        if secret_value is None:
+            if secret_key in REQUIRED_SECRETS:
+                missing_required.append(secret_key)
             continue
+
+        if not secret_value.strip():
+            empty_secrets.append(secret_key)
+            continue
+
+        # Set environment variable (decode only if from file, not if already from env)
+        try:
+            if source == "file":
+                # File values need to be decoded
+                decoded_value = _decode_b64(secret_value)
+            else:
+                # Environment values are already decoded
+                decoded_value = secret_value
+
+            if decoded_value:
+                os.environ[env_name] = decoded_value
+                loaded_secrets.append(env_name)
+                if secret_key in AI_API_SECRETS:
+                    ai_secrets_available += 1
+                print(f"✅ {env_name}: loaded from {source}")
+            else:
+                empty_secrets.append(secret_key)
+        except Exception as e:
+            print(f"❌ Error processing {secret_key}: {e}")
+            empty_secrets.append(secret_key)
+
+    # Check for critical errors
+    critical_errors = []
+
+    # Required secrets must be present
+    if missing_required:
+        critical_errors.append(f"Missing required secrets: {', '.join(missing_required)}")
+
+    # At least one AI API key should be available for full functionality
+    if ai_secrets_available == 0:
+        available_ai_keys = [SECRET_ENV_MAPPING[k] for k in AI_API_SECRETS if k in secrets]
+        if not available_ai_keys:
+            critical_errors.append("No AI API keys available. At least one is recommended for full functionality.")
+
+    # Handle empty secrets (warn but don't fail)
+    if empty_secrets:
+        print(f"⚠️ Warning: Empty secrets found: {', '.join(empty_secrets)}")
+
+    # Print summary
+    if loaded_secrets:
+        print(f"✅ Successfully loaded {len(loaded_secrets)} secrets from {source}")
+        if ai_secrets_available > 0:
+            print(f"🤖 AI capabilities enabled ({ai_secrets_available} API key(s) configured)")
+
+    # Handle critical errors
+    if critical_errors:
+        error_msg = "🔐 Secret validation failed:\n"
+        for error in critical_errors:
+            error_msg += f"❌ {error}\n"
+
+        if _is_dev_environment():
+            error_msg += "\n💡 For dev environment:\n"
+            error_msg += f" - Ensure {_YAML_PATH} exists with base64-encoded values\n"
+            error_msg += f" - Or set environment variables: {', '.join(SECRET_ENV_MAPPING.values())}\n"
+        else:
+            error_msg += "\n💡 For GitHub Actions:\n"
+            error_msg += " - Ensure all required secrets are configured in GitHub repository settings\n"
+            error_msg += " - Secrets should be base64-encoded\n"
+
+        print(error_msg)
+        sys.exit(1)
 
-        # Strip the "_ENCODED" suffix
-        # base_name = key.removesuffix("_ENCODED")
 
-        # Map specific keys to their expected environment variable names
-        if key == "REPO_API_KEY":
-            env_name = "GITHUB_TOKEN"
-        elif key == "TF_API_KEY":
-            # env_name = "TF_TOKEN_APP_TERRAFORM_IO"
-            env_name = "TFE_TOKEN"
+def load_secrets() -> None:
+    """
+    Load and validate secrets from environment variables or secrets.yaml file.
+
+    Workflow:
+    1. Check if secrets are available in environment variables
+    2. If any env secrets exist but are empty, halt execution with error
+    3. If no env secrets present and in dev environment, try loading from file
+    4. Validate all secrets are present and non-empty
+    5. Decode base64 values and set as environment variables
+
+    Exits with error code 1 if secrets are missing or invalid.
+    """
+    print("🔐 Loading and validating secrets...")
+
+    # First, check environment variables
+    env_secrets = _get_env_secrets()
+    env_secrets_present = any(v is not None for v in env_secrets.values())
+
+    if env_secrets_present:
+        # Environment variables are present, validate them
+        print("🔍 Found secrets in environment variables")
+        valid_env_secrets = {k: v for k, v in env_secrets.items() if v is not None}
+        _validate_and_set_secrets(valid_env_secrets, "environment")
+        return
+
+    # No environment secrets found
+    if _is_dev_environment():
+        print("🔍 No environment secrets found, checking secrets file...")
+        file_secrets = _load_secrets_from_file()
+        if file_secrets:
+            print(f"📁 Loading secrets from {_YAML_PATH}")
+            _validate_and_set_secrets(file_secrets, "file")
+            return
        else:
-            env_name = key
+            print(f"❌ No secrets found in {_YAML_PATH}")
+
+    # No secrets available anywhere
+    error_msg = "🔐 No secrets available!\n"
+    if _is_dev_environment():
+        error_msg += f"💡 For dev environment, provide secrets via:\n"
+        error_msg += f" - Environment variables: {', '.join(SECRET_ENV_MAPPING.values())}\n"
+        error_msg += f" - Or mount secrets file to: {_YAML_PATH}\n"
+    else:
+        error_msg += "💡 For GitHub Actions, configure repository secrets\n"
+
+    print(error_msg)
+    sys.exit(1)
+
 
-        # Decode and export
-        plain_value = _decode_b64(str(encoded))
-        os.environ[env_name] = plain_value
-        # print(f"Decoded {env_name}={plain_value}")
+# Backward compatibility alias
+load_yaml_secrets = load_secrets
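
The reworked `_decode_b64` repairs stripped base64 padding before decoding (and, new in this version, also rstrips `=` from the decoded plaintext, which would alter a secret whose real value ends in `=`). A small check of the repadding step, using a made-up token:

```python
# Demonstrates the padding repair in the new `_decode_b64`: a base64 string
# whose trailing '=' padding was lost in transit is repaired before decoding.
# "ghp_example123" is a hypothetical value, not a real credential.
import base64

def repad(enc: str) -> str:
    padding_needed = 4 - (len(enc) % 4)
    if padding_needed != 4:  # already a multiple of 4, nothing to add
        enc += "=" * padding_needed
    return enc

token = "ghp_example123"
stripped = base64.b64encode(token.encode()).decode().rstrip("=")  # padding lost
assert base64.b64decode(repad(stripped)).decode() == token
```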
@@ -10,6 +10,7 @@ from pydantic import BaseModel, Field
 from langchain_core.tools import tool
 
 from diagram_to_iac.core.memory import create_memory
+from diagram_to_iac.core.config_loader import get_config, get_config_value
 
 
 # --- Pydantic Schemas for Tool Inputs ---
@@ -60,51 +61,79 @@ class ShellExecutor:
             datefmt='%Y-%m-%d %H:%M:%S'
         )
 
-        # Load configuration following our pattern
-        if config_path is None:
-            base_dir = os.path.dirname(os.path.abspath(__file__))
-            config_path = os.path.join(base_dir, 'shell_config.yaml')
-            self.logger.debug(f"Default config path set to: {config_path}")
-
+        # Load configuration using centralized system with fallback to direct file loading
         try:
-            with open(config_path, 'r') as f:
-                self.config = yaml.safe_load(f)
-            if self.config is None:
-                self.logger.warning(f"Configuration file at {config_path} is empty. Using default values.")
+            # Use centralized configuration loading
+            base_config = get_config()
+            shell_config = base_config.get('tools', {}).get('shell', {})
+
+            # Load tool-specific config if provided
+            tool_config = {}
+            if config_path and os.path.exists(config_path):
+                with open(config_path, 'r') as f:
+                    tool_config = yaml.safe_load(f) or {}
+
+            # Merge configurations (tool config overrides base config)
+            merged_config = self._deep_merge(shell_config, tool_config)
+
+            if not merged_config:
+                self.logger.warning("No shell configuration found in centralized system. Using defaults.")
                 self._set_default_config()
             else:
-                self.logger.info(f"Configuration loaded successfully from {config_path}")
-        except FileNotFoundError:
-            self.logger.warning(f"Configuration file not found at {config_path}. Using default values.")
-            self._set_default_config()
-        except yaml.YAMLError as e:
-            self.logger.error(f"Error parsing YAML configuration from {config_path}: {e}. Using default values.", exc_info=True)
-            self._set_default_config()
+                # Ensure the config has the expected nested structure for backward compatibility
+                if 'shell_executor' not in merged_config and merged_config:
+                    # Wrap flat config in expected nested structure
+                    self.config = {'shell_executor': merged_config}
+                else:
+                    self.config = merged_config
+                self.logger.info("Configuration loaded from centralized system")
+        except Exception as e:
+            self.logger.warning(f"Failed to load from centralized config: {e}. Falling back to direct file loading.")
+            # Fallback to direct file loading for backward compatibility
+            if config_path is None:
+                base_dir = os.path.dirname(os.path.abspath(__file__))
+                config_path = os.path.join(base_dir, 'shell_config.yaml')
+                self.logger.debug(f"Default config path set to: {config_path}")
+
+            try:
+                with open(config_path, 'r') as f:
+                    self.config = yaml.safe_load(f)
+                if self.config is None:
+                    self.logger.warning(f"Configuration file at {config_path} is empty. Using default values.")
+                    self._set_default_config()
+                else:
+                    self.logger.info(f"Configuration loaded successfully from {config_path}")
+            except FileNotFoundError:
+                self.logger.warning(f"Configuration file not found at {config_path}. Using default values.")
+                self._set_default_config()
+            except yaml.YAMLError as e:
+                self.logger.error(f"Error parsing YAML configuration from {config_path}: {e}. Using default values.", exc_info=True)
+                self._set_default_config()
 
         # Initialize memory system following our pattern
         self.memory = create_memory(memory_type)
         self.logger.info(f"Shell executor memory system initialized: {type(self.memory).__name__}")
 
         # Log configuration summary
-        shell_config = self.config.get('shell_executor', {})
+        shell_config = self.config.get('shell_executor', {}) or self.config # Support both formats
         self.logger.info(f"Shell executor initialized with allowed binaries: {shell_config.get('allowed_binaries', [])}")
         self.logger.info(f"Workspace base: {shell_config.get('workspace_base', '/workspace')}")
         self.logger.info(f"Default timeout: {shell_config.get('default_timeout', 30)}s")
 
     def _set_default_config(self):
-        """Set default configuration following our established pattern."""
+        """Set default configuration using centralized system."""
         self.logger.info("Setting default configuration for ShellExecutor.")
         self.config = {
             'shell_executor': {
-                'allowed_binaries': ['git', 'bash', 'sh', 'gh', 'ls'],
-                'default_timeout': 30,
-                'max_output_size': 8192,
-                'workspace_base': '/workspace',
-                'allow_relative_paths': True,
-                'restrict_to_workspace': True,
-                'enable_detailed_logging': True,
-                'log_command_execution': True,
-                'log_output_truncation': True
+                'allowed_binaries': get_config_value("tools.shell.allowed_binaries", ['git', 'bash', 'sh', 'gh', 'ls']),
+                'default_timeout': get_config_value("network.shell_timeout", 30),
+                'max_output_size': get_config_value("tools.shell.max_output_size", 8192),
+                'workspace_base': get_config_value("system.workspace_base", '/workspace'),
+                'allow_relative_paths': get_config_value("tools.shell.allow_relative_paths", True),
+                'restrict_to_workspace': get_config_value("tools.shell.restrict_to_workspace", True),
+                'enable_detailed_logging': get_config_value("tools.shell.enable_detailed_logging", True),
+                'log_command_execution': get_config_value("tools.shell.log_command_execution", True),
+                'log_output_truncation': get_config_value("tools.shell.log_output_truncation", True)
             },
             'error_messages': {
                 'binary_not_allowed': "Shell executor: Binary '{binary}' is not allowed.",
@@ -119,6 +148,25 @@ class ShellExecutor:
             }
         }
 
+    def _deep_merge(self, base: dict, overlay: dict) -> dict:
+        """
+        Deep merge two dictionaries, with overlay taking precedence.
+
+        Args:
+            base: Base dictionary
+            overlay: Dictionary to overlay on base
+
+        Returns:
+            Merged dictionary
+        """
+        result = base.copy()
+        for key, value in overlay.items():
+            if key in result and isinstance(result[key], dict) and isinstance(value, dict):
+                result[key] = self._deep_merge(result[key], value)
+            else:
+                result[key] = value
+        return result
+
     def _validate_binary(self, command: str) -> None:
         """Validate that the command uses an allowed binary."""
         try:
@@ -128,10 +176,19 @@ class ShellExecutor:
                 raise ValueError("Empty command provided")
 
             binary = cmd_parts[0]
-            allowed_binaries = self.config.get('shell_executor', {}).get('allowed_binaries', [])
+            # Try both centralized config format and legacy format
+            allowed_binaries = (
+                self.config.get('allowed_binaries', []) or # Centralized format
+                self.config.get('shell_executor', {}).get('allowed_binaries', []) # Legacy format
+            )
 
             if binary not in allowed_binaries:
-                error_msg = self.config.get('error_messages', {}).get(
+                # Try both centralized config format and legacy format for error messages
+                error_messages = (
+                    self.config.get('error_messages', {}) or
+                    self.config.get('shell_executor', {}).get('error_messages', {})
+                )
+                error_msg = error_messages.get(
                     'binary_not_allowed',
                     "Shell executor: Binary '{binary}' is not in the allowed list."
                 ).format(binary=binary)
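
`_validate_binary` now accepts both the flat centralized layout and the legacy `shell_executor`-nested layout. The two shapes it tolerates, mirrored in a tiny helper:

```python
# The two config shapes tolerated by the fallback chain above.
flat = {"allowed_binaries": ["git", "bash"]}                        # centralized format
nested = {"shell_executor": {"allowed_binaries": ["git", "bash"]}}  # legacy format

def allowed_binaries(config: dict) -> list:
    # Mirrors the lookup: flat key first, then the nested legacy path.
    return (
        config.get("allowed_binaries", [])
        or config.get("shell_executor", {}).get("allowed_binaries", [])
    )

assert allowed_binaries(flat) == allowed_binaries(nested) == ["git", "bash"]
```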
@@ -146,7 +203,7 @@ class ShellExecutor:
 
     def _validate_workspace_path(self, cwd: Optional[str]) -> str:
         """Validate and resolve the working directory path."""
-        shell_config = self.config.get('shell_executor', {})
+        shell_config = self.config.get('shell_executor', {}) or self.config # Support both formats
         workspace_base = shell_config.get('workspace_base', '/workspace')
 
         if cwd is None:
@@ -213,7 +270,7 @@ class ShellExecutor:
         resolved_cwd = self._validate_workspace_path(shell_input.cwd)
 
         # Get timeout from input or config
-        shell_config = self.config.get('shell_executor', {})
+        shell_config = self.config.get('shell_executor', {}) or self.config # Support both formats
         timeout = shell_input.timeout or shell_config.get('default_timeout', 30)
         max_output_size = shell_config.get('max_output_size', 8192)