diagram-to-iac 0.7.0__py3-none-any.whl → 0.9.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- diagram_to_iac/__init__.py +10 -0
- diagram_to_iac/actions/__init__.py +7 -0
- diagram_to_iac/actions/git_entry.py +174 -0
- diagram_to_iac/actions/supervisor_entry.py +116 -0
- diagram_to_iac/actions/terraform_agent_entry.py +207 -0
- diagram_to_iac/agents/__init__.py +26 -0
- diagram_to_iac/agents/demonstrator_langgraph/__init__.py +10 -0
- diagram_to_iac/agents/demonstrator_langgraph/agent.py +826 -0
- diagram_to_iac/agents/git_langgraph/__init__.py +10 -0
- diagram_to_iac/agents/git_langgraph/agent.py +1018 -0
- diagram_to_iac/agents/git_langgraph/pr.py +146 -0
- diagram_to_iac/agents/hello_langgraph/__init__.py +9 -0
- diagram_to_iac/agents/hello_langgraph/agent.py +621 -0
- diagram_to_iac/agents/policy_agent/__init__.py +15 -0
- diagram_to_iac/agents/policy_agent/agent.py +507 -0
- diagram_to_iac/agents/policy_agent/integration_example.py +191 -0
- diagram_to_iac/agents/policy_agent/tools/__init__.py +14 -0
- diagram_to_iac/agents/policy_agent/tools/tfsec_tool.py +259 -0
- diagram_to_iac/agents/shell_langgraph/__init__.py +21 -0
- diagram_to_iac/agents/shell_langgraph/agent.py +122 -0
- diagram_to_iac/agents/shell_langgraph/detector.py +50 -0
- diagram_to_iac/agents/supervisor_langgraph/__init__.py +17 -0
- diagram_to_iac/agents/supervisor_langgraph/agent.py +1947 -0
- diagram_to_iac/agents/supervisor_langgraph/demonstrator.py +22 -0
- diagram_to_iac/agents/supervisor_langgraph/guards.py +23 -0
- diagram_to_iac/agents/supervisor_langgraph/pat_loop.py +49 -0
- diagram_to_iac/agents/supervisor_langgraph/router.py +9 -0
- diagram_to_iac/agents/terraform_langgraph/__init__.py +15 -0
- diagram_to_iac/agents/terraform_langgraph/agent.py +1216 -0
- diagram_to_iac/agents/terraform_langgraph/parser.py +76 -0
- diagram_to_iac/core/__init__.py +7 -0
- diagram_to_iac/core/agent_base.py +19 -0
- diagram_to_iac/core/enhanced_memory.py +302 -0
- diagram_to_iac/core/errors.py +4 -0
- diagram_to_iac/core/issue_tracker.py +49 -0
- diagram_to_iac/core/memory.py +132 -0
- diagram_to_iac/r2d.py +345 -13
- diagram_to_iac/services/__init__.py +10 -0
- diagram_to_iac/services/observability.py +59 -0
- diagram_to_iac/services/step_summary.py +77 -0
- diagram_to_iac/tools/__init__.py +11 -0
- diagram_to_iac/tools/api_utils.py +108 -26
- diagram_to_iac/tools/git/__init__.py +45 -0
- diagram_to_iac/tools/git/git.py +956 -0
- diagram_to_iac/tools/hello/__init__.py +30 -0
- diagram_to_iac/tools/hello/cal_utils.py +31 -0
- diagram_to_iac/tools/hello/text_utils.py +97 -0
- diagram_to_iac/tools/llm_utils/__init__.py +20 -0
- diagram_to_iac/tools/llm_utils/anthropic_driver.py +87 -0
- diagram_to_iac/tools/llm_utils/base_driver.py +90 -0
- diagram_to_iac/tools/llm_utils/gemini_driver.py +89 -0
- diagram_to_iac/tools/llm_utils/openai_driver.py +93 -0
- diagram_to_iac/tools/llm_utils/router.py +303 -0
- diagram_to_iac/tools/sec_utils.py +4 -2
- diagram_to_iac/tools/shell/__init__.py +17 -0
- diagram_to_iac/tools/shell/shell.py +415 -0
- diagram_to_iac/tools/text_utils.py +277 -0
- diagram_to_iac/tools/tf/terraform.py +851 -0
- diagram_to_iac-0.9.0.dist-info/METADATA +256 -0
- diagram_to_iac-0.9.0.dist-info/RECORD +64 -0
- {diagram_to_iac-0.7.0.dist-info → diagram_to_iac-0.9.0.dist-info}/WHEEL +1 -1
- diagram_to_iac-0.9.0.dist-info/entry_points.txt +6 -0
- diagram_to_iac/agents/codegen_agent.py +0 -0
- diagram_to_iac/agents/consensus_agent.py +0 -0
- diagram_to_iac/agents/deployment_agent.py +0 -0
- diagram_to_iac/agents/github_agent.py +0 -0
- diagram_to_iac/agents/interpretation_agent.py +0 -0
- diagram_to_iac/agents/question_agent.py +0 -0
- diagram_to_iac/agents/supervisor.py +0 -0
- diagram_to_iac/agents/vision_agent.py +0 -0
- diagram_to_iac/core/config.py +0 -0
- diagram_to_iac/tools/cv_utils.py +0 -0
- diagram_to_iac/tools/gh_utils.py +0 -0
- diagram_to_iac/tools/tf_utils.py +0 -0
- diagram_to_iac-0.7.0.dist-info/METADATA +0 -16
- diagram_to_iac-0.7.0.dist-info/RECORD +0 -32
- diagram_to_iac-0.7.0.dist-info/entry_points.txt +0 -2
- {diagram_to_iac-0.7.0.dist-info → diagram_to_iac-0.9.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,415 @@
|
|
1
|
+
import os
|
2
|
+
import shlex
|
3
|
+
import subprocess
|
4
|
+
import time
|
5
|
+
import logging
|
6
|
+
import yaml
|
7
|
+
from typing import Optional, Dict, Any, List
|
8
|
+
from pathlib import Path
|
9
|
+
from pydantic import BaseModel, Field
|
10
|
+
from langchain_core.tools import tool
|
11
|
+
|
12
|
+
from diagram_to_iac.core.memory import create_memory
|
13
|
+
|
14
|
+
|
15
|
+
# --- Pydantic Schemas for Tool Inputs ---
class ShellExecInput(BaseModel):
    """Input schema for shell command execution following our established pattern."""
    # Tokenized with shlex before execution; first token must be an allowed binary.
    command: str = Field(..., description="Shell command to execute")
    # Resolved to an absolute path and (by default) restricted to the workspace base.
    cwd: Optional[str] = Field(None, description="Working directory for command execution")
    # Falls back to the configured default_timeout when omitted.
    timeout: Optional[int] = Field(None, description="Timeout in seconds (overrides config default)")
    # Merged on top of a copy of os.environ for the child process.
    env_vars: Optional[Dict[str, str]] = Field(None, description="Additional environment variables to set")
|
22
|
+
|
23
|
+
|
24
|
+
class ShellExecOutput(BaseModel):
    """Output schema for shell command execution following our established pattern."""
    # stdout concatenated with stderr; possibly tail-truncated (see `truncated`).
    output: str = Field(..., description="Combined stdout and stderr output")
    exit_code: int = Field(..., description="Process exit code")
    command: str = Field(..., description="Executed command")
    duration: float = Field(..., description="Execution time in seconds")
    cwd: str = Field(..., description="Working directory used")
    # True when output exceeded max_output_size and only the tail was kept.
    truncated: bool = Field(False, description="Whether output was truncated")
|
32
|
+
|
33
|
+
|
34
|
+
class ShellExecutor:
    """
    ShellExecutor provides safe shell command execution following our established pattern.

    Features:
    - Configuration-driven security (allowed binaries, workspace restrictions)
    - Comprehensive logging with structured messages
    - Memory integration for operation tracking
    - Robust error handling with graceful fallbacks
    - Timeout protection and output size limits
    """

    def __init__(self, config_path: Optional[str] = None, memory_type: str = "persistent"):
        """
        Initialize ShellExecutor following our established pattern.

        Args:
            config_path: Optional path to shell tools configuration file
            memory_type: Type of memory to use ("persistent", "memory", or "langgraph")
        """
        # Configure logger following our pattern
        self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
        # Only install a root handler when nothing has configured logging yet,
        # so embedding applications keep control over their own logging setup.
        if not logging.getLogger().hasHandlers():
            logging.basicConfig(
                level=logging.INFO,
                format='%(asctime)s - %(name)s - %(levelname)s - %(threadName)s - %(message)s',
                datefmt='%Y-%m-%d %H:%M:%S'
            )

        # Load configuration following our pattern
        if config_path is None:
            # Default to a shell_config.yaml sitting next to this module.
            base_dir = os.path.dirname(os.path.abspath(__file__))
            config_path = os.path.join(base_dir, 'shell_config.yaml')
            self.logger.debug(f"Default config path set to: {config_path}")

        try:
            with open(config_path, 'r') as f:
                self.config = yaml.safe_load(f)
                # yaml.safe_load returns None for an empty file; fall back to defaults.
                if self.config is None:
                    self.logger.warning(f"Configuration file at {config_path} is empty. Using default values.")
                    self._set_default_config()
                else:
                    self.logger.info(f"Configuration loaded successfully from {config_path}")
        except FileNotFoundError:
            self.logger.warning(f"Configuration file not found at {config_path}. Using default values.")
            self._set_default_config()
        except yaml.YAMLError as e:
            self.logger.error(f"Error parsing YAML configuration from {config_path}: {e}. Using default values.", exc_info=True)
            self._set_default_config()

        # Initialize memory system following our pattern
        self.memory = create_memory(memory_type)
        self.logger.info(f"Shell executor memory system initialized: {type(self.memory).__name__}")

        # Log configuration summary
        shell_config = self.config.get('shell_executor', {})
        self.logger.info(f"Shell executor initialized with allowed binaries: {shell_config.get('allowed_binaries', [])}")
        self.logger.info(f"Workspace base: {shell_config.get('workspace_base', '/workspace')}")
        self.logger.info(f"Default timeout: {shell_config.get('default_timeout', 30)}s")
|
93
|
+
|
94
|
+
def _set_default_config(self):
    """Populate ``self.config`` with built-in defaults (used when no usable YAML config exists)."""
    self.logger.info("Setting default configuration for ShellExecutor.")

    # Execution policy defaults: allow-listed binaries, workspace jail, limits.
    executor_defaults = {
        'allowed_binaries': ['git', 'bash', 'sh', 'gh', 'ls'],
        'default_timeout': 30,
        'max_output_size': 8192,
        'workspace_base': '/workspace',
        'allow_relative_paths': True,
        'restrict_to_workspace': True,
        'enable_detailed_logging': True,
        'log_command_execution': True,
        'log_output_truncation': True,
    }

    # Message templates used throughout validation/execution error paths.
    error_templates = {
        'binary_not_allowed': "Shell executor: Binary '{binary}' is not allowed.",
        'invalid_workspace_path': "Shell executor: Path '{path}' is outside the allowed workspace.",
        'command_timeout': "Shell executor: Command timed out after {timeout} seconds.",
        'execution_failed': "Shell executor: Command failed with exit code {exit_code}.",
        'output_truncated': "Shell executor: Output truncated to {size} bytes.",
    }

    success_templates = {
        'command_executed': "Shell executor: Command completed successfully in {duration:.2f}s.",
        'output_captured': "Shell executor: Captured {size} bytes of output.",
    }

    self.config = {
        'shell_executor': executor_defaults,
        'error_messages': error_templates,
        'success_messages': success_templates,
    }
|
121
|
+
|
122
|
+
def _validate_binary(self, command: str) -> None:
    """Reject commands whose executable is not on the configured allow-list.

    Raises:
        ValueError: When the command is empty or its binary is not allowed.
    """
    try:
        # Tokenize the command line; the first token names the binary to vet.
        tokens = shlex.split(command)
        if not tokens:
            raise ValueError("Empty command provided")

        executable = tokens[0]
        permitted = self.config.get('shell_executor', {}).get('allowed_binaries', [])

        if executable not in permitted:
            template = self.config.get('error_messages', {}).get(
                'binary_not_allowed',
                "Shell executor: Binary '{binary}' is not in the allowed list."
            )
            message = template.format(binary=executable)
            self.logger.error(f"Binary validation failed: {message}")
            raise ValueError(message)

        self.logger.debug(f"Binary validation passed: '{executable}' is allowed")

    except Exception as e:
        # Logged here (even for the ValueErrors raised above) and re-raised
        # unchanged so callers still receive the original exception.
        self.logger.error(f"Binary validation error: {e}")
        raise
|
146
|
+
|
147
|
+
def _validate_workspace_path(self, cwd: Optional[str]) -> str:
    """
    Resolve the working directory and enforce the workspace restriction.

    Args:
        cwd: Requested working directory, or None to use the current directory
            (which bypasses the workspace check, matching prior behavior).

    Returns:
        Absolute path to use as the working directory.

    Raises:
        ValueError: If workspace restriction is enabled and the path lies
            outside the configured workspace base.
    """
    shell_config = self.config.get('shell_executor', {})
    workspace_base = shell_config.get('workspace_base', '/workspace')

    if cwd is None:
        resolved_cwd = os.getcwd()
        self.logger.debug(f"Using current working directory: {resolved_cwd}")
    else:
        # Resolve absolute path
        resolved_cwd = os.path.abspath(cwd)

        # Check if path is within workspace if restriction is enabled
        if shell_config.get('restrict_to_workspace', True):
            workspace_path = os.path.abspath(workspace_base)
            try:
                # BUGFIX: a plain startswith() check accepted sibling paths
                # such as '/workspace2' for base '/workspace'. commonpath()
                # compares whole path components instead; it also raises
                # ValueError on Windows for paths on different drives, which
                # we treat the same as "outside the workspace".
                if os.path.commonpath([resolved_cwd, workspace_path]) != workspace_path:
                    raise ValueError()
            except ValueError:
                error_msg = self.config.get('error_messages', {}).get(
                    'invalid_workspace_path',
                    "Shell executor: Path '{path}' is outside the allowed workspace."
                ).format(path=resolved_cwd)
                self.logger.error(f"Workspace validation failed: {error_msg}")
                raise ValueError(error_msg)

    self.logger.debug(f"Workspace validation passed: {resolved_cwd}")

    return resolved_cwd
|
178
|
+
|
179
|
+
def shell_exec(self, shell_input: ShellExecInput) -> ShellExecOutput:
    """
    Execute shell command with comprehensive safety checks and logging.

    Args:
        shell_input: Validated input containing command and execution parameters

    Returns:
        ShellExecOutput: Structured output with execution results

    Raises:
        ValueError: For validation errors (binary not allowed, invalid path)
        RuntimeError: For execution failures
    """
    start_time = time.time()
    command = shell_input.command

    self.logger.info(f"Shell executor invoked with command: '{command}'")

    # Store command invocation in memory
    self.memory.add_to_conversation(
        "system",
        f"Shell command invoked: {command}",
        {
            "tool": "shell_executor",
            "command": command,
            "cwd": shell_input.cwd,
            "timeout": shell_input.timeout
        }
    )

    try:
        # Validation following our pattern
        self._validate_binary(command)
        resolved_cwd = self._validate_workspace_path(shell_input.cwd)

        # Get timeout from input or config.
        # NOTE(review): `or` means an explicit timeout of 0 falls back to the
        # config default; confirm that is intended.
        shell_config = self.config.get('shell_executor', {})
        timeout = shell_input.timeout or shell_config.get('default_timeout', 30)
        max_output_size = shell_config.get('max_output_size', 8192)

        self.logger.debug(f"Executing command in '{resolved_cwd}' with timeout {timeout}s")

        # Execute command with timeout protection
        try:
            cmd_list = shlex.split(command)
            env = os.environ.copy()
            # Never let git block on an interactive credential prompt.
            env.update({"GIT_TERMINAL_PROMPT": "0"})

            # Add any additional environment variables
            if shell_input.env_vars:
                env.update(shell_input.env_vars)
                self.logger.debug(f"Added {len(shell_input.env_vars)} additional environment variables")

            # shell=False with a pre-tokenized argv avoids shell injection.
            result = subprocess.run(
                cmd_list,
                cwd=resolved_cwd,
                shell=False,
                capture_output=True,
                text=True,
                timeout=timeout,
                env=env
            )

            # Combine stdout and stderr
            combined_output = result.stdout + result.stderr

            # Truncate output if too large (the tail is kept, which usually
            # carries the error message)
            truncated = False
            if len(combined_output) > max_output_size:
                combined_output = combined_output[-max_output_size:]
                truncated = True
                truncation_msg = self.config.get('error_messages', {}).get(
                    'output_truncated',
                    "Shell executor: Output truncated to {size} bytes."
                ).format(size=max_output_size)
                self.logger.warning(truncation_msg)

            duration = time.time() - start_time

            # Create output object
            output = ShellExecOutput(
                output=combined_output,
                exit_code=result.returncode,
                command=command,
                duration=duration,
                cwd=resolved_cwd,
                truncated=truncated
            )

            # Handle non-zero exit codes.  The RuntimeError raised here also
            # passes through the outer `except Exception`, so the failure is
            # logged and recorded in memory twice.
            if result.returncode != 0:
                error_msg = self.config.get('error_messages', {}).get(
                    'execution_failed',
                    "Shell executor: Command failed with exit code {exit_code}."
                ).format(exit_code=result.returncode)

                self.logger.error(f"{error_msg} Output: {combined_output}")

                # Store error in memory
                self.memory.add_to_conversation(
                    "system",
                    f"Shell command failed: {error_msg}",
                    {
                        "tool": "shell_executor",
                        "command": command,
                        "exit_code": result.returncode,
                        "error": True,
                        "output": combined_output
                    }
                )

                raise RuntimeError(f"{error_msg}\nOutput: {combined_output}")

            # Log successful execution
            success_msg = self.config.get('success_messages', {}).get(
                'command_executed',
                "Shell executor: Command completed successfully in {duration:.2f}s."
            ).format(duration=duration)

            self.logger.info(success_msg)
            self.logger.debug(f"Command output ({len(combined_output)} bytes): {combined_output}")

            # Store successful result in memory
            self.memory.add_to_conversation(
                "system",
                f"Shell command succeeded: {success_msg}",
                {
                    "tool": "shell_executor",
                    "command": command,
                    "exit_code": result.returncode,
                    "duration": duration,
                    "output_size": len(combined_output),
                    "truncated": truncated
                }
            )

            return output

        except subprocess.TimeoutExpired:
            duration = time.time() - start_time
            timeout_msg = self.config.get('error_messages', {}).get(
                'command_timeout',
                "Shell executor: Command timed out after {timeout} seconds."
            ).format(timeout=timeout)

            self.logger.error(f"{timeout_msg} Command: {command}")

            # Store timeout error in memory
            self.memory.add_to_conversation(
                "system",
                f"Shell command timeout: {timeout_msg}",
                {
                    "tool": "shell_executor",
                    "command": command,
                    "timeout": timeout,
                    "duration": duration,
                    "error": True
                }
            )

            raise RuntimeError(timeout_msg)

    except Exception as e:
        # Catch-all boundary: validation errors, execution failures, and the
        # timeout RuntimeError above all land here, get logged + recorded in
        # memory, and are re-raised unchanged for the caller.
        duration = time.time() - start_time
        self.logger.error(f"Shell executor error after {duration:.2f}s: {e}", exc_info=True)

        # Store error in memory
        self.memory.add_to_conversation(
            "system",
            f"Shell executor error: {str(e)}",
            {
                "tool": "shell_executor",
                "command": command,
                "error": True,
                "duration": duration
            }
        )

        raise
|
359
|
+
|
360
|
+
|
361
|
+
# Global executor instance following our pattern
_shell_executor = None


def get_shell_executor(config_path: str = None, memory_type: str = "persistent") -> ShellExecutor:
    """Return the module-wide ShellExecutor, creating it lazily on first use.

    Note: ``config_path`` and ``memory_type`` only take effect on the call
    that actually creates the singleton.
    """
    global _shell_executor
    if _shell_executor is not None:
        return _shell_executor
    _shell_executor = ShellExecutor(config_path=config_path, memory_type=memory_type)
    return _shell_executor
|
370
|
+
|
371
|
+
|
372
|
+
# --- LangChain Tool Integration ---
@tool(args_schema=ShellExecInput)
def shell_exec(command: str, cwd: str = None, timeout: int = None, env_vars: Dict[str, str] = None) -> str:
    """
    Execute shell commands safely with comprehensive logging and monitoring.

    This tool follows our established pattern with:
    - Configuration-driven security (allowed binaries, workspace restrictions)
    - Comprehensive logging and error handling
    - Memory integration for operation tracking
    - Timeout protection and output size limits

    Args:
        command: Shell command to execute
        cwd: Working directory (optional, defaults to current directory)
        timeout: Timeout in seconds (optional, uses config default)
        env_vars: Additional environment variables for the child process (optional)

    Returns:
        str: Combined stdout and stderr output

    Raises:
        ValueError: For validation errors (binary not allowed, invalid path)
        RuntimeError: For execution failures or timeouts
    """
    executor = get_shell_executor()

    # BUGFIX: env_vars is declared in the args_schema (ShellExecInput) but was
    # not accepted by this function, so tool invocations supplying env_vars
    # failed with an unexpected-keyword error.  It is now passed through.
    shell_input = ShellExecInput(
        command=command,
        cwd=cwd,
        timeout=timeout,
        env_vars=env_vars
    )

    result = executor.shell_exec(shell_input)
    return result.output
|
406
|
+
|
407
|
+
|
408
|
+
# Convenience function following our pattern
def shell_exec_simple(cmd: str, cwd: str = None, timeout: int = 30) -> str:
    """
    Simple shell execution function matching the original interface.

    This maintains backward compatibility while using our comprehensive framework.

    Args:
        cmd: Shell command to execute
        cwd: Working directory (optional)
        timeout: Timeout in seconds (default 30)

    Returns:
        str: Combined stdout and stderr output

    Raises:
        ValueError: For validation errors (binary not allowed, invalid path)
        RuntimeError: For execution failures or timeouts
    """
    # BUGFIX: ``shell_exec`` is a LangChain tool object after decoration, so
    # calling it like a plain function with keyword arguments does not invoke
    # the underlying implementation.  Go through the executor directly instead.
    executor = get_shell_executor()
    result = executor.shell_exec(ShellExecInput(command=cmd, cwd=cwd, timeout=timeout))
    return result.output
|
@@ -0,0 +1,277 @@
|
|
1
|
+
"""
|
2
|
+
Text utilities for cleaning and enhancing error messages and issue titles.
|
3
|
+
|
4
|
+
This module provides utilities for:
|
5
|
+
- Cleaning ANSI color codes from terminal output
|
6
|
+
- Generating organic, context-aware issue titles
|
7
|
+
- Text processing for better user experience
|
8
|
+
"""
|
9
|
+
|
10
|
+
import re
|
11
|
+
from typing import Dict, List, Optional
|
12
|
+
from datetime import datetime
|
13
|
+
|
14
|
+
|
15
|
+
def clean_ansi_codes(text: str) -> str:
    """
    Remove ANSI color codes and escape sequences from text.

    Args:
        text: Text that may contain ANSI escape sequences

    Returns:
        Clean text without ANSI codes; falsy or non-string input is returned
        unchanged.
    """
    if not text or not isinstance(text, str):
        return text

    # Match any CSI escape sequence (ESC '[' parameters final-byte).
    # The previous pattern only recognized the 'm' (color) and 'K' (erase)
    # finals, leaving cursor-movement codes such as '\x1b[2J' or '\x1b[1A'
    # in the text; any alphabetic final byte is now accepted.
    ansi_pattern = re.compile(r'\x1b\[[0-9;]*[A-Za-z]')

    # Also clean up mis-decoded sequences where ESC was replaced by the
    # U+FFFD replacement character in captured logs.
    unicode_pattern = re.compile(r'�\[\d+m')

    # Remove ANSI codes
    clean_text = ansi_pattern.sub('', text)

    # Remove escaped unicode sequences
    clean_text = unicode_pattern.sub('', clean_text)

    return clean_text
|
41
|
+
|
42
|
+
|
43
|
+
def generate_organic_issue_title(error_context: Dict[str, any]) -> str:
    """
    Generate an organic, context-aware issue title based on error details.

    This function analyzes the error context and creates a meaningful title
    that feels like it was written by a thoughtful developer who understands
    the problem.

    Args:
        error_context: Dictionary containing error details with keys:
            - error_type: Type of error (terraform_init, auth_failed, etc.)
            - stack_detected: Dict of detected infrastructure files
            - error_message: The actual error message
            - repo_url: Repository URL
            - branch_name: Branch name (optional)

    Returns:
        Human-friendly, organic issue title
    """
    error_type = error_context.get('error_type', 'unknown')
    stack_detected = error_context.get('stack_detected', {})
    error_message = error_context.get('error_message', '')
    repo_url = error_context.get('repo_url', '')
    # NOTE(review): branch_name is extracted but currently unused below.
    branch_name = error_context.get('branch_name', '')

    # Clean the error message first
    clean_error = clean_ansi_codes(error_message)

    # Extract repository name for context ("owner/name.git" -> "name")
    repo_name = "repository"
    if repo_url:
        repo_match = re.search(r'/([^/]+)\.git$', repo_url)
        if repo_match:
            repo_name = repo_match.group(1)

    # Analyze stack for context
    stack_context = _analyze_stack_context(stack_detected)

    # Generate title based on error type and context.  Branch order matters:
    # earlier substring matches win over later ones.
    if 'terraform init' in clean_error.lower() or error_type == 'terraform_init':
        if 'token' in clean_error.lower() or 'login' in clean_error.lower():
            return f"Terraform Cloud authentication required for {repo_name} deployment"
        elif 'backend' in clean_error.lower():
            return f"Terraform backend configuration issue in {repo_name}"
        else:
            return f"Terraform initialization failed in {repo_name} {stack_context}"

    elif 'terraform plan' in clean_error.lower() or error_type == 'terraform_plan':
        return f"Terraform plan validation errors in {repo_name} {stack_context}"

    elif 'terraform apply' in clean_error.lower() or error_type == 'terraform_apply':
        return f"Terraform deployment failed for {repo_name} {stack_context}"

    elif 'auth' in clean_error.lower() or error_type == 'auth_failed':
        if 'github' in clean_error.lower():
            return f"GitHub access permissions needed for {repo_name}"
        else:
            return f"Authentication issue preventing {repo_name} deployment"

    elif 'api key' in clean_error.lower() or error_type == 'api_key_error':
        if 'openai' in clean_error.lower():
            return f"OpenAI API configuration required for {repo_name} automation"
        elif 'github' in clean_error.lower():
            return f"GitHub API token configuration needed for {repo_name}"
        else:
            return f"API authentication configuration required for {repo_name}"

    elif 'llm error' in clean_error.lower() or error_type == 'llm_error':
        return f"AI service connectivity issue affecting {repo_name} automation"

    elif 'network' in clean_error.lower() or 'timeout' in clean_error.lower() or error_type == 'network_error':
        return f"Network connectivity issue during {repo_name} deployment"

    # NOTE(review): the 'timeout' substring is already consumed by the branch
    # above, so this branch is only reachable via error_type == 'timeout_error'.
    elif 'timeout' in clean_error.lower() or error_type == 'timeout_error':
        return f"Service timeout issue affecting {repo_name} automation"

    elif 'permission' in clean_error.lower() or 'forbidden' in clean_error.lower() or error_type == 'permission_error':
        return f"Access permissions needed for {repo_name} deployment"

    elif 'planner error' in clean_error.lower() or error_type == 'planner_error':
        return f"Workflow planning issue in {repo_name} automation"

    elif 'workflow error' in clean_error.lower() or error_type == 'workflow_error':
        return f"System workflow failure affecting {repo_name}"

    elif stack_context and any(tf_count > 0 for tf_count in stack_detected.values() if isinstance(tf_count, int)):
        return f"Infrastructure deployment issue in {repo_name} {stack_context}"

    else:
        # Generic but still organic fallback
        action = "deployment" if stack_detected else "workflow"
        return f"Automated {action} issue detected in {repo_name}"
|
135
|
+
|
136
|
+
|
137
|
+
def _analyze_stack_context(stack_detected: Dict[str, any]) -> str:
|
138
|
+
"""
|
139
|
+
Analyze detected stack files to provide meaningful context.
|
140
|
+
|
141
|
+
Args:
|
142
|
+
stack_detected: Dictionary of detected file types and counts
|
143
|
+
|
144
|
+
Returns:
|
145
|
+
Human-friendly description of the stack
|
146
|
+
"""
|
147
|
+
if not stack_detected:
|
148
|
+
return ""
|
149
|
+
|
150
|
+
contexts = []
|
151
|
+
|
152
|
+
# Check for Terraform
|
153
|
+
tf_count = stack_detected.get('*.tf', 0)
|
154
|
+
if tf_count > 0:
|
155
|
+
if tf_count == 1:
|
156
|
+
contexts.append("(single Terraform configuration)")
|
157
|
+
elif tf_count <= 5:
|
158
|
+
contexts.append(f"({tf_count} Terraform files)")
|
159
|
+
else:
|
160
|
+
contexts.append(f"(complex Terraform setup with {tf_count} files)")
|
161
|
+
|
162
|
+
# Check for other infrastructure files
|
163
|
+
yml_count = stack_detected.get('*.yml', 0) + stack_detected.get('*.yaml', 0)
|
164
|
+
if yml_count > 0:
|
165
|
+
contexts.append(f"with {yml_count} YAML configs")
|
166
|
+
|
167
|
+
ps1_count = stack_detected.get('*.ps1', 0)
|
168
|
+
if ps1_count > 0:
|
169
|
+
contexts.append(f"and {ps1_count} PowerShell scripts")
|
170
|
+
|
171
|
+
sh_count = stack_detected.get('*.sh', 0)
|
172
|
+
if sh_count > 0:
|
173
|
+
contexts.append(f"and {sh_count} shell scripts")
|
174
|
+
|
175
|
+
if contexts:
|
176
|
+
return " " + " ".join(contexts)
|
177
|
+
|
178
|
+
return ""
|
179
|
+
|
180
|
+
|
181
|
+
def enhance_error_message_for_issue(error_message: str, context: Optional[Dict[str, any]] = None) -> str:
    """
    Clean and enhance an error message for a GitHub issue body.

    Strips ANSI codes, drops blank lines and surrounding whitespace, inserts a
    blank line after terraform-style error boxes (lines starting with '╵'),
    and wraps anything that looks like terminal output in a fenced code block.

    Args:
        error_message: Raw error message that may contain ANSI codes
        context: Optional context for better error formatting (currently unused)

    Returns:
        Clean, well-formatted error message suitable for GitHub issues
    """
    stripped_lines = (raw.strip() for raw in clean_ansi_codes(error_message).split('\n'))

    formatted = []
    for line in stripped_lines:
        if not line:
            continue  # drop empty / whitespace-only lines entirely
        formatted.append(line)
        if line.startswith('╵'):
            formatted.append('')  # breathing room after the end of an error box

    result = '\n'.join(formatted)

    # Wrap in a code block if it looks like terminal output.
    terminal_markers = ('$', '╷', '│', 'Error:', 'Initializing')
    if any(marker in result for marker in terminal_markers):
        result = f"```\n{result}\n```"

    return result
|
220
|
+
|
221
|
+
|
222
|
+
def create_issue_metadata_section(context: Dict[str, any]) -> str:
    """
    Create a metadata section for GitHub issues with deployment context.

    Args:
        context: Issue context including repo, branch, stack info

    Returns:
        Formatted metadata section for issue body
    """
    # Local import: the module-level datetime import only brings in `datetime`.
    from datetime import timezone

    repo_url = context.get('repo_url', 'Unknown')
    branch_name = context.get('branch_name', 'Unknown')
    stack_detected = context.get('stack_detected', {})

    # BUGFIX: the label says "UTC", but datetime.now() without a tz returns
    # local time, so the rendered timestamp was mislabelled.  Use a real UTC
    # clock so the label is accurate.
    timestamp = datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')

    metadata_lines = [
        "## Deployment Context",
        "",
        f"**Repository:** {repo_url}",
        f"**Branch:** {branch_name}",
        f"**Detected Stack:** {_format_stack_detection(stack_detected)}",
        f"**Timestamp:** {timestamp}",
        "",
        "---",
        "",
        "## Error Details",
        ""
    ]

    return '\n'.join(metadata_lines)
|
251
|
+
|
252
|
+
|
253
|
+
def _format_stack_detection(stack_detected: Dict[str, any]) -> str:
|
254
|
+
"""
|
255
|
+
Format stack detection results for display.
|
256
|
+
|
257
|
+
Args:
|
258
|
+
stack_detected: Dictionary of detected file types and counts
|
259
|
+
|
260
|
+
Returns:
|
261
|
+
Human-friendly stack description
|
262
|
+
"""
|
263
|
+
if not stack_detected:
|
264
|
+
return "No infrastructure files detected"
|
265
|
+
|
266
|
+
items = []
|
267
|
+
for file_type, count in stack_detected.items():
|
268
|
+
if isinstance(count, int) and count > 0:
|
269
|
+
if count == 1:
|
270
|
+
items.append(f"1 {file_type} file")
|
271
|
+
else:
|
272
|
+
items.append(f"{count} {file_type} files")
|
273
|
+
|
274
|
+
if not items:
|
275
|
+
return "No infrastructure files detected"
|
276
|
+
|
277
|
+
return ", ".join(items)
|