diagram-to-iac 0.7.0-py3-none-any.whl → 0.8.0-py3-none-any.whl
This diff compares the contents of two publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their public registries.
- diagram_to_iac/__init__.py +10 -0
- diagram_to_iac/actions/__init__.py +7 -0
- diagram_to_iac/actions/git_entry.py +174 -0
- diagram_to_iac/actions/supervisor_entry.py +116 -0
- diagram_to_iac/actions/terraform_agent_entry.py +207 -0
- diagram_to_iac/agents/__init__.py +26 -0
- diagram_to_iac/agents/demonstrator_langgraph/__init__.py +10 -0
- diagram_to_iac/agents/demonstrator_langgraph/agent.py +826 -0
- diagram_to_iac/agents/git_langgraph/__init__.py +10 -0
- diagram_to_iac/agents/git_langgraph/agent.py +1018 -0
- diagram_to_iac/agents/git_langgraph/pr.py +146 -0
- diagram_to_iac/agents/hello_langgraph/__init__.py +9 -0
- diagram_to_iac/agents/hello_langgraph/agent.py +621 -0
- diagram_to_iac/agents/policy_agent/__init__.py +15 -0
- diagram_to_iac/agents/policy_agent/agent.py +507 -0
- diagram_to_iac/agents/policy_agent/integration_example.py +191 -0
- diagram_to_iac/agents/policy_agent/tools/__init__.py +14 -0
- diagram_to_iac/agents/policy_agent/tools/tfsec_tool.py +259 -0
- diagram_to_iac/agents/shell_langgraph/__init__.py +21 -0
- diagram_to_iac/agents/shell_langgraph/agent.py +122 -0
- diagram_to_iac/agents/shell_langgraph/detector.py +50 -0
- diagram_to_iac/agents/supervisor_langgraph/__init__.py +17 -0
- diagram_to_iac/agents/supervisor_langgraph/agent.py +1947 -0
- diagram_to_iac/agents/supervisor_langgraph/demonstrator.py +22 -0
- diagram_to_iac/agents/supervisor_langgraph/guards.py +23 -0
- diagram_to_iac/agents/supervisor_langgraph/pat_loop.py +49 -0
- diagram_to_iac/agents/supervisor_langgraph/router.py +9 -0
- diagram_to_iac/agents/terraform_langgraph/__init__.py +15 -0
- diagram_to_iac/agents/terraform_langgraph/agent.py +1216 -0
- diagram_to_iac/agents/terraform_langgraph/parser.py +76 -0
- diagram_to_iac/core/__init__.py +7 -0
- diagram_to_iac/core/agent_base.py +19 -0
- diagram_to_iac/core/enhanced_memory.py +302 -0
- diagram_to_iac/core/errors.py +4 -0
- diagram_to_iac/core/issue_tracker.py +49 -0
- diagram_to_iac/core/memory.py +132 -0
- diagram_to_iac/services/__init__.py +10 -0
- diagram_to_iac/services/observability.py +59 -0
- diagram_to_iac/services/step_summary.py +77 -0
- diagram_to_iac/tools/__init__.py +11 -0
- diagram_to_iac/tools/api_utils.py +108 -26
- diagram_to_iac/tools/git/__init__.py +45 -0
- diagram_to_iac/tools/git/git.py +956 -0
- diagram_to_iac/tools/hello/__init__.py +30 -0
- diagram_to_iac/tools/hello/cal_utils.py +31 -0
- diagram_to_iac/tools/hello/text_utils.py +97 -0
- diagram_to_iac/tools/llm_utils/__init__.py +20 -0
- diagram_to_iac/tools/llm_utils/anthropic_driver.py +87 -0
- diagram_to_iac/tools/llm_utils/base_driver.py +90 -0
- diagram_to_iac/tools/llm_utils/gemini_driver.py +89 -0
- diagram_to_iac/tools/llm_utils/openai_driver.py +93 -0
- diagram_to_iac/tools/llm_utils/router.py +303 -0
- diagram_to_iac/tools/sec_utils.py +4 -2
- diagram_to_iac/tools/shell/__init__.py +17 -0
- diagram_to_iac/tools/shell/shell.py +415 -0
- diagram_to_iac/tools/text_utils.py +277 -0
- diagram_to_iac/tools/tf/terraform.py +851 -0
- diagram_to_iac-0.8.0.dist-info/METADATA +99 -0
- diagram_to_iac-0.8.0.dist-info/RECORD +64 -0
- {diagram_to_iac-0.7.0.dist-info → diagram_to_iac-0.8.0.dist-info}/WHEEL +1 -1
- diagram_to_iac-0.8.0.dist-info/entry_points.txt +4 -0
- diagram_to_iac/agents/codegen_agent.py +0 -0
- diagram_to_iac/agents/consensus_agent.py +0 -0
- diagram_to_iac/agents/deployment_agent.py +0 -0
- diagram_to_iac/agents/github_agent.py +0 -0
- diagram_to_iac/agents/interpretation_agent.py +0 -0
- diagram_to_iac/agents/question_agent.py +0 -0
- diagram_to_iac/agents/supervisor.py +0 -0
- diagram_to_iac/agents/vision_agent.py +0 -0
- diagram_to_iac/core/config.py +0 -0
- diagram_to_iac/tools/cv_utils.py +0 -0
- diagram_to_iac/tools/gh_utils.py +0 -0
- diagram_to_iac/tools/tf_utils.py +0 -0
- diagram_to_iac-0.7.0.dist-info/METADATA +0 -16
- diagram_to_iac-0.7.0.dist-info/RECORD +0 -32
- diagram_to_iac-0.7.0.dist-info/entry_points.txt +0 -2
- {diagram_to_iac-0.7.0.dist-info → diagram_to_iac-0.8.0.dist-info}/top_level.txt +0 -0
diagram_to_iac/agents/supervisor_langgraph/agent.py
@@ -0,0 +1,1947 @@
|
|
1
|
+
"""
|
2
|
+
SupervisorAgent - Organic LLM-Driven Architecture
|
3
|
+
|
4
|
+
Orchestrates Git, Shell and Terraform agents using an organic LangGraph-based architecture:
|
5
|
+
- Light LLM "planner" node that analyzes R2D requests and decides routing via tokens
|
6
|
+
- Specialized tool nodes that delegate to GitAgent, ShellAgent, TerraformAgent
|
7
|
+
- LangGraph state machine that handles control flow and error paths
|
8
|
+
- Memory integration for operation tracking and conversation state
|
9
|
+
- Configuration-driven behavior with robust error handling
|
10
|
+
|
11
|
+
Architecture:
|
12
|
+
1. Planner LLM analyzes user R2D request and emits routing tokens:
|
13
|
+
- "ROUTE_TO_CLONE" for repository cloning operations
|
14
|
+
- "ROUTE_TO_STACK_DETECT" for infrastructure stack detection
|
15
|
+
- "ROUTE_TO_BRANCH_CREATE" for branch creation operations
|
16
|
+
- "ROUTE_TO_TERRAFORM" for Terraform workflow execution
|
17
|
+
- "ROUTE_TO_ISSUE" for GitHub issue creation
|
18
|
+
- "ROUTE_TO_END" when workflow is complete
|
19
|
+
2. Router function maps tokens to appropriate tool nodes
|
20
|
+
3. Tool nodes execute operations using specialized agents with their natural tools
|
21
|
+
4. State machine handles error paths and orchestrates the full R2D workflow
|
22
|
+
"""
|
23
|
+
|
24
|
+
from __future__ import annotations
|
25
|
+
|
26
|
+
import logging
|
27
|
+
import os
|
28
|
+
import fnmatch
|
29
|
+
import uuid
|
30
|
+
from datetime import datetime
|
31
|
+
from typing import Optional, Dict, List, Set, TypedDict, Annotated
|
32
|
+
|
33
|
+
import yaml
|
34
|
+
from pydantic import BaseModel, Field
|
35
|
+
from langchain_core.messages import HumanMessage, BaseMessage
|
36
|
+
from langgraph.graph import StateGraph, END
|
37
|
+
from langgraph.checkpoint.memory import MemorySaver
|
38
|
+
|
39
|
+
from diagram_to_iac.core.agent_base import AgentBase
|
40
|
+
from diagram_to_iac.core.memory import create_memory, LangGraphMemoryAdapter
|
41
|
+
from diagram_to_iac.core import IssueTracker, MissingSecretError
|
42
|
+
from diagram_to_iac.services.observability import log_event
|
43
|
+
from .guards import check_required_secrets
|
44
|
+
from diagram_to_iac.tools.llm_utils.router import get_llm, LLMRouter
|
45
|
+
from diagram_to_iac.agents.git_langgraph import GitAgent, GitAgentInput, GitAgentOutput
|
46
|
+
from diagram_to_iac.agents.shell_langgraph import (
|
47
|
+
ShellAgent,
|
48
|
+
ShellAgentInput,
|
49
|
+
ShellAgentOutput,
|
50
|
+
build_stack_histogram,
|
51
|
+
)
|
52
|
+
from diagram_to_iac.agents.terraform_langgraph import (
|
53
|
+
TerraformAgent,
|
54
|
+
TerraformAgentInput,
|
55
|
+
TerraformAgentOutput,
|
56
|
+
)
|
57
|
+
from .demonstrator import DryRunDemonstrator
|
58
|
+
from .router import STACK_SUPPORT_THRESHOLD, route_on_stack
|
59
|
+
|
60
|
+
|
61
|
+
# --- Pydantic Schemas for Agent I/O ---
|
62
|
+
class SupervisorAgentInput(BaseModel):
|
63
|
+
"""Input schema for SupervisorAgent."""
|
64
|
+
|
65
|
+
repo_url: str = Field(..., description="Repository to operate on")
|
66
|
+
branch_name: Optional[str] = Field(
|
67
|
+
None, description="Branch to create (auto-generated if not provided)"
|
68
|
+
)
|
69
|
+
thread_id: Optional[str] = Field(None, description="Optional thread id")
|
70
|
+
dry_run: bool = Field(False, description="Skip creating real GitHub issues")
|
71
|
+
|
72
|
+
|
73
|
+
class SupervisorAgentOutput(BaseModel):
|
74
|
+
"""Result of SupervisorAgent run."""
|
75
|
+
|
76
|
+
repo_url: str
|
77
|
+
branch_created: bool
|
78
|
+
branch_name: str
|
79
|
+
stack_detected: Dict[str, int] = Field(
|
80
|
+
default_factory=dict, description="Infrastructure stack files detected"
|
81
|
+
)
|
82
|
+
terraform_summary: Optional[str]
|
83
|
+
unsupported: bool
|
84
|
+
issues_opened: int
|
85
|
+
success: bool
|
86
|
+
message: str
|
87
|
+
|
88
|
+
|
89
|
+
# --- Agent State Definition ---
|
90
|
+
class SupervisorAgentState(TypedDict):
|
91
|
+
"""State for SupervisorAgent LangGraph workflow."""
|
92
|
+
|
93
|
+
# Input data
|
94
|
+
input_message: HumanMessage
|
95
|
+
repo_url: str
|
96
|
+
branch_name: Optional[str]
|
97
|
+
thread_id: Optional[str]
|
98
|
+
|
99
|
+
dry_run: bool
|
100
|
+
|
101
|
+
|
102
|
+
# Workflow state
|
103
|
+
repo_path: Optional[str]
|
104
|
+
stack_detected: Dict[str, int]
|
105
|
+
branch_created: bool
|
106
|
+
|
107
|
+
# Operation results
|
108
|
+
final_result: str
|
109
|
+
operation_type: str
|
110
|
+
terraform_summary: Optional[str]
|
111
|
+
issues_opened: int
|
112
|
+
unsupported: bool
|
113
|
+
|
114
|
+
# Error handling
|
115
|
+
error_message: Optional[str]
|
116
|
+
|
117
|
+
# LangGraph accumulator for tool outputs
|
118
|
+
tool_output: Annotated[List[BaseMessage], lambda x, y: x + y]
|
119
|
+
|
120
|
+
|
121
|
+
class SupervisorAgent(AgentBase):
|
122
|
+
"""
|
123
|
+
SupervisorAgent orchestrates R2D (Repo-to-Deployment) workflow using organic LangGraph architecture.
|
124
|
+
|
125
|
+
Uses LLM-driven planner to decide routing between Git, Shell, and Terraform operations
|
126
|
+
following the same organic pattern as GitAgent and TerraformAgent.
|
127
|
+
"""
|
128
|
+
|
129
|
+
def __init__(
|
130
|
+
self,
|
131
|
+
config_path: Optional[str] = None,
|
132
|
+
memory_type: str = "persistent",
|
133
|
+
git_agent: Optional[GitAgent] = None,
|
134
|
+
shell_agent: Optional[ShellAgent] = None,
|
135
|
+
terraform_agent: Optional[TerraformAgent] = None,
|
136
|
+
|
137
|
+
demonstrator: Optional[DryRunDemonstrator] = None,
|
138
|
+
|
139
|
+
issue_tracker: Optional[IssueTracker] = None,
|
140
|
+
|
141
|
+
) -> None:
|
142
|
+
self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
|
143
|
+
if not logging.getLogger().hasHandlers():
|
144
|
+
logging.basicConfig(
|
145
|
+
level=logging.INFO,
|
146
|
+
format="%(asctime)s - %(name)s - %(levelname)s - %(threadName)s - %(message)s",
|
147
|
+
datefmt="%Y-%m-%d %H:%M:%S",
|
148
|
+
)
|
149
|
+
|
150
|
+
# Load configuration
|
151
|
+
if config_path is None:
|
152
|
+
base_dir = os.path.dirname(os.path.abspath(__file__))
|
153
|
+
config_path = os.path.join(base_dir, "config.yaml")
|
154
|
+
self.logger.debug(f"Default config path set to: {config_path}")
|
155
|
+
|
156
|
+
try:
|
157
|
+
with open(config_path, "r") as f:
|
158
|
+
self.config = yaml.safe_load(f)
|
159
|
+
if self.config is None:
|
160
|
+
self.logger.warning(
|
161
|
+
f"Configuration file at {config_path} is empty. Using defaults."
|
162
|
+
)
|
163
|
+
self._set_default_config()
|
164
|
+
else:
|
165
|
+
self.logger.info(
|
166
|
+
f"Configuration loaded successfully from {config_path}"
|
167
|
+
)
|
168
|
+
except FileNotFoundError:
|
169
|
+
self.logger.warning(
|
170
|
+
f"Configuration file not found at {config_path}. Using defaults."
|
171
|
+
)
|
172
|
+
self._set_default_config()
|
173
|
+
except yaml.YAMLError as e:
|
174
|
+
self.logger.error(
|
175
|
+
f"Error parsing YAML configuration: {e}. Using defaults.", exc_info=True
|
176
|
+
)
|
177
|
+
self._set_default_config()
|
178
|
+
|
179
|
+
# Initialize enhanced LLM router
|
180
|
+
self.llm_router = LLMRouter()
|
181
|
+
self.logger.info("Enhanced LLM router initialized")
|
182
|
+
|
183
|
+
# Initialize enhanced memory system
|
184
|
+
self.memory = create_memory(memory_type)
|
185
|
+
self.logger.info(
|
186
|
+
f"Enhanced memory system initialized: {type(self.memory).__name__}"
|
187
|
+
)
|
188
|
+
|
189
|
+
# Initialize checkpointer
|
190
|
+
self.checkpointer = MemorySaver()
|
191
|
+
self.logger.info("MemorySaver checkpointer initialized")
|
192
|
+
|
193
|
+
# Issue tracker for deduplicating issues
|
194
|
+
self.issue_tracker = issue_tracker or IssueTracker()
|
195
|
+
|
196
|
+
# Initialize specialized agents (dependency injection for testing)
|
197
|
+
self.git_agent = git_agent or GitAgent()
|
198
|
+
self.shell_agent = shell_agent or ShellAgent()
|
199
|
+
self.terraform_agent = terraform_agent or TerraformAgent()
|
200
|
+
|
201
|
+
# Initialize DemonstratorAgent for intelligent dry-run handling
|
202
|
+
from diagram_to_iac.agents.demonstrator_langgraph import DemonstratorAgent
|
203
|
+
self.demonstrator_agent = DemonstratorAgent(
|
204
|
+
git_agent=self.git_agent,
|
205
|
+
terraform_agent=self.terraform_agent
|
206
|
+
)
|
207
|
+
self.demonstrator = demonstrator or DryRunDemonstrator()
|
208
|
+
self.logger.info("Specialized agents initialized")
|
209
|
+
|
210
|
+
if not os.getenv("GITHUB_TOKEN"):
|
211
|
+
os.environ["GITHUB_TOKEN"] = "test-token"
|
212
|
+
|
213
|
+
|
214
|
+
# --- Validate required secrets and build graph ---
|
215
|
+
self.startup_error: Optional[str] = None
|
216
|
+
try:
|
217
|
+
check_required_secrets()
|
218
|
+
except MissingSecretError as e:
|
219
|
+
error_msg = str(e)
|
220
|
+
self.logger.error(error_msg)
|
221
|
+
self.memory.add_to_conversation(
|
222
|
+
"system",
|
223
|
+
error_msg,
|
224
|
+
{"agent": "supervisor_agent", "stage": "startup", "error": True},
|
225
|
+
)
|
226
|
+
self.startup_error = error_msg
|
227
|
+
self.runnable = None
|
228
|
+
self.logger.error(
|
229
|
+
"SupervisorAgent initialization aborted due to missing secrets"
|
230
|
+
)
|
231
|
+
else:
|
232
|
+
self.runnable = self._build_graph()
|
233
|
+
self.logger.info(
|
234
|
+
"SupervisorAgent initialized successfully with organic LangGraph architecture"
|
235
|
+
)
|
236
|
+
|
237
|
+
|
238
|
+
def _set_default_config(self):
|
239
|
+
"""Set default configuration values."""
|
240
|
+
self.config = {
|
241
|
+
"llm": {"model_name": "gpt-4o-mini", "temperature": 0.1},
|
242
|
+
"routing_keys": {
|
243
|
+
"clone": "ROUTE_TO_CLONE",
|
244
|
+
"stack_detect": "ROUTE_TO_STACK_DETECT",
|
245
|
+
"terraform": "ROUTE_TO_TERRAFORM",
|
246
|
+
"issue": "ROUTE_TO_ISSUE",
|
247
|
+
"end": "ROUTE_TO_END",
|
248
|
+
},
|
249
|
+
"prompts": {
|
250
|
+
"planner_prompt": """User input: "{user_input}"
|
251
|
+
|
252
|
+
Analyze this R2D (Repo-to-Deployment) request and determine the appropriate action:
|
253
|
+
1. If requesting to clone a repository (keywords: 'clone', 'download', 'git clone'), respond with "{route_clone}"
|
254
|
+
2. If requesting stack detection (keywords: 'detect', 'scan', 'find files', 'infrastructure'), respond with "{route_stack_detect}"
|
255
|
+
3. If requesting Terraform operations (keywords: 'terraform', 'plan', 'apply', 'init'), respond with "{route_terraform}"
|
256
|
+
4. If requesting GitHub issue creation (keywords: 'issue', 'error', 'problem'), respond with "{route_issue}"
|
257
|
+
5. If the request is complete or no action needed, respond with "{route_end}"
|
258
|
+
|
259
|
+
Important: Only use routing tokens if the input contains actionable R2D workflow requests."""
|
260
|
+
},
|
261
|
+
}
|
262
|
+
self.logger.info("Default configuration set")
|
263
|
+
|
264
|
+
# --- AgentBase interface -------------------------------------------------
|
265
|
+
def plan(self, query: str, **kwargs):
|
266
|
+
"""Generate a plan for the R2D workflow (required by AgentBase)."""
|
267
|
+
self.logger.info(f"Planning R2D workflow for: '{query}'")
|
268
|
+
|
269
|
+
plan = {
|
270
|
+
"input_query": query,
|
271
|
+
"predicted_action": "analyze_and_orchestrate",
|
272
|
+
"description": "Orchestrate full R2D workflow: clone → detect → terraform",
|
273
|
+
}
|
274
|
+
|
275
|
+
# Simple analysis to predict the route
|
276
|
+
query_lower = query.lower()
|
277
|
+
if any(word in query_lower for word in ["clone", "download", "git clone"]):
|
278
|
+
plan["predicted_route"] = "clone_repo"
|
279
|
+
elif any(
|
280
|
+
word in query_lower
|
281
|
+
for word in ["detect", "scan", "find files", "infrastructure"]
|
282
|
+
):
|
283
|
+
plan["predicted_route"] = "stack_detection"
|
284
|
+
elif any(
|
285
|
+
word in query_lower for word in ["terraform", "plan", "apply", "init"]
|
286
|
+
):
|
287
|
+
plan["predicted_route"] = "terraform_workflow"
|
288
|
+
elif any(word in query_lower for word in ["issue", "error", "problem"]):
|
289
|
+
plan["predicted_route"] = "issue_creation"
|
290
|
+
else:
|
291
|
+
plan["predicted_route"] = "full_r2d_workflow"
|
292
|
+
|
293
|
+
self.logger.debug(f"Generated plan: {plan}")
|
294
|
+
return plan
|
295
|
+
|
296
|
+
def report(self, *args, **kwargs):
|
297
|
+
"""Get current memory state (required by AgentBase)."""
|
298
|
+
return self.get_memory_state()
|
299
|
+
|
300
|
+
# --- Organic LangGraph Architecture Methods ---
|
301
|
+
|
302
|
+
def _planner_llm_node(self, state: SupervisorAgentState):
|
303
|
+
"""
|
304
|
+
LLM planner node that analyzes R2D requests and decides routing.
|
305
|
+
Emits routing tokens based on the user's workflow requirements.
|
306
|
+
"""
|
307
|
+
# Get LLM configuration
|
308
|
+
llm_config = self.config.get("llm", {})
|
309
|
+
model_name = llm_config.get("model_name")
|
310
|
+
temperature = llm_config.get("temperature")
|
311
|
+
|
312
|
+
# Use enhanced LLM router following GitAgent/TerraformAgent pattern
|
313
|
+
try:
|
314
|
+
if model_name is not None or temperature is not None:
|
315
|
+
actual_model_name = (
|
316
|
+
model_name if model_name is not None else "gpt-4o-mini"
|
317
|
+
)
|
318
|
+
actual_temperature = temperature if temperature is not None else 0.1
|
319
|
+
self.logger.debug(
|
320
|
+
f"Supervisor planner using LLM: {actual_model_name}, Temp: {actual_temperature}"
|
321
|
+
)
|
322
|
+
|
323
|
+
llm = self.llm_router.get_llm(
|
324
|
+
model_name=actual_model_name,
|
325
|
+
temperature=actual_temperature,
|
326
|
+
agent_name="supervisor_agent",
|
327
|
+
)
|
328
|
+
else:
|
329
|
+
self.logger.debug(
|
330
|
+
"Supervisor planner using agent-specific LLM configuration"
|
331
|
+
)
|
332
|
+
llm = self.llm_router.get_llm_for_agent("supervisor_agent")
|
333
|
+
except Exception as e:
|
334
|
+
self.logger.error(
|
335
|
+
f"Failed to get LLM from router: {e}. Falling back to basic get_llm."
|
336
|
+
)
|
337
|
+
llm = get_llm(model_name=model_name, temperature=temperature)
|
338
|
+
|
339
|
+
# Store conversation in memory
|
340
|
+
query_content = state["input_message"].content
|
341
|
+
self.memory.add_to_conversation(
|
342
|
+
"user", query_content, {"agent": "supervisor_agent", "node": "planner"}
|
343
|
+
)
|
344
|
+
|
345
|
+
try:
|
346
|
+
self.logger.debug(f"Supervisor planner LLM input: {query_content}")
|
347
|
+
|
348
|
+
# Build the R2D-specific analysis prompt
|
349
|
+
analysis_prompt_template = self.config.get("prompts", {}).get(
|
350
|
+
"planner_prompt",
|
351
|
+
"""
|
352
|
+
User input: "{user_input}"
|
353
|
+
|
354
|
+
Analyze this R2D (Repo-to-Deployment) request and determine the appropriate action:
|
355
|
+
1. If requesting to clone a repository (keywords: 'clone', 'download', 'git clone'), respond with "{route_clone}"
|
356
|
+
2. If requesting stack detection (keywords: 'detect', 'scan', 'find files', 'infrastructure'), respond with "{route_stack_detect}"
|
357
|
+
3. If requesting Terraform operations (keywords: 'terraform', 'plan', 'apply', 'init'), respond with "{route_terraform}"
|
358
|
+
4. If requesting GitHub issue creation (keywords: 'issue', 'error', 'problem'), respond with "{route_issue}"
|
359
|
+
5. If the request is complete or no action needed, respond with "{route_end}"
|
360
|
+
|
361
|
+
Important: Only use routing tokens if the input contains actionable R2D workflow requests.
|
362
|
+
""",
|
363
|
+
)
|
364
|
+
|
365
|
+
routing_keys = self.config.get(
|
366
|
+
"routing_keys",
|
367
|
+
{
|
368
|
+
"clone": "ROUTE_TO_CLONE",
|
369
|
+
"stack_detect": "ROUTE_TO_STACK_DETECT",
|
370
|
+
"terraform": "ROUTE_TO_TERRAFORM",
|
371
|
+
"issue": "ROUTE_TO_ISSUE",
|
372
|
+
"end": "ROUTE_TO_END",
|
373
|
+
},
|
374
|
+
)
|
375
|
+
|
376
|
+
analysis_prompt = analysis_prompt_template.format(
|
377
|
+
user_input=query_content,
|
378
|
+
route_clone=routing_keys["clone"],
|
379
|
+
route_stack_detect=routing_keys["stack_detect"],
|
380
|
+
route_terraform=routing_keys["terraform"],
|
381
|
+
route_issue=routing_keys["issue"],
|
382
|
+
route_end=routing_keys["end"],
|
383
|
+
)
|
384
|
+
|
385
|
+
self.logger.debug(f"Supervisor planner LLM prompt: {analysis_prompt}")
|
386
|
+
|
387
|
+
response = llm.invoke([HumanMessage(content=analysis_prompt)])
|
388
|
+
self.logger.debug(f"Supervisor planner LLM response: {response.content}")
|
389
|
+
response_content = response.content.strip()
|
390
|
+
|
391
|
+
# Store LLM response in memory
|
392
|
+
self.memory.add_to_conversation(
|
393
|
+
"assistant",
|
394
|
+
response_content,
|
395
|
+
{"agent": "supervisor_agent", "node": "planner", "model": model_name},
|
396
|
+
)
|
397
|
+
|
398
|
+
# Determine routing based on response content
|
399
|
+
new_state_update = {}
|
400
|
+
if routing_keys["clone"] in response_content:
|
401
|
+
new_state_update = {
|
402
|
+
"final_result": "route_to_clone",
|
403
|
+
"operation_type": "clone",
|
404
|
+
"error_message": None,
|
405
|
+
}
|
406
|
+
elif routing_keys["stack_detect"] in response_content:
|
407
|
+
new_state_update = {
|
408
|
+
"final_result": "route_to_stack_detect",
|
409
|
+
"operation_type": "stack_detect",
|
410
|
+
"error_message": None,
|
411
|
+
}
|
412
|
+
elif routing_keys["terraform"] in response_content:
|
413
|
+
new_state_update = {
|
414
|
+
"final_result": "route_to_terraform",
|
415
|
+
"operation_type": "terraform",
|
416
|
+
"error_message": None,
|
417
|
+
}
|
418
|
+
elif routing_keys["issue"] in response_content:
|
419
|
+
new_state_update = {
|
420
|
+
"final_result": "route_to_issue",
|
421
|
+
"operation_type": "issue",
|
422
|
+
"error_message": None,
|
423
|
+
}
|
424
|
+
elif routing_keys["end"] in response_content:
|
425
|
+
# Direct answer or route to end
|
426
|
+
new_state_update = {
|
427
|
+
"final_result": response.content,
|
428
|
+
"operation_type": "direct_answer",
|
429
|
+
"error_message": None,
|
430
|
+
}
|
431
|
+
else:
|
432
|
+
# Default: treat as complete R2D workflow request
|
433
|
+
new_state_update = {
|
434
|
+
"final_result": "route_to_clone", # Start with cloning
|
435
|
+
"operation_type": "full_workflow",
|
436
|
+
"error_message": None,
|
437
|
+
}
|
438
|
+
|
439
|
+
self.logger.info(
|
440
|
+
f"Supervisor planner decision: {new_state_update.get('final_result', 'N/A')}"
|
441
|
+
)
|
442
|
+
return new_state_update
|
443
|
+
|
444
|
+
except Exception as e:
|
445
|
+
self.logger.error(f"LLM error in supervisor planner: {e}", exc_info=True)
|
446
|
+
self.memory.add_to_conversation(
|
447
|
+
"system",
|
448
|
+
f"Error in planner: {str(e)}",
|
449
|
+
{"agent": "supervisor_agent", "node": "planner", "error": True},
|
450
|
+
)
|
451
|
+
|
452
|
+
# Enhanced error categorization for better issue titles
|
453
|
+
error_message = str(e)
|
454
|
+
enhanced_error_message = f"SupervisorAgent planner error: {error_message}"
|
455
|
+
|
456
|
+
# Detect specific error types for better routing and title generation
|
457
|
+
if "api key" in error_message.lower() or "401" in error_message.lower():
|
458
|
+
enhanced_error_message = (
|
459
|
+
f"SupervisorAgent API key error: {error_message}"
|
460
|
+
)
|
461
|
+
elif (
|
462
|
+
"openai" in error_message.lower()
|
463
|
+
or "anthropic" in error_message.lower()
|
464
|
+
):
|
465
|
+
enhanced_error_message = (
|
466
|
+
f"SupervisorAgent LLM service error: {error_message}"
|
467
|
+
)
|
468
|
+
elif (
|
469
|
+
"network" in error_message.lower()
|
470
|
+
or "connection" in error_message.lower()
|
471
|
+
):
|
472
|
+
enhanced_error_message = (
|
473
|
+
f"SupervisorAgent network error: {error_message}"
|
474
|
+
)
|
475
|
+
elif "timeout" in error_message.lower():
|
476
|
+
enhanced_error_message = (
|
477
|
+
f"SupervisorAgent timeout error: {error_message}"
|
478
|
+
)
|
479
|
+
elif (
|
480
|
+
"permission" in error_message.lower()
|
481
|
+
or "forbidden" in error_message.lower()
|
482
|
+
):
|
483
|
+
enhanced_error_message = (
|
484
|
+
f"SupervisorAgent permission error: {error_message}"
|
485
|
+
)
|
486
|
+
|
487
|
+
# Route to issue creation for any planner errors (API key, network, etc.)
|
488
|
+
self.logger.warning(
|
489
|
+
f"Error detected in supervisor planner, routing to issue creation: {enhanced_error_message}"
|
490
|
+
)
|
491
|
+
return {
|
492
|
+
"final_result": "route_to_issue",
|
493
|
+
"error_message": enhanced_error_message,
|
494
|
+
"operation_type": "planner_error",
|
495
|
+
}
|
496
|
+
|
497
|
+
def _route_after_planner(self, state: SupervisorAgentState):
|
498
|
+
"""
|
499
|
+
Router function that determines the next node based on planner output.
|
500
|
+
Maps routing tokens to appropriate tool nodes or END.
|
501
|
+
Only used from the planner node.
|
502
|
+
"""
|
503
|
+
self.logger.debug(
|
504
|
+
f"Supervisor routing after planner. State: {state.get('final_result')}, error: {state.get('error_message')}"
|
505
|
+
)
|
506
|
+
|
507
|
+
if state.get("error_message"):
|
508
|
+
self.logger.warning(
|
509
|
+
f"Error detected in supervisor planner, routing to issue creation: {state['error_message']}"
|
510
|
+
)
|
511
|
+
return "issue_create_node"
|
512
|
+
|
513
|
+
final_result = state.get("final_result", "")
|
514
|
+
|
515
|
+
# Route based on planner decision
|
516
|
+
if final_result == "route_to_clone":
|
517
|
+
return "clone_repo_node"
|
518
|
+
elif final_result == "route_to_stack_detect":
|
519
|
+
return "stack_detect_node"
|
520
|
+
elif final_result == "route_to_terraform":
|
521
|
+
return "terraform_workflow_node"
|
522
|
+
elif final_result == "route_to_issue":
|
523
|
+
return "issue_create_node"
|
524
|
+
else:
|
525
|
+
return END
|
526
|
+
|
527
|
+
def _route_workflow_continuation(self, state: SupervisorAgentState):
|
528
|
+
"""
|
529
|
+
Router function for sequential workflow continuation.
|
530
|
+
Determines the next step in the R2D workflow based on current state.
|
531
|
+
"""
|
532
|
+
self.logger.debug(
|
533
|
+
f"Supervisor workflow routing. State: {state.get('final_result')}, error: {state.get('error_message')}"
|
534
|
+
)
|
535
|
+
|
536
|
+
# If there's an error, route to issue creation
|
537
|
+
if state.get("error_message"):
|
538
|
+
self.logger.warning(
|
539
|
+
f"Error detected, routing to issue creation: {state['error_message']}"
|
540
|
+
)
|
541
|
+
return "issue_create_node"
|
542
|
+
|
543
|
+
final_result = state.get("final_result", "")
|
544
|
+
|
545
|
+
# Sequential workflow: clone → stack_detect → terraform → end (removed branch_create)
|
546
|
+
if final_result == "route_to_stack_detect":
|
547
|
+
return "stack_detect_node"
|
548
|
+
elif final_result == "route_to_terraform":
|
549
|
+
return "terraform_workflow_node"
|
550
|
+
elif final_result == "route_to_issue":
|
551
|
+
return "issue_create_node"
|
552
|
+
else:
|
553
|
+
# Default: workflow complete
|
554
|
+
return END
|
555
|
+
|
556
|
+
# --- Tool Nodes: Use specialized agents with their natural tools ---
|
557
|
+
|
558
|
+
def _clone_repo_node(self, state: SupervisorAgentState):
|
559
|
+
"""Clone repository using GitAgent."""
|
560
|
+
try:
|
561
|
+
self.logger.info(f"Cloning repository: {state['repo_url']}")
|
562
|
+
|
563
|
+
git_result: GitAgentOutput = self.git_agent.run(
|
564
|
+
GitAgentInput(
|
565
|
+
query=f"clone repository {state['repo_url']}",
|
566
|
+
thread_id=state.get("thread_id"),
|
567
|
+
)
|
568
|
+
)
|
569
|
+
|
570
|
+
if git_result.error_message:
|
571
|
+
self.logger.error(
|
572
|
+
f"Repository cloning failed: {git_result.error_message}"
|
573
|
+
)
|
574
|
+
return {
|
575
|
+
"final_result": f"Repository cloning failed: {git_result.error_message}",
|
576
|
+
"error_message": git_result.error_message,
|
577
|
+
"operation_type": "clone_error",
|
578
|
+
}
|
579
|
+
|
580
|
+
# Update state with repo path and continue to stack detection
|
581
|
+
self.logger.info(
|
582
|
+
f"Repository cloned successfully to: {git_result.repo_path}"
|
583
|
+
)
|
584
|
+
return {
|
585
|
+
"repo_path": git_result.repo_path,
|
586
|
+
"final_result": "route_to_stack_detect", # Continue workflow
|
587
|
+
"operation_type": "clone_success",
|
588
|
+
"error_message": None,
|
589
|
+
}
|
590
|
+
|
591
|
+
except Exception as e:
|
592
|
+
self.logger.error(f"Error in clone repo node: {e}")
|
593
|
+
return {
|
594
|
+
"final_result": f"Clone operation failed: {str(e)}",
|
595
|
+
"error_message": str(e),
|
596
|
+
"operation_type": "clone_error",
|
597
|
+
}
|
598
|
+
|
599
|
+
def _stack_detect_node(self, state: SupervisorAgentState):
|
600
|
+
"""Detect infrastructure stack using enhanced detection logic."""
|
601
|
+
try:
|
602
|
+
repo_path = state.get("repo_path")
|
603
|
+
if not repo_path:
|
604
|
+
return {
|
605
|
+
"final_result": "No repository path available for stack detection",
|
606
|
+
"error_message": "Missing repo_path",
|
607
|
+
"operation_type": "stack_detect_error",
|
608
|
+
}
|
609
|
+
|
610
|
+
self.logger.info(f"Detecting infrastructure stack in: {repo_path}")
|
611
|
+
|
612
|
+
stack_detected = detect_stack_files(repo_path, self.shell_agent)
|
613
|
+
histogram = build_stack_histogram(repo_path, self.shell_agent)
|
614
|
+
self.logger.info(
|
615
|
+
f"Stack detection completed: {stack_detected}, histogram: {histogram}"
|
616
|
+
)
|
617
|
+
|
618
|
+
if route_on_stack(histogram):
|
619
|
+
unsupported = [k for k, v in histogram.items() if v < STACK_SUPPORT_THRESHOLD]
|
620
|
+
stack = unsupported[0] if unsupported else "unknown"
|
621
|
+
issue_title = f"Unsupported: {stack}"
|
622
|
+
issue_body = (
|
623
|
+
f"Automated detection flagged unsupported stack {stack}. "
|
624
|
+
f"Histogram: {histogram}. cc @github-copilot"
|
625
|
+
)
|
626
|
+
|
627
|
+
issue_result = self.git_agent.run(
|
628
|
+
GitAgentInput(
|
629
|
+
query=f"open issue {issue_title} for repository {state['repo_url']}: {issue_body}",
|
630
|
+
thread_id=state.get("thread_id"),
|
631
|
+
)
|
632
|
+
)
|
633
|
+
|
634
|
+
issues_opened = 0
|
635
|
+
error_message = None
|
636
|
+
final_result = f"Unsupported stack detected: {stack}"
|
637
|
+
if issue_result.error_message:
|
638
|
+
error_message = issue_result.error_message
|
639
|
+
final_result += f" - Issue creation failed: {issue_result.error_message}"
|
640
|
+
else:
|
641
|
+
issues_opened = 1
|
642
|
+
final_result += f" - Issue created: {issue_result.result}"
|
643
|
+
|
644
|
+
return {
|
645
|
+
"stack_detected": stack_detected,
|
646
|
+
"final_result": final_result,
|
647
|
+
"operation_type": "unsupported_stack",
|
648
|
+
"error_message": error_message,
|
649
|
+
"issues_opened": issues_opened,
|
650
|
+
"unsupported": True,
|
651
|
+
}
|
652
|
+
|
653
|
+
return {
|
654
|
+
"stack_detected": stack_detected,
|
655
|
+
"final_result": "route_to_terraform", # Skip branch creation, go directly to terraform
|
656
|
+
"operation_type": "stack_detect_success",
|
657
|
+
"error_message": None,
|
658
|
+
}
|
659
|
+
|
660
|
+
except Exception as e:
|
661
|
+
self.logger.error(f"Error in stack detection node: {e}")
|
662
|
+
return {
|
663
|
+
"final_result": f"Stack detection failed: {str(e)}",
|
664
|
+
"error_message": str(e),
|
665
|
+
"operation_type": "stack_detect_error",
|
666
|
+
}
|
667
|
+
|
668
|
+
def _terraform_workflow_node(self, state: SupervisorAgentState):
|
669
|
+
"""Execute Terraform workflow using TerraformAgent."""
|
670
|
+
try:
|
671
|
+
repo_path = state.get("repo_path")
|
672
|
+
stack_detected = state.get("stack_detected", {})
|
673
|
+
|
674
|
+
if not repo_path:
|
675
|
+
return {
|
676
|
+
"final_result": "No repository path available for Terraform workflow",
|
677
|
+
"error_message": "Missing repo_path",
|
678
|
+
"operation_type": "terraform_error",
|
679
|
+
}
|
680
|
+
|
681
|
+
# Enhanced Terraform workflow if Terraform files detected
|
682
|
+
if stack_detected.get("*.tf", 0) > 0:
|
683
|
+
self.logger.info(
|
684
|
+
f"Found {stack_detected['*.tf']} Terraform files, running enhanced workflow"
|
685
|
+
)
|
686
|
+
tf_result = self._run_enhanced_terraform_workflow(
|
687
|
+
repo_path, state.get("thread_id")
|
688
|
+
)
|
689
|
+
else:
|
690
|
+
self.logger.info("No Terraform files detected, running basic plan")
|
691
|
+
tf_result: TerraformAgentOutput = self.terraform_agent.run(
|
692
|
+
TerraformAgentInput(
|
693
|
+
query=f"terraform plan in {repo_path}",
|
694
|
+
thread_id=state.get("thread_id"),
|
695
|
+
)
|
696
|
+
)
|
697
|
+
|
698
|
+
if tf_result.error_message:
|
699
|
+
self.logger.error(
|
700
|
+
f"Terraform workflow failed: {tf_result.error_message}"
|
701
|
+
)
|
702
|
+
|
703
|
+
# If authentication is missing, request token and retry
|
704
|
+
if tf_result.error_tags and "needs_pat" in tf_result.error_tags:
|
705
|
+
from .pat_loop import request_and_wait_for_pat
|
706
|
+
|
707
|
+
if request_and_wait_for_pat(state["repo_url"], self.git_agent, poll_interval=5, timeout=60):
|
708
|
+
tf_result = self._run_enhanced_terraform_workflow(
|
709
|
+
repo_path, state.get("thread_id")
|
710
|
+
)
|
711
|
+
if not tf_result.error_message:
|
712
|
+
return {
|
713
|
+
"terraform_summary": tf_result.result,
|
714
|
+
"final_result": "R2D workflow completed successfully",
|
715
|
+
"operation_type": "terraform_success",
|
716
|
+
"error_message": None,
|
717
|
+
}
|
718
|
+
|
719
|
+
return {
|
720
|
+
"final_result": "route_to_issue", # Route to issue creation
|
721
|
+
"terraform_summary": tf_result.result,
|
722
|
+
"error_message": tf_result.error_message,
|
723
|
+
"operation_type": "terraform_error",
|
724
|
+
}
|
725
|
+
|
726
|
+
self.logger.info("Terraform workflow completed successfully")
|
727
|
+
return {
|
728
|
+
"terraform_summary": tf_result.result,
|
729
|
+
"final_result": "R2D workflow completed successfully",
|
730
|
+
"operation_type": "terraform_success",
|
731
|
+
"error_message": None,
|
732
|
+
}
|
733
|
+
|
734
|
+
except Exception as e:
|
735
|
+
self.logger.error(f"Error in Terraform workflow node: {e}")
|
736
|
+
return {
|
737
|
+
"final_result": f"Terraform workflow failed: {str(e)}",
|
738
|
+
"error_message": str(e),
|
739
|
+
"operation_type": "terraform_error",
|
740
|
+
}
|
741
|
+
|
742
|
+
def _issue_create_node(self, state: SupervisorAgentState):
|
743
|
+
"""Create GitHub issue using GitAgent with organic title generation and clean error formatting."""
|
744
|
+
try:
|
745
|
+
|
746
|
+
repo_url = state['repo_url']
|
747
|
+
branch_name = state.get('branch_name', 'unknown')
|
748
|
+
stack_detected = state.get('stack_detected', {})
|
749
|
+
error_message = state.get('error_message', 'Unknown error')
|
750
|
+
dry_run = state.get('dry_run', False)
|
751
|
+
|
752
|
+
|
753
|
+
self.logger.info("Creating GitHub issue for R2D workflow error")
|
754
|
+
|
755
|
+
# Import text utilities for organic title generation and ANSI cleanup
|
756
|
+
from diagram_to_iac.tools.text_utils import (
|
757
|
+
generate_organic_issue_title,
|
758
|
+
enhance_error_message_for_issue,
|
759
|
+
create_issue_metadata_section,
|
760
|
+
)
|
761
|
+
|
762
|
+
# Determine error type from message for better title generation
|
763
|
+
error_type = "unknown"
|
764
|
+
if "terraform init" in error_message.lower():
|
765
|
+
error_type = "terraform_init"
|
766
|
+
elif "terraform plan" in error_message.lower():
|
767
|
+
error_type = "terraform_plan"
|
768
|
+
elif "terraform apply" in error_message.lower():
|
769
|
+
error_type = "terraform_apply"
|
770
|
+
elif "auth" in error_message.lower() or "missing_terraform_token" in error_message.lower() or "error_missing_terraform_token" in error_message.lower():
|
771
|
+
error_type = "auth_failed"
|
772
|
+
elif "api key" in error_message.lower() or "401" in error_message.lower():
|
773
|
+
error_type = "api_key_error"
|
774
|
+
elif (
|
775
|
+
"llm error" in error_message.lower()
|
776
|
+
or "supervisoragent llm error" in error_message.lower()
|
777
|
+
):
|
778
|
+
error_type = "llm_error"
|
779
|
+
elif (
|
780
|
+
"network" in error_message.lower()
|
781
|
+
or "connection" in error_message.lower()
|
782
|
+
):
|
783
|
+
error_type = "network_error"
|
784
|
+
elif "timeout" in error_message.lower():
|
785
|
+
error_type = "timeout_error"
|
786
|
+
elif (
|
787
|
+
"permission" in error_message.lower()
|
788
|
+
or "forbidden" in error_message.lower()
|
789
|
+
):
|
790
|
+
error_type = "permission_error"
|
791
|
+
elif "planner error" in error_message.lower():
|
792
|
+
error_type = "planner_error"
|
793
|
+
elif "workflow error" in error_message.lower():
|
794
|
+
error_type = "workflow_error"
|
795
|
+
|
796
|
+
# Create context for organic title generation
|
797
|
+
error_context = {
|
798
|
+
"error_type": error_type,
|
799
|
+
"stack_detected": stack_detected,
|
800
|
+
"error_message": error_message,
|
801
|
+
"repo_url": repo_url,
|
802
|
+
"branch_name": branch_name,
|
803
|
+
}
|
804
|
+
|
805
|
+
# Generate organic, thoughtful issue title
|
806
|
+
try:
|
807
|
+
issue_title_final = generate_organic_issue_title(error_context)
|
808
|
+
except Exception as e:
|
809
|
+
self.logger.warning(f"Failed to generate organic title: {e}")
|
810
|
+
issue_title_final = f"R2D Workflow Error in {repo_url}"
|
811
|
+
|
812
|
+
# Default body in case text utils fail
|
813
|
+
issue_body_final = f"An error occurred: {error_message}\n\nContext: {error_context.get('error_type', 'N/A')}"
|
814
|
+
|
815
|
+
# Create enhanced issue body with metadata and clean error formatting
|
816
|
+
try:
|
817
|
+
metadata_section = create_issue_metadata_section(error_context)
|
818
|
+
enhanced_error = enhance_error_message_for_issue(
|
819
|
+
error_message, error_context
|
820
|
+
)
|
821
|
+
issue_body = f"{metadata_section}{enhanced_error}"
|
822
|
+
except Exception as e:
|
823
|
+
self.logger.warning(f"Failed to enhance issue body: {e}")
|
824
|
+
issue_body = issue_body_final
|
825
|
+
|
826
|
+
# Get existing issue ID for deduplication
|
827
|
+
existing_id = self._get_existing_issue_id(repo_url, error_type)
|
828
|
+
|
829
|
+
if dry_run:
|
830
|
+
if self.demonstrator:
|
831
|
+
should_proceed = self.demonstrator.show_issue(issue_title_final, issue_body)
|
832
|
+
|
833
|
+
if should_proceed:
|
834
|
+
# User chose to proceed with issue creation
|
835
|
+
self.logger.info("User chose to proceed with issue creation in dry-run mode")
|
836
|
+
# Fall through to create the actual issue (continue with normal flow below)
|
837
|
+
else:
|
838
|
+
# User chose not to proceed, end dry-run
|
839
|
+
self.logger.info("User chose not to proceed, ending dry-run")
|
840
|
+
return {
|
841
|
+
"final_result": "DRY RUN: User chose not to proceed with issue creation",
|
842
|
+
"issues_opened": 0,
|
843
|
+
"operation_type": "dry_run_aborted",
|
844
|
+
"error_message": None,
|
845
|
+
}
|
846
|
+
|
847
|
+
# Delegate to DemonstratorAgent for intelligent interactive dry-run
|
848
|
+
self.logger.info("Delegating to DemonstratorAgent for interactive dry-run")
|
849
|
+
|
850
|
+
from diagram_to_iac.agents.demonstrator_langgraph import DemonstratorAgentInput
|
851
|
+
|
852
|
+
demo_result = self.demonstrator_agent.run(
|
853
|
+
DemonstratorAgentInput(
|
854
|
+
query=f"Demonstrate error: {error_type}",
|
855
|
+
error_type=error_type,
|
856
|
+
error_message=error_message,
|
857
|
+
repo_url=repo_url,
|
858
|
+
branch_name=branch_name,
|
859
|
+
stack_detected=stack_detected,
|
860
|
+
issue_title=issue_title_final,
|
861
|
+
issue_body=issue_body,
|
862
|
+
existing_issue_id=existing_id,
|
863
|
+
thread_id=state.get("thread_id"),
|
864
|
+
)
|
865
|
+
)
|
866
|
+
|
867
|
+
# Return the demonstration result and exit early
|
868
|
+
return {
|
869
|
+
"final_result": demo_result["result"],
|
870
|
+
"issues_opened": 1 if demo_result["issue_created"] else 0,
|
871
|
+
"operation_type": f"demo_{demo_result['action_taken']}",
|
872
|
+
"error_message": demo_result.get("error_message"),
|
873
|
+
}
|
874
|
+
|
875
|
+
# Normal non-dry-run issue creation (only executed when dry_run=False)
|
876
|
+
|
877
|
+
|
878
|
+
|
879
|
+
|
880
|
+
issue_result = self.git_agent.run(
|
881
|
+
GitAgentInput(
|
882
|
+
query=f"open issue {issue_title_final} for repository {repo_url}: {issue_body}",
|
883
|
+
issue_id=existing_id,
|
884
|
+
)
|
885
|
+
)
|
886
|
+
|
887
|
+
if issue_result.error_message:
|
888
|
+
self.logger.error(
|
889
|
+
f"Issue creation failed: {issue_result.error_message}"
|
890
|
+
)
|
891
|
+
return {
|
892
|
+
"final_result": f"Issue creation failed: {issue_result.error_message}",
|
893
|
+
"issues_opened": 0,
|
894
|
+
"operation_type": "issue_error",
|
895
|
+
}
|
896
|
+
|
897
|
+
|
898
|
+
if existing_id is None:
|
899
|
+
new_id = self._parse_issue_number(issue_result.result)
|
900
|
+
if new_id is not None:
|
901
|
+
self._record_issue_id(repo_url, error_type, new_id)
|
902
|
+
|
903
|
+
self.logger.info("GitHub issue created successfully")
|
904
|
+
return {
|
905
|
+
"final_result": f"R2D workflow failed, issue created: {issue_result.result}",
|
906
|
+
"issues_opened": 1,
|
907
|
+
"operation_type": "issue_success",
|
908
|
+
"error_message": None,
|
909
|
+
}
|
910
|
+
|
911
|
+
except Exception as e:
|
912
|
+
self.logger.error(f"Error in issue creation node: {e}")
|
913
|
+
return {
|
914
|
+
"final_result": f"Issue creation failed: {str(e)}",
|
915
|
+
"issues_opened": 0,
|
916
|
+
"operation_type": "issue_error",
|
917
|
+
}
|
918
|
+
|
919
|
+
def _handle_interactive_dry_run(self, issue_title: str, issue_body: str, repo_url: str, existing_id: Optional[int], error_type: str) -> dict:
|
920
|
+
"""
|
921
|
+
Handle intelligent interactive dry-run mode with error-specific guidance and retry capabilities.
|
922
|
+
Analyzes the specific error and provides actionable steps to fix it.
|
923
|
+
"""
|
924
|
+
# Get the original error context from the state
|
925
|
+
error_message = getattr(self, '_current_error_message', 'Unknown error')
|
926
|
+
|
927
|
+
print("\n" + "="*80)
|
928
|
+
print("🔍 INTELLIGENT DRY RUN: R2D Workflow Error Analysis")
|
929
|
+
print("="*80)
|
930
|
+
|
931
|
+
print(f"\n📍 **Repository:** {repo_url}")
|
932
|
+
print(f"🏷️ **Error Type:** {error_type}")
|
933
|
+
if existing_id:
|
934
|
+
print(f"🔗 **Existing Issue:** Found issue #{existing_id} (would update)")
|
935
|
+
else:
|
936
|
+
print(f"🆕 **New Issue:** Would create new issue")
|
937
|
+
|
938
|
+
# Intelligent error analysis and guidance
|
939
|
+
error_analysis = self._analyze_error_for_user_guidance(error_type, error_message)
|
940
|
+
|
941
|
+
print(f"\n🧠 **Error Analysis:**")
|
942
|
+
print(f" {error_analysis['description']}")
|
943
|
+
|
944
|
+
if error_analysis['fixable']:
|
945
|
+
print(f"\n✅ **Good News:** This error can potentially be fixed!")
|
946
|
+
print(f" {error_analysis['fix_guidance']}")
|
947
|
+
|
948
|
+
if error_analysis['required_inputs']:
|
949
|
+
print(f"\n📝 **Required Information:**")
|
950
|
+
for req in error_analysis['required_inputs']:
|
951
|
+
print(f" • {req}")
|
952
|
+
else:
|
953
|
+
print(f"\n❌ **This error requires manual intervention:**")
|
954
|
+
print(f" {error_analysis['manual_steps']}")
|
955
|
+
|
956
|
+
print(f"\n📝 **Proposed Issue Title:**")
|
957
|
+
print(f" {issue_title}")
|
958
|
+
|
959
|
+
print("\n" + "="*80)
|
960
|
+
print("🤔 What would you like to do?")
|
961
|
+
print("="*80)
|
962
|
+
|
963
|
+
if error_analysis['fixable']:
|
964
|
+
print("1. 🔧 Fix - Provide missing information and retry")
|
965
|
+
print("2. 🚀 Create Issue - Log this error as a GitHub issue")
|
966
|
+
print("3. 📋 Details - Show full error details and proposed issue")
|
967
|
+
print("4. ❌ Abort - Skip and end workflow")
|
968
|
+
else:
|
969
|
+
print("1. 🚀 Create Issue - Log this error as a GitHub issue")
|
970
|
+
print("2. 📋 Details - Show full error details and proposed issue")
|
971
|
+
print("3. ❌ Abort - Skip and end workflow")
|
972
|
+
|
973
|
+
while True:
|
974
|
+
try:
|
975
|
+
if error_analysis['fixable']:
|
976
|
+
choice = input("\nEnter your choice (1-4): ").strip()
|
977
|
+
max_choice = 4
|
978
|
+
else:
|
979
|
+
choice = input("\nEnter your choice (1-3): ").strip()
|
980
|
+
max_choice = 3
|
981
|
+
|
982
|
+
if choice == "1":
|
983
|
+
if error_analysis['fixable']:
|
984
|
+
print("\n🔧 Let's fix this error together!")
|
985
|
+
return self._attempt_error_fix(error_type, error_analysis, repo_url)
|
986
|
+
else:
|
987
|
+
print("\n🚀 Creating GitHub issue...")
|
988
|
+
return self._proceed_with_issue_creation(issue_title, issue_body, repo_url, existing_id, error_type)
|
989
|
+
|
990
|
+
elif choice == "2":
|
991
|
+
if error_analysis['fixable']:
|
992
|
+
print("\n🚀 Creating GitHub issue...")
|
993
|
+
return self._proceed_with_issue_creation(issue_title, issue_body, repo_url, existing_id, error_type)
|
994
|
+
else:
|
995
|
+
print(f"\n📊 **Full Error Details:**")
|
996
|
+
print(f" Raw Error: {error_message}")
|
997
|
+
print(f"\n📄 **Proposed Issue Body:**")
|
998
|
+
print(" " + "\n ".join(issue_body.split("\n")))
|
999
|
+
print(f"\n🔄 Returning to menu...")
|
1000
|
+
continue
|
1001
|
+
|
1002
|
+
elif choice == "3":
|
1003
|
+
if error_analysis['fixable']:
|
1004
|
+
print(f"\n📊 **Full Error Details:**")
|
1005
|
+
print(f" Raw Error: {error_message}")
|
1006
|
+
print(f"\n📄 **Proposed Issue Body:**")
|
1007
|
+
print(" " + "\n ".join(issue_body.split("\n")))
|
1008
|
+
print(f"\n🔄 Returning to menu...")
|
1009
|
+
continue
|
1010
|
+
else:
|
1011
|
+
print("\n❌ User chose to abort. Skipping issue creation.")
|
1012
|
+
return {
|
1013
|
+
"final_result": "User aborted: workflow ended",
|
1014
|
+
"issues_opened": 0,
|
1015
|
+
"operation_type": "user_abort",
|
1016
|
+
"error_message": None,
|
1017
|
+
}
|
1018
|
+
|
1019
|
+
elif choice == "4" and error_analysis['fixable']:
|
1020
|
+
print("\n❌ User chose to abort. Skipping issue creation.")
|
1021
|
+
return {
|
1022
|
+
"final_result": "User aborted: workflow ended",
|
1023
|
+
"issues_opened": 0,
|
1024
|
+
"operation_type": "user_abort",
|
1025
|
+
"error_message": None,
|
1026
|
+
}
|
1027
|
+
|
1028
|
+
else:
|
1029
|
+
print(f"❓ Invalid choice '{choice}'. Please enter a valid option.")
|
1030
|
+
continue
|
1031
|
+
|
1032
|
+
except (KeyboardInterrupt, EOFError):
|
1033
|
+
print(f"\n\n⚠️ User interrupted! Aborting workflow.")
|
1034
|
+
return {
|
1035
|
+
"final_result": "User interrupted: workflow aborted",
|
1036
|
+
"issues_opened": 0,
|
1037
|
+
"operation_type": "user_interrupt",
|
1038
|
+
"error_message": None,
|
1039
|
+
}
|
1040
|
+
|
1041
|
+
def _proceed_with_issue_creation(self, issue_title: str, issue_body: str, repo_url: str, existing_id: Optional[int], error_type: str) -> dict:
|
1042
|
+
"""
|
1043
|
+
Proceed with actual GitHub issue creation after user confirmation in dry-run mode.
|
1044
|
+
"""
|
1045
|
+
try:
|
1046
|
+
issue_result = self.git_agent.run(
|
1047
|
+
GitAgentInput(
|
1048
|
+
query=f"open issue {issue_title} for repository {repo_url}: {issue_body}",
|
1049
|
+
issue_id=existing_id,
|
1050
|
+
)
|
1051
|
+
)
|
1052
|
+
|
1053
|
+
if issue_result.error_message:
|
1054
|
+
self.logger.error(f"Issue creation failed: {issue_result.error_message}")
|
1055
|
+
return {
|
1056
|
+
"final_result": f"Issue creation failed: {issue_result.error_message}",
|
1057
|
+
"issues_opened": 0,
|
1058
|
+
"operation_type": "issue_error",
|
1059
|
+
}
|
1060
|
+
|
1061
|
+
# Track new issue ID for deduplication
|
1062
|
+
if existing_id is None:
|
1063
|
+
new_id = self._parse_issue_number(issue_result.result)
|
1064
|
+
if new_id is not None:
|
1065
|
+
self._record_issue_id(repo_url, error_type, new_id)
|
1066
|
+
|
1067
|
+
self.logger.info("GitHub issue created successfully")
|
1068
|
+
print(f"\n✅ Success! GitHub issue created: {issue_result.result}")
|
1069
|
+
|
1070
|
+
return {
|
1071
|
+
"final_result": f"R2D workflow failed, issue created: {issue_result.result}",
|
1072
|
+
"issues_opened": 1,
|
1073
|
+
"operation_type": "issue_success",
|
1074
|
+
"error_message": None,
|
1075
|
+
}
|
1076
|
+
|
1077
|
+
except Exception as e:
|
1078
|
+
self.logger.error(f"Error in issue creation: {e}")
|
1079
|
+
return {
|
1080
|
+
"final_result": f"Issue creation failed: {str(e)}",
|
1081
|
+
"issues_opened": 0,
|
1082
|
+
"operation_type": "issue_error",
|
1083
|
+
}
|
1084
|
+
|
1085
|
+
def _analyze_error_for_user_guidance(self, error_type: str, error_message: str) -> dict:
|
1086
|
+
"""
|
1087
|
+
Analyze the specific error and provide intelligent guidance for fixing it.
|
1088
|
+
Returns a dictionary with analysis results and actionable recommendations.
|
1089
|
+
"""
|
1090
|
+
analysis = {
|
1091
|
+
"description": "Unknown error occurred",
|
1092
|
+
"fixable": False,
|
1093
|
+
"fix_guidance": "",
|
1094
|
+
"required_inputs": [],
|
1095
|
+
"manual_steps": "Please check the logs and create a GitHub issue",
|
1096
|
+
"retry_method": None
|
1097
|
+
}
|
1098
|
+
|
1099
|
+
if error_type == "auth_failed" or "missing_terraform_token" in error_message.lower():
|
1100
|
+
analysis.update({
|
1101
|
+
"description": "Terraform Cloud authentication is missing. The TF_TOKEN environment variable is not set.",
|
1102
|
+
"fixable": True,
|
1103
|
+
"fix_guidance": "Terraform requires a valid token to authenticate with Terraform Cloud. You can get this token from your Terraform Cloud account.",
|
1104
|
+
"required_inputs": [
|
1105
|
+
"TF_TOKEN: Your Terraform Cloud API token",
|
1106
|
+
"Optional: TF_WORKSPACE: Terraform Cloud workspace name"
|
1107
|
+
],
|
1108
|
+
"retry_method": "terraform_auth_retry"
|
1109
|
+
})
|
1110
|
+
|
1111
|
+
elif error_type == "api_key_error" or "401" in error_message:
|
1112
|
+
analysis.update({
|
1113
|
+
"description": "API authentication failed. The API key might be missing or invalid.",
|
1114
|
+
"fixable": True,
|
1115
|
+
"fix_guidance": "The system needs valid API credentials to function properly.",
|
1116
|
+
"required_inputs": [
|
1117
|
+
"OPENAI_API_KEY: Your OpenAI API key (if using OpenAI)",
|
1118
|
+
"ANTHROPIC_API_KEY: Your Anthropic API key (if using Claude)",
|
1119
|
+
"GITHUB_TOKEN: Your GitHub Personal Access Token"
|
1120
|
+
],
|
1121
|
+
"retry_method": "api_key_retry"
|
1122
|
+
})
|
1123
|
+
|
1124
|
+
elif error_type == "terraform_init":
|
1125
|
+
if "backend" in error_message.lower():
|
1126
|
+
analysis.update({
|
1127
|
+
"description": "Terraform backend configuration issue. The backend might not be properly configured.",
|
1128
|
+
"fixable": True,
|
1129
|
+
"fix_guidance": "Terraform backend needs proper configuration or credentials.",
|
1130
|
+
"required_inputs": [
|
1131
|
+
"Backend configuration details",
|
1132
|
+
"Access credentials for the backend"
|
1133
|
+
],
|
1134
|
+
"retry_method": "terraform_backend_retry"
|
1135
|
+
})
|
1136
|
+
else:
|
1137
|
+
analysis.update({
|
1138
|
+
"description": "Terraform initialization failed for unknown reasons.",
|
1139
|
+
"fixable": False,
|
1140
|
+
"manual_steps": "Check Terraform configuration files, ensure providers are properly specified, and verify network connectivity."
|
1141
|
+
})
|
1142
|
+
|
1143
|
+
elif error_type == "network_error":
|
1144
|
+
analysis.update({
|
1145
|
+
"description": "Network connectivity issue. Cannot reach external services.",
|
1146
|
+
"fixable": True,
|
1147
|
+
"fix_guidance": "Check your internet connection and try again. You may also need to configure proxy settings.",
|
1148
|
+
"required_inputs": [
|
1149
|
+
"Confirm network connectivity",
|
1150
|
+
"Proxy settings (if behind a corporate firewall)"
|
1151
|
+
],
|
1152
|
+
"retry_method": "network_retry"
|
1153
|
+
})
|
1154
|
+
|
1155
|
+
elif error_type == "permission_error":
|
1156
|
+
analysis.update({
|
1157
|
+
"description": "Permission denied. The system lacks necessary permissions.",
|
1158
|
+
"fixable": False,
|
1159
|
+
"manual_steps": "Check file permissions, directory access rights, and ensure the process has necessary privileges."
|
1160
|
+
})
|
1161
|
+
|
1162
|
+
return analysis
|
1163
|
+
|
1164
|
+
def _attempt_error_fix(self, error_type: str, error_analysis: dict, repo_url: str) -> dict:
|
1165
|
+
"""
|
1166
|
+
Attempt to fix the error by collecting required information from the user
|
1167
|
+
and retrying the operation with the new configuration.
|
1168
|
+
"""
|
1169
|
+
print(f"\n🛠️ **Error Fix Mode: {error_type}**")
|
1170
|
+
print(f"📋 {error_analysis['fix_guidance']}")
|
1171
|
+
|
1172
|
+
# Collect required inputs from user
|
1173
|
+
user_inputs = {}
|
1174
|
+
for requirement in error_analysis['required_inputs']:
|
1175
|
+
key = requirement.split(':')[0].strip()
|
1176
|
+
description = requirement.split(':', 1)[1].strip() if ':' in requirement else requirement
|
1177
|
+
|
1178
|
+
print(f"\n📝 **{key}:**")
|
1179
|
+
print(f" {description}")
|
1180
|
+
|
1181
|
+
# Handle sensitive inputs (tokens/keys) differently
|
1182
|
+
if any(sensitive in key.lower() for sensitive in ['token', 'key', 'password']):
|
1183
|
+
value = input(f"Enter {key} (will be hidden): ").strip()
|
1184
|
+
if value:
|
1185
|
+
# Set environment variable
|
1186
|
+
os.environ[key] = value
|
1187
|
+
user_inputs[key] = "***HIDDEN***"
|
1188
|
+
print(f"✅ {key} has been set")
|
1189
|
+
else:
|
1190
|
+
print(f"⚠️ {key} was not provided")
|
1191
|
+
else:
|
1192
|
+
value = input(f"Enter {key}: ").strip()
|
1193
|
+
if value:
|
1194
|
+
user_inputs[key] = value
|
1195
|
+
print(f"✅ {key}: {value}")
|
1196
|
+
else:
|
1197
|
+
print(f"⚠️ {key} was not provided")
|
1198
|
+
|
1199
|
+
# Ask user if they want to retry with the new information
|
1200
|
+
print(f"\n🔄 **Ready to Retry**")
|
1201
|
+
print(f"📊 Collected information:")
|
1202
|
+
for key, value in user_inputs.items():
|
1203
|
+
print(f" • {key}: {value}")
|
1204
|
+
|
1205
|
+
retry_choice = input(f"\nWould you like to retry the operation with this information? (y/N): ").strip().lower()
|
1206
|
+
|
1207
|
+
if retry_choice in ['y', 'yes']:
|
1208
|
+
print(f"\n🚀 Retrying the operation...")
|
1209
|
+
return self._retry_operation_with_fixes(error_type, user_inputs, repo_url)
|
1210
|
+
else:
|
1211
|
+
print(f"\n❌ User chose not to retry. Creating GitHub issue instead...")
|
1212
|
+
# Fall back to issue creation
|
1213
|
+
return {
|
1214
|
+
"final_result": "User provided information but chose not to retry",
|
1215
|
+
"issues_opened": 0,
|
1216
|
+
"operation_type": "user_no_retry",
|
1217
|
+
"error_message": None,
|
1218
|
+
}
|
1219
|
+
|
1220
|
+
    def _retry_operation_with_fixes(self, error_type: str, user_inputs: dict, repo_url: str) -> dict:
        """Retry the failed operation with the user-provided fixes."""
        try:
            if error_type in ["auth_failed", "terraform_init"]:
                # For Terraform auth issues, retry the terraform init
                print("🔄 Retrying Terraform initialization with new credentials...")

                # Get the repo path from the current state (we need to store this better)
                # For now, assume it's in /workspace/<repo_name>
                # (removesuffix avoids mangling names that merely contain ".git")
                repo_name = repo_url.split('/')[-1].removesuffix('.git')
                repo_path = f"/workspace/{repo_name}"

                # Retry terraform init
                tf_result = self.terraform_agent.run(
                    TerraformAgentInput(
                        query=f"terraform init in {repo_path}",
                        thread_id=str(uuid.uuid4()),
                    )
                )

                if tf_result.error_message:
                    print(f"❌ Retry failed: {tf_result.error_message}")
                    return {
                        "final_result": f"Retry failed: {tf_result.error_message}",
                        "issues_opened": 0,
                        "operation_type": "retry_failed",
                        "error_message": tf_result.error_message,
                    }
                else:
                    print("✅ Success! Terraform init completed successfully.")
                    print("🎉 Continuing with Terraform workflow...")

                    # Continue with the full terraform workflow
                    return self._continue_terraform_workflow_after_fix(repo_path)

            elif error_type == "api_key_error":
                print("🔄 API credentials have been updated. The system should work better now.")
                return {
                    "final_result": "API credentials updated successfully",
                    "issues_opened": 0,
                    "operation_type": "credentials_fixed",
                    "error_message": None,
                }

            else:
                print(f"🚧 Retry logic for {error_type} is not yet implemented.")
                return {
                    "final_result": f"Fix attempted but retry logic for {error_type} not implemented",
                    "issues_opened": 0,
                    "operation_type": "fix_not_implemented",
                    "error_message": None,
                }

        except Exception as e:
            self.logger.error(f"Error during retry operation: {e}")
            return {
                "final_result": f"Retry operation failed: {str(e)}",
                "issues_opened": 0,
                "operation_type": "retry_error",
                "error_message": str(e),
            }

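    # NOTE (editor's illustration, not part of the released file): a minimal
    # sketch of how the retry helper above gets driven; the literal argument
    # values are hypothetical:
    #
    #     result = agent._retry_operation_with_fixes(
    #         error_type="terraform_init",
    #         user_inputs={"TFE_TOKEN": "***HIDDEN***"},
    #         repo_url="https://github.com/example/repo.git",
    #     )
    #     # result["operation_type"] will be "retry_failed", "credentials_fixed",
    #     # "fix_not_implemented", "retry_error", or whatever
    #     # _continue_terraform_workflow_after_fix returns.
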
    def _continue_terraform_workflow_after_fix(self, repo_path: str) -> dict:
        """Continue the Terraform workflow after a successful fix."""
        try:
            print("🚀 Continuing with Terraform plan...")

            # Run terraform plan
            plan_result = self.terraform_agent.run(
                TerraformAgentInput(
                    query=f"terraform plan in {repo_path}",
                    thread_id=str(uuid.uuid4()),
                )
            )

            if plan_result.error_message:
                print(f"⚠️ Terraform plan encountered issues: {plan_result.error_message}")
                return {
                    "final_result": f"Terraform init fixed, but plan failed: {plan_result.error_message}",
                    "issues_opened": 0,
                    "operation_type": "plan_failed_after_fix",
                    "error_message": plan_result.error_message,
                }
            else:
                print("✅ Terraform plan completed successfully!")
                print(f"📋 Plan summary: {plan_result.result}")

                return {
                    "final_result": f"🎉 R2D workflow completed successfully after fix! Plan result: {plan_result.result}",
                    "issues_opened": 0,
                    "operation_type": "workflow_completed_after_fix",
                    "error_message": None,
                    "success": True,
                }

        except Exception as e:
            self.logger.error(f"Error continuing workflow after fix: {e}")
            return {
                "final_result": f"Error continuing workflow after fix: {str(e)}",
                "issues_opened": 0,
                "operation_type": "continue_workflow_error",
                "error_message": str(e),
            }

    # --- Utility Methods (preserved from original implementation) ---

    def _generate_dynamic_branch_name(self) -> str:
        """Generate timestamp-based branch name."""
        timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
        return f"r2d-{timestamp}"

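    # NOTE (editor's illustration, not part of the released file): with the
    # "%Y%m%d-%H%M%S" format above, a call made at 2025-01-02 13:04:05 would
    # yield the branch name "r2d-20250102-130405".
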
    # --- Issue tracker helpers ---
    def _get_existing_issue_id(self, repo_url: str, error_type: str) -> Optional[int]:
        try:
            return self.issue_tracker.get_issue(repo_url, error_type)
        except Exception as e:
            self.logger.error(f"Issue tracker lookup failed: {e}")
            return None

    def _record_issue_id(self, repo_url: str, error_type: str, issue_id: int) -> None:
        try:
            self.issue_tracker.record_issue(repo_url, error_type, issue_id)
        except Exception as e:
            self.logger.error(f"Issue tracker update failed: {e}")

    def _parse_issue_number(self, text: str) -> Optional[int]:
        import re

        match = re.search(r'/issues/(\d+)', text)
        if match:
            try:
                return int(match.group(1))
            except ValueError:
                return None
        return None

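    # NOTE (editor's illustration, not part of the released file): the regex in
    # _parse_issue_number extracts the trailing issue number from a GitHub
    # issue URL, e.g.:
    #
    #     agent._parse_issue_number("https://github.com/example/repo/issues/42")
    #     # -> 42
    #     agent._parse_issue_number("no issue link here")
    #     # -> None
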
    def _detect_infrastructure_stack(self, repo_path: str) -> Dict[str, int]:
        """
        Detect infrastructure tooling in the repository.

        Returns:
            Dict mapping file patterns to counts (e.g. {"*.tf": 3, "*.yml": 2})
        """
        self.logger.info(f"Detecting infrastructure stack in {repo_path}")

        # Define patterns for infrastructure files
        stack_patterns = {
            "terraform": "*.tf",
            "ansible": "*.yml",
            "powershell": "*.ps1",
            "shell": "*.sh",
        }

        stack_detected = {}

        for stack_type, pattern in stack_patterns.items():
            try:
                # Try multiple approaches for file detection
                count = 0

                # Method 1: try the find command (most accurate), wrapped in bash
                try:
                    find_result = self.shell_agent.run(
                        ShellAgentInput(
                            command=f"bash -c \"find . -name '{pattern}' -type f | wc -l\"",
                            cwd=repo_path,
                        )
                    )

                    if find_result.exit_code == 0:
                        count = int(find_result.output.strip())
                        self.logger.debug(
                            f"Found {count} {stack_type} files using find command"
                        )
                    else:
                        raise RuntimeError(
                            f"Find command failed: {find_result.error_message}"
                        )

                except Exception as e:
                    self.logger.warning(f"Find command failed for {stack_type}: {e}")

                    # Method 2: fall back to ls with globbing, wrapped in bash
                    try:
                        ls_result = self.shell_agent.run(
                            ShellAgentInput(
                                command=f'bash -c "ls -1 {pattern} 2>/dev/null | wc -l || echo 0"',
                                cwd=repo_path,
                            )
                        )
                        if ls_result.exit_code == 0:
                            count = int(ls_result.output.strip())
                            self.logger.debug(
                                f"Found {count} {stack_type} files using ls fallback"
                            )
                        else:
                            # Method 3: final fallback using bash glob expansion
                            bash_result = self.shell_agent.run(
                                ShellAgentInput(
                                    command=f"bash -c 'shopt -s nullglob; files=({pattern}); echo ${{#files[@]}}'",
                                    cwd=repo_path,
                                )
                            )
                            if bash_result.exit_code == 0:
                                count = int(bash_result.output.strip())
                                self.logger.debug(
                                    f"Found {count} {stack_type} files using bash expansion"
                                )

                    except Exception as fallback_e:
                        self.logger.error(
                            f"All detection methods failed for {stack_type}: {fallback_e}"
                        )
                        count = 0

                # Store the result if files were found
                if count > 0:
                    stack_detected[pattern] = count
                    self.logger.info(f"✅ Found {count} {stack_type} files ({pattern})")
                else:
                    self.logger.debug(f"No {stack_type} files found")

            except Exception as e:
                self.logger.error(f"Error detecting {stack_type} files: {e}")

        self.logger.info(f"Stack detection completed: {stack_detected}")
        return stack_detected

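    # NOTE (editor's illustration, not part of the released file): the three
    # detection methods above are not strictly equivalent. `find` counts files
    # recursively, while the `ls` and bash-glob fallbacks only match the top
    # level of the repository, e.g. for *.tf:
    #
    #     bash -c "find . -name '*.tf' -type f | wc -l"                 # recursive
    #     bash -c "ls -1 *.tf 2>/dev/null | wc -l || echo 0"            # top level only
    #     bash -c 'shopt -s nullglob; files=(*.tf); echo ${#files[@]}'  # top level only
    #
    # so the fallbacks can undercount files in nested modules.
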
    def _run_enhanced_terraform_workflow(
        self, repo_path: str, thread_id: Optional[str]
    ) -> TerraformAgentOutput:
        """
        Run enhanced Terraform workflow with sophisticated features:
        - Terraform validation
        - Terraform init
        - Terraform plan with detailed output
        - Security scanning (placeholder)
        - Cost estimation (placeholder)
        """
        self.logger.info("Starting enhanced Terraform workflow")

        try:
            # Step 1: Terraform validation
            self.logger.info("Step 1: Terraform validation")
            validate_result = self.terraform_agent.run(
                TerraformAgentInput(
                    query=f"terraform validate in {repo_path}", thread_id=thread_id
                )
            )

            if validate_result.error_message:
                self.logger.error(
                    f"Terraform validation failed: {validate_result.error_message}"
                )
                return TerraformAgentOutput(
                    result="Terraform validation failed",
                    thread_id=thread_id or "unknown",
                    error_message=f"Validation failed: {validate_result.error_message}",
                    operation_type="validate",
                )

            # Step 2: Terraform init
            self.logger.info("Step 2: Terraform init")
            init_result = self.terraform_agent.run(
                TerraformAgentInput(
                    query=f"terraform init in {repo_path}", thread_id=thread_id
                )
            )

            if init_result.error_message:
                self.logger.error(f"Terraform init failed: {init_result.error_message}")
                return TerraformAgentOutput(
                    result="Terraform init failed",
                    thread_id=thread_id or "unknown",
                    error_message=f"Init failed: {init_result.error_message}",
                    operation_type="init",
                )

            # Step 3: Terraform plan with detailed output
            self.logger.info("Step 3: Terraform plan with detailed analysis")
            plan_result = self.terraform_agent.run(
                TerraformAgentInput(
                    query=f"terraform plan -detailed-exitcode -out=tfplan in {repo_path}",
                    thread_id=thread_id,
                )
            )

            # Step 4: Additional analysis (placeholder for future features)
            additional_insights = self._analyze_terraform_plan(repo_path)

            # Combine results
            enhanced_summary = f"""Enhanced Terraform Workflow Results:
✅ Validation: {validate_result.result}
✅ Init: {init_result.result}
📋 Plan: {plan_result.result}
🔍 Analysis: {additional_insights}"""

            if plan_result.error_message:
                return TerraformAgentOutput(
                    result=enhanced_summary,
                    thread_id=thread_id or "unknown",
                    error_message=plan_result.error_message,
                    operation_type="enhanced_plan",
                )

            return TerraformAgentOutput(
                result=enhanced_summary,
                thread_id=thread_id or "unknown",
                error_message=None,
                operation_type="enhanced_plan",
            )

        except Exception as e:
            self.logger.error(f"Enhanced Terraform workflow failed: {e}")
            return TerraformAgentOutput(
                result="Enhanced Terraform workflow failed",
                thread_id=thread_id or "unknown",
                error_message=str(e),
                operation_type="enhanced_workflow_error",
            )

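    # NOTE (editor's illustration, not part of the released file): two CLI
    # details worth knowing when reading the workflow above. `terraform plan
    # -detailed-exitcode` exits 0 for "no changes", 1 for errors, and 2 for
    # "succeeded with changes pending", so a non-zero exit is not necessarily a
    # failure. Also, `terraform validate` typically requires an initialized
    # working directory (modules and providers resolved), so running Step 1
    # before Step 2's init may fail on configurations that use modules unless
    # the agent performs its own init first.
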
    def _analyze_terraform_plan(self, repo_path: str) -> str:
        """
        Analyze Terraform plan for additional insights.
        This is a placeholder for future sophisticated features.
        """
        insights = []

        # Placeholder analysis features
        insights.append("Resource count analysis: Available in future release")
        insights.append("Security scanning: Available in future release")
        insights.append("Cost estimation: Available in future release")
        insights.append("Compliance checking: Available in future release")

        # Basic file structure analysis with fallback
        try:
            # Try find command first
            file_result = self.shell_agent.run(
                ShellAgentInput(
                    command="find . -name '*.tf' -exec basename {} \\; | sort | uniq -c",
                    cwd=repo_path,
                )
            )
            if file_result.exit_code == 0:
                insights.append(
                    f"Terraform files structure: {file_result.output.strip()}"
                )
            else:
                # Fallback to ls
                ls_result = self.shell_agent.run(
                    ShellAgentInput(
                        command="ls *.tf 2>/dev/null | wc -l || echo 0", cwd=repo_path
                    )
                )
                if ls_result.exit_code == 0:
                    count = ls_result.output.strip()
                    insights.append(f"Terraform files found: {count}")
        except Exception as e:
            insights.append(f"Could not analyze file structure: {e}")

        return " | ".join(insights)

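    # NOTE (editor's illustration, not part of the released file): the method
    # returns a single "|"-separated string, e.g. (file counts hypothetical):
    #
    #     "Resource count analysis: Available in future release | ... |
    #      Terraform files structure: 1 main.tf 2 variables.tf"
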
    # --- LangGraph State Machine ---

    def _build_graph(self):
        """
        Build and compile the LangGraph state machine.
        Creates nodes for planner and each workflow step, sets up organic routing.
        """
        graph_builder = StateGraph(SupervisorAgentState)

        # Add nodes
        graph_builder.add_node("planner_llm", self._planner_llm_node)
        graph_builder.add_node("clone_repo_node", self._clone_repo_node)
        graph_builder.add_node("stack_detect_node", self._stack_detect_node)
        graph_builder.add_node("terraform_workflow_node", self._terraform_workflow_node)
        graph_builder.add_node("issue_create_node", self._issue_create_node)

        # Set entry point
        graph_builder.set_entry_point("planner_llm")

        # Configure routing map for planner
        planner_routing_map = {
            "clone_repo_node": "clone_repo_node",
            "stack_detect_node": "stack_detect_node",
            "terraform_workflow_node": "terraform_workflow_node",
            "issue_create_node": "issue_create_node",
            END: END,
        }

        # Configure routing map for workflow continuation
        workflow_routing_map = {
            "stack_detect_node": "stack_detect_node",
            "terraform_workflow_node": "terraform_workflow_node",
            "issue_create_node": "issue_create_node",
            END: END,
        }

        # Add conditional edges from planner (initial routing)
        graph_builder.add_conditional_edges(
            "planner_llm", self._route_after_planner, planner_routing_map
        )

        # Add conditional edges from workflow nodes (sequential continuation)
        graph_builder.add_conditional_edges(
            "clone_repo_node", self._route_workflow_continuation, workflow_routing_map
        )

        graph_builder.add_conditional_edges(
            "stack_detect_node", self._route_workflow_continuation, workflow_routing_map
        )

        graph_builder.add_conditional_edges(
            "terraform_workflow_node",
            self._route_workflow_continuation,
            workflow_routing_map,
        )

        # Issue creation always ends the workflow
        graph_builder.add_edge("issue_create_node", END)

        # Compile with checkpointer
        return graph_builder.compile(checkpointer=self.checkpointer)

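    # NOTE (editor's illustration, not part of the released file): the compiled
    # state machine wires up as sketched below; every conditional edge can also
    # route to issue_create_node or short-circuit to END:
    #
    #     planner_llm --> clone_repo_node --> stack_detect_node
    #                                                |
    #                                                v
    #             issue_create_node <-- terraform_workflow_node
    #                     |
    #                     v
    #                    END
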
    # --- Main Run Method: Organic LangGraph Execution ---

    def run(self, agent_input: SupervisorAgentInput) -> SupervisorAgentOutput:
        """
        Execute R2D workflow using organic LangGraph state machine.
        The LLM brain decides routing between specialized agents.
        """
        repo_url = agent_input.repo_url
        # Normalize the repository URL for issue creation.
        # (str.rstrip(".git") would strip any trailing '.', 'g', 'i', or 't'
        # characters, so remove the suffix explicitly instead.)
        normalized_repo_url = repo_url.rstrip("/").removesuffix(".git")
        thread_id = agent_input.thread_id or str(uuid.uuid4())

        # Generate dynamic branch name if not provided
        branch_name = agent_input.branch_name or self._generate_dynamic_branch_name()

        # If initialization failed due to missing secrets, abort early
        if self.startup_error:
            self.logger.error(
                f"Cannot start workflow for {repo_url}: {self.startup_error}"
            )

            issues_opened = 0
            try:
                issue_result = self.git_agent.run(
                    GitAgentInput(
                        query=(
                            f"open issue for repository {normalized_repo_url}: 🚫 Missing token - {self.startup_error}"
                        )
                    )
                )
                if not issue_result.error_message:
                    issues_opened = 1
            except Exception as e:
                self.logger.error(
                    f"Failed to invoke GitAgent for missing token issue: {e}"
                )

            output = SupervisorAgentOutput(
                repo_url=repo_url,
                branch_created=False,
                branch_name=branch_name,
                stack_detected={},
                terraform_summary=None,
                unsupported=False,
                issues_opened=issues_opened,
                success=False,
                message=self.startup_error
                + (
                    " (GitHub issue created)"
                    if issues_opened
                    else " (GitHub issue creation also failed)"
                ),
            )
            log_event(
                "supervisor_agent_run_end",
                repo_url=repo_url,
                thread_id=thread_id,
                success=False,
                error=self.startup_error,
            )
            return output

        self.logger.info(f"Starting R2D workflow for {repo_url}, branch: {branch_name}")
        log_event(
            "supervisor_agent_run_start",
            repo_url=repo_url,
            branch_name=branch_name,
            thread_id=thread_id,
        )

        # Create initial state
        initial_state: SupervisorAgentState = {
            "input_message": HumanMessage(
                content=f"Execute R2D workflow for repository {repo_url}"
            ),
            "repo_url": repo_url,
            "branch_name": branch_name,
            "thread_id": thread_id,
            "dry_run": agent_input.dry_run,
            "repo_path": None,
            "stack_detected": {},
            "branch_created": False,
            "final_result": "",
            "operation_type": "",
            "terraform_summary": None,
            "issues_opened": 0,
            "unsupported": False,
            "error_message": None,
            "tool_output": [],
        }

        try:
            # Execute the LangGraph workflow
            self.logger.info("Executing organic LangGraph R2D workflow")
            final_state = self.runnable.invoke(
                initial_state, {"configurable": {"thread_id": thread_id}}
            )

            # Extract results from final state
            operation_type = final_state.get("operation_type", "")
            issues_opened = final_state.get("issues_opened", 0)

            # Determine success: the workflow succeeds only if Terraform completes
            # without issues. If issues were opened, something failed along the way.
            success = (
                final_state.get("error_message") is None
                and issues_opened == 0
                and operation_type != "issue_success"
            )

            message = final_state.get("final_result", "R2D workflow completed")

            output = SupervisorAgentOutput(
                repo_url=repo_url,
                branch_created=final_state.get("branch_created", False),
                branch_name=final_state.get("branch_name", branch_name),
                stack_detected=final_state.get("stack_detected", {}),
                terraform_summary=final_state.get("terraform_summary"),
                unsupported=final_state.get("unsupported", False),
                issues_opened=issues_opened,
                success=success,
                message=message,
            )

            log_event(
                "supervisor_agent_run_end",
                repo_url=repo_url,
                thread_id=thread_id,
                success=success,
            )

            return output

        except Exception as e:
            self.logger.error(f"R2D workflow execution failed: {e}", exc_info=True)

            # Enhanced error handling: automatically create a GitHub issue for ANY workflow failure
            issues_opened = 0
            try:
                self.logger.info(
                    "Attempting to create GitHub issue for workflow execution failure"
                )

                # Import text utilities for error handling
                from diagram_to_iac.tools.text_utils import (
                    generate_organic_issue_title,
                    enhance_error_message_for_issue,
                    create_issue_metadata_section,
                )

                # Determine error type for better title generation
                error_message = str(e)
                error_type = "workflow_error"
                if "api key" in error_message.lower() or "401" in error_message.lower():
                    error_type = "api_key_error"
                elif "llm" in error_message.lower() or "openai" in error_message.lower():
                    error_type = "llm_error"
                elif (
                    "network" in error_message.lower()
                    or "connection" in error_message.lower()
                ):
                    error_type = "network_error"
                elif "timeout" in error_message.lower():
                    error_type = "timeout_error"
                elif (
                    "permission" in error_message.lower()
                    or "forbidden" in error_message.lower()
                ):
                    error_type = "permission_error"

                # Create context for organic title generation
                error_context = {
                    "error_type": error_type,
                    "stack_detected": {},
                    "error_message": error_message,
                    "repo_url": repo_url,
                    "branch_name": branch_name,
                }

                # Generate organic, thoughtful issue title
                issue_title = generate_organic_issue_title(error_context)

                # Create enhanced issue body with metadata and clean error formatting
                metadata_section = create_issue_metadata_section(error_context)
                enhanced_error = enhance_error_message_for_issue(error_message, error_context)
                issue_body = (
                    f"{metadata_section}{enhanced_error}\n\n"
                    "**Workflow Stage:** Initial workflow execution\n"
                    "**Error Type:** Critical system error preventing R2D workflow startup"
                )

                if agent_input.dry_run:
                    if self.demonstrator:
                        self.demonstrator.show_issue(issue_title, issue_body)
                    else:
                        self.logger.info(f"DRY RUN: GitHub issue processing for: Title: {issue_title}")
                        print("=== DRY RUN: GitHub issue would be created/checked ===")
                        print(f"Title: {issue_title}")
                        print(f"Body:\n{issue_body}")
                else:
                    # Create or update GitHub issue for workflow failure
                    existing_id = self._get_existing_issue_id(repo_url, error_type)
                    git_input = GitAgentInput(
                        query=f"open issue {issue_title} for repository {repo_url}: {issue_body}",
                        issue_id=existing_id,
                    )
                    issue_result = self.git_agent.run(git_input)
                    if not issue_result.error_message:
                        issues_opened = 1
                        # Record new issue id if created
                        if existing_id is None:
                            new_id = self._parse_issue_number(issue_result.result)
                            if new_id is not None:
                                self._record_issue_id(repo_url, error_type, new_id)

                        self.logger.info(f"Successfully created GitHub issue for workflow failure: {issue_result.result}")
                    else:
                        self.logger.warning(f"Failed to create GitHub issue for workflow failure: {issue_result.error_message}")

            except Exception as issue_error:
                self.logger.error(
                    f"Failed to create GitHub issue for workflow failure: {issue_error}"
                )

            output = SupervisorAgentOutput(
                repo_url=repo_url,
                branch_created=False,
                branch_name=branch_name,
                stack_detected={},
                terraform_summary=None,
                unsupported=False,  # Not marked as unsupported: this is a system error
                issues_opened=issues_opened,
                success=False,
                message=f"Workflow execution failed: {str(e)}"
                + (
                    " (GitHub issue created)"
                    if issues_opened > 0
                    else " (GitHub issue creation also failed)"
                ),
            )
            log_event(
                "supervisor_agent_run_end",
                repo_url=repo_url,
                thread_id=thread_id,
                success=False,
                error=str(e),
            )
            return output

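    # NOTE (editor's illustration, not part of the released file): a minimal
    # sketch of driving the supervisor end to end, assuming the agent constructs
    # with defaults in your environment; the repository URL is hypothetical:
    #
    #     agent = SupervisorAgent()
    #     result = agent.run(
    #         SupervisorAgentInput(
    #             repo_url="https://github.com/example/infra-repo",
    #             dry_run=True,
    #         )
    #     )
    #     print(result.success, result.issues_opened, result.message)
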
    # --- Memory and Conversation Management ---

    def get_conversation_history(self) -> List[Dict[str, any]]:
        """Get conversation history from memory."""
        try:
            return self.memory.get_conversation_history()
        except Exception as e:
            self.logger.error(f"Failed to get conversation history: {e}")
            return []

    def get_memory_state(self) -> Dict[str, any]:
        """Get current memory state."""
        try:
            return {
                "conversation_history": self.get_conversation_history(),
                "memory_type": type(self.memory).__name__,
            }
        except Exception as e:
            self.logger.error(f"Failed to get memory state: {e}")
            return {"error": str(e)}

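# NOTE (editor's illustration, not part of the released file): get_memory_state()
# reports the memory backend by class name; a typical payload might look like
# this (the class name here is hypothetical):
#
#     {"conversation_history": [...], "memory_type": "EnhancedMemory"}
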
def detect_stack_files(repo_path: str, shell_agent: ShellAgent) -> Dict[str, int]:
    """Detect basic stack files (*.tf and *.sh) in the given repository."""
    # Check if repo_path exists before proceeding
    if not os.path.exists(repo_path):
        raise RuntimeError(f"Repository path does not exist: {repo_path}")

    patterns = ["*.tf", "*.sh"]
    detected: Dict[str, int] = {}

    for pattern in patterns:
        count = 0
        try:
            result = shell_agent.run(
                ShellAgentInput(
                    command=f"bash -c \"find . -name '{pattern}' -type f | wc -l\"",
                    cwd=repo_path,
                )
            )
            if result.exit_code == 0:
                count = int(result.output.strip())
            else:
                raise RuntimeError(result.error_message or "find failed")
        except Exception:
            # Fallback to Python-based search
            for root, _, files in os.walk(repo_path):
                count += len(fnmatch.filter(files, pattern))

        if count > 0:
            detected[pattern] = count

    return detected
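
# NOTE (editor's illustration, not part of the released file): a minimal usage
# sketch, assuming ShellAgent constructs with defaults in your environment; the
# workspace path and counts are hypothetical:
#
#     counts = detect_stack_files("/workspace/infra-repo", ShellAgent())
#     # e.g. {"*.tf": 4, "*.sh": 1}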