diagram-to-iac 0.6.0__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. diagram_to_iac/__init__.py +10 -0
  2. diagram_to_iac/actions/__init__.py +7 -0
  3. diagram_to_iac/actions/git_entry.py +174 -0
  4. diagram_to_iac/actions/supervisor_entry.py +116 -0
  5. diagram_to_iac/actions/terraform_agent_entry.py +207 -0
  6. diagram_to_iac/agents/__init__.py +26 -0
  7. diagram_to_iac/agents/demonstrator_langgraph/__init__.py +10 -0
  8. diagram_to_iac/agents/demonstrator_langgraph/agent.py +826 -0
  9. diagram_to_iac/agents/git_langgraph/__init__.py +10 -0
  10. diagram_to_iac/agents/git_langgraph/agent.py +1018 -0
  11. diagram_to_iac/agents/git_langgraph/pr.py +146 -0
  12. diagram_to_iac/agents/hello_langgraph/__init__.py +9 -0
  13. diagram_to_iac/agents/hello_langgraph/agent.py +621 -0
  14. diagram_to_iac/agents/policy_agent/__init__.py +15 -0
  15. diagram_to_iac/agents/policy_agent/agent.py +507 -0
  16. diagram_to_iac/agents/policy_agent/integration_example.py +191 -0
  17. diagram_to_iac/agents/policy_agent/tools/__init__.py +14 -0
  18. diagram_to_iac/agents/policy_agent/tools/tfsec_tool.py +259 -0
  19. diagram_to_iac/agents/shell_langgraph/__init__.py +21 -0
  20. diagram_to_iac/agents/shell_langgraph/agent.py +122 -0
  21. diagram_to_iac/agents/shell_langgraph/detector.py +50 -0
  22. diagram_to_iac/agents/supervisor_langgraph/__init__.py +17 -0
  23. diagram_to_iac/agents/supervisor_langgraph/agent.py +1947 -0
  24. diagram_to_iac/agents/supervisor_langgraph/demonstrator.py +22 -0
  25. diagram_to_iac/agents/supervisor_langgraph/guards.py +23 -0
  26. diagram_to_iac/agents/supervisor_langgraph/pat_loop.py +49 -0
  27. diagram_to_iac/agents/supervisor_langgraph/router.py +9 -0
  28. diagram_to_iac/agents/terraform_langgraph/__init__.py +15 -0
  29. diagram_to_iac/agents/terraform_langgraph/agent.py +1216 -0
  30. diagram_to_iac/agents/terraform_langgraph/parser.py +76 -0
  31. diagram_to_iac/core/__init__.py +7 -0
  32. diagram_to_iac/core/agent_base.py +19 -0
  33. diagram_to_iac/core/enhanced_memory.py +302 -0
  34. diagram_to_iac/core/errors.py +4 -0
  35. diagram_to_iac/core/issue_tracker.py +49 -0
  36. diagram_to_iac/core/memory.py +132 -0
  37. diagram_to_iac/services/__init__.py +10 -0
  38. diagram_to_iac/services/observability.py +59 -0
  39. diagram_to_iac/services/step_summary.py +77 -0
  40. diagram_to_iac/tools/__init__.py +11 -0
  41. diagram_to_iac/tools/api_utils.py +108 -26
  42. diagram_to_iac/tools/git/__init__.py +45 -0
  43. diagram_to_iac/tools/git/git.py +956 -0
  44. diagram_to_iac/tools/hello/__init__.py +30 -0
  45. diagram_to_iac/tools/hello/cal_utils.py +31 -0
  46. diagram_to_iac/tools/hello/text_utils.py +97 -0
  47. diagram_to_iac/tools/llm_utils/__init__.py +20 -0
  48. diagram_to_iac/tools/llm_utils/anthropic_driver.py +87 -0
  49. diagram_to_iac/tools/llm_utils/base_driver.py +90 -0
  50. diagram_to_iac/tools/llm_utils/gemini_driver.py +89 -0
  51. diagram_to_iac/tools/llm_utils/openai_driver.py +93 -0
  52. diagram_to_iac/tools/llm_utils/router.py +303 -0
  53. diagram_to_iac/tools/sec_utils.py +4 -2
  54. diagram_to_iac/tools/shell/__init__.py +17 -0
  55. diagram_to_iac/tools/shell/shell.py +415 -0
  56. diagram_to_iac/tools/text_utils.py +277 -0
  57. diagram_to_iac/tools/tf/terraform.py +851 -0
  58. diagram_to_iac-0.8.0.dist-info/METADATA +99 -0
  59. diagram_to_iac-0.8.0.dist-info/RECORD +64 -0
  60. {diagram_to_iac-0.6.0.dist-info → diagram_to_iac-0.8.0.dist-info}/WHEEL +1 -1
  61. diagram_to_iac-0.8.0.dist-info/entry_points.txt +4 -0
  62. diagram_to_iac/agents/codegen_agent.py +0 -0
  63. diagram_to_iac/agents/consensus_agent.py +0 -0
  64. diagram_to_iac/agents/deployment_agent.py +0 -0
  65. diagram_to_iac/agents/github_agent.py +0 -0
  66. diagram_to_iac/agents/interpretation_agent.py +0 -0
  67. diagram_to_iac/agents/question_agent.py +0 -0
  68. diagram_to_iac/agents/supervisor.py +0 -0
  69. diagram_to_iac/agents/vision_agent.py +0 -0
  70. diagram_to_iac/core/config.py +0 -0
  71. diagram_to_iac/tools/cv_utils.py +0 -0
  72. diagram_to_iac/tools/gh_utils.py +0 -0
  73. diagram_to_iac/tools/tf_utils.py +0 -0
  74. diagram_to_iac-0.6.0.dist-info/METADATA +0 -16
  75. diagram_to_iac-0.6.0.dist-info/RECORD +0 -32
  76. diagram_to_iac-0.6.0.dist-info/entry_points.txt +0 -2
  77. {diagram_to_iac-0.6.0.dist-info → diagram_to_iac-0.8.0.dist-info}/top_level.txt +0 -0
diagram_to_iac/agents/hello_langgraph/agent.py
@@ -0,0 +1,621 @@
+ import os
+ import re
+ from typing import TypedDict, Annotated, Optional, List, Dict, Any
+ import yaml
+ import uuid
+ import logging
+
+ from langchain_core.messages import HumanMessage, BaseMessage
+ from langgraph.graph import StateGraph, END
+ from langgraph.checkpoint.memory import MemorySaver
+ from pydantic import BaseModel, Field
+
+ # Import tools and enhanced utilities
+ from diagram_to_iac.tools.hello.cal_utils import add_two, multiply_two
+ from diagram_to_iac.tools.llm_utils.router import get_llm, LLMRouter
+ from diagram_to_iac.core.agent_base import AgentBase
+ from diagram_to_iac.tools.hello.text_utils import extract_numbers_from_text, extract_numbers_from_text_with_duplicates
+ from diagram_to_iac.core.memory import (
+     create_memory,
+     LangGraphMemoryAdapter,
+     agent_state_enabled,
+     load_agent_state,
+     save_agent_state,
+     current_git_sha,
+ )
+
+
+ # --- Pydantic Schemas for Agent I/O ---
+ class HelloAgentInput(BaseModel):
+     query: str = Field(..., description="The input query or question for the HelloAgent")
+     thread_id: str | None = Field(None, description="Optional thread ID for conversation history.")
+
+ class HelloAgentOutput(BaseModel):
+     answer: str = Field(..., description="The final answer or result from the HelloAgent")
+     thread_id: str = Field(..., description="The thread ID used for the conversation.")
+     error_message: Optional[str] = Field(None, description="Optional error message if the agent run failed.")
+
+
+ # --- Helper Functions ---
+ # extract_numbers_from_text is now imported from text_utils
+
+
+ # --- Agent State Definition ---
+ class HelloAgentState(TypedDict):
+     input_message: HumanMessage
+     tool_output: Annotated[list[BaseMessage], lambda x, y: x + y]
+     final_answer: str
+     error_message: Optional[str]  # New field for error messages
+
+
+ # --- Main Agent Class ---
+ class HelloAgent(AgentBase):
+     """
+     HelloAgent is an example LangGraph-based agent that can perform simple arithmetic
+     operations (addition, multiplication) or provide direct answers using an LLM.
+     It demonstrates configuration loading, Pydantic I/O schemas, error handling,
+     logging, and persistent conversation state via SQLite.
+     """
+     def __init__(self, config_path: str = None, memory_type: str = "persistent"):
+         """
+         Initializes the HelloAgent.
+
+         Args:
+             config_path: Optional path to a YAML configuration file. If None,
+                 loads from a default path.
+             memory_type: Type of memory to use ("persistent", "memory", or "langgraph")
+
+         Initializes configuration, logger, enhanced LLM router, memory system,
+         checkpointer, and compiles the LangGraph runnable.
+         """
+         # Configure logger for this agent instance
+         # Using __name__ for the logger is a common practice to get module-based loggers
+         self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
+         # BasicConfig should ideally be called once at application entry point.
+         # Guard to avoid reconfiguring if already set (e.g., by another agent or app init)
+         if not logging.getLogger().hasHandlers():
+             logging.basicConfig(
+                 level=logging.INFO,  # Default level, can be overridden by env var or config later
+                 format='%(asctime)s - %(name)s - %(levelname)s - %(threadName)s - %(message)s',
+                 datefmt='%Y-%m-%d %H:%M:%S'
+             )
+
+         if config_path is None:
+             base_dir = os.path.dirname(os.path.abspath(__file__))
+             config_path = os.path.join(base_dir, 'config.yaml')
+             self.logger.debug(f"Default config path set to: {config_path}")
+
+         try:
+             with open(config_path, 'r') as f:
+                 self.config = yaml.safe_load(f)
+             if self.config is None:
+                 self.logger.warning(f"Configuration file at {config_path} is empty. Using default values.")
+                 self._set_default_config()
+             else:
+                 self.logger.info(f"Configuration loaded successfully from {config_path}")
+         except FileNotFoundError:
+             self.logger.warning(f"Configuration file not found at {config_path}. Using default values.")
+             self._set_default_config()
+         except yaml.YAMLError as e:
+             self.logger.error(f"Error parsing YAML configuration from {config_path}: {e}. Using default values.", exc_info=True)
+             self._set_default_config()
+
+         # Ensure a dummy API key is set so tests can initialize the router without real credentials
+         if not os.getenv("OPENAI_API_KEY"):
+             os.environ["OPENAI_API_KEY"] = "test-key"
+
+         # Initialize enhanced LLM router
+         self.llm_router = LLMRouter()
+         self.logger.info("Enhanced LLM router initialized with multi-provider support")
+
+         # Initialize enhanced memory system
+         self.memory = create_memory(memory_type)
+         self.logger.info(f"Enhanced memory system initialized: {type(self.memory).__name__}")
+
+         self.logger.info(f"Initialized with LLM model: {self.config.get('llm', {}).get('model_name', 'N/A')}, Temperature: {self.config.get('llm', {}).get('temperature', 'N/A')}")
+
+         # Initialize checkpointer - Reverting to MemorySaver
+         self.logger.info("Using MemorySaver for checkpointer.")
+         self.checkpointer = MemorySaver()
+
+         # Register tools
+         self.tools = {"add_two": add_two, "multiply_two": multiply_two}
+         self.logger.info(f"Tools registered: {list(self.tools.keys())}")
+
+         self.runnable = self._build_graph()
+
+         # Load persistent agent state and determine if we should resume
+         self.persistent_state_enabled = agent_state_enabled()
+         self.agent_state = load_agent_state() if self.persistent_state_enabled else {}
+         self.current_sha = current_git_sha() if self.persistent_state_enabled else None
+         self._resume = (
+             self.persistent_state_enabled
+             and self.agent_state.get("commit_sha") == self.current_sha
+             and "last_successful_node" in self.agent_state
+         )
+
+     def _set_default_config(self):
+         self.logger.info("Setting default configuration for HelloAgent.")
+         self.config = {
+             'llm': {
+                 'model_name': 'gpt-4o-mini',  # Default fallback
+                 'temperature': 0.0
+             }
+         }
+
+     def _planner_llm_node(self, state: HelloAgentState):
+         """LLM decides if tool is needed, and which specific tool to route to."""
+         # Config values for the LLM
+         llm_config = self.config.get('llm', {})
+         # Don't provide defaults here - check if they're explicitly set in config
+         model_name = llm_config.get('model_name')
+         temperature = llm_config.get('temperature')
+
+         # Use enhanced LLM router for agent-specific model selection
+         try:
+             if model_name is not None or temperature is not None:
+                 actual_model_name = model_name if model_name is not None else "gpt-4o-mini"
+                 actual_temperature = temperature if temperature is not None else 0.0
+                 self.logger.debug(
+                     f"Planner using LLM: {actual_model_name}, Temp: {actual_temperature}"
+                 )
+                 llm = self.llm_router.get_llm(
+                     model_name=actual_model_name,
+                     temperature=actual_temperature,
+                     agent_name="hello_agent",
+                 )
+             else:
+                 self.logger.debug("Planner using agent-specific LLM configuration")
+                 llm = self.llm_router.get_llm_for_agent("hello_agent")
+         except Exception as e:
+             self.logger.error(
+                 f"Failed to get LLM from router: {e}. Falling back to local creation."
+             )
+             fallback_model = model_name or self.config.get("llm", {}).get("model_name", "gpt-4o-mini")
+             fallback_temp = temperature if temperature is not None else self.config.get("llm", {}).get("temperature", 0.0)
+             llm = self.llm_router._create_llm_instance(
+                 {
+                     "model": fallback_model,
+                     "temperature": fallback_temp,
+                     "provider": self.llm_router._detect_provider(fallback_model),
+                 }
+             )
+
+         # Store conversation in memory
+         query_content = state['input_message'].content
+         self.memory.add_to_conversation("user", query_content, {"agent": "hello_agent", "node": "planner"})
+
+         try:
+             self.logger.debug(f"Planner LLM input message content: {query_content}")
+             analysis_prompt_template = self.config.get('prompts', {}).get('planner_prompt', """User input: "{user_input}"
+
+ Analyze this input and determine the appropriate action:
+ 1. If it's asking for addition (words like 'add', 'plus', 'sum', '+' symbol), respond with "{route_add}"
+ 2. If it's asking for multiplication (words like 'multiply', 'times', '*' symbol), respond with "{route_multiply}"
+ 3. If it's a general question not requiring math tools, provide a direct answer
+
+ Important: Only use routing responses if the input contains numbers that can be operated on.""")
+
+             routing_keys = self.config.get('routing_keys', {
+                 "addition": "ROUTE_TO_ADDITION",
+                 "multiplication": "ROUTE_TO_MULTIPLICATION"
+             })
+
+             analysis_prompt = analysis_prompt_template.format(
+                 user_input=query_content,
+                 route_add=routing_keys['addition'],
+                 route_multiply=routing_keys['multiplication']
+             )
+             self.logger.debug(f"Planner LLM prompt: {analysis_prompt}")
+
+             response = llm.invoke([HumanMessage(content=analysis_prompt)])
+             self.logger.debug(f"Planner LLM raw response content: {response.content}")
+             response_content = response.content.strip()
+
+             # Store LLM response in memory
+             self.memory.add_to_conversation("assistant", response_content, {"agent": "hello_agent", "node": "planner", "model": model_name})
+
+             new_state_update = {}
+             if routing_keys['addition'] in response_content:
+                 new_state_update = {"final_answer": "route_to_addition", "error_message": None}
+             elif routing_keys['multiplication'] in response_content:
+                 new_state_update = {"final_answer": "route_to_multiplication", "error_message": None}
+             else:
+                 new_state_update = {"final_answer": response.content, "error_message": None}
+
+             self.logger.info(f"Planner LLM decision: {new_state_update.get('final_answer', 'N/A')}")
+             return new_state_update
+         except Exception as e:
+             self.logger.error(f"LLM error in _planner_llm_node: {e}", exc_info=True)
+             # Store error in memory
+             self.memory.add_to_conversation("system", f"Error in planner: {str(e)}", {"agent": "hello_agent", "node": "planner", "error": True})
+             return {
+                 "final_answer": "Sorry, I encountered an issue processing your request with the language model.",
+                 "error_message": str(e),
+                 "tool_output": state.get("tool_output", [])
+             }
+
+     def _addition_tool_node(self, state: HelloAgentState):
+         """
+         Handles addition operations. Extracts numbers from input, invokes the add_two tool,
+         and updates the state with the result or an error message.
+         Input from state: state['input_message'].content
+         Output to state: Updates 'final_answer', 'tool_output', and 'error_message'.
+         """
+         self.logger.info(f"Addition tool node invoked for input: {state['input_message'].content}")
+         try:
+             text_content = state['input_message'].content
+             found_numbers = extract_numbers_from_text_with_duplicates(text_content)
+             self.logger.debug(f"Numbers found for addition: {found_numbers}")
+
+             # Store tool invocation in memory
+             self.memory.add_to_conversation("system", f"Addition tool invoked with numbers: {found_numbers}",
+                                             {"agent": "hello_agent", "node": "addition_tool", "numbers": found_numbers})
+
+             new_state_update = {}
+             if len(found_numbers) < 2:
+                 self.logger.warning(f"Not enough numbers found in '{text_content}' for addition.")
+                 error_msg = self.config.get('error_messages', {}).get(
+                     'numbers_not_found_addition',
+                     "Addition tool: Could not find two numbers in the input for addition."
+                 )
+                 # Store error in memory
+                 self.memory.add_to_conversation("system", error_msg,
+                                                 {"agent": "hello_agent", "node": "addition_tool", "error": True})
+                 return {"final_answer": error_msg}
+
+             # If we have enough numbers, proceed to try tool invocation
+             num1, num2 = found_numbers[0], found_numbers[1]
+             # result = add_two.invoke({"x": num1, "y": num2})  # Old direct call
+             result = self.tools['add_two'].invoke({"x": num1, "y": num2})
+             self.logger.debug(f"Addition tool raw result: {result}")
+
+             # Store successful result in memory
+             self.memory.add_to_conversation("system", f"Addition result: {num1} + {num2} = {result}",
+                                             {"agent": "hello_agent", "node": "addition_tool", "operation": "addition", "result": result})
+
+             new_state_update = {
+                 "final_answer": str(result),
+                 "tool_output": [HumanMessage(content=f"Addition tool result: {num1} + {num2} = {result}")],
+                 "error_message": None
+             }
+
+             self.logger.info(f"Addition tool node result: {new_state_update.get('final_answer', 'N/A')}")
+             return new_state_update
+         except Exception as e:
+             self.logger.error(f"Addition tool error: {e}", exc_info=True)
+             # Store error in memory
+             self.memory.add_to_conversation("system", f"Addition tool error: {str(e)}",
+                                             {"agent": "hello_agent", "node": "addition_tool", "error": True})
+             return {
+                 "final_answer": "Sorry, I couldn't perform the addition due to an error.",
+                 "error_message": str(e),
+                 "tool_output": state.get("tool_output", [])
+             }
+
+     def _multiplication_tool_node(self, state: HelloAgentState):
+         """
+         Handles multiplication operations. Extracts numbers from input, invokes the multiply_two tool,
+         and updates the state with the result or an error message.
+         Input from state: state['input_message'].content
+         Output to state: Updates 'final_answer', 'tool_output', and 'error_message'.
+         """
+         self.logger.info(f"Multiplication tool node invoked for input: {state['input_message'].content}")
+         try:
+             text_content = state['input_message'].content
+             found_numbers = extract_numbers_from_text_with_duplicates(text_content)
+             self.logger.debug(f"Numbers found for multiplication: {found_numbers}")
+
+             # Store tool invocation in memory
+             self.memory.add_to_conversation("system", f"Multiplication tool invoked with numbers: {found_numbers}",
+                                             {"agent": "hello_agent", "node": "multiplication_tool", "numbers": found_numbers})
+
+             new_state_update = {}
+             if len(found_numbers) < 2:
+                 self.logger.warning(f"Not enough numbers found in '{text_content}' for multiplication.")
+                 error_msg = self.config.get('error_messages', {}).get(
+                     'numbers_not_found_multiplication',
+                     "Multiplication tool: Could not find two numbers in the input for multiplication."
+                 )
+                 # Store error in memory
+                 self.memory.add_to_conversation("system", error_msg,
+                                                 {"agent": "hello_agent", "node": "multiplication_tool", "error": True})
+                 return {"final_answer": error_msg}
+
+             num1, num2 = found_numbers[0], found_numbers[1]
+             # result = multiply_two.invoke({"x": num1, "y": num2})  # Old direct call
+             result = self.tools['multiply_two'].invoke({"x": num1, "y": num2})
+             self.logger.debug(f"Multiplication tool raw result: {result}")
+
+             # Store successful result in memory
+             self.memory.add_to_conversation("system", f"Multiplication result: {num1} * {num2} = {result}",
+                                             {"agent": "hello_agent", "node": "multiplication_tool", "operation": "multiplication", "result": result})
+
+             new_state_update = {
+                 "final_answer": str(result),
+                 "tool_output": [HumanMessage(content=f"Multiplication tool result: {num1} * {num2} = {result}")],
+                 "error_message": None
+             }
+
+             self.logger.info(f"Multiplication tool node result: {new_state_update.get('final_answer', 'N/A')}")
+             return new_state_update
+         except Exception as e:
+             self.logger.error(f"Multiplication tool error: {e}", exc_info=True)
+             # Store error in memory
+             self.memory.add_to_conversation("system", f"Multiplication tool error: {str(e)}",
+                                             {"agent": "hello_agent", "node": "multiplication_tool", "error": True})
+             return {
+                 "final_answer": "Sorry, I couldn't perform the multiplication due to an error.",
+                 "error_message": str(e),
+                 "tool_output": state.get("tool_output", [])
+             }
+
+     def _route_after_planner(self, state: HelloAgentState):
+         """
+         Routes to the appropriate tool node or ends the graph based on the 'final_answer'
+         field in the state, which is set by the planner. Also routes to END if an error
+         is present in the state.
+         Input from state: state['final_answer'], state['error_message']
+         Output: Name of the next node (str) or END.
+         """
+         self.logger.debug(f"Routing after planner. Current state final_answer: '{state.get('final_answer')}', error_message: '{state.get('error_message')}'")
+         if state.get("error_message"):  # Check if an error occurred in the planner node
+             self.logger.warning(f"Error detected in planner state, routing to END. Error: {state['error_message']}")
+             return END
+
+         final_answer = state.get("final_answer", "")
+
+         # Use the same routing keys as the planner for consistency
+         if final_answer == "route_to_addition":
+             return "addition_tool"
+         elif final_answer == "route_to_multiplication":
+             return "multiplication_tool"
+         return END
+
+     def _build_graph(self):
+         graph_builder = StateGraph(HelloAgentState)
+         graph_builder.add_node("planner_llm", self._planner_llm_node)
+         graph_builder.add_node("addition_tool", self._addition_tool_node)
+         graph_builder.add_node("multiplication_tool", self._multiplication_tool_node)
+
+         graph_builder.set_entry_point("planner_llm")
+
+         routing_map = self.config.get('routing_map', {
+             "addition_tool": "addition_tool",
+             "multiplication_tool": "multiplication_tool",
+             END: END
+         })
+         graph_builder.add_conditional_edges(
+             "planner_llm",
+             self._route_after_planner,
+             routing_map
+         )
+         graph_builder.add_edge("addition_tool", END)
+         graph_builder.add_edge("multiplication_tool", END)
+
+         # Use the instance checkpointer
+         return graph_builder.compile(checkpointer=self.checkpointer)
+
+     def run(self, agent_input: HelloAgentInput) -> HelloAgentOutput:
+         """Runs the agent with the given input."""
+         current_thread_id = agent_input.thread_id if agent_input.thread_id is not None else str(uuid.uuid4())
+         self.logger.info(f"Run invoked with query: '{agent_input.query}', thread_id: {current_thread_id}")
+         if self._resume and self.agent_state.get("final_answer"):
+             self.logger.info("Using saved agent state to skip execution")
+             return HelloAgentOutput(answer=self.agent_state.get("final_answer", ""), thread_id=current_thread_id, error_message=self.agent_state.get("error_message"))
+
+         initial_state = {
+             "input_message": HumanMessage(content=agent_input.query),
+             "tool_output": [],
+             "error_message": None
+         }
+
+         langgraph_config = {"configurable": {"thread_id": current_thread_id}}
+
+         result_state = self.runnable.invoke(initial_state, config=langgraph_config)
+
+         output: HelloAgentOutput
+         final_answer_str = result_state.get("final_answer", "No answer found.")
+         error_msg_str = result_state.get("error_message")
+
+         if error_msg_str:
+             if final_answer_str and final_answer_str.startswith("Sorry, I encountered an issue"):
+                 self.logger.error(f"Run completed with error. Final answer: '{final_answer_str}', Error detail: {error_msg_str}, Thread ID: {current_thread_id}")
+                 # final_answer_str remains the "Sorry..." message
+             else:
+                 routing_keys_values = [
+                     self.config.get('routing_keys', {}).get('addition', 'ROUTE_TO_ADDITION'),
+                     self.config.get('routing_keys', {}).get('multiplication', 'ROUTE_TO_MULTIPLICATION')
+                 ]
+                 if final_answer_str in routing_keys_values:  # final_answer is a routing key but an error occurred
+                     self.logger.error(f"Run completed with error. Error detail: {error_msg_str}, (Final answer was a routing key: {final_answer_str}), Thread ID: {current_thread_id}")
+                     final_answer_str = f"An error occurred: {error_msg_str}"  # Prioritize the error message string
+                 else:
+                     # Nodes set final_answer either to a generic "Sorry..." message (handled
+                     # above) or to a specific validation warning (e.g., "Could not find two
+                     # numbers..."), with the technical detail in error_message. In both cases
+                     # final_answer already reflects the error, so no further change is needed.
+                     self.logger.info(f"Run completed with specific message/error. Final answer: '{final_answer_str}', Error detail: {error_msg_str}, Thread ID: {current_thread_id}")
+         else:
+             self.logger.info(
+                 f"Run completed successfully. Output answer: '{final_answer_str}', Thread ID: {current_thread_id}"
+             )
+
+         history = list(self.runnable.get_state_history(langgraph_config))
+         last_node = None
+         if len(history) >= 2:
+             chron = list(reversed(history))
+             before_end = chron[-2]
+             if before_end.next:
+                 last_node = before_end.next[0]
+
+         self.agent_state = {
+             "commit_sha": self.current_sha,
+             "last_successful_node": last_node,
+             "guard_results": {"startup": True},
+             "final_answer": final_answer_str,
+             "error_message": error_msg_str,
+             "thread_id": current_thread_id,
+         }
+         if self.persistent_state_enabled:
+             save_agent_state(self.agent_state)
+
+         return HelloAgentOutput(
+             answer=final_answer_str,
+             thread_id=current_thread_id,
+             error_message=error_msg_str,
+         )
+
+     def get_conversation_history(self) -> List[Dict[str, Any]]:
+         """
+         Get the conversation history from memory.
+
+         Returns:
+             List[Dict]: List of conversation messages with metadata
+         """
+         return self.memory.get_conversation_history()
+
+     def get_memory_state(self) -> Dict[str, Any]:
+         """
+         Get the current memory state.
+
+         Returns:
+             Dict: Current state stored in memory
+         """
+         return self.memory.get_state()
+
+     def clear_memory(self) -> None:
+         """Clear all memory including conversation history."""
+         self.memory.clear_state()
+         self.logger.info("Memory cleared including conversation history")
+
+     def plan(self, input_text: str, **kwargs):
+         """
+         Generates a plan for the agent to execute (required by AgentBase).
+         For HelloAgent, the plan is simply to analyze the input and determine the appropriate action.
+
+         Args:
+             input_text: The input query to plan for
+             **kwargs: Additional parameters (e.g., thread_id)
+
+         Returns:
+             dict: A plan containing the input and any additional context
+         """
+         self.logger.info(f"Planning for input: '{input_text}'")
+
+         plan = {
+             "input_text": input_text,
+             "predicted_action": "analyze_and_route",
+             "description": "Analyze input to determine if math operations are needed or if direct LLM response is sufficient"
+         }
+
+         # Simple analysis to predict the route
+         if any(word in input_text.lower() for word in ['add', 'plus', 'sum', '+']):
+             plan["predicted_route"] = "addition_tool"
+         elif any(word in input_text.lower() for word in ['multiply', 'times', '*']):
+             plan["predicted_route"] = "multiplication_tool"
+         else:
+             plan["predicted_route"] = "llm_response"
+
+         return plan
+
+     def report(self, result=None, **kwargs):
+         """
+         Reports the results or progress of the agent's execution (required by AgentBase).
+
+         Args:
+             result: The result to report (HelloAgentOutput or string)
+             **kwargs: Additional parameters
+
+         Returns:
+             dict: A report containing execution details
+         """
+         if isinstance(result, HelloAgentOutput):
+             report = {
+                 "status": "completed",
+                 "answer": result.answer,
+                 "thread_id": result.thread_id,
+                 "error_message": result.error_message,
+                 "success": result.error_message is None
+             }
+         elif isinstance(result, str):
+             report = {
+                 "status": "completed",
+                 "answer": result,
+                 "success": True
+             }
+         else:
+             report = {
+                 "status": "no_result",
+                 "message": "No result provided to report"
+             }
+
+         self.logger.info(f"Agent execution report: {report}")
+         return report
+
+
+ # --- Main execution for demonstration ---
+ if __name__ == "__main__":
+     # Example: Instantiate the agent (will load config from default path)
+     agent = HelloAgent()  # Uses MemorySaver by default now
+
+     print("Hello LangGraph Agent (Configurable, Pydantic I/O, MemorySaver)!")
+     print("=" * 50)
+
+     test_queries = [
+         "What is 4 + 5?",
+         "What is 4 * 5?",
+         "Please add 7 and 3 together",
+         "Multiply 6 times 8",
+         "What is 2 divided by 2?",
+         "Can you add 10 and 20 for me and also tell me a joke?"
+     ]
+
+     for query_text in test_queries:
+         agent_input_obj = HelloAgentInput(query=query_text)
+         output_obj = agent.run(agent_input_obj)
+         print(f"Input Query: '{query_text}'\nAgent Answer: '{output_obj.answer}'")
+         print("-" * 50)
+
+     # Example with thread_id
+     print("\nRunning with a specific thread_id (for potential conversation history):")
+     convo_thread_id = "my-test-conversation-123"
+     input1 = HelloAgentInput(query="What is 10 + 10?", thread_id=convo_thread_id)
+     output1 = agent.run(input1)
+     print(f"Input Query: '{input1.query}' (Thread: {convo_thread_id})\nAgent Answer: '{output1.answer}'")
+
+     # Potentially, a follow-up on the same thread (MemorySaver would keep state)
+     # For this agent, most interactions are single-shot math, but if it had memory:
+     # input2 = HelloAgentInput(query="And what about adding 5 to that?", thread_id=convo_thread_id)
+     # output2 = agent.run(input2)
+     # print(f"Input Query: '{input2.query}' (Thread: {convo_thread_id})\nAgent Answer: '{output2.answer}'")
+     print("-" * 50)
+
+     if not os.getenv("OPENAI_API_KEY"):
+         print("WARNING: OPENAI_API_KEY is not set. LLM calls might fail if not configured otherwise.")
+         print("Please set it as an environment variable if using OpenAI models without explicit API key config.")
diagram_to_iac/agents/policy_agent/__init__.py
@@ -0,0 +1,15 @@
+ """
+ Policy Agent Module
+
+ Security policy enforcement agent for Terraform configurations using tfsec scanning.
+ This agent provides policy gate functionality to block terraform apply operations
+ on critical security violations.
+ """
+
+ from .agent import PolicyAgent, PolicyAgentInput, PolicyAgentOutput
+
+ __all__ = [
+     "PolicyAgent",
+     "PolicyAgentInput",
+     "PolicyAgentOutput"
+ ]
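Since this __init__.py re-exports the agent and its Pydantic I/O schemas at package level, downstream code can import them without reaching into the submodule. The names below come straight from __all__ above; the rest of the PolicyAgent API lives in agent.py (+507, listed earlier) and is not shown in this diff:

from diagram_to_iac.agents.policy_agent import PolicyAgent, PolicyAgentInput, PolicyAgentOutput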