diagram-to-iac 0.6.0-py3-none-any.whl → 0.8.0-py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (77)
  1. diagram_to_iac/__init__.py +10 -0
  2. diagram_to_iac/actions/__init__.py +7 -0
  3. diagram_to_iac/actions/git_entry.py +174 -0
  4. diagram_to_iac/actions/supervisor_entry.py +116 -0
  5. diagram_to_iac/actions/terraform_agent_entry.py +207 -0
  6. diagram_to_iac/agents/__init__.py +26 -0
  7. diagram_to_iac/agents/demonstrator_langgraph/__init__.py +10 -0
  8. diagram_to_iac/agents/demonstrator_langgraph/agent.py +826 -0
  9. diagram_to_iac/agents/git_langgraph/__init__.py +10 -0
  10. diagram_to_iac/agents/git_langgraph/agent.py +1018 -0
  11. diagram_to_iac/agents/git_langgraph/pr.py +146 -0
  12. diagram_to_iac/agents/hello_langgraph/__init__.py +9 -0
  13. diagram_to_iac/agents/hello_langgraph/agent.py +621 -0
  14. diagram_to_iac/agents/policy_agent/__init__.py +15 -0
  15. diagram_to_iac/agents/policy_agent/agent.py +507 -0
  16. diagram_to_iac/agents/policy_agent/integration_example.py +191 -0
  17. diagram_to_iac/agents/policy_agent/tools/__init__.py +14 -0
  18. diagram_to_iac/agents/policy_agent/tools/tfsec_tool.py +259 -0
  19. diagram_to_iac/agents/shell_langgraph/__init__.py +21 -0
  20. diagram_to_iac/agents/shell_langgraph/agent.py +122 -0
  21. diagram_to_iac/agents/shell_langgraph/detector.py +50 -0
  22. diagram_to_iac/agents/supervisor_langgraph/__init__.py +17 -0
  23. diagram_to_iac/agents/supervisor_langgraph/agent.py +1947 -0
  24. diagram_to_iac/agents/supervisor_langgraph/demonstrator.py +22 -0
  25. diagram_to_iac/agents/supervisor_langgraph/guards.py +23 -0
  26. diagram_to_iac/agents/supervisor_langgraph/pat_loop.py +49 -0
  27. diagram_to_iac/agents/supervisor_langgraph/router.py +9 -0
  28. diagram_to_iac/agents/terraform_langgraph/__init__.py +15 -0
  29. diagram_to_iac/agents/terraform_langgraph/agent.py +1216 -0
  30. diagram_to_iac/agents/terraform_langgraph/parser.py +76 -0
  31. diagram_to_iac/core/__init__.py +7 -0
  32. diagram_to_iac/core/agent_base.py +19 -0
  33. diagram_to_iac/core/enhanced_memory.py +302 -0
  34. diagram_to_iac/core/errors.py +4 -0
  35. diagram_to_iac/core/issue_tracker.py +49 -0
  36. diagram_to_iac/core/memory.py +132 -0
  37. diagram_to_iac/services/__init__.py +10 -0
  38. diagram_to_iac/services/observability.py +59 -0
  39. diagram_to_iac/services/step_summary.py +77 -0
  40. diagram_to_iac/tools/__init__.py +11 -0
  41. diagram_to_iac/tools/api_utils.py +108 -26
  42. diagram_to_iac/tools/git/__init__.py +45 -0
  43. diagram_to_iac/tools/git/git.py +956 -0
  44. diagram_to_iac/tools/hello/__init__.py +30 -0
  45. diagram_to_iac/tools/hello/cal_utils.py +31 -0
  46. diagram_to_iac/tools/hello/text_utils.py +97 -0
  47. diagram_to_iac/tools/llm_utils/__init__.py +20 -0
  48. diagram_to_iac/tools/llm_utils/anthropic_driver.py +87 -0
  49. diagram_to_iac/tools/llm_utils/base_driver.py +90 -0
  50. diagram_to_iac/tools/llm_utils/gemini_driver.py +89 -0
  51. diagram_to_iac/tools/llm_utils/openai_driver.py +93 -0
  52. diagram_to_iac/tools/llm_utils/router.py +303 -0
  53. diagram_to_iac/tools/sec_utils.py +4 -2
  54. diagram_to_iac/tools/shell/__init__.py +17 -0
  55. diagram_to_iac/tools/shell/shell.py +415 -0
  56. diagram_to_iac/tools/text_utils.py +277 -0
  57. diagram_to_iac/tools/tf/terraform.py +851 -0
  58. diagram_to_iac-0.8.0.dist-info/METADATA +99 -0
  59. diagram_to_iac-0.8.0.dist-info/RECORD +64 -0
  60. {diagram_to_iac-0.6.0.dist-info → diagram_to_iac-0.8.0.dist-info}/WHEEL +1 -1
  61. diagram_to_iac-0.8.0.dist-info/entry_points.txt +4 -0
  62. diagram_to_iac/agents/codegen_agent.py +0 -0
  63. diagram_to_iac/agents/consensus_agent.py +0 -0
  64. diagram_to_iac/agents/deployment_agent.py +0 -0
  65. diagram_to_iac/agents/github_agent.py +0 -0
  66. diagram_to_iac/agents/interpretation_agent.py +0 -0
  67. diagram_to_iac/agents/question_agent.py +0 -0
  68. diagram_to_iac/agents/supervisor.py +0 -0
  69. diagram_to_iac/agents/vision_agent.py +0 -0
  70. diagram_to_iac/core/config.py +0 -0
  71. diagram_to_iac/tools/cv_utils.py +0 -0
  72. diagram_to_iac/tools/gh_utils.py +0 -0
  73. diagram_to_iac/tools/tf_utils.py +0 -0
  74. diagram_to_iac-0.6.0.dist-info/METADATA +0 -16
  75. diagram_to_iac-0.6.0.dist-info/RECORD +0 -32
  76. diagram_to_iac-0.6.0.dist-info/entry_points.txt +0 -2
  77. {diagram_to_iac-0.6.0.dist-info → diagram_to_iac-0.8.0.dist-info}/top_level.txt +0 -0
diagram_to_iac/tools/llm_utils/router.py
@@ -0,0 +1,303 @@
+ import os
+ import yaml
+ from typing import Dict, Any, Optional, List
+ from pathlib import Path
+ from langchain_core.language_models.chat_models import BaseChatModel
+
+ # Import driver architecture
+ from .base_driver import BaseLLMDriver
+ from .openai_driver import OpenAIDriver
+ from .anthropic_driver import AnthropicDriver
+ from .gemini_driver import GoogleDriver
+
+ try:
+     from langchain_core.messages import HumanMessage
+     LANGCHAIN_CORE_AVAILABLE = True
+ except ImportError:
+     LANGCHAIN_CORE_AVAILABLE = False
+
+ class LLMRouter:
+     """
+     Enhanced LLM Router that supports multiple providers and model policy configuration.
+     Loads configuration from model_policy.yaml and routes to appropriate LLM providers.
+     Uses driver architecture for provider-specific optimizations.
+     """
+
+     def __init__(self, config_path: Optional[str] = None):
+         """Initialize the router with model policy configuration and drivers."""
+         self.config = self._load_model_policy(config_path)
+         self._provider_cache = {}
+
+         # Initialize drivers
+         self._drivers = {
+             "openai": OpenAIDriver(),
+             "anthropic": AnthropicDriver(),
+             "google": GoogleDriver()
+         }
+
+     def _load_model_policy(self, config_path: Optional[str] = None) -> Dict[str, Any]:
+         """Load model policy from YAML configuration."""
+         if config_path is None:
+             # Default to project's model_policy.yaml
+             base_dir = Path(__file__).parent.parent.parent.parent.parent
+             config_path = base_dir / "config" / "model_policy.yaml"
+
+         try:
+             with open(config_path, 'r') as f:
+                 return yaml.safe_load(f) or {}
+         except FileNotFoundError:
+             print(f"Warning: Model policy file not found at {config_path}. Using defaults.")
+             return self._get_default_config()
+         except yaml.YAMLError as e:
+             print(f"Warning: Error parsing model policy YAML: {e}. Using defaults.")
+             return self._get_default_config()
+
+     def _get_default_config(self) -> Dict[str, Any]:
+         """Return default configuration when model_policy.yaml is not available."""
+         return {
+             "default": {
+                 "model": "gpt-4o-mini",
+                 "temperature": 0.0,
+                 "provider": "openai"
+             },
+             "models": {
+                 "gpt-4o-mini": {"provider": "openai", "api_key_env": "OPENAI_API_KEY"},
+                 "gpt-4o": {"provider": "openai", "api_key_env": "OPENAI_API_KEY"},
+                 "gpt-3.5-turbo": {"provider": "openai", "api_key_env": "OPENAI_API_KEY"}
+             }
+         }
+
+     def _detect_provider(self, model_name: str) -> str:
+         """Detect provider based on model name patterns."""
+         model_lower = model_name.lower()
+
+         if any(pattern in model_lower for pattern in ['gpt', 'openai']):
+             return 'openai'
+         elif any(pattern in model_lower for pattern in ['claude', 'anthropic']):
+             return 'anthropic'
+         elif any(pattern in model_lower for pattern in ['gemini', 'google']):
+             return 'google'
+         else:
+             return 'openai'  # Default fallback
+
+     def _check_api_key(self, provider: str) -> bool:
+         """Check if required API key is available for the provider."""
+         key_mapping = {
+             'openai': 'OPENAI_API_KEY',
+             'anthropic': 'ANTHROPIC_API_KEY',
+             'google': 'GOOGLE_API_KEY'
+         }
+
+         required_key = key_mapping.get(provider)
+         if required_key and not os.getenv(required_key):
+             return False
+         return True
+
+     def get_llm_for_agent(self, agent_name: str) -> BaseChatModel:
+         """
+         Get an LLM instance configured for a specific agent.
+         Uses agent-specific configuration from model_policy.yaml.
+         """
+         config = self._resolve_model_config(agent_name)
+
+         # Check if API key is available for the provider
+         if not self._check_api_key(config['provider']):
+             raise ValueError(f"API key not found for provider: {config['provider']}")
+
+         return self._create_llm_instance(config)
+
+     def get_llm(self, model_name: str = None, temperature: float = None, agent_name: str = None) -> BaseChatModel:
+         """
+         Initializes and returns an LLM instance using model_policy.yaml configuration.
+         Uses provided parameters or falls back to agent-specific or global defaults.
+         """
+         # If agent_name is provided but other params are None, use agent-specific config
+         if agent_name and model_name is None and temperature is None:
+             return self.get_llm_for_agent(agent_name)
+
+         # Resolve model and temperature from policy configuration
+         effective_model_name, effective_temperature = self._resolve_model_config_legacy(
+             model_name, temperature, agent_name
+         )
+
+         # Detect provider for the model
+         provider = self._detect_provider(effective_model_name)
+
+         # Check API key availability
+         if not self._check_api_key(provider):
+             # Fallback to default provider if API key is missing
+             fallback_config = self.config.get('default', {})
+             effective_model_name = fallback_config.get('model', 'gpt-4o-mini')
+             effective_temperature = fallback_config.get('temperature', 0.0)
+             provider = fallback_config.get('provider', 'openai')
+
+         # Create configuration dict
+         config = {
+             'model': effective_model_name,
+             'temperature': effective_temperature,
+             'provider': provider
+         }
+
+         # Create and return the appropriate LLM instance
+         return self._create_llm_instance(config)
+
+     def _resolve_model_config(self, agent_name: str) -> Dict[str, Any]:
+         """
+         Resolve model configuration for a specific agent.
+         Returns a dict with all config values, inheriting from defaults.
+         """
+         # Start with all default values
+         default_config = self.config.get('default', {})
+         config = default_config.copy()  # Copy all default values
+
+         # Apply agent-specific configuration if available
+         if agent_name:
+             agent_config = self.config.get('agents', {}).get(agent_name, {})
+             # Update config with any agent-specific overrides
+             config.update(agent_config)
+
+         # Auto-detect provider if not specified in either default or agent config
+         if 'provider' not in config:
+             config['provider'] = self._detect_provider(config.get('model', 'gpt-4o-mini'))
+
+         return config
+
+     def _resolve_model_config_legacy(self, model_name: str, temperature: float, agent_name: str) -> tuple[str, float]:
+         """Resolve model name and temperature from configuration hierarchy (legacy method)."""
+         # Start with defaults
+         defaults = self.config.get('default', {})
+         effective_model_name = defaults.get('model', 'gpt-4o-mini')
+         effective_temperature = defaults.get('temperature', 0.0)
+
+         # Apply agent-specific configuration if available
+         if agent_name:
+             agent_config = self.config.get('agents', {}).get(agent_name, {})
+             if 'model' in agent_config:
+                 effective_model_name = agent_config['model']
+             if 'temperature' in agent_config:
+                 effective_temperature = agent_config['temperature']
+
+         # Override with explicit parameters
+         if model_name is not None:
+             effective_model_name = model_name
+         if temperature is not None:
+             effective_temperature = temperature
+
+         return effective_model_name, effective_temperature
+
+     def _create_llm_instance(self, config: Dict[str, Any]) -> BaseChatModel:
+         """Create an LLM instance using the appropriate driver."""
+         provider = config['provider']
+
+         # Get the driver for this provider
+         driver = self._drivers.get(provider)
+         if not driver:
+             raise ValueError(f"No driver available for provider: {provider}")
+
+         # Use driver to create LLM instance
+         return driver.create_llm(config)
+
+     def get_supported_models(self, provider: str = None) -> Dict[str, List[str]]:
+         """Get supported models for all providers or a specific provider."""
+         if provider:
+             driver = self._drivers.get(provider)
+             if not driver:
+                 return {}
+             return {provider: driver.get_supported_models()}
+
+         # Return all supported models
+         return {
+             provider: driver.get_supported_models()
+             for provider, driver in self._drivers.items()
+         }
+
+     def get_model_capabilities(self, provider: str, model: str) -> Dict[str, Any]:
+         """Get capabilities for a specific model."""
+         driver = self._drivers.get(provider)
+         if not driver:
+             return {}
+         return driver.get_model_capabilities(model)
+
+     def estimate_cost(self, provider: str, model: str, input_tokens: int, output_tokens: int) -> float:
+         """Estimate cost for a specific model and token usage."""
+         driver = self._drivers.get(provider)
+         if not driver:
+             return 0.0
+         return driver.estimate_cost(model, input_tokens, output_tokens)
+
+     def get_all_model_info(self) -> Dict[str, Dict[str, Any]]:
+         """Get comprehensive information about all available models."""
+         info = {}
+         for provider, driver in self._drivers.items():
+             info[provider] = {
+                 "models": driver.get_supported_models(),
+                 "capabilities": {
+                     model: driver.get_model_capabilities(model)
+                     for model in driver.get_supported_models()
+                 }
+             }
+         return info
+
+
+ # Create global router instance
+ _router_instance = None
+
+ def get_llm(model_name: str = None, temperature: float = None, agent_name: str = None) -> BaseChatModel:
+     """
+     Global function to get an LLM instance using the router.
+     Provides backward compatibility with existing code.
+     """
+     global _router_instance
+     if _router_instance is None:
+         _router_instance = LLMRouter()
+     return _router_instance.get_llm(model_name, temperature, agent_name)
+
+ # Example usage
+ if __name__ == '__main__':
+     # Example usage (requires OPENAI_API_KEY to be set for default gpt-4o-mini)
+     try:
+         print("Testing enhanced LLM Router with model_policy.yaml support")
+         print("=" * 60)
+
+         print("\n1. Testing get_llm with no parameters (should use defaults):")
+         llm_default = get_llm()
+         print(f" ✓ LLM Type: {type(llm_default).__name__}")
+         print(f" ✓ Model: {llm_default.model_name}")
+         print(f" ✓ Temperature: {llm_default.temperature}")
+
+         print("\n2. Testing get_llm with specified parameters:")
+         llm_custom = get_llm(model_name="gpt-3.5-turbo", temperature=0.5)
+         print(f" ✓ LLM Type: {type(llm_custom).__name__}")
+         print(f" ✓ Model: {llm_custom.model_name}")
+         print(f" ✓ Temperature: {llm_custom.temperature}")
+
+         print("\n3. Testing agent-specific configuration:")
+         llm_codegen = get_llm(agent_name="codegen_agent")
+         print(f" ✓ LLM Type: {type(llm_codegen).__name__}")
+         print(f" ✓ Model: {llm_codegen.model_name}")
+         print(f" ✓ Temperature: {llm_codegen.temperature}")
+
+         print("\n4. Testing agent with overrides:")
+         llm_question = get_llm(agent_name="question_agent")
+         print(f" ✓ LLM Type: {type(llm_question).__name__}")
+         print(f" ✓ Model: {llm_question.model_name}")
+         print(f" ✓ Temperature: {llm_question.temperature}")
+
+         print("\n5. Testing fallback behavior with non-existent model:")
+         llm_fallback = get_llm(model_name="non-existent-model")
+         print(f" ✓ LLM Type: {type(llm_fallback).__name__}")
+         print(f" ✓ Model: {llm_fallback.model_name}")
+         print(f" ✓ Temperature: {llm_fallback.temperature}")
+
+         # Test actual LLM invocation if API key is available
+         if os.getenv("OPENAI_API_KEY") and LANGCHAIN_CORE_AVAILABLE:
+             print("\n6. Testing actual LLM invocation:")
+             response = llm_default.invoke([HumanMessage(content="Hello! Respond with just 'Working!'")])
+             print(f" ✓ LLM Response: {response.content}")
+         else:
+             print("\n6. Skipping LLM invocation test (OPENAI_API_KEY not set or langchain_core not available)")
+
+     except ValueError as e:
+         print(f"ValueError: {e}")
+     except Exception as e:
+         print(f"An unexpected error occurred during get_llm tests: {e}")
diagram_to_iac/tools/sec_utils.py
@@ -51,10 +51,12 @@ def load_yaml_secrets() -> None:
          # Strip the "_ENCODED" suffix
          # base_name = key.removesuffix("_ENCODED")

-         # GitHub Actions requires the env var "GITHUB_TOKEN"
-         # not "REPO_TOKEN", so map accordingly
+         # Map specific keys to their expected environment variable names
          if key == "REPO_API_KEY":
              env_name = "GITHUB_TOKEN"
+         elif key == "TF_API_KEY":
+             # env_name = "TF_TOKEN_APP_TERRAFORM_IO"
+             env_name = "TFE_TOKEN"
          else:
              env_name = key

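In effect, the changed lines extend the secret-name mapping. A condensed sketch of the resulting behavior (the enclosing load_yaml_secrets loop is paraphrased as a hypothetical helper; only the mappings themselves come from the hunk above):

# Paraphrased from the hunk: decoded secret keys are exported under the
# environment variable name each downstream tool expects.
def _map_secret_key(key: str) -> str:  # hypothetical helper for illustration
    if key == "REPO_API_KEY":
        return "GITHUB_TOKEN"  # GitHub tooling reads GITHUB_TOKEN
    elif key == "TF_API_KEY":
        return "TFE_TOKEN"     # Terraform Cloud/Enterprise token
    return key                 # everything else keeps its own name

assert _map_secret_key("TF_API_KEY") == "TFE_TOKEN"
assert _map_secret_key("OPENAI_API_KEY") == "OPENAI_API_KEY"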
diagram_to_iac/tools/shell/__init__.py
@@ -0,0 +1,17 @@
+ from .shell import (
+     shell_exec,
+     ShellExecutor,
+     ShellExecInput,
+     ShellExecOutput,
+     get_shell_executor,
+     _shell_executor,
+ )
+
+ __all__ = [
+     "shell_exec",
+     "ShellExecutor",
+     "ShellExecInput",
+     "ShellExecOutput",
+     "get_shell_executor",
+     "_shell_executor",
+ ]
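
This __init__.py exposes the shell tool surface at diagram_to_iac.tools.shell. A usage sketch under assumptions: shell.py itself is not shown in this diff, so the no-argument get_shell_executor() signature and the singleton behavior suggested by the re-exported _shell_executor are inferred, not confirmed.

# Sketch only: get_shell_executor()'s signature is an assumption, since
# shell.py is not part of this hunk.
from diagram_to_iac.tools.shell import ShellExecutor, get_shell_executor

executor = get_shell_executor()  # presumably returns the module-level _shell_executor
print(isinstance(executor, ShellExecutor))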