diagram-to-iac 1.0.3__py3-none-any.whl → 1.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -9,6 +9,10 @@ from .base_driver import BaseLLMDriver
  from .openai_driver import OpenAIDriver
  from .anthropic_driver import AnthropicDriver
  from .gemini_driver import GoogleDriver
+ from .grok_driver import GrokDriver
+
+ # Import ConfigLoader for centralized configuration
+ from ...core.config_loader import ConfigLoader

  try:
  from langchain_core.messages import HumanMessage
@@ -28,11 +32,15 @@ class LLMRouter:
  self.config = self._load_model_policy(config_path)
  self._provider_cache = {}

- # Initialize drivers
+ # Initialize ConfigLoader for accessing centralized configuration
+ self._config_loader = ConfigLoader()
+
+ # Initialize drivers (including new Grok driver)
  self._drivers = {
  "openai": OpenAIDriver(),
  "anthropic": AnthropicDriver(),
- "google": GoogleDriver()
+ "google": GoogleDriver(),
+ "grok": GrokDriver()
  }

  def _load_model_policy(self, config_path: Optional[str] = None) -> Dict[str, Any]:
@@ -77,6 +85,8 @@ class LLMRouter:
  return 'anthropic'
  elif any(pattern in model_lower for pattern in ['gemini', 'google']):
  return 'google'
+ elif any(pattern in model_lower for pattern in ['grok', 'x.ai']):
+ return 'grok'
  else:
  return 'openai' # Default fallback

@@ -85,7 +95,8 @@ class LLMRouter:
  key_mapping = {
  'openai': 'OPENAI_API_KEY',
  'anthropic': 'ANTHROPIC_API_KEY',
- 'google': 'GOOGLE_API_KEY'
+ 'google': 'GOOGLE_API_KEY',
+ 'grok': 'GROK_API_KEY'
  }

  required_key = key_mapping.get(provider)
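
The mapping above is what `_check_api_key` consults, so a provider only participates in routing when its environment variable is set. A minimal shell sketch (placeholder values; set only the providers you actually use):

```bash
# Only providers whose key variable is set are treated as available by the router.
export OPENAI_API_KEY="sk-..."         # OpenAI models
export ANTHROPIC_API_KEY="sk-ant-..."  # Claude models
export GOOGLE_API_KEY="..."            # Gemini models
export GROK_API_KEY="..."              # Grok models (new in 1.0.5)
```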
@@ -93,23 +104,154 @@ class LLMRouter:
  return False
  return True

+ def _get_available_providers(self) -> List[str]:
+ """Get list of providers with available API keys."""
+ available = []
+ for provider in self._drivers.keys():
+ if self._check_api_key(provider):
+ available.append(provider)
+ return available
+
+ def _get_provider_selection_config(self) -> Dict[str, Any]:
+ """Get provider selection configuration from centralized config."""
+ try:
+ app_config = self._config_loader.get_config()
+ return app_config.get('ai', {}).get('provider_selection', {})
+ except Exception:
+ # Fallback to default behavior if config loading fails
+ return {
+ 'strategy': 'auto',
+ 'preferred_order': ['openai', 'anthropic', 'google', 'grok'],
+ 'fallback': {'enabled': True, 'retry_attempts': 2}
+ }
+
+ def _select_best_provider(self, requested_provider: Optional[str] = None,
+ requested_model: Optional[str] = None) -> tuple[str, str]:
+ """
+ Intelligently select the best available provider and model.
+
+ Args:
+ requested_provider: Explicitly requested provider (takes precedence)
+ requested_model: Explicitly requested model (used for provider detection)
+
+ Returns:
+ tuple: (selected_provider, selected_model)
+ """
+ config = self._get_provider_selection_config()
+ strategy = config.get('strategy', 'auto')
+
+ # If provider explicitly requested, try that first
+ if requested_provider and self._check_api_key(requested_provider):
+ fallback_model = self._get_fallback_model_for_provider(requested_provider)
+ return requested_provider, requested_model or fallback_model
+
+ # If model specified, detect its provider and check availability
+ if requested_model:
+ detected_provider = self._detect_provider(requested_model)
+ if self._check_api_key(detected_provider):
+ return detected_provider, requested_model
+
+ # Get available providers
+ available_providers = self._get_available_providers()
+ if not available_providers:
+ raise ValueError(
+ "No AI providers available. Please set at least one API key: "
+ "OPENAI_API_KEY, ANTHROPIC_API_KEY, GOOGLE_API_KEY, or GROK_API_KEY"
+ )
+
+ # Apply selection strategy
+ if strategy == 'auto' or strategy == 'prefer_cost' or strategy == 'prefer_performance':
+ preferred_order = config.get('preferred_order', ['openai', 'anthropic', 'google', 'grok'])
+
+ # Filter to only available providers, maintaining preference order
+ for provider in preferred_order:
+ if provider in available_providers:
+ model = self._get_optimal_model_for_provider(provider, strategy, config)
+ return provider, model
+
+ # Fallback to first available provider if strategy selection fails
+ first_provider = available_providers[0]
+ model = self._get_fallback_model_for_provider(first_provider)
+ return first_provider, model
+
+ def _get_optimal_model_for_provider(self, provider: str, strategy: str, config: Dict[str, Any]) -> str:
+ """Get the optimal model for a provider based on selection strategy."""
+ if strategy == 'prefer_cost':
+ cost_models = config.get('cost_optimization', {}).get('prefer_models', [])
+ for model in cost_models:
+ if self._detect_provider(model) == provider:
+ return model
+ elif strategy == 'prefer_performance':
+ perf_models = config.get('performance_optimization', {}).get('prefer_models', [])
+ for model in perf_models:
+ if self._detect_provider(model) == provider:
+ return model
+
+ # Fallback to provider's default model
+ return self._get_fallback_model_for_provider(provider)
+
+ def _get_fallback_model_for_provider(self, provider: str) -> str:
+ """Get default/fallback model for a specific provider."""
+ fallback_models = {
+ 'openai': 'gpt-4o-mini',
+ 'anthropic': 'claude-3-haiku',
+ 'google': 'gemini-pro',
+ 'grok': 'grok-1.5'
+ }
+ return fallback_models.get(provider, 'gpt-4o-mini')
+
  def get_llm_for_agent(self, agent_name: str) -> BaseChatModel:
  """
- Get an LLM instance configured for a specific agent.
- Uses agent-specific configuration from model_policy.yaml.
+ Get an LLM instance configured for a specific agent with intelligent provider selection.
+ Uses agent-specific configuration from model_policy.yaml and falls back to available providers.
  """
  config = self._resolve_model_config(agent_name)

- # Check if API key is available for the provider
- if not self._check_api_key(config['provider']):
- raise ValueError(f"API key not found for provider: {config['provider']}")
-
- return self._create_llm_instance(config)
+ # Try intelligent provider selection with fallback
+ try:
+ # Check if configured provider is available
+ if self._check_api_key(config['provider']):
+ return self._create_llm_instance(config)
+ else:
+ # Provider not available, use intelligent selection
+ print(f"Warning: Configured provider '{config['provider']}' not available for agent '{agent_name}'. Using intelligent fallback.")
+
+ selected_provider, selected_model = self._select_best_provider(
+ requested_model=config.get('model')
+ )
+
+ # Update config with selected provider and model
+ fallback_config = config.copy()
+ fallback_config['provider'] = selected_provider
+ fallback_config['model'] = selected_model
+
+ return self._create_llm_instance(fallback_config)
+
+ except Exception as e:
+ # Last resort fallback
+ available_providers = self._get_available_providers()
+ if not available_providers:
+ raise ValueError(
+ f"No AI providers available for agent '{agent_name}'. "
+ f"Please set at least one API key: OPENAI_API_KEY, ANTHROPIC_API_KEY, GOOGLE_API_KEY, or GROK_API_KEY"
+ ) from e
+
+ # Use first available provider with its default model
+ fallback_provider = available_providers[0]
+ fallback_model = self._get_fallback_model_for_provider(fallback_provider)
+
+ fallback_config = config.copy()
+ fallback_config['provider'] = fallback_provider
+ fallback_config['model'] = fallback_model
+
+ print(f"Warning: Fallback to {fallback_provider}/{fallback_model} for agent '{agent_name}' due to error: {e}")
+ return self._create_llm_instance(fallback_config)

  def get_llm(self, model_name: str = None, temperature: float = None, agent_name: str = None) -> BaseChatModel:
  """
- Initializes and returns an LLM instance using model_policy.yaml configuration.
+ Initializes and returns an LLM instance with intelligent provider selection.
  Uses provided parameters or falls back to agent-specific or global defaults.
+ Automatically selects best available provider if configured provider is unavailable.
  """
  # If agent_name is provided but other params are None, use agent-specific config
  if agent_name and model_name is None and temperature is None:
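
The selection strategy, preferred order, and optimization lists used above are read through `ConfigLoader` from the `ai.provider_selection` block of the centralized config. An illustrative sketch of that block (keys taken from the defaults and lookups in the diff; the model ids and exact file location are assumptions):

```yaml
ai:
  provider_selection:
    strategy: auto                 # auto | prefer_cost | prefer_performance
    preferred_order: [openai, anthropic, google, grok]
    fallback:
      enabled: true
      retry_attempts: 2
    cost_optimization:
      prefer_models: [gpt-4o-mini, claude-3-haiku]   # illustrative model ids
    performance_optimization:
      prefer_models: [gpt-4o, claude-3-opus]         # illustrative model ids
```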
@@ -120,26 +262,53 @@ class LLMRouter:
  model_name, temperature, agent_name
  )

- # Detect provider for the model
- provider = self._detect_provider(effective_model_name)
-
- # Check API key availability
- if not self._check_api_key(provider):
- # Fallback to default provider if API key is missing
+ # Use intelligent provider selection
+ try:
+ # Try to detect provider for the requested model
+ initial_provider = self._detect_provider(effective_model_name)
+
+ # Use intelligent selection to find best available option
+ selected_provider, selected_model = self._select_best_provider(
+ requested_provider=initial_provider if self._check_api_key(initial_provider) else None,
+ requested_model=effective_model_name
+ )
+
+ # Create configuration dict
+ config = {
+ 'model': selected_model,
+ 'temperature': effective_temperature,
+ 'provider': selected_provider
+ }
+
+ # Create and return the appropriate LLM instance
+ return self._create_llm_instance(config)
+
+ except Exception as e:
+ # Last resort: use fallback configuration from policy
+ print(f"Warning: Intelligent provider selection failed: {e}. Using fallback configuration.")
+
  fallback_config = self.config.get('default', {})
- effective_model_name = fallback_config.get('model', 'gpt-4o-mini')
- effective_temperature = fallback_config.get('temperature', 0.0)
- provider = fallback_config.get('provider', 'openai')
-
- # Create configuration dict
- config = {
- 'model': effective_model_name,
- 'temperature': effective_temperature,
- 'provider': provider
- }
-
- # Create and return the appropriate LLM instance
- return self._create_llm_instance(config)
+ fallback_provider = fallback_config.get('provider', 'openai')
+
+ # If default provider is not available, try any available provider
+ if not self._check_api_key(fallback_provider):
+ available_providers = self._get_available_providers()
+ if available_providers:
+ fallback_provider = available_providers[0]
+ effective_model_name = self._get_fallback_model_for_provider(fallback_provider)
+ else:
+ raise ValueError(
+ "No AI providers available. Please set at least one API key: "
+ "OPENAI_API_KEY, ANTHROPIC_API_KEY, GOOGLE_API_KEY, or GROK_API_KEY"
+ ) from e
+
+ config = {
+ 'model': effective_model_name,
+ 'temperature': effective_temperature,
+ 'provider': fallback_provider
+ }
+
+ return self._create_llm_instance(config)

  def _resolve_model_config(self, agent_name: str) -> Dict[str, Any]:
  """
@@ -230,6 +399,7 @@ class LLMRouter:
  info = {}
  for provider, driver in self._drivers.items():
  info[provider] = {
+ "available": self._check_api_key(provider),
  "models": driver.get_supported_models(),
  "capabilities": {
  model: driver.get_model_capabilities(model)
@@ -237,6 +407,26 @@ class LLMRouter:
  }
  }
  return info
+
+ def get_provider_status(self) -> Dict[str, Any]:
+ """Get status information about all providers and intelligent selection."""
+ available_providers = self._get_available_providers()
+ config = self._get_provider_selection_config()
+
+ return {
+ "available_providers": available_providers,
+ "total_providers": len(self._drivers),
+ "selection_strategy": config.get('strategy', 'auto'),
+ "preferred_order": config.get('preferred_order', []),
+ "provider_details": {
+ provider: {
+ "available": self._check_api_key(provider),
+ "api_key_env": f"{provider.upper()}_API_KEY",
+ "default_model": self._get_fallback_model_for_provider(provider)
+ }
+ for provider in self._drivers.keys()
+ }
+ }


  # Create global router instance
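
Taken together, the router changes add key-aware provider fallback and a status API. A minimal usage sketch (import path inferred from the wheel's RECORD entry `diagram_to_iac/tools/llm_utils/router.py`; assumes at least one provider key is set and is not the package's documented interface):

```python
import os

from diagram_to_iac.tools.llm_utils.router import LLMRouter

# Any one supported key is enough for the router to have an available provider.
os.environ.setdefault("OPENAI_API_KEY", "sk-...")

router = LLMRouter()

# New in 1.0.5: inspect which providers are usable and how selection is configured.
status = router.get_provider_status()
print(status["available_providers"], status["selection_strategy"])

# If the requested model's provider has no key, the router falls back to an
# available provider and that provider's default model.
llm = router.get_llm(model_name="grok-1.5", temperature=0.0)
```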
@@ -1,21 +1,22 @@
  Metadata-Version: 2.4
  Name: diagram-to-iac
- Version: 1.0.3
+ Version: 1.0.5
  Summary: Convert architecture diagrams into IaC modules
  Author-email: vindpro <admin@vindpro.com>
  Description-Content-Type: text/markdown
- Requires-Dist: anthropic==0.54.0
+ Requires-Dist: anthropic==0.55.0
  Requires-Dist: google_api_python_client==2.173.0
  Requires-Dist: langchain_anthropic==0.3.15
  Requires-Dist: langchain-core<1.0.0,>=0.3.62
  Requires-Dist: langchain_google_genai==2.1.5
- Requires-Dist: langchain_openai==0.3.24
+ Requires-Dist: langchain_openai==0.3.25
  Requires-Dist: langgraph==0.4.8
- Requires-Dist: openai==1.90.0
+ Requires-Dist: openai==1.91.0
  Requires-Dist: protobuf>=5.27.0
  Requires-Dist: pydantic==2.11.7
  Requires-Dist: PyYAML==6.0.2
  Requires-Dist: Requests==2.32.4
+ Requires-Dist: typing_extensions==4.14.0
  Requires-Dist: GitPython<4.0,>=3.1

  # diagram-to-iac
@@ -28,27 +29,39 @@ An automated DevOps-in-a-Box system that intelligently handles complete Repo-to-

  The **R2D (Repo-to-Deployment) Action** is a self-healing, Terraform-first DevOps automation solution that lives inside a single GitHub Action. When you supply any repository URL, our SupervisorAgent marshals specialized agents to handle the complete deployment workflow.

- ### Quick Start with GitHub Actions
+ ### 🎯 2-Minute Quick Start
+
+ **Step 1:** Add the unified workflow to your repository:

  ```yaml
- name: Deploy Infrastructure
+ # .github/workflows/r2d-unified.yml
+ name: R2D - DevOps in a Box
  on:
- push:
- branches: [main]
+ issues:
+ types: [opened, edited]
+ pull_request:
+ types: [closed]
+ workflow_dispatch:
+ inputs:
+ repo_url:
+ description: 'Repository URL to deploy'
+ required: false
+ type: string

  jobs:
- deploy:
- runs-on: ubuntu-latest
- steps:
- - name: Deploy with R2D
- uses: amartyamandal/diagram-to-iac/.github/actions/r2d@v1
- with:
- repo: ${{ github.server_url }}/${{ github.repository }}
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- TF_CLOUD_TOKEN: ${{ secrets.TF_CLOUD_TOKEN }}
+ r2d-deploy:
+ uses: amartyamandal/diagram-to-iac/.github/workflows/r2d-unified.yml@main
+ secrets: inherit
+ with:
+ repo_url: ${{ inputs.repo_url || github.server_url }}/${{ github.repository }}
  ```

+ **Step 2:** Configure your secrets (see [complete guide](docs/R2D_USER_GUIDE.md))
+
+ **Step 3:** Trigger deployment via issue, PR merge, or manual run
+
+ > 📚 **[Complete Setup Guide](docs/R2D_USER_GUIDE.md)** - Everything you need in one place!
+
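For the manual path in Step 3, the workflow can also be dispatched from the GitHub CLI; a minimal sketch (assumes `gh` is authenticated and the workflow file is named as in Step 1; the repository URL is a placeholder):

```bash
gh workflow run r2d-unified.yml \
  --field repo_url="https://github.com/my-org/infrastructure"
```
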
  ### Key Features

  - **🤖 Self-Healing**: Automatically creates GitHub Issues for errors and suggests fixes
@@ -167,89 +180,93 @@ In GitHub Actions, the following artifacts are automatically collected:

  ## 🔧 GitHub Actions Usage

- Add the R2D action to your workflow for automated infrastructure deployment:
+ The R2D system now uses a **unified workflow** that handles all deployment scenarios automatically. This replaces the previous multiple workflow files with a single, intelligent solution.
+
+ ### ⚡ Simple Setup (Recommended)

- ### Basic Setup
+ Add this single workflow file to get all R2D capabilities:

  ```yaml
- name: Deploy Infrastructure
+ # .github/workflows/r2d-unified.yml
+ name: R2D - DevOps in a Box
  on:
- push:
- branches: [main]
+ issues:
+ types: [opened, edited]
+ pull_request:
+ types: [closed]
+ workflow_dispatch:
+ inputs:
+ repo_url:
+ description: 'Repository URL to deploy'
+ required: false
+ type: string

  jobs:
- deploy:
- runs-on: ubuntu-latest
- steps:
- - name: Deploy with R2D
- uses: amartyamandal/diagram-to-iac/.github/actions/r2d@v1
- with:
- repo: ${{ github.server_url }}/${{ github.repository }}
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- TF_CLOUD_TOKEN: ${{ secrets.TF_CLOUD_TOKEN }}
+ r2d-deploy:
+ uses: amartyamandal/diagram-to-iac/.github/workflows/r2d-unified.yml@main
+ secrets: inherit
+ with:
+ repo_url: ${{ inputs.repo_url || github.server_url }}/${{ github.repository }}
  ```

- ### Advanced Configuration
+ ### 🎮 Multiple Ways to Deploy

- ```yaml
- - name: Deploy with Custom Settings
- uses: amartyamandal/diagram-to-iac/.github/actions/r2d@v1
- with:
- repo: 'https://github.com/my-org/infrastructure'
- package_version: 'v2.1.0'
- dry_run: 'false'
- branch_name: 'deploy-prod'
- thread_id: 'prod-deployment-001'
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- TF_CLOUD_TOKEN: ${{ secrets.TF_CLOUD_TOKEN }}
- OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
- ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
- ```
+ - **📝 Issue-based**: Create an issue with "deploy" in title/body
+ - **🔀 PR merge**: Automatic deployment when PRs are merged
+ - **🎯 Manual**: Use workflow_dispatch with optional custom repo URL
+ - **🔒 External repos**: Deploy any accessible repository

- ### Required Secrets
+ ### 📋 Required Secrets

- Configure these in your repository settings:
+ Configure these in your repository settings → Secrets and variables → Actions:

  | Secret | Description | Required |
  |--------|-------------|----------|
  | `GITHUB_TOKEN` | GitHub API access (auto-provided) | ✅ Yes |
- | `TF_CLOUD_TOKEN` | Terraform Cloud API token | ✅ Yes |
- | `OPENAI_API_KEY` | OpenAI API key | ❌ Optional |
- | `ANTHROPIC_API_KEY` | Claude API key | ❌ Optional |
- | `GOOGLE_API_KEY` | Gemini API key | ❌ Optional |
+ | `TF_CLOUD_TOKEN` | Terraform Cloud workspace token | ✅ Yes |
+ | `OPENAI_API_KEY` | OpenAI API key for AI features | ❌ Optional |
+ | `ANTHROPIC_API_KEY` | Claude API key for AI features | ❌ Optional |

- ### Example Workflows
+ > 📚 **[Complete Setup Guide](docs/R2D_USER_GUIDE.md)** includes examples, troubleshooting, and advanced configurations.
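
One way to populate the secrets above is the GitHub CLI; a minimal sketch (assumes `gh` is authenticated against the target repository; values are placeholders, and `GITHUB_TOKEN` needs no setup because Actions provides it):

```bash
gh secret set TF_CLOUD_TOKEN --body "<terraform-cloud-token>"
gh secret set OPENAI_API_KEY --body "<openai-key>"        # optional
gh secret set ANTHROPIC_API_KEY --body "<anthropic-key>"  # optional
```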

- See [`.github/actions/r2d/examples/`](.github/actions/r2d/examples/) for complete workflow examples:
+ ## 📈 Observability & Monitoring

- - **Basic Deployment**: Simple push-to-deploy workflow
- - **Multi-Environment**: Deploy to dev/staging/prod with approvals
- - **PR Validation**: Validate infrastructure changes in pull requests
- - **Drift Detection**: Scheduled infrastructure drift monitoring
- - **External Repository**: Deploy from external repositories
+ After each workflow run, comprehensive artifacts are generated:

+ - **📊 Step Summary Dashboard**: `step-summary.md` with Terraform changes, security findings, and metrics
+ - **📋 Structured Logs**: JSONL format in `logs/` directory for analysis
+ - **🔍 GitHub Integration**: Automatic issue creation and PR suggestions
+ - **📦 Artifacts**: Plans, reports, and logs uploaded as GitHub Actions artifacts

- After each workflow run, a Markdown dashboard is generated at `step-summary.md`
- showing a high level overview of Terraform modules, resource changes and tfsec
- findings. The dashboard is derived from the JSONL logs and can be viewed
- directly in the repository or uploaded as a build artifact.
+ ## 🧪 Development & Testing

+ For local development and testing:

+ ```bash
+ # Install development dependencies
+ pip install -e .[dev]

- This repository provides a container action that runs the `SupervisorAgent` on the current repository. Add the action to a workflow as shown below:
+ # Run the CLI locally
+ diagram-to-iac https://github.com/user/repo --dry-run

- ```yaml
- jobs:
- supervisor:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
- - name: Run Supervisor Agent
- uses: ./.github/actions/supervisor
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ # Run tests
+ pytest
  ```

- The action reads `GITHUB_REPOSITORY` and `GITHUB_TOKEN` automatically to clone the repository and execute the agent.
+ ## 📚 Documentation
+
+ - **[R2D User Guide](docs/R2D_USER_GUIDE.md)** - Complete setup and usage guide
+ - **[Migration Guide](docs/MIGRATION_GUIDE.md)** - Migrate from old workflows to unified approach
+ - **[Container Action](.github/actions/r2d/)** - Technical details of the container action
+ - **[Agent Architecture](src/diagram_to_iac/agents/)** - How the AI agents work together
+ - **[Simplification Summary](docs/SIMPLIFICATION_COMPLETION_SUMMARY.md)** - What changed in the unified approach
+
+ ## 🤝 Contributing
+
+ This project follows strict coding standards and security practices. See the development guidelines in the repository for contribution instructions.
+
+ ---
+
+ > **"One container, many minds—zero manual toil."** 🤖
@@ -3,13 +3,13 @@ diagram_to_iac/cli.py,sha256=uumG1frF42eCkdLIZxyxQB1x6lDwtG-qKL4vcHnLLXY,400
  diagram_to_iac/r2d.py,sha256=I7XSuUtu8TdvAhK4tCMLc3U_3ZtP7DJGfq168aeI3Mk,13208
  diagram_to_iac/actions/__init__.py,sha256=P1CjjY4FYUA0Tcx8FQNLYYSI9fhv8yKd_TmRGtmhW50,229
  diagram_to_iac/actions/git_entry.py,sha256=mhY6gYquUPVvyvnTC2S90z_uXEe1asqWLoi1989aB_Q,5403
- diagram_to_iac/actions/supervisor_entry.py,sha256=vWhFn-4M0jQnrUQUSCb0I_YNxzGsKiBV0QNzkIkQfNE,3586
+ diagram_to_iac/actions/supervisor_entry.py,sha256=Nm6YIBbEzpL1huza3a0ThA0hdimptRd5rndLUsOMSH4,9282
  diagram_to_iac/actions/terraform_agent_entry.py,sha256=gKkX4fIRdBDZpwPQO_v2t1SSO0SQuzaxQ0StKegGK8U,6852
  diagram_to_iac/agents/__init__.py,sha256=GHInKSPq56ZPYSKsyti6_wk82dhn2hOqfxNHkZZOj_0,735
  diagram_to_iac/agents/demonstrator_langgraph/__init__.py,sha256=nghMYMEEarfkR0V6AH1fDCV-mXBLnmFP2sO4OPxJ4cI,371
  diagram_to_iac/agents/demonstrator_langgraph/agent.py,sha256=9ZH2H5iAB2DfMhCr-OzImVZlwoeXIP8RKl6_VG47W2I,35349
  diagram_to_iac/agents/git_langgraph/__init__.py,sha256=x6nCnOu-Vcl-qVqW1swhdaE_sQqUSvEUUtWk4eePBUo,295
- diagram_to_iac/agents/git_langgraph/agent.py,sha256=bG3GHub62Rm8q9XrkLMN4kLBH6BTiLYrxQ9heNGsoCY,47640
+ diagram_to_iac/agents/git_langgraph/agent.py,sha256=_lGwyTTgTsS4ZtaQLCceGJa5zeLvux_Hqda1-fqAgXg,49288
  diagram_to_iac/agents/git_langgraph/pr.py,sha256=qXopN5XAF1DIac5vbH-QasihkuAiWmC9JY8pLYlm-sQ,8601
  diagram_to_iac/agents/hello_langgraph/__init__.py,sha256=lviuDAPJezmpaXR-H7JxfIT9wvg1xO2t6JLyeKSSx0Y,266
  diagram_to_iac/agents/hello_langgraph/agent.py,sha256=R49yfFGxqMPBBu36ztDH9lBE_-s7VFyRB33gnNSXxek,33777
@@ -22,8 +22,9 @@ diagram_to_iac/agents/shell_langgraph/__init__.py,sha256=teAx1L87McCj9_24NUdET3O
  diagram_to_iac/agents/shell_langgraph/agent.py,sha256=dZWzjVQ9oX_BtNHQ1Zrzy2oQpuY1e5BS51-SGcWpoSw,4341
  diagram_to_iac/agents/shell_langgraph/detector.py,sha256=wLw0uDP_V2m1z6SRk7QNCzoUMYCfXwu3DNg8EWue9yk,1493
  diagram_to_iac/agents/supervisor_langgraph/__init__.py,sha256=iLN60d20cqoXOLyuLvJkiwrzapE84em222Tnyndq2dc,385
- diagram_to_iac/agents/supervisor_langgraph/agent.py,sha256=NdqYFyVw1bc4UK-IQOmcc1ZEIQVVakg1z5OC_uN_kqc,84790
+ diagram_to_iac/agents/supervisor_langgraph/agent.py,sha256=1qfgYSQQx1JNgN9ORFCl35NrklRbN7ZcUdP5AbGBsGo,101342
  diagram_to_iac/agents/supervisor_langgraph/demonstrator.py,sha256=OT-bElEyLZBedzcc5DtZnp1yhjYVjx4jRzt52f5SoSU,803
+ diagram_to_iac/agents/supervisor_langgraph/github_listener.py,sha256=Ko9dOnS9CUqbjTogEyhEmVhkiaW8OiwLzX6k18lSrac,16377
  diagram_to_iac/agents/supervisor_langgraph/guards.py,sha256=XzBgjXnwbOgLkGm7AqXX4tQdGBerq_6pKvduKPqIwF0,720
  diagram_to_iac/agents/supervisor_langgraph/pat_loop.py,sha256=feY8ZPGQxqkUuHOMSdpilGDUjOvaky8xImLuVe98hrw,1566
  diagram_to_iac/agents/supervisor_langgraph/router.py,sha256=7hZXXEmtvG__w7UAaOhoPaHdubUv-oMKbQdMTMXk-qY,276
@@ -37,7 +38,9 @@ diagram_to_iac/core/enhanced_memory.py,sha256=Ga5wtI45zEcbwL_F1YqJaXBRpWK0iJPa69
  diagram_to_iac/core/errors.py,sha256=gZwZocnIcBlS4YccIBdjG8XztRCtMe4Cu6KWxLzebDM,115
  diagram_to_iac/core/issue_tracker.py,sha256=0eo289hn94yCoFCkLaYiDOIJBjk33i2dk6eLeYe_9YE,1659
  diagram_to_iac/core/memory.py,sha256=P9URX8m2nab65ZPF36uf6Z9hEXQGXrjrXa8dPXG7pm8,4444
+ diagram_to_iac/core/registry.py,sha256=AM2fv9lzrNvFfkyt7VMxQ5SWIOWhdBu4_3Aaspdokj8,25758
  diagram_to_iac/services/__init__.py,sha256=I5R8g7vYX4tCldRf1Jf9vEhm5mylc-MfFicqLnY6a3E,238
+ diagram_to_iac/services/commenter.py,sha256=iXvHXOeih64FbE34PuGPk6fhI4RmC62ZSVtFwmMqiOA,22146
  diagram_to_iac/services/observability.py,sha256=yxbnjMc4TO1SM8RZZMHf2E8uVOLpxFhiTjsTkymDi6Y,1856
  diagram_to_iac/services/step_summary.py,sha256=g3MuMZ51IDubI0oWcF7qMvseNgDS6D90AsKK_1s5xDQ,2808
  diagram_to_iac/tools/__init__.py,sha256=F2pcKhoPP5KDeQIGcqKXD1J30KFKc9qxMw1jxzrs9qY,434
@@ -49,17 +52,18 @@ diagram_to_iac/tools/git/git.py,sha256=0NYz9NqQWf-5YTX7R3nBPyLmzvih-jhd0gYY8KZDm
  diagram_to_iac/tools/hello/__init__.py,sha256=f6GpkiQxvuGaRMm34yQilGACxUI4c5edJQTDjZtskjQ,891
  diagram_to_iac/tools/hello/cal_utils.py,sha256=B-0iOJHNL1IgYPlWUdrAwEf1r9LUKBAnGyx1xQz05ZE,1507
  diagram_to_iac/tools/hello/text_utils.py,sha256=ZaVQYw6GVqaq9EDTQfG3gTAudeN8CuFUUb7IETZhUCA,3952
- diagram_to_iac/tools/llm_utils/__init__.py,sha256=IQ6cQprJtV4j5s_RVnt94rCGXfuvZ0PiTH6Y0gK242o,440
+ diagram_to_iac/tools/llm_utils/__init__.py,sha256=xkSbnB3_eqFX7UDZPl9s4blh5IKPzRh52sHdwmJ4VMM,494
  diagram_to_iac/tools/llm_utils/anthropic_driver.py,sha256=tb8HVGB6Ng9ZwImRJtSy2X0965ZE3Vm5g8HbMfcLyBY,3674
  diagram_to_iac/tools/llm_utils/base_driver.py,sha256=sDUxk6_iNn3WU_HyRz2hW3YGTn8_7aucqEUnGTj2PeU,2503
  diagram_to_iac/tools/llm_utils/gemini_driver.py,sha256=VO1mJ3o10oSFo5hTBs6h8TJsXyAuah4FRr6Ua-9aNYc,3794
+ diagram_to_iac/tools/llm_utils/grok_driver.py,sha256=hcq4m6ZEgjVsLXaaGlW5SWHEqyjY4KUDy88xSZFUa6Y,2955
  diagram_to_iac/tools/llm_utils/openai_driver.py,sha256=ZqzXEYEutwqRw3qWx-GH85Mj2afxK4NlhCOMq_MabqQ,3962
- diagram_to_iac/tools/llm_utils/router.py,sha256=WHGanstQjUlo2SmDFKGAL6xtIb3xXWlgZ5CIzHWN8I8,12906
+ diagram_to_iac/tools/llm_utils/router.py,sha256=hl-y1CCvRoBWSpKpkDI_SSyi9YIT2ZA6y6awn7_ErkM,22117
  diagram_to_iac/tools/shell/__init__.py,sha256=6UZjBcnbPabA6Qy7t4j-dCi3S2sE6sB2bTE9PIL98bA,292
  diagram_to_iac/tools/shell/shell.py,sha256=ZOJ7Vo3l_R2Gm6Ml2FL0RX__-C_JOsUrLJVvBMwAy9E,21122
  diagram_to_iac/tools/tf/terraform.py,sha256=j1boWRo6JKpNGf1OwnWoWboO0gMYTizCOHDSxozoFZw,37343
- diagram_to_iac-1.0.3.dist-info/METADATA,sha256=u03hdyylQnNt18ESYsD-1DrWd8tYcypOVfKykOUjXuQ,9019
- diagram_to_iac-1.0.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- diagram_to_iac-1.0.3.dist-info/entry_points.txt,sha256=DfGCnmgWWGHtQpqU8VqcUWs5k_be-bfO67z1vOuTitA,277
- diagram_to_iac-1.0.3.dist-info/top_level.txt,sha256=k1cV0YODiCUU46qlmbQaquMcbMXhNm05NZLxsinDUBA,15
- diagram_to_iac-1.0.3.dist-info/RECORD,,
+ diagram_to_iac-1.0.5.dist-info/METADATA,sha256=2P5RDic-pAsOp3aueq4yn6hkOQsDfl59cvAn-uyYdkI,9821
+ diagram_to_iac-1.0.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ diagram_to_iac-1.0.5.dist-info/entry_points.txt,sha256=DfGCnmgWWGHtQpqU8VqcUWs5k_be-bfO67z1vOuTitA,277
+ diagram_to_iac-1.0.5.dist-info/top_level.txt,sha256=k1cV0YODiCUU46qlmbQaquMcbMXhNm05NZLxsinDUBA,15
+ diagram_to_iac-1.0.5.dist-info/RECORD,,