daita-agents 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of daita-agents might be problematic. Click here for more details.

Files changed (69) hide show
  1. daita/__init__.py +208 -0
  2. daita/agents/__init__.py +33 -0
  3. daita/agents/base.py +722 -0
  4. daita/agents/substrate.py +895 -0
  5. daita/cli/__init__.py +145 -0
  6. daita/cli/__main__.py +7 -0
  7. daita/cli/ascii_art.py +44 -0
  8. daita/cli/core/__init__.py +0 -0
  9. daita/cli/core/create.py +254 -0
  10. daita/cli/core/deploy.py +473 -0
  11. daita/cli/core/deployments.py +309 -0
  12. daita/cli/core/import_detector.py +219 -0
  13. daita/cli/core/init.py +382 -0
  14. daita/cli/core/logs.py +239 -0
  15. daita/cli/core/managed_deploy.py +709 -0
  16. daita/cli/core/run.py +648 -0
  17. daita/cli/core/status.py +421 -0
  18. daita/cli/core/test.py +239 -0
  19. daita/cli/core/webhooks.py +172 -0
  20. daita/cli/main.py +588 -0
  21. daita/cli/utils.py +541 -0
  22. daita/config/__init__.py +62 -0
  23. daita/config/base.py +159 -0
  24. daita/config/settings.py +184 -0
  25. daita/core/__init__.py +262 -0
  26. daita/core/decision_tracing.py +701 -0
  27. daita/core/exceptions.py +480 -0
  28. daita/core/focus.py +251 -0
  29. daita/core/interfaces.py +76 -0
  30. daita/core/plugin_tracing.py +550 -0
  31. daita/core/relay.py +695 -0
  32. daita/core/reliability.py +381 -0
  33. daita/core/scaling.py +444 -0
  34. daita/core/tools.py +402 -0
  35. daita/core/tracing.py +770 -0
  36. daita/core/workflow.py +1084 -0
  37. daita/display/__init__.py +1 -0
  38. daita/display/console.py +160 -0
  39. daita/execution/__init__.py +58 -0
  40. daita/execution/client.py +856 -0
  41. daita/execution/exceptions.py +92 -0
  42. daita/execution/models.py +317 -0
  43. daita/llm/__init__.py +60 -0
  44. daita/llm/anthropic.py +166 -0
  45. daita/llm/base.py +373 -0
  46. daita/llm/factory.py +101 -0
  47. daita/llm/gemini.py +152 -0
  48. daita/llm/grok.py +114 -0
  49. daita/llm/mock.py +135 -0
  50. daita/llm/openai.py +109 -0
  51. daita/plugins/__init__.py +141 -0
  52. daita/plugins/base.py +37 -0
  53. daita/plugins/base_db.py +167 -0
  54. daita/plugins/elasticsearch.py +844 -0
  55. daita/plugins/mcp.py +481 -0
  56. daita/plugins/mongodb.py +510 -0
  57. daita/plugins/mysql.py +351 -0
  58. daita/plugins/postgresql.py +331 -0
  59. daita/plugins/redis_messaging.py +500 -0
  60. daita/plugins/rest.py +529 -0
  61. daita/plugins/s3.py +761 -0
  62. daita/plugins/slack.py +729 -0
  63. daita/utils/__init__.py +18 -0
  64. daita_agents-0.1.0.dist-info/METADATA +350 -0
  65. daita_agents-0.1.0.dist-info/RECORD +69 -0
  66. daita_agents-0.1.0.dist-info/WHEEL +5 -0
  67. daita_agents-0.1.0.dist-info/entry_points.txt +2 -0
  68. daita_agents-0.1.0.dist-info/licenses/LICENSE +56 -0
  69. daita_agents-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,421 @@
1
+ """
2
+ Simple status display for Daita CLI.
3
+ Shows project and deployment status like git status.
4
+ """
5
+ import json
6
+ import yaml
7
+ from pathlib import Path
8
+ from datetime import datetime
9
+ from ..utils import find_project_root
10
+
11
+
12
async def show_project_status(environment=None, verbose=False):
    """Show project status (like git status).

    Prints the project header, component summary, deployment status
    (cloud when DAITA_API_KEY is set, otherwise local history), and any
    detected issues. Raises ValueError outside a Daita project.
    """
    import os

    root = find_project_root()
    if not root:
        raise ValueError("Not in a Daita project. Run 'daita init' first.")

    config = _load_project_config(root)
    if not config:
        print(" No daita-project.yaml found")
        return

    # Project header.
    print(f" Project: {config.get('name', 'Unknown')} (v{config.get('version', '1.0.0')})")
    print(f"Location: {root}")
    print("")

    _show_components_status(root, config)

    if os.getenv('DAITA_API_KEY'):
        # API key present: query hosted deployment status.
        if environment:
            await _show_cloud_environment_status(environment, verbose)
        else:
            await _show_cloud_deployments_status(verbose)
    else:
        # No key: fall back to locally recorded deployment history.
        print(" Cloud Deployments: Upgrade required")
        print(" Get your API key at daita-tech.io")
        print(" Local deployment history:")
        _show_local_deployments_status(root, verbose)

    _show_issues(root, config)
56
+ def _show_components_status(project_root, config):
57
+ """Show status of agents and workflows."""
58
+ print(" Components:")
59
+
60
+ # Check agents
61
+ agents = config.get('agents', [])
62
+ agents_dir = project_root / 'agents'
63
+
64
+ # Fallback to filesystem scanning if config is empty
65
+ if not agents and agents_dir.exists():
66
+ agent_files = list(agents_dir.glob('*.py'))
67
+ agent_files = [f for f in agent_files if f.name != '__init__.py']
68
+
69
+ if agent_files:
70
+ print(f" Agents ({len(agent_files)}) [detected from filesystem]:")
71
+ for agent_file in agent_files:
72
+ agent_name = agent_file.stem
73
+ print(f" {agent_name} (not in config)")
74
+ else:
75
+ print(f" Agents: None")
76
+ elif agents:
77
+ print(f" Agents ({len(agents)}):")
78
+ for agent in agents:
79
+ agent_file = agents_dir / f"{agent['name']}.py"
80
+ status = "" if agent_file.exists() else ""
81
+ display_name = agent.get('display_name', agent['name'])
82
+ print(f" {status} {agent['name']} → '{display_name}'")
83
+ else:
84
+ print(f" Agents: None")
85
+
86
+ # Check workflows
87
+ workflows = config.get('workflows', [])
88
+ workflows_dir = project_root / 'workflows'
89
+
90
+ # Fallback to filesystem scanning if config is empty
91
+ if not workflows and workflows_dir.exists():
92
+ workflow_files = list(workflows_dir.glob('*.py'))
93
+ workflow_files = [f for f in workflow_files if f.name != '__init__.py']
94
+
95
+ if workflow_files:
96
+ print(f" Workflows ({len(workflow_files)}) [detected from filesystem]:")
97
+ for workflow_file in workflow_files:
98
+ workflow_name = workflow_file.stem
99
+ print(f" {workflow_name} (not in config)")
100
+ else:
101
+ print(f" Workflows: None")
102
+ elif workflows:
103
+ print(f" Workflows ({len(workflows)}):")
104
+ for workflow in workflows:
105
+ workflow_file = workflows_dir / f"{workflow['name']}.py"
106
+ status = "" if workflow_file.exists() else ""
107
+ display_name = workflow.get('display_name', workflow['name'])
108
+ print(f" {status} {workflow['name']} → '{display_name}'")
109
+ else:
110
+ print(f" Workflows: None")
111
+
112
+ print("")
113
+
114
def _show_environment_status(project_root, environment, verbose):
    """Print the most recent local deployment recorded for one environment."""
    print(f" Environment: {environment}")

    # Keep only history records for the requested environment.
    matching = [record for record in _load_deployments(project_root)
                if record['environment'] == environment]

    if not matching:
        print(f" Never deployed to {environment}")
    else:
        newest = matching[-1]
        # Trim ISO timestamp to seconds and drop the 'T' separator.
        stamp = newest['timestamp'][:19].replace('T', ' ')
        print(f" Last deployed: {stamp}")
        print(f" Version: {newest.get('version', 'unknown')}")
        if verbose:
            print(f" Agents: {', '.join(newest.get('agents', []))}")
            print(f" Workflows: {', '.join(newest.get('workflows', []))}")

    print("")
137
def _show_local_deployments_status(project_root, verbose):
    """Summarize the locally recorded deployment history per environment."""
    history = _load_deployments(project_root)

    if not history:
        print(" No local deployment history")
        print("")
        return

    # Bucket records by environment, preserving record order within each.
    by_env = {}
    for record in history:
        by_env.setdefault(record['environment'], []).append(record)

    # Report the newest record for each environment.
    for env_name, records in by_env.items():
        newest = records[-1]
        stamp = newest['timestamp'][:16].replace('T', ' ')
        release = newest.get('version', '?')
        print(f" {env_name}: v{release} ({stamp})")
        if verbose:
            print(f" {len(newest.get('agents', []))} agents")
            print(f" {len(newest.get('workflows', []))} workflows")

    print("")
168
async def _show_cloud_deployments_status(verbose):
    """Show status of cloud deployments via API.

    Fetches the organization's deployments from the Daita API (filtered to
    the current project when run inside one) and prints the five most
    recent. All failures degrade to a printed message; nothing is raised.

    Note: ``verbose`` is accepted but currently unused in this function.
    """
    import os
    import aiohttp

    # Get current project name to filter deployments when in project directory
    project_root = find_project_root()
    config = _load_project_config(project_root) if project_root else None
    current_project = config.get('name') if config else None

    try:
        api_key = os.getenv('DAITA_API_KEY')
        api_endpoint = os.getenv('DAITA_API_ENDPOINT', 'https://ondk4sdyv0.execute-api.us-east-1.amazonaws.com')

        headers = {
            "Authorization": f"Bearer {api_key}",
            "User-Agent": "Daita-CLI/1.0.0"
        }

        async with aiohttp.ClientSession() as session:
            url = f"{api_endpoint}/api/v1/deployments/api-key"
            params = {}
            if current_project:
                params['project_name'] = current_project

            async with session.get(url, headers=headers, params=params, timeout=10) as response:
                if response.status == 200:
                    data = await response.json()

                    # Handle paginated response from API: either a dict with a
                    # 'deployments' key or a bare list.
                    if isinstance(data, dict) and 'deployments' in data:
                        deployments = data['deployments']
                    else:
                        deployments = data if isinstance(data, list) else []

                    if not deployments:
                        if current_project:
                            print(f" Cloud Deployments ({current_project}): None")
                            print(" Run 'daita push staging' to deploy")
                        else:
                            print(" Cloud Deployments: None")
                            print(" Run 'daita push staging' to deploy")
                        print("")
                        return

                    # Update header to indicate scope (project vs organization).
                    if current_project:
                        print(f" Cloud Deployments ({current_project}) ({len(deployments)}):")
                    else:
                        print(f" Cloud Deployments (Organization) ({len(deployments)}):")

                    # Show most recent 5 deployments (assumes the API returns
                    # newest first — TODO confirm against the API contract).
                    recent_deployments = deployments[:5]

                    # Find most recent active deployment to tag as "(current)".
                    latest_active = None
                    for deployment in deployments:
                        if deployment.get('status') == 'active':
                            latest_active = deployment
                            break

                    for deployment in recent_deployments:
                        env = deployment.get('environment', 'unknown')
                        version = deployment.get('version', '?')
                        status = deployment.get('status', 'unknown')
                        # Trim ISO timestamp to minutes; drop the 'T' separator.
                        deployed_at = deployment.get('deployed_at', '')[:16].replace('T', ' ')

                        # Check if this is the current active deployment
                        is_current = (latest_active and
                                      deployment.get('deployment_id') == latest_active.get('deployment_id'))
                        current_text = " (current)" if is_current else ""

                        status_icon = "●" if status == 'active' else "○"
                        print(f" {status_icon} {env}: v{version} ({deployed_at}){current_text}")

                    print("")
                else:
                    # Non-200 response: report without raising.
                    print(" Cloud Deployments: Unable to fetch status")
                    print(" Check your internet connection and API key")
                    print("")

    except Exception:
        # Network/parse failure: degrade gracefully rather than crash status.
        print(" Cloud Deployments: Connection failed")
        print(" Using local deployment history")
        print("")
254
async def _show_cloud_environment_status(environment, verbose):
    """Show cloud status for specific environment.

    Queries the Daita API for deployments in ``environment`` (scoped to the
    current project when inside one) and prints the latest record. All
    failures degrade to a printed message; nothing is raised.
    """
    import os
    import aiohttp

    # Get current project name to filter deployments when in project directory
    project_root = find_project_root()
    config = _load_project_config(project_root) if project_root else None
    current_project = config.get('name') if config else None

    try:
        api_key = os.getenv('DAITA_API_KEY')
        api_endpoint = os.getenv('DAITA_API_ENDPOINT', 'https://ondk4sdyv0.execute-api.us-east-1.amazonaws.com')

        headers = {
            "Authorization": f"Bearer {api_key}",
            "User-Agent": "Daita-CLI/1.0.0"
        }

        async with aiohttp.ClientSession() as session:
            params = {'environment': environment}
            if current_project:
                params['project_name'] = current_project

            url = f"{api_endpoint}/api/v1/deployments/api-key"
            async with session.get(url, headers=headers, params=params, timeout=10) as response:
                if response.status == 200:
                    data = await response.json()

                    # Handle paginated response from API: either a dict with a
                    # 'deployments' key or a bare list.
                    if isinstance(data, dict) and 'deployments' in data:
                        deployments = data['deployments']
                    else:
                        deployments = data if isinstance(data, list) else []

                    if not deployments:
                        if current_project:
                            print(f" Environment: {environment} ({current_project})")
                            print(f" Never deployed to {environment}")
                        else:
                            print(f" Environment: {environment}")
                            print(f" Never deployed to {environment}")
                        print("")
                        return

                    # NOTE(review): this takes the LAST element as "latest",
                    # while _show_cloud_deployments_status assumes the API
                    # returns newest FIRST — one of the two is likely wrong;
                    # confirm the API's ordering.
                    latest = deployments[-1]
                    status = latest.get('status', 'unknown')
                    version = latest.get('version', '?')
                    # Trim ISO timestamp to seconds; drop the 'T' separator.
                    deployed_at = latest.get('deployed_at', '')[:19].replace('T', ' ')

                    # NOTE(review): both branches yield "" — status icons appear
                    # to have been stripped from the source; preserved as-is.
                    status_icon = "" if status == 'deployed' else ""
                    if current_project:
                        print(f" Environment: {environment} ({current_project})")
                    else:
                        print(f" Environment: {environment}")
                    print(f" {status_icon} Status: {status}")
                    print(f" Last deployed: {deployed_at}")
                    print(f" Version: {version}")

                    if verbose and 'functions' in latest:
                        functions = latest['functions']
                        print(f" Functions: {len(functions)}")
                        for func in functions:
                            print(f" • {func.get('name', 'unknown')}")

                    print("")
                else:
                    # Non-200 response: report without raising.
                    print(f" Environment: {environment}")
                    print(" Unable to fetch cloud status")
                    print("")

    except Exception:
        # Network/parse failure: degrade gracefully rather than crash status.
        print(f" Environment: {environment}")
        print(" Connection failed - using local history")
        print("")
330
def _show_issues(project_root, config):
    """Print project health problems, then a short list of handy commands."""
    import os

    problems = []

    # Required top-level project files.
    problems.extend(
        f"Missing {required}"
        for required in ('daita-project.yaml', 'requirements.txt')
        if not (project_root / required).exists()
    )

    # Agents declared in config but missing on disk.
    problems.extend(
        f"Missing agent file: {entry['name']}.py"
        for entry in config.get('agents', [])
        if not (project_root / 'agents' / f"{entry['name']}.py").exists()
    )

    # Workflows declared in config but missing on disk.
    problems.extend(
        f"Missing workflow file: {entry['name']}.py"
        for entry in config.get('workflows', [])
        if not (project_root / 'workflows' / f"{entry['name']}.py").exists()
    )

    # An LLM provider key is needed for agents to do anything useful.
    if not _has_api_key():
        problems.append("No LLM API key found (set OPENAI_API_KEY, ANTHROPIC_API_KEY, etc.)")

    if problems:
        print(" Issues:")
        for problem in problems:
            print(f" {problem}")
    else:
        print(" No issues found")
    print("")

    # Helpful commands, tailored to whether a Daita cloud key is set.
    print(" Quick commands:")
    print(" daita create agent my_agent # Create new agent (free)")
    print(" daita test # Test all components (free)")
    print(" daita test --watch # Development mode (free)")

    if os.getenv('DAITA_API_KEY'):
        print(" daita push staging # Deploy to cloud")
        print(" daita logs staging # View cloud logs")
    else:
        print(" ")
        print(" Ready for cloud deployment?")
        print(" Get your API key at daita-tech.io")
385
+ def _load_deployments(project_root):
386
+ """Load deployment history."""
387
+ deployments_file = project_root / '.daita' / 'deployments.json'
388
+
389
+ if not deployments_file.exists():
390
+ return []
391
+
392
+ try:
393
+ with open(deployments_file, 'r') as f:
394
+ return json.load(f)
395
+ except:
396
+ return []
397
+
398
+ def _load_project_config(project_root):
399
+ """Load project configuration."""
400
+ config_file = project_root / 'daita-project.yaml'
401
+ if not config_file.exists():
402
+ return None
403
+
404
+ try:
405
+ with open(config_file, 'r') as f:
406
+ return yaml.safe_load(f)
407
+ except:
408
+ return None
409
+
410
+ def _has_api_key():
411
+ """Check if API key is configured."""
412
+ import os
413
+ return bool(
414
+ os.getenv('OPENAI_API_KEY') or
415
+ os.getenv('ANTHROPIC_API_KEY') or
416
+ os.getenv('GEMINI_API_KEY')
417
+ )
418
+
419
# Alias kept for backward compatibility with callers that import
# `show_status` instead of `show_project_status`.
show_status = show_project_status
421
+
daita/cli/core/test.py ADDED
@@ -0,0 +1,239 @@
1
+ """
2
+ Simple testing for Daita CLI.
3
+ Just runs agents and workflows locally.
4
+ """
5
+ import asyncio
6
+ import json
7
+ import sys
8
+ from pathlib import Path
9
+ import importlib.util
10
+ from ..utils import find_project_root
11
+
12
async def run_tests(target=None, data_file=None, watch=False, verbose=False):
    """Run local tests for agents and/or workflows.

    target: name of a single agent/workflow to test, or None for all.
    data_file: optional path to JSON/text test data (relative to project).
    watch: keep the process alive after testing until Ctrl+C.
    verbose: print extra detail and tracebacks on failure.

    Raises ValueError when not run inside a Daita project.
    """
    root = find_project_root()
    if not root:
        raise ValueError("Not in a Daita project. Run 'daita init' first.")

    payload = _load_test_data(root, data_file)

    if target:
        await _test_single(root, target, payload, verbose)
    else:
        await _test_all(root, payload, verbose)

    if not watch:
        return

    # NOTE(review): "watch" mode only idles until interrupted — it does not
    # actually re-run tests on file changes.
    print("\n Watching for changes... (Press Ctrl+C to stop)")
    try:
        while True:
            await asyncio.sleep(1)
    except KeyboardInterrupt:
        print("\n Stopped watching")
38
async def _test_single(project_root, target, test_data, verbose):
    """Test one named component, preferring an agent over a workflow."""
    print(f" Testing: {target}")

    # Agents are checked first, matching the project layout convention.
    candidates = (
        (project_root / 'agents' / f'{target}.py', _test_agent),
        (project_root / 'workflows' / f'{target}.py', _test_workflow),
    )
    for candidate_path, runner in candidates:
        if candidate_path.exists():
            await runner(candidate_path, target, test_data, verbose)
            return

    print(f" Not found: {target}")
53
async def _test_all(project_root, test_data, verbose):
    """Run every agent and every workflow found in the project."""
    agent_names = _list_python_files(project_root / 'agents')
    workflow_names = _list_python_files(project_root / 'workflows')

    print(f" Testing {len(agent_names)} agents and {len(workflow_names)} workflows")

    for agent_name in agent_names:
        await _test_agent(project_root / 'agents' / f'{agent_name}.py',
                          agent_name, test_data, verbose)

    for workflow_name in workflow_names:
        await _test_workflow(project_root / 'workflows' / f'{workflow_name}.py',
                             workflow_name, test_data, verbose)
70
async def _test_agent(agent_file, name, test_data, verbose):
    """Load, instantiate, and exercise one agent, printing a pass/fail line."""

    def _trace():
        # Full traceback only when the user asked for verbose output.
        if verbose:
            import traceback
            traceback.print_exc()

    try:
        # Stage 1: import the module and find its factory.
        try:
            build_agent = _load_python_file(agent_file, 'create_agent')
        except Exception as e:
            print(f" {name}: Failed to load agent - {str(e)}")
            return

        # Stage 2: construct the agent.
        try:
            agent = build_agent()
        except Exception as e:
            print(f" {name}: Failed to create agent instance - {str(e)}")
            _trace()
            return

        # Stage 3: run one processing call against the test data.
        try:
            result = await agent.process("process_data", test_data)
            if not isinstance(result, dict):
                print(f" {name}: Warning - agent returned {type(result).__name__} instead of dict")
            print(f" {name}: OK")
            if verbose:
                print(f" Status: {result.get('status', 'unknown')}")
                print(f" Result type: {type(result).__name__}")
        except Exception as e:
            print(f" {name}: Processing failed - {str(e)}")
            _trace()

    except Exception as e:
        print(f" {name}: Unexpected error - {str(e)}")
        _trace()
115
async def _test_workflow(workflow_file, name, test_data, verbose):
    """Load, instantiate, and run one workflow, printing a pass/fail line."""

    def _trace():
        # Full traceback only when the user asked for verbose output.
        if verbose:
            import traceback
            traceback.print_exc()

    try:
        # Stage 1: import the module and find its factory.
        try:
            build_workflow = _load_python_file(workflow_file, 'create_workflow')
        except Exception as e:
            print(f" {name}: Failed to load workflow - {str(e)}")
            return

        # Stage 2: construct the workflow.
        try:
            workflow = build_workflow()
        except Exception as e:
            print(f" {name}: Failed to create workflow instance - {str(e)}")
            _trace()
            return

        # Stage 3: run it against the test data (None result is acceptable).
        try:
            result = await workflow.run(test_data)
            if result is not None and not isinstance(result, dict):
                print(f" {name}: Warning - workflow returned {type(result).__name__} instead of dict")
            print(f" {name}: OK")
            if verbose:
                print(f" Status: {result.get('status', 'unknown') if result else 'completed'}")
                print(f" Result type: {type(result).__name__ if result else 'None'}")
        except Exception as e:
            print(f" {name}: Workflow run failed - {str(e)}")
            _trace()

    except Exception as e:
        print(f" {name}: Unexpected error - {str(e)}")
        _trace()
160
+ def _load_python_file(file_path, factory_function):
161
+ """Load a Python file and get the factory function."""
162
+ try:
163
+ # Add project root to path
164
+ project_root = file_path.parent.parent
165
+ if str(project_root) not in sys.path:
166
+ sys.path.insert(0, str(project_root))
167
+
168
+ # Check if file exists and is readable
169
+ if not file_path.exists():
170
+ raise FileNotFoundError(f"File not found: {file_path}")
171
+
172
+ if not file_path.is_file():
173
+ raise ValueError(f"Path is not a file: {file_path}")
174
+
175
+ # Load module with better error handling
176
+ spec = importlib.util.spec_from_file_location("module", file_path)
177
+ if spec is None:
178
+ raise ImportError(f"Could not create module spec for {file_path}")
179
+
180
+ module = importlib.util.module_from_spec(spec)
181
+ if module is None:
182
+ raise ImportError(f"Could not create module from spec for {file_path}")
183
+
184
+ # Execute module with error handling
185
+ try:
186
+ spec.loader.exec_module(module)
187
+ except Exception as e:
188
+ raise ImportError(f"Failed to execute module {file_path.name}: {str(e)}")
189
+
190
+ # Get factory function
191
+ if hasattr(module, factory_function):
192
+ return getattr(module, factory_function)
193
+ else:
194
+ # List available functions for better error message
195
+ available_functions = [name for name in dir(module) if callable(getattr(module, name)) and not name.startswith('_')]
196
+ raise ValueError(
197
+ f"No {factory_function}() function found in {file_path.name}. "
198
+ f"Available functions: {available_functions}"
199
+ )
200
+
201
+ except (ImportError, ValueError, FileNotFoundError) as e:
202
+ # Re-raise known errors
203
+ raise
204
+ except Exception as e:
205
+ # Catch any other unexpected errors
206
+ raise RuntimeError(f"Unexpected error loading {file_path.name}: {str(e)}")
207
+
208
+ def _load_test_data(project_root, data_file):
209
+ """Load test data from file or use default."""
210
+ if data_file:
211
+ data_path = Path(data_file)
212
+ if not data_path.is_absolute():
213
+ data_path = project_root / data_path
214
+
215
+ if data_path.exists():
216
+ if data_path.suffix == '.json':
217
+ with open(data_path, 'r') as f:
218
+ return json.load(f)
219
+ else:
220
+ with open(data_path, 'r') as f:
221
+ return f.read()
222
+
223
+ # Use default test data
224
+ return {
225
+ "test": True,
226
+ "message": "Default test data"
227
+ }
228
+
229
+ def _list_python_files(directory):
230
+ """List Python files in a directory (excluding __init__.py)."""
231
+ if not directory.exists():
232
+ return []
233
+
234
+ files = []
235
+ for file in directory.glob('*.py'):
236
+ if file.name != '__init__.py':
237
+ files.append(file.stem)
238
+ return files
239
+