empathy-framework 4.6.6-py3-none-any.whl → 4.7.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/METADATA +7 -6
- empathy_framework-4.7.0.dist-info/RECORD +354 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/top_level.txt +0 -2
- empathy_healthcare_plugin/monitors/monitoring/__init__.py +9 -9
- empathy_llm_toolkit/agent_factory/__init__.py +6 -6
- empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +7 -10
- empathy_llm_toolkit/agents_md/__init__.py +22 -0
- empathy_llm_toolkit/agents_md/loader.py +218 -0
- empathy_llm_toolkit/agents_md/parser.py +271 -0
- empathy_llm_toolkit/agents_md/registry.py +307 -0
- empathy_llm_toolkit/commands/__init__.py +51 -0
- empathy_llm_toolkit/commands/context.py +375 -0
- empathy_llm_toolkit/commands/loader.py +301 -0
- empathy_llm_toolkit/commands/models.py +231 -0
- empathy_llm_toolkit/commands/parser.py +371 -0
- empathy_llm_toolkit/commands/registry.py +429 -0
- empathy_llm_toolkit/config/__init__.py +8 -8
- empathy_llm_toolkit/config/unified.py +3 -7
- empathy_llm_toolkit/context/__init__.py +22 -0
- empathy_llm_toolkit/context/compaction.py +455 -0
- empathy_llm_toolkit/context/manager.py +434 -0
- empathy_llm_toolkit/hooks/__init__.py +24 -0
- empathy_llm_toolkit/hooks/config.py +306 -0
- empathy_llm_toolkit/hooks/executor.py +289 -0
- empathy_llm_toolkit/hooks/registry.py +302 -0
- empathy_llm_toolkit/hooks/scripts/__init__.py +39 -0
- empathy_llm_toolkit/hooks/scripts/evaluate_session.py +201 -0
- empathy_llm_toolkit/hooks/scripts/first_time_init.py +285 -0
- empathy_llm_toolkit/hooks/scripts/pre_compact.py +207 -0
- empathy_llm_toolkit/hooks/scripts/session_end.py +183 -0
- empathy_llm_toolkit/hooks/scripts/session_start.py +163 -0
- empathy_llm_toolkit/hooks/scripts/suggest_compact.py +225 -0
- empathy_llm_toolkit/learning/__init__.py +30 -0
- empathy_llm_toolkit/learning/evaluator.py +438 -0
- empathy_llm_toolkit/learning/extractor.py +514 -0
- empathy_llm_toolkit/learning/storage.py +560 -0
- empathy_llm_toolkit/providers.py +4 -11
- empathy_llm_toolkit/security/__init__.py +17 -17
- empathy_llm_toolkit/utils/tokens.py +2 -5
- empathy_os/__init__.py +202 -70
- empathy_os/cache_monitor.py +5 -3
- empathy_os/cli/__init__.py +11 -55
- empathy_os/cli/__main__.py +29 -15
- empathy_os/cli/commands/inspection.py +21 -12
- empathy_os/cli/commands/memory.py +4 -12
- empathy_os/cli/commands/profiling.py +198 -0
- empathy_os/cli/commands/utilities.py +27 -7
- empathy_os/cli.py +28 -57
- empathy_os/cli_unified.py +525 -1164
- empathy_os/cost_tracker.py +9 -3
- empathy_os/dashboard/server.py +200 -2
- empathy_os/hot_reload/__init__.py +7 -7
- empathy_os/hot_reload/config.py +6 -7
- empathy_os/hot_reload/integration.py +35 -35
- empathy_os/hot_reload/reloader.py +57 -57
- empathy_os/hot_reload/watcher.py +28 -28
- empathy_os/hot_reload/websocket.py +2 -2
- empathy_os/memory/__init__.py +11 -4
- empathy_os/memory/claude_memory.py +1 -1
- empathy_os/memory/cross_session.py +8 -12
- empathy_os/memory/edges.py +6 -6
- empathy_os/memory/file_session.py +770 -0
- empathy_os/memory/graph.py +30 -30
- empathy_os/memory/nodes.py +6 -6
- empathy_os/memory/short_term.py +15 -9
- empathy_os/memory/unified.py +606 -140
- empathy_os/meta_workflows/agent_creator.py +3 -9
- empathy_os/meta_workflows/cli_meta_workflows.py +113 -53
- empathy_os/meta_workflows/form_engine.py +6 -18
- empathy_os/meta_workflows/intent_detector.py +64 -24
- empathy_os/meta_workflows/models.py +3 -1
- empathy_os/meta_workflows/pattern_learner.py +13 -31
- empathy_os/meta_workflows/plan_generator.py +55 -47
- empathy_os/meta_workflows/session_context.py +2 -3
- empathy_os/meta_workflows/workflow.py +20 -51
- empathy_os/models/cli.py +2 -2
- empathy_os/models/tasks.py +1 -2
- empathy_os/models/telemetry.py +4 -1
- empathy_os/models/token_estimator.py +3 -1
- empathy_os/monitoring/alerts.py +938 -9
- empathy_os/monitoring/alerts_cli.py +346 -183
- empathy_os/orchestration/execution_strategies.py +12 -29
- empathy_os/orchestration/pattern_learner.py +20 -26
- empathy_os/orchestration/real_tools.py +6 -15
- empathy_os/platform_utils.py +2 -1
- empathy_os/plugins/__init__.py +2 -2
- empathy_os/plugins/base.py +64 -64
- empathy_os/plugins/registry.py +32 -32
- empathy_os/project_index/index.py +49 -15
- empathy_os/project_index/models.py +1 -2
- empathy_os/project_index/reports.py +1 -1
- empathy_os/project_index/scanner.py +1 -0
- empathy_os/redis_memory.py +10 -7
- empathy_os/resilience/__init__.py +1 -1
- empathy_os/resilience/health.py +10 -10
- empathy_os/routing/__init__.py +7 -7
- empathy_os/routing/chain_executor.py +37 -37
- empathy_os/routing/classifier.py +36 -36
- empathy_os/routing/smart_router.py +40 -40
- empathy_os/routing/{wizard_registry.py → workflow_registry.py} +47 -47
- empathy_os/scaffolding/__init__.py +8 -8
- empathy_os/scaffolding/__main__.py +1 -1
- empathy_os/scaffolding/cli.py +28 -28
- empathy_os/socratic/__init__.py +3 -19
- empathy_os/socratic/ab_testing.py +25 -36
- empathy_os/socratic/blueprint.py +38 -38
- empathy_os/socratic/cli.py +34 -20
- empathy_os/socratic/collaboration.py +30 -28
- empathy_os/socratic/domain_templates.py +9 -1
- empathy_os/socratic/embeddings.py +17 -13
- empathy_os/socratic/engine.py +135 -70
- empathy_os/socratic/explainer.py +70 -60
- empathy_os/socratic/feedback.py +24 -19
- empathy_os/socratic/forms.py +15 -10
- empathy_os/socratic/generator.py +51 -35
- empathy_os/socratic/llm_analyzer.py +25 -23
- empathy_os/socratic/mcp_server.py +99 -159
- empathy_os/socratic/session.py +19 -13
- empathy_os/socratic/storage.py +98 -67
- empathy_os/socratic/success.py +38 -27
- empathy_os/socratic/visual_editor.py +51 -39
- empathy_os/socratic/web_ui.py +99 -66
- empathy_os/telemetry/cli.py +3 -1
- empathy_os/telemetry/usage_tracker.py +1 -3
- empathy_os/test_generator/__init__.py +3 -3
- empathy_os/test_generator/cli.py +28 -28
- empathy_os/test_generator/generator.py +64 -66
- empathy_os/test_generator/risk_analyzer.py +11 -11
- empathy_os/vscode_bridge.py +173 -0
- empathy_os/workflows/__init__.py +212 -120
- empathy_os/workflows/batch_processing.py +8 -24
- empathy_os/workflows/bug_predict.py +1 -1
- empathy_os/workflows/code_review.py +20 -5
- empathy_os/workflows/code_review_pipeline.py +13 -8
- empathy_os/workflows/keyboard_shortcuts/workflow.py +6 -2
- empathy_os/workflows/manage_documentation.py +1 -0
- empathy_os/workflows/orchestrated_health_check.py +6 -11
- empathy_os/workflows/orchestrated_release_prep.py +3 -3
- empathy_os/workflows/pr_review.py +18 -10
- empathy_os/workflows/progressive/__init__.py +2 -12
- empathy_os/workflows/progressive/cli.py +14 -37
- empathy_os/workflows/progressive/core.py +12 -12
- empathy_os/workflows/progressive/orchestrator.py +166 -144
- empathy_os/workflows/progressive/reports.py +22 -31
- empathy_os/workflows/progressive/telemetry.py +8 -14
- empathy_os/workflows/progressive/test_gen.py +29 -48
- empathy_os/workflows/progressive/workflow.py +31 -70
- empathy_os/workflows/release_prep.py +21 -6
- empathy_os/workflows/release_prep_crew.py +1 -0
- empathy_os/workflows/secure_release.py +13 -6
- empathy_os/workflows/security_audit.py +8 -3
- empathy_os/workflows/test_coverage_boost_crew.py +3 -2
- empathy_os/workflows/test_maintenance_crew.py +1 -0
- empathy_os/workflows/test_runner.py +16 -12
- empathy_software_plugin/SOFTWARE_PLUGIN_README.md +25 -703
- empathy_software_plugin/cli.py +0 -122
- coach_wizards/__init__.py +0 -45
- coach_wizards/accessibility_wizard.py +0 -91
- coach_wizards/api_wizard.py +0 -91
- coach_wizards/base_wizard.py +0 -209
- coach_wizards/cicd_wizard.py +0 -91
- coach_wizards/code_reviewer_README.md +0 -60
- coach_wizards/code_reviewer_wizard.py +0 -180
- coach_wizards/compliance_wizard.py +0 -91
- coach_wizards/database_wizard.py +0 -91
- coach_wizards/debugging_wizard.py +0 -91
- coach_wizards/documentation_wizard.py +0 -91
- coach_wizards/generate_wizards.py +0 -347
- coach_wizards/localization_wizard.py +0 -173
- coach_wizards/migration_wizard.py +0 -91
- coach_wizards/monitoring_wizard.py +0 -91
- coach_wizards/observability_wizard.py +0 -91
- coach_wizards/performance_wizard.py +0 -91
- coach_wizards/prompt_engineering_wizard.py +0 -661
- coach_wizards/refactoring_wizard.py +0 -91
- coach_wizards/scaling_wizard.py +0 -90
- coach_wizards/security_wizard.py +0 -92
- coach_wizards/testing_wizard.py +0 -91
- empathy_framework-4.6.6.dist-info/RECORD +0 -410
- empathy_llm_toolkit/wizards/__init__.py +0 -43
- empathy_llm_toolkit/wizards/base_wizard.py +0 -364
- empathy_llm_toolkit/wizards/customer_support_wizard.py +0 -190
- empathy_llm_toolkit/wizards/healthcare_wizard.py +0 -378
- empathy_llm_toolkit/wizards/patient_assessment_README.md +0 -64
- empathy_llm_toolkit/wizards/patient_assessment_wizard.py +0 -193
- empathy_llm_toolkit/wizards/technology_wizard.py +0 -209
- empathy_os/wizard_factory_cli.py +0 -170
- empathy_software_plugin/wizards/__init__.py +0 -42
- empathy_software_plugin/wizards/advanced_debugging_wizard.py +0 -395
- empathy_software_plugin/wizards/agent_orchestration_wizard.py +0 -511
- empathy_software_plugin/wizards/ai_collaboration_wizard.py +0 -503
- empathy_software_plugin/wizards/ai_context_wizard.py +0 -441
- empathy_software_plugin/wizards/ai_documentation_wizard.py +0 -503
- empathy_software_plugin/wizards/base_wizard.py +0 -288
- empathy_software_plugin/wizards/book_chapter_wizard.py +0 -519
- empathy_software_plugin/wizards/code_review_wizard.py +0 -604
- empathy_software_plugin/wizards/debugging/__init__.py +0 -50
- empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +0 -414
- empathy_software_plugin/wizards/debugging/config_loaders.py +0 -446
- empathy_software_plugin/wizards/debugging/fix_applier.py +0 -469
- empathy_software_plugin/wizards/debugging/language_patterns.py +0 -385
- empathy_software_plugin/wizards/debugging/linter_parsers.py +0 -470
- empathy_software_plugin/wizards/debugging/verification.py +0 -369
- empathy_software_plugin/wizards/enhanced_testing_wizard.py +0 -537
- empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +0 -816
- empathy_software_plugin/wizards/multi_model_wizard.py +0 -501
- empathy_software_plugin/wizards/pattern_extraction_wizard.py +0 -422
- empathy_software_plugin/wizards/pattern_retriever_wizard.py +0 -400
- empathy_software_plugin/wizards/performance/__init__.py +0 -9
- empathy_software_plugin/wizards/performance/bottleneck_detector.py +0 -221
- empathy_software_plugin/wizards/performance/profiler_parsers.py +0 -278
- empathy_software_plugin/wizards/performance/trajectory_analyzer.py +0 -429
- empathy_software_plugin/wizards/performance_profiling_wizard.py +0 -305
- empathy_software_plugin/wizards/prompt_engineering_wizard.py +0 -425
- empathy_software_plugin/wizards/rag_pattern_wizard.py +0 -461
- empathy_software_plugin/wizards/security/__init__.py +0 -32
- empathy_software_plugin/wizards/security/exploit_analyzer.py +0 -290
- empathy_software_plugin/wizards/security/owasp_patterns.py +0 -241
- empathy_software_plugin/wizards/security/vulnerability_scanner.py +0 -604
- empathy_software_plugin/wizards/security_analysis_wizard.py +0 -322
- empathy_software_plugin/wizards/security_learning_wizard.py +0 -740
- empathy_software_plugin/wizards/tech_debt_wizard.py +0 -726
- empathy_software_plugin/wizards/testing/__init__.py +0 -27
- empathy_software_plugin/wizards/testing/coverage_analyzer.py +0 -459
- empathy_software_plugin/wizards/testing/quality_analyzer.py +0 -525
- empathy_software_plugin/wizards/testing/test_suggester.py +0 -533
- empathy_software_plugin/wizards/testing_wizard.py +0 -274
- wizards/__init__.py +0 -82
- wizards/admission_assessment_wizard.py +0 -644
- wizards/care_plan.py +0 -321
- wizards/clinical_assessment.py +0 -769
- wizards/discharge_planning.py +0 -77
- wizards/discharge_summary_wizard.py +0 -468
- wizards/dosage_calculation.py +0 -497
- wizards/incident_report_wizard.py +0 -454
- wizards/medication_reconciliation.py +0 -85
- wizards/nursing_assessment.py +0 -171
- wizards/patient_education.py +0 -654
- wizards/quality_improvement.py +0 -705
- wizards/sbar_report.py +0 -324
- wizards/sbar_wizard.py +0 -608
- wizards/shift_handoff_wizard.py +0 -535
- wizards/soap_note_wizard.py +0 -679
- wizards/treatment_plan.py +0 -15
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/WHEEL +0 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/licenses/LICENSE +0 -0
@@ -8,11 +8,7 @@ from datetime import datetime
 from typing import Any
 
 from empathy_os.telemetry.usage_tracker import UsageTracker
-from empathy_os.workflows.progressive.core import (
-    ProgressiveWorkflowResult,
-    Tier,
-    TierResult,
-)
+from empathy_os.workflows.progressive.core import ProgressiveWorkflowResult, Tier, TierResult
 
 logger = logging.getLogger(__name__)
 
@@ -135,9 +131,11 @@ class ProgressiveTelemetry:
                 "all_premium_cost": all_premium_cost,
                 "tier_breakdown": tier_breakdown,
                 "success": result.success,
-                "final_cqs":
-
-
+                "final_cqs": (
+                    result.final_result.failure_analysis.calculate_quality_score()
+                    if result.final_result.failure_analysis
+                    else None
+                ),
             },
         )
 
@@ -206,9 +204,7 @@ class ProgressiveTelemetry:
                 },
             )
 
-            logger.warning(
-                f"Budget exceeded: ${current_cost:.3f} > ${max_budget:.3f} ({action})"
-            )
+            logger.warning(f"Budget exceeded: ${current_cost:.3f} > ${max_budget:.3f} ({action})")
 
         except Exception as e:
             logger.warning(f"Failed to track budget exceeded: {e}")
@@ -234,9 +230,7 @@ class ProgressiveTelemetry:
         event = {
             "timestamp": datetime.now().isoformat(),
             "event_type": event_type,
-            "user_id_hash": (
-                self._hash_user_id(self.user_id) if self.user_id else "anonymous"
-            ),
+            "user_id_hash": (self._hash_user_id(self.user_id) if self.user_id else "anonymous"),
             **data,
         }
 
@@ -90,11 +90,7 @@ class ProgressiveTestGenWorkflow(ProgressiveWorkflow):
         logger.info(f"Found {len(functions)} functions to test")
 
         # Execute with progressive escalation
-        return self._execute_progressive(
-            items=functions,
-            workflow_name="test-gen",
-            **kwargs
-        )
+        return self._execute_progressive(items=functions, workflow_name="test-gen", **kwargs)
 
     def _parse_functions(self, file_path: Path) -> list[dict[str, Any]]:
         """Parse Python file to extract function definitions.
@@ -133,18 +129,14 @@ class ProgressiveTestGenWorkflow(ProgressiveWorkflow):
                     "args": [arg.arg for arg in node.args.args],
                     "docstring": ast.get_docstring(node) or "",
                     "code": ast.unparse(node),  # Python 3.9+
-                    "file": str(file_path)
+                    "file": str(file_path),
                 }
                 functions.append(func_info)
 
         return functions
 
     def _execute_tier_impl(
-        self,
-        tier: Tier,
-        items: list[Any],
-        context: dict[str, Any] | None,
-        **kwargs
+        self, tier: Tier, items: list[Any], context: dict[str, Any] | None, **kwargs
     ) -> list[dict[str, Any]]:
         """Execute test generation at specific tier.
 
@@ -165,11 +157,7 @@ class ProgressiveTestGenWorkflow(ProgressiveWorkflow):
 
         # Build prompt for this tier (prepared for future LLM integration)
         base_task = self._build_test_gen_task(items)
-        _prompt = self.meta_orchestrator.build_tier_prompt(  # noqa: F841
-            tier,
-            base_task,
-            context
-        )
+        _prompt = self.meta_orchestrator.build_tier_prompt(tier, base_task, context)  # noqa: F841
 
         # TODO: Call LLM API with _prompt
         # For now, simulate test generation
@@ -202,9 +190,7 @@ class ProgressiveTestGenWorkflow(ProgressiveWorkflow):
         return task
 
     def _simulate_test_generation(
-        self,
-        tier: Tier,
-        functions: list[dict[str, Any]]
+        self, tier: Tier, functions: list[dict[str, Any]]
     ) -> list[dict[str, Any]]:
         """Simulate test generation (placeholder for LLM integration).
 
@@ -232,7 +218,7 @@ class ProgressiveTestGenWorkflow(ProgressiveWorkflow):
         _base_quality = {  # noqa: F841
             Tier.CHEAP: 70,
             Tier.CAPABLE: 85,
-            Tier.PREMIUM: 95
+            Tier.PREMIUM: 95,
         }[tier]
 
         for func in functions:
@@ -245,17 +231,19 @@ class ProgressiveTestGenWorkflow(ProgressiveWorkflow):
             # Calculate quality score
             quality_score = analysis.calculate_quality_score()
 
-            generated_tests.append(
-
-
-
-
-
-
-
-
-
-
+            generated_tests.append(
+                {
+                    "function_name": func["name"],
+                    "test_code": test_code,
+                    "quality_score": quality_score,
+                    "passed": analysis.test_pass_rate > 0.5,
+                    "coverage": analysis.coverage_percent,
+                    "assertions": analysis.assertion_depth,
+                    "confidence": analysis.confidence_score,
+                    "syntax_errors": [str(e) for e in analysis.syntax_errors],
+                    "error": "" if not analysis.syntax_errors else str(analysis.syntax_errors[0]),
+                }
+            )
 
         return generated_tests
 
@@ -312,11 +300,7 @@ class ProgressiveTestGenWorkflow(ProgressiveWorkflow):
 
         return "\n ".join(setup_lines)
 
-    def _analyze_generated_test(
-        self,
-        test_code: str,
-        func: dict[str, Any]
-    ) -> FailureAnalysis:
+    def _analyze_generated_test(self, test_code: str, func: dict[str, Any]) -> FailureAnalysis:
         """Analyze quality of generated test.
 
         Args:
@@ -338,10 +322,7 @@ class ProgressiveTestGenWorkflow(ProgressiveWorkflow):
         # 2. Count assertions
         try:
             tree = ast.parse(test_code)
-            assertion_count = sum(
-                1 for node in ast.walk(tree)
-                if isinstance(node, ast.Assert)
-            )
+            assertion_count = sum(1 for node in ast.walk(tree) if isinstance(node, ast.Assert))
             analysis.assertion_depth = assertion_count
         except Exception as e:
             logger.warning(f"Failed to count assertions: {e}")
@@ -378,7 +359,7 @@ class ProgressiveTestGenWorkflow(ProgressiveWorkflow):
             generated_items=[],
             failure_analysis=FailureAnalysis(),
             cost=0.0,
-            duration=0.0
+            duration=0.0,
         )
 
         task_id = f"{workflow_name}-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
@@ -390,7 +371,7 @@ class ProgressiveTestGenWorkflow(ProgressiveWorkflow):
             final_result=empty_result,
             total_cost=0.0,
             total_duration=0.0,
-            success=False
+            success=False,
         )
 
 
@@ -416,7 +397,7 @@ def execute_test_file(test_file: Path) -> dict[str, Any]:
             ["pytest", str(test_file), "-v", "--tb=short"],
             capture_output=True,
             text=True,
-            timeout=60
+            timeout=60,
         )
 
         # Parse pytest output to get pass/fail counts
@@ -435,7 +416,7 @@ def execute_test_file(test_file: Path) -> dict[str, Any]:
             "total": total,
             "pass_rate": pass_rate,
             "output": output,
-            "returncode": result.returncode
+            "returncode": result.returncode,
         }
 
     except subprocess.TimeoutExpired:
@@ -445,7 +426,7 @@ def execute_test_file(test_file: Path) -> dict[str, Any]:
             "total": 0,
             "pass_rate": 0.0,
             "output": "Test execution timed out",
-            "returncode": -1
+            "returncode": -1,
         }
     except Exception as e:
         logger.error(f"Failed to execute tests: {e}")
@@ -455,7 +436,7 @@ def execute_test_file(test_file: Path) -> dict[str, Any]:
             "total": 0,
             "pass_rate": 0.0,
             "output": str(e),
-            "returncode": -1
+            "returncode": -1,
         }
 
 
@@ -484,12 +465,12 @@ def calculate_coverage(test_file: Path, source_file: Path) -> float:
             str(test_file),
             f"--cov={source_file.stem}",
             "--cov-report=term-missing",
-            "--no-cov-on-fail"
+            "--no-cov-on-fail",
         ],
         capture_output=True,
         text=True,
         timeout=60,
-        cwd=source_file.parent
+        cwd=source_file.parent,
     )
 
     output = result.stdout + result.stderr
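The one-liner restored in the assertion-counting hunk above walks the parsed AST and counts `ast.Assert` nodes. A minimal standalone sketch of that technique, using a made-up test snippet for illustration:

```python
import ast

# Sample test source; invented purely to exercise the counting logic.
test_code = """
def test_add():
    assert 1 + 1 == 2
    assert isinstance(2, int)
"""

tree = ast.parse(test_code)
# Same expression as in the diff: one count per ast.Assert node found in the tree.
assertion_count = sum(1 for node in ast.walk(tree) if isinstance(node, ast.Assert))
print(assertion_count)  # -> 2
```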
@@ -24,11 +24,13 @@ logger = logging.getLogger(__name__)
 
 class BudgetExceededError(Exception):
     """Raised when execution cost exceeds configured budget."""
+
     pass
 
 
 class UserCancelledError(Exception):
     """Raised when user cancels execution during approval prompt."""
+
     pass
 
 
@@ -96,10 +98,7 @@ class ProgressiveWorkflow:
         raise NotImplementedError("Subclasses must implement execute()")
 
     def _execute_progressive(
-        self,
-        items: list[Any],
-        workflow_name: str,
-        **kwargs
+        self, items: list[Any], workflow_name: str, **kwargs
     ) -> ProgressiveWorkflowResult:
         """Execute items with progressive tier escalation.
 
@@ -129,8 +128,7 @@ class ProgressiveWorkflow:
         # Estimate cost and request approval
         estimated_cost = self._estimate_total_cost(len(items))
         if not self._request_approval(
-            f"Execute {workflow_name} on {len(items)} items",
-            estimated_cost
+            f"Execute {workflow_name} on {len(items)} items", estimated_cost
         ):
             raise UserCancelledError("User declined to proceed")
 
@@ -140,17 +138,10 @@ class ProgressiveWorkflow:
         context: dict[str, Any] | None = None
 
         while remaining_items and current_tier:
-            logger.info(
-                f"Executing {len(remaining_items)} items at {current_tier.value} tier"
-            )
+            logger.info(f"Executing {len(remaining_items)} items at {current_tier.value} tier")
 
             # Execute at current tier
-            tier_result = self._execute_tier(
-                current_tier,
-                remaining_items,
-                context,
-                **kwargs
-            )
+            tier_result = self._execute_tier(current_tier, remaining_items, context, **kwargs)
 
             self.tier_results.append(tier_result)
 
@@ -167,12 +158,10 @@ class ProgressiveWorkflow:
 
             # Separate successful and failed items
             successful = [
-                item for item in tier_result.generated_items
-                if item.get("quality_score", 0) >= 80
+                item for item in tier_result.generated_items if item.get("quality_score", 0) >= 80
             ]
             failed = [
-                item for item in tier_result.generated_items
-                if item.get("quality_score", 0) < 80
+                item for item in tier_result.generated_items if item.get("quality_score", 0) < 80
             ]
 
             logger.info(
@@ -189,9 +178,7 @@ class ProgressiveWorkflow:
                 break
 
             should_escalate, reason = self._should_escalate(
-                current_tier,
-                tier_result,
-                attempt=tier_result.attempt
+                current_tier, tier_result, attempt=tier_result.attempt
             )
 
             if should_escalate:
@@ -230,16 +217,13 @@ class ProgressiveWorkflow:
                     "previous_cqs": tier_result.quality_score,
                     "failures": failed,
                     "examples": tier_result.generated_items[-3:],  # Last 3 attempts
-                    "reason": reason
+                    "reason": reason,
                 }
 
                 # Request approval for escalation
                 escalation_cost = self._estimate_tier_cost(next_tier, len(remaining_items))
                 if not self._request_escalation_approval(
-                    current_tier,
-                    next_tier,
-                    len(remaining_items),
-                    escalation_cost
+                    current_tier, next_tier, len(remaining_items), escalation_cost
                 ):
                     logger.info("User declined escalation, stopping")
                     break
@@ -265,7 +249,7 @@ class ProgressiveWorkflow:
             final_result=self.tier_results[-1],
             total_cost=sum(r.cost for r in self.tier_results),
             total_duration=sum(r.duration for r in self.tier_results),
-            success=len(remaining_items) == 0
+            success=len(remaining_items) == 0,
         )
 
         # Track workflow completion in telemetry
@@ -275,10 +259,7 @@ class ProgressiveWorkflow:
         return result
 
     def _execute_single_tier(
-        self,
-        items: list[Any],
-        workflow_name: str,
-        **kwargs
+        self, items: list[Any], workflow_name: str, **kwargs
     ) -> ProgressiveWorkflowResult:
         """Execute without progressive escalation (single tier).
 
@@ -307,15 +288,11 @@ class ProgressiveWorkflow:
             final_result=tier_result,
             total_cost=tier_result.cost,
             total_duration=tier_result.duration,
-            success=tier_result.quality_score >= 80
+            success=tier_result.quality_score >= 80,
         )
 
     def _execute_tier(
-        self,
-        tier: Tier,
-        items: list[Any],
-        context: dict[str, Any] | None,
-        **kwargs
+        self, tier: Tier, items: list[Any], context: dict[str, Any] | None, **kwargs
     ) -> TierResult:
         """Execute items at a specific tier.
 
@@ -353,7 +330,7 @@ class ProgressiveWorkflow:
                 generated_items=generated_items,
                 failure_analysis=failure_analysis,
                 cost=cost,
-                duration=duration
+                duration=duration,
             )
 
         except Exception as e:
@@ -370,15 +347,11 @@ class ProgressiveWorkflow:
                 cost=0.0,
                 duration=duration,
                 escalated=True,
-                escalation_reason=f"Execution error: {str(e)}"
+                escalation_reason=f"Execution error: {str(e)}",
             )
 
     def _execute_tier_impl(
-        self,
-        tier: Tier,
-        items: list[Any],
-        context: dict[str, Any] | None,
-        **kwargs
+        self, tier: Tier, items: list[Any], context: dict[str, Any] | None, **kwargs
     ) -> list[dict[str, Any]]:
         """Execute items at specific tier (to be implemented by subclasses).
 
@@ -419,15 +392,10 @@ class ProgressiveWorkflow:
             test_pass_rate=passed / total_items if total_items > 0 else 0.0,
             coverage_percent=avg_coverage,
             assertion_depth=avg_assertions,
-            confidence_score=avg_confidence
+            confidence_score=avg_confidence,
         )
 
-    def _should_escalate(
-        self,
-        tier: Tier,
-        result: TierResult,
-        attempt: int
-    ) -> tuple[bool, str]:
+    def _should_escalate(self, tier: Tier, result: TierResult, attempt: int) -> tuple[bool, str]:
         """Determine if escalation is needed.
 
         Uses meta-orchestrator to make intelligent escalation decisions
@@ -441,12 +409,7 @@ class ProgressiveWorkflow:
         Returns:
             Tuple of (should_escalate, reason)
         """
-        return self.meta_orchestrator.should_escalate(
-            tier,
-            result,
-            attempt,
-            self.config
-        )
+        return self.meta_orchestrator.should_escalate(tier, result, attempt, self.config)
 
     def _get_next_tier(self, current_tier: Tier) -> Tier | None:
         """Get the next tier in the progression.
@@ -496,9 +459,9 @@ class ProgressiveWorkflow:
         """
        # Cost per item (approximate, based on typical token usage)
        COST_PER_ITEM = {
-            Tier.CHEAP: 0.003,
-            Tier.CAPABLE: 0.015,
-            Tier.PREMIUM: 0.05
+            Tier.CHEAP: 0.003,  # ~$0.003 per item (gpt-4o-mini)
+            Tier.CAPABLE: 0.015,  # ~$0.015 per item (claude-3-5-sonnet)
+            Tier.PREMIUM: 0.05,  # ~$0.05 per item (claude-opus-4)
        }
 
        return COST_PER_ITEM[tier] * item_count
@@ -530,7 +493,9 @@ class ProgressiveWorkflow:
        """
        # Check auto-approve threshold
        if self.config.auto_approve_under and estimated_cost <= self.config.auto_approve_under:
-            logger.info(
+            logger.info(
+                f"Auto-approved: ${estimated_cost:.2f} <= ${self.config.auto_approve_under:.2f}"
+            )
            return True
 
        # Check if under default threshold ($1.00)
@@ -546,14 +511,10 @@ class ProgressiveWorkflow:
        print()
 
        response = input("Proceed? [y/N]: ").strip().lower()
-        return response ==
+        return response == "y"
 
    def _request_escalation_approval(
-        self,
-        from_tier: Tier,
-        to_tier: Tier,
-        item_count: int,
-        additional_cost: float
+        self, from_tier: Tier, to_tier: Tier, item_count: int, additional_cost: float
    ) -> bool:
        """Request approval for tier escalation.
 
@@ -580,7 +541,7 @@ class ProgressiveWorkflow:
        print()
 
        response = input("Proceed? [Y/n]: ").strip().lower()
-        return response !=
+        return response != "n"
 
    def _check_budget(self) -> None:
        """Check if budget has been exceeded.
@@ -622,7 +583,7 @@ class ProgressiveWorkflow:
        MODEL_MAP = {
            Tier.CHEAP: "gpt-4o-mini",
            Tier.CAPABLE: "claude-3-5-sonnet",
-            Tier.PREMIUM: "claude-opus-4"
+            Tier.PREMIUM: "claude-opus-4",
        }
 
        return MODEL_MAP.get(tier, "claude-3-5-sonnet")
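The ProgressiveWorkflow hunks above are formatting-only, so the escalation arithmetic is unchanged: each tier has a flat per-item cost, items scoring below 80 are retried at the next tier up, and escalation asks for approval using the estimated additional cost. A minimal standalone sketch of that loop, reusing only the tier names, COST_PER_ITEM rates, and 80-point threshold visible in the diff; the scoring callback and sample items are invented for illustration:

```python
from enum import Enum
from typing import Callable


class Tier(Enum):
    CHEAP = "cheap"        # gpt-4o-mini in the diff's MODEL_MAP
    CAPABLE = "capable"    # claude-3-5-sonnet
    PREMIUM = "premium"    # claude-opus-4


# Per-item rates copied from the COST_PER_ITEM table in the hunk above.
COST_PER_ITEM = {Tier.CHEAP: 0.003, Tier.CAPABLE: 0.015, Tier.PREMIUM: 0.05}
TIER_ORDER = [Tier.CHEAP, Tier.CAPABLE, Tier.PREMIUM]
QUALITY_THRESHOLD = 80  # items with quality_score >= 80 count as successful


def estimate_tier_cost(tier: Tier, item_count: int) -> float:
    """Flat per-item rate times item count, as in _estimate_tier_cost."""
    return COST_PER_ITEM[tier] * item_count


def run_progressively(
    items: list[str], score: Callable[[str, Tier], int]
) -> tuple[list[str], float]:
    """Toy escalation loop: retry items that score below the threshold at the next tier."""
    total_cost = 0.0
    remaining = list(items)
    for tier in TIER_ORDER:
        if not remaining:
            break
        total_cost += estimate_tier_cost(tier, len(remaining))
        remaining = [item for item in remaining if score(item, tier) < QUALITY_THRESHOLD]
    return remaining, total_cost


if __name__ == "__main__":
    # Pretend the cheap tier only handles short function names.
    def fake_score(item: str, tier: Tier) -> int:
        return 90 if (len(item) < 8 or tier is not Tier.CHEAP) else 60

    failed, cost = run_progressively(["parse", "serialize_config"], fake_score)
    print(failed, f"${cost:.3f}")  # -> [] $0.021
```

Under these rates, escalating a single failed item from the cheap to the capable tier adds $0.015, which is the figure _estimate_tier_cost would report before the escalation prompt.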
@@ -321,12 +321,27 @@ class ReleasePreparationWorkflow(BaseWorkflow):
 
        Falls back gracefully if SecurityAuditCrew is not available.
        """
-
-
-
-
-
-
+        try:
+            from .security_adapters import (
+                _check_crew_available,
+                _get_crew_audit,
+                crew_report_to_workflow_format,
+                merge_security_results,
+            )
+        except ImportError:
+            # Security adapters removed - return fallback
+            return (
+                {
+                    "crew_security": {
+                        "available": False,
+                        "fallback": True,
+                        "reason": "Security adapters not installed",
+                    },
+                    **input_data,
+                },
+                0,
+                0,
+            )
 
        target_path = input_data.get("path", ".")
        existing_security = input_data.get("security", {})
@@ -160,11 +160,18 @@ class SecureReleasePipeline:
            SecureReleaseResult with combined analysis
 
        """
-
-
-
-
-
+        try:
+            from .security_adapters import (
+                _check_crew_available,
+                _get_crew_audit,
+                crew_report_to_workflow_format,
+            )
+            adapters_available = True
+        except ImportError:
+            adapters_available = False
+            _check_crew_available = lambda: False
+            _get_crew_audit = None
+            crew_report_to_workflow_format = None
 
        started_at = datetime.now()
 
@@ -181,7 +188,7 @@ class SecureReleasePipeline:
        try:
            # Step 1: SecurityAuditCrew (parallel or first)
            crew_task = None
-            crew_enabled = self.use_crew and _check_crew_available()
+            crew_enabled = self.use_crew and adapters_available and _check_crew_available()
 
            if crew_enabled:
                if self.parallel_crew:
@@ -58,7 +58,7 @@ SKIP_DIRECTORIES = {
    ".coverage",  # Coverage data
    "vscode-extension",  # VSCode extension code (separate security review)
    "vscode-memory-panel",  # VSCode panel code
-    "
+    "workflow-dashboard",  # Dashboard build
 }
 
 # Patterns that indicate a line is DETECTION code, not vulnerable code
@@ -672,7 +672,12 @@ class SecurityAuditWorkflow(BaseWorkflow):
 
        Supports XML-enhanced prompts when enabled in workflow config.
        """
-
+        try:
+            from .security_adapters import _check_crew_available
+            adapters_available = True
+        except ImportError:
+            adapters_available = False
+            _check_crew_available = lambda: False
 
        assessment = input_data.get("assessment", {})
        critical = assessment.get("critical_findings", [])
@@ -683,7 +688,7 @@ class SecurityAuditWorkflow(BaseWorkflow):
        crew_enhanced = False
 
        # Try crew-based remediation first if enabled
-        if self.use_crew_for_remediation and _check_crew_available():
+        if self.use_crew_for_remediation and adapters_available and _check_crew_available():
            crew_remediation = await self._get_crew_remediation(target, critical + high, assessment)
            if crew_remediation:
                crew_enhanced = True
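The added lines in this group all use the same optional-dependency guard: import the security adapters inside try/except ImportError, record whether the import succeeded, and gate crew usage on that flag plus _check_crew_available(). A standalone sketch of the pattern; the absolute module path is an assumption, since the diff only shows the relative import .security_adapters:

```python
# Optional-dependency guard in the style of the hunks above. The absolute path
# below is assumed for illustration; the diff itself uses `from .security_adapters import ...`.
try:
    from empathy_os.workflows.security_adapters import _check_crew_available

    adapters_available = True
except ImportError:
    adapters_available = False

    def _check_crew_available() -> bool:
        # Fallback mirrors the `lambda: False` in the diff: no crew without adapters.
        return False


def crew_enabled(use_crew: bool) -> bool:
    """Crew analysis runs only when requested AND the adapter import succeeded."""
    return use_crew and adapters_available and _check_crew_available()


if __name__ == "__main__":
    print(crew_enabled(True))  # False unless the adapter module is importable
```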
@@ -11,6 +11,7 @@ to analyze coverage gaps, generate tests, and validate improvements.
 Copyright 2025 Smart AI Memory, LLC
 Licensed under Fair Source 0.9
 """
+
 import asyncio
 import json
 import re
@@ -375,7 +376,7 @@ CRITICAL FORMATTING RULES:
 
            # Limit code size to avoid token bloat (max ~5000 chars per file)
            if len(code) > 5000:
-                code = code[:5000] + f"\n... (truncated, {len(code)-5000} more chars)"
+                code = code[:5000] + f"\n... (truncated, {len(code) - 5000} more chars)"
 
            result.append(
                {
@@ -814,7 +815,7 @@ CRITICAL FORMATTING RULES:
    lines.append(f"Tests Passing: {result.tests_passing}")
    lines.append("")
    lines.append(f"Cost: ${result.cost:.4f}")
-    lines.append(f"Duration: {result.duration_ms}ms ({result.duration_ms/1000:.1f}s)")
+    lines.append(f"Duration: {result.duration_ms}ms ({result.duration_ms / 1000:.1f}s)")
    lines.append("")
 
    if result.gaps_analyzed:
@@ -562,7 +562,7 @@ def _find_test_file(source_file: str) -> str | None:
    if "src" in source_path.parts:
        try:
            src_idx = source_path.parts.index("src")
-            rel_parts = source_path.parts[src_idx + 1
+            rel_parts = source_path.parts[src_idx + 1 : -1]  # Exclude src and filename
            if len(rel_parts) >= 2:
                # e.g., ('empathy_os', 'models') -> module_name = 'models'
                module_name = rel_parts[-1]
@@ -572,19 +572,23 @@ def _find_test_file(source_file: str) -> str | None:
    # Priority 1: Module-specific test directory
    # e.g., src/empathy_os/models/registry.py -> tests/unit/models/test_registry.py
    if module_name:
-        patterns.extend(
-
-
-
+        patterns.extend(
+            [
+                Path("tests") / "unit" / module_name / f"test_{filename}.py",
+                Path("tests") / module_name / f"test_{filename}.py",
+                Path("tests") / "integration" / module_name / f"test_{filename}.py",
+            ]
+        )
 
    # Priority 2: Standard locations
-    patterns.extend(
-
-
-
-
-
+    patterns.extend(
+        [
+            Path("tests") / "unit" / f"test_{filename}.py",
+            Path("tests") / f"test_{filename}.py",
+            Path("tests") / "integration" / f"test_{filename}.py",
+            parent / f"test_{filename}.py",
+        ]
+    )
 
    # Check explicit patterns first
    for pattern in patterns:
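The restored patterns.extend blocks enumerate candidate test-file locations in priority order. A hedged sketch of how such a candidate list can be probed; filename, parent, and module_name are derived locally here because the surrounding code is not part of the diff:

```python
from pathlib import Path


def find_test_file(source_file: str) -> Path | None:
    """Return the first existing candidate test file, or None."""
    source = Path(source_file)
    filename = source.stem          # e.g. "registry"
    parent = source.parent          # e.g. src/empathy_os/models
    module_name = parent.name       # e.g. "models"

    # Candidate order mirrors the priority-1 and priority-2 lists in the hunk above.
    patterns = [
        Path("tests") / "unit" / module_name / f"test_{filename}.py",
        Path("tests") / module_name / f"test_{filename}.py",
        Path("tests") / "integration" / module_name / f"test_{filename}.py",
        Path("tests") / "unit" / f"test_{filename}.py",
        Path("tests") / f"test_{filename}.py",
        Path("tests") / "integration" / f"test_{filename}.py",
        parent / f"test_{filename}.py",
    ]
    # Probe each candidate on disk and return the first hit.
    return next((p for p in patterns if p.exists()), None)


if __name__ == "__main__":
    print(find_test_file("src/empathy_os/models/registry.py"))
```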