crackerjack-0.31.18-py3-none-any.whl → crackerjack-0.33.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crackerjack might be problematic.
- crackerjack/CLAUDE.md +71 -452
- crackerjack/__main__.py +1 -1
- crackerjack/agents/refactoring_agent.py +67 -46
- crackerjack/cli/handlers.py +7 -7
- crackerjack/config/hooks.py +36 -6
- crackerjack/core/async_workflow_orchestrator.py +2 -2
- crackerjack/core/enhanced_container.py +67 -0
- crackerjack/core/phase_coordinator.py +211 -44
- crackerjack/core/workflow_orchestrator.py +723 -72
- crackerjack/dynamic_config.py +1 -25
- crackerjack/managers/publish_manager.py +22 -5
- crackerjack/managers/test_command_builder.py +19 -13
- crackerjack/managers/test_manager.py +15 -4
- crackerjack/mcp/server_core.py +162 -34
- crackerjack/mcp/tools/core_tools.py +1 -1
- crackerjack/mcp/tools/execution_tools.py +16 -3
- crackerjack/mcp/tools/workflow_executor.py +130 -40
- crackerjack/mixins/__init__.py +5 -0
- crackerjack/mixins/error_handling.py +214 -0
- crackerjack/models/config.py +9 -0
- crackerjack/models/protocols.py +114 -0
- crackerjack/models/task.py +3 -0
- crackerjack/security/__init__.py +1 -0
- crackerjack/security/audit.py +226 -0
- crackerjack/services/config.py +3 -2
- crackerjack/services/config_merge.py +11 -5
- crackerjack/services/coverage_ratchet.py +22 -0
- crackerjack/services/git.py +121 -22
- crackerjack/services/initialization.py +25 -9
- crackerjack/services/memory_optimizer.py +477 -0
- crackerjack/services/parallel_executor.py +474 -0
- crackerjack/services/performance_benchmarks.py +292 -577
- crackerjack/services/performance_cache.py +443 -0
- crackerjack/services/performance_monitor.py +633 -0
- crackerjack/services/security.py +63 -0
- crackerjack/services/security_logger.py +9 -1
- crackerjack/services/terminal_utils.py +0 -0
- crackerjack/tools/validate_regex_patterns.py +14 -0
- {crackerjack-0.31.18.dist-info → crackerjack-0.33.0.dist-info}/METADATA +2 -2
- {crackerjack-0.31.18.dist-info → crackerjack-0.33.0.dist-info}/RECORD +43 -34
- {crackerjack-0.31.18.dist-info → crackerjack-0.33.0.dist-info}/WHEEL +0 -0
- {crackerjack-0.31.18.dist-info → crackerjack-0.33.0.dist-info}/entry_points.txt +0 -0
- {crackerjack-0.31.18.dist-info → crackerjack-0.33.0.dist-info}/licenses/LICENSE +0 -0
--- crackerjack/core/workflow_orchestrator.py (0.31.18)
+++ crackerjack/core/workflow_orchestrator.py (0.33.0)
@@ -1,3 +1,4 @@
+import asyncio
 import time
 import typing as t
 from pathlib import Path
@@ -13,6 +14,12 @@ from crackerjack.services.logging import (
     get_logger,
     setup_structured_logging,
 )
+from crackerjack.services.memory_optimizer import get_memory_optimizer, memory_optimized
+from crackerjack.services.performance_cache import get_performance_cache
+from crackerjack.services.performance_monitor import (
+    get_performance_monitor,
+    phase_monitor,
+)

 from .phase_coordinator import PhaseCoordinator
 from .session_coordinator import SessionCoordinator
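The three new `get_*` imports above follow an accessor convention that usually denotes a lazily created, process-wide singleton. The service modules themselves are not shown in this hunk, so the following is only a minimal sketch of the pattern, with `PerformanceCache` and its lifecycle methods assumed from how `self._cache.start()`/`stop()` are called later in this file:

```python
# Hypothetical sketch of an accessor like get_performance_cache();
# the real crackerjack implementation is not shown in this diff.
from __future__ import annotations

import threading


class PerformanceCache:
    """Stand-in for the imported cache service (API assumed from call sites)."""

    async def start(self) -> None:  # lifecycle hooks used by the pipeline
        ...

    async def stop(self) -> None:
        ...


_cache: PerformanceCache | None = None
_lock = threading.Lock()


def get_performance_cache() -> PerformanceCache:
    """Return one process-wide instance, creating it lazily."""
    global _cache
    if _cache is None:
        with _lock:  # guard against double-creation across threads
            if _cache is None:
                _cache = PerformanceCache()
    return _cache
```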
@@ -40,10 +47,16 @@ class WorkflowPipeline:
         self.session = session
         self.phases = phases
         self._mcp_state_manager: t.Any = None
+        self._last_security_audit: t.Any = None  # Store security audit report

         self.logger = get_logger("crackerjack.pipeline")
         self._debugger = None

+        # Performance optimization services
+        self._performance_monitor = get_performance_monitor()
+        self._memory_optimizer = get_memory_optimizer()
+        self._cache = get_performance_cache()
+
     @property
     def debugger(self):
         if self._debugger is None:
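Note that `_last_security_audit` is typed `t.Any` rather than with the concrete report type; the audit module is only imported lazily inside `_check_security_critical_failures()` later in this diff, presumably to avoid a module-level dependency. A sketch of the common alternative, which keeps the precise type for checkers without a runtime import (`SecurityAuditReport` is a hypothetical name inferred from the attributes the code reads):

```python
# Sketch only: preserve static typing without a runtime import.
# "SecurityAuditReport" is hypothetical; the diff only shows that the
# report object exposes has_critical_failures, security_warnings,
# and recommendations.
from __future__ import annotations

import typing as t

if t.TYPE_CHECKING:
    from crackerjack.security.audit import SecurityAuditReport


class PipelineStateSketch:
    def __init__(self) -> None:
        # Resolved only by type checkers; None at runtime until an audit runs.
        self._last_security_audit: SecurityAuditReport | None = None
```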
@@ -55,7 +68,16 @@

         return os.environ.get("AI_AGENT_DEBUG", "0") == "1"

+    @memory_optimized
     async def run_complete_workflow(self, options: OptionsProtocol) -> bool:
+        workflow_id = f"workflow_{int(time.time())}"
+
+        # Start performance monitoring
+        self._performance_monitor.start_workflow(workflow_id)
+
+        # Start cache service if not already running
+        await self._cache.start()
+
         with LoggingContext(
             "workflow_execution",
             testing=getattr(options, "test", False),
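`@memory_optimized` now wraps an `async def`, so the decorator has to return a coroutine function. Its implementation lives in the new `services/memory_optimizer.py` (not part of this hunk); a minimal async-aware sketch of what a decorator with this name might do:

```python
# Assumed behavior: the real memory_optimized likely delegates to the
# MemoryOptimizer service rather than calling gc directly.
import functools
import gc
import typing as t


def memory_optimized(
    func: t.Callable[..., t.Awaitable[t.Any]],
) -> t.Callable[..., t.Awaitable[t.Any]]:
    @functools.wraps(func)
    async def wrapper(*args: t.Any, **kwargs: t.Any) -> t.Any:
        try:
            return await func(*args, **kwargs)
        finally:
            gc.collect()  # reclaim reference cycles created during the run

    return wrapper
```

One detail worth flagging: `f"workflow_{int(time.time())}"` has one-second resolution, so two workflows started within the same second would share an id; a `uuid4()` suffix would make the id unique.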
@@ -65,17 +87,34 @@
             self._initialize_workflow_session(options)

         try:
-            success = await self._execute_workflow_with_timing(options, start_time)
+            success = await self._execute_workflow_with_timing(
+                options, start_time, workflow_id
+            )
+
+            # Finalize performance monitoring
+            workflow_perf = self._performance_monitor.end_workflow(
+                workflow_id, success
+            )
+            self.logger.info(
+                f"Workflow performance: {workflow_perf.performance_score:.1f} score, "
+                f"{workflow_perf.total_duration_seconds:.2f}s duration"
+            )
+
             return success

         except KeyboardInterrupt:
+            self._performance_monitor.end_workflow(workflow_id, False)
             return self._handle_user_interruption()

         except Exception as e:
+            self._performance_monitor.end_workflow(workflow_id, False)
             return self._handle_workflow_exception(e)

         finally:
             self.session.cleanup_resources()
+            # Optimize memory after workflow completion
+            self._memory_optimizer.optimize_memory()
+            await self._cache.stop()

     def _initialize_workflow_session(self, options: OptionsProtocol) -> None:
         self.session.initialize_session_tracking(options)
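The log line implies `end_workflow()` returns a record exposing at least `performance_score` and `total_duration_seconds`. A plausible shape, assuming a dataclass-based record and a monotonic-clock monitor (only the two logged fields are confirmed by this diff):

```python
import time
from dataclasses import dataclass


@dataclass
class WorkflowPerformance:
    workflow_id: str
    success: bool
    total_duration_seconds: float
    performance_score: float  # assumed to be a 0-100 style score


class PerformanceMonitorSketch:
    def __init__(self) -> None:
        self._started: dict[str, float] = {}

    def start_workflow(self, workflow_id: str) -> None:
        self._started[workflow_id] = time.monotonic()

    def end_workflow(self, workflow_id: str, success: bool) -> WorkflowPerformance:
        duration = time.monotonic() - self._started.pop(workflow_id)
        score = 100.0 if success else 0.0  # placeholder scoring rule
        return WorkflowPerformance(workflow_id, success, duration, score)
```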
@@ -112,9 +151,9 @@
         )

     async def _execute_workflow_with_timing(
-        self, options: OptionsProtocol, start_time: float
+        self, options: OptionsProtocol, start_time: float, workflow_id: str
     ) -> bool:
-        success = await self._execute_workflow_phases(options)
+        success = await self._execute_workflow_phases(options, workflow_id)
         self.session.finalize_session(start_time, success)

         duration = time.time() - start_time
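From here on the diff threads `workflow_id` explicitly through every signature. An alternative worth comparing is a `contextvars.ContextVar`, which flows through `await` chains and `asyncio.create_task` without touching signatures; this sketch is a design comparison only, not what crackerjack does:

```python
import asyncio
import contextvars

# Context-local workflow id: each task inherits the value from the
# context it was spawned in.
workflow_id_var: contextvars.ContextVar[str] = contextvars.ContextVar(
    "workflow_id", default="unknown"
)


async def run_phase(name: str) -> None:
    # No parameter threading: read the ambient workflow id.
    print(f"{name} running under {workflow_id_var.get()}")


async def run_workflow() -> None:
    workflow_id_var.set("workflow_42")
    await asyncio.gather(run_phase("testing"), run_phase("hooks"))


asyncio.run(run_workflow())
```

Explicit parameters, as chosen here, keep call sites greppable at the cost of wider signatures.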
@@ -160,56 +199,144 @@
         )
         return False

-    async def _execute_workflow_phases(self, options: OptionsProtocol) -> bool:
+    async def _execute_workflow_phases(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> bool:
+        """Execute all workflow phases with proper security gates and performance monitoring."""
         success = True
-        self.phases.run_configuration_phase(options)

-        #
-
-
+        # Configuration phase with monitoring
+        with phase_monitor(workflow_id, "configuration"):
+            config_success = self.phases.run_configuration_phase(options)
+            success = success and config_success
+
+        # Execute quality phase (includes testing and comprehensive checks)
+        quality_success = await self._execute_quality_phase(options, workflow_id)
+        if not quality_success:
             success = False
-
-
+            # For publishing workflows, enforce security gates
+            if self._is_publishing_workflow(options):
+                return False  # Exit early - publishing requires ALL quality checks
+
+        # Execute publishing workflow if requested
+        if not await self._execute_publishing_workflow(options, workflow_id):
             success = False
-            self.session.fail_task("workflow", "Publishing failed")
             return False
-
+
+        # Execute commit workflow if requested
+        if not await self._execute_commit_workflow(options, workflow_id):
             success = False

         return success

-
+    def _is_publishing_workflow(self, options: OptionsProtocol) -> bool:
+        """Check if this is a publishing workflow that requires strict security gates."""
+        return bool(options.publish or options.all or options.commit)
+
+    async def _execute_publishing_workflow(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> bool:
+        """Execute publishing workflow with proper error handling and monitoring."""
+        if not options.publish and not options.all:
+            return True
+
+        with phase_monitor(workflow_id, "publishing"):
+            if not self.phases.run_publishing_phase(options):
+                self.session.fail_task("workflow", "Publishing failed")
+                return False
+        return True
+
+    async def _execute_commit_workflow(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> bool:
+        """Execute commit workflow with proper error handling and monitoring."""
+        if not options.commit:
+            return True
+
+        with phase_monitor(workflow_id, "commit"):
+            if not self.phases.run_commit_phase(options):
+                return False
+        return True
+
+    async def _execute_quality_phase(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> bool:
         if hasattr(options, "fast") and options.fast:
-            return self._run_fast_hooks_phase(options)
+            return await self._run_fast_hooks_phase_monitored(options, workflow_id)
         if hasattr(options, "comp") and options.comp:
-            return self._run_comprehensive_hooks_phase(options)
+            return await self._run_comprehensive_hooks_phase_monitored(
+                options, workflow_id
+            )
         if getattr(options, "test", False):
-            return await self._execute_test_workflow(options)
-        return self._execute_standard_hooks_workflow(options)
+            return await self._execute_test_workflow(options, workflow_id)
+        return await self._execute_standard_hooks_workflow_monitored(
+            options, workflow_id
+        )

-    async def _execute_test_workflow(self, options: OptionsProtocol) -> bool:
+    async def _execute_test_workflow(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> bool:
         iteration = self._start_iteration_tracking(options)

-
+        # Execute initial phases (fast hooks + optional cleaning)
+        if not await self._execute_initial_phases(options, workflow_id, iteration):
             return False

-        # Run
-
-
-
-
-
+        # Run main quality phases
+        (
+            testing_passed,
+            comprehensive_passed,
+        ) = await self._run_main_quality_phases_async(options, workflow_id)
+
+        # Handle workflow completion based on agent mode
+        return await self._handle_workflow_completion(
+            options, iteration, testing_passed, comprehensive_passed, workflow_id
+        )
+
+    async def _execute_initial_phases(
+        self, options: OptionsProtocol, workflow_id: str, iteration: int
+    ) -> bool:
+        """Execute fast hooks and optional code cleaning phases."""
+        # Fast hooks with performance monitoring
+        with phase_monitor(workflow_id, "fast_hooks") as monitor:
+            if not await self._run_initial_fast_hooks_async(
+                options, iteration, monitor
+            ):
                 return False
-        self._mark_code_cleaning_complete()

-
+        # Run code cleaning if enabled
+        return self._execute_optional_cleaning_phase(options)
+
+    def _execute_optional_cleaning_phase(self, options: OptionsProtocol) -> bool:
+        """Execute code cleaning phase if enabled."""
+        if not getattr(options, "clean", False):
+            return True
+
+        if not self._run_code_cleaning_phase(options):
+            return False
+
+        # Run fast hooks again after cleaning for sanity check
+        if not self._run_post_cleaning_fast_hooks(options):
+            return False
+
+        self._mark_code_cleaning_complete()
+        return True

+    async def _handle_workflow_completion(
+        self,
+        options: OptionsProtocol,
+        iteration: int,
+        testing_passed: bool,
+        comprehensive_passed: bool,
+        workflow_id: str = "unknown",
+    ) -> bool:
+        """Handle workflow completion based on agent mode."""
         if options.ai_agent:
             return await self._handle_ai_agent_workflow(
-                options, iteration, testing_passed, comprehensive_passed
+                options, iteration, testing_passed, comprehensive_passed, workflow_id
             )

-        return self._handle_standard_workflow(
+        return await self._handle_standard_workflow(
             options, iteration, testing_passed, comprehensive_passed
         )

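Every restructured phase above runs under `phase_monitor(workflow_id, name)`, used both bare and via `as monitor`, and the yielded object exposes `record_sequential_op()`. The implementation lives in the new `services/performance_monitor.py` (not part of this hunk); a minimal context-manager sketch consistent with those call sites, with the timing and reporting details assumed:

```python
import contextlib
import time
import typing as t


class PhaseMonitor:
    """Stand-in for the object yielded to `as monitor` in the diff."""

    def __init__(self, workflow_id: str, phase: str) -> None:
        self.workflow_id = workflow_id
        self.phase = phase
        self.sequential_ops = 0

    def record_sequential_op(self) -> None:
        self.sequential_ops += 1


@contextlib.contextmanager
def phase_monitor(workflow_id: str, phase: str) -> t.Iterator[PhaseMonitor]:
    monitor = PhaseMonitor(workflow_id, phase)
    start = time.monotonic()
    try:
        yield monitor
    finally:
        elapsed = time.monotonic() - start
        # The real service presumably records this against the workflow;
        # printing stands in for that reporting step.
        print(f"{workflow_id}/{phase}: {elapsed:.3f}s")
```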
@@ -227,9 +354,38 @@
             return False
         return True

-    def
-
-
+    async def _run_main_quality_phases_async(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> tuple[bool, bool]:
+        # Run testing and comprehensive phases in parallel where possible
+        testing_task = asyncio.create_task(
+            self._run_testing_phase_async(options, workflow_id)
+        )
+        comprehensive_task = asyncio.create_task(
+            self._run_comprehensive_hooks_phase_monitored(options, workflow_id)
+        )
+
+        results = await asyncio.gather(
+            testing_task, comprehensive_task, return_exceptions=True
+        )
+
+        # Handle exceptions and ensure boolean types
+        testing_result, comprehensive_result = results
+
+        if isinstance(testing_result, Exception):
+            self.logger.error(f"Testing phase failed with exception: {testing_result}")
+            testing_passed = False
+        else:
+            testing_passed = bool(testing_result)
+
+        if isinstance(comprehensive_result, Exception):
+            self.logger.error(
+                f"Comprehensive hooks failed with exception: {comprehensive_result}"
+            )
+            comprehensive_passed = False
+        else:
+            comprehensive_passed = bool(comprehensive_result)
+
         return testing_passed, comprehensive_passed

     async def _handle_ai_agent_workflow(
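`_run_main_quality_phases_async` relies on `asyncio.gather(..., return_exceptions=True)` so that one crashing phase surfaces as a value instead of cancelling its sibling, then coerces each result to a bool. One caveat: the two wrapped phases call synchronous methods internally, so they only genuinely overlap if the blocking work is offloaded. A generic, self-contained sketch of the same pattern, using `asyncio.to_thread` to make the overlap real:

```python
import asyncio


def run_tests() -> bool:  # stands in for a synchronous phase body
    return True


def run_hooks() -> bool:
    raise RuntimeError("hook runner crashed")


async def main() -> tuple[bool, ...]:
    results = await asyncio.gather(
        asyncio.to_thread(run_tests),  # offload so the phases can overlap
        asyncio.to_thread(run_hooks),
        return_exceptions=True,  # a crash becomes a value, not a cancellation
    )
    # Normalize: an exception counts as a failed phase.
    return tuple(not isinstance(r, Exception) and bool(r) for r in results)


print(asyncio.run(main()))  # (True, False)
```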
@@ -238,38 +394,127 @@
         iteration: int,
         testing_passed: bool,
         comprehensive_passed: bool,
+        workflow_id: str = "unknown",
     ) -> bool:
-
-
-
-
-
+        # Handle security gates first
+        if not await self._process_security_gates(options):
+            return False
+
+        # Determine if AI fixing is needed
+        needs_ai_fixing = self._determine_ai_fixing_needed(
+            testing_passed, comprehensive_passed, bool(options.publish or options.all)
+        )
+
+        if needs_ai_fixing:
+            return await self._execute_ai_fixing_workflow(options, iteration)
+
+        # Handle success case without AI fixing
+        return self._finalize_ai_workflow_success(
+            options, iteration, testing_passed, comprehensive_passed
+        )
+
+    async def _process_security_gates(self, options: OptionsProtocol) -> bool:
+        """Process security gates for publishing operations."""
+        publishing_requested, security_blocks = (
+            self._check_security_gates_for_publishing(options)
+        )
+
+        if not (publishing_requested and security_blocks):
+            return True
+
+        # Try AI fixing for security issues, then re-check
+        security_fix_result = await self._handle_security_gate_failure(
+            options, allow_ai_fixing=True
+        )
+        return security_fix_result

+    async def _execute_ai_fixing_workflow(
+        self, options: OptionsProtocol, iteration: int
+    ) -> bool:
+        """Execute AI fixing workflow and handle debugging."""
+        success = await self._run_ai_agent_fixing_phase(options)
         if self._should_debug():
-            self.debugger.log_iteration_end(iteration,
-            return
+            self.debugger.log_iteration_end(iteration, success)
+        return success

-    def _handle_standard_workflow(
+    def _finalize_ai_workflow_success(
         self,
         options: OptionsProtocol,
         iteration: int,
         testing_passed: bool,
         comprehensive_passed: bool,
     ) -> bool:
-
+        """Finalize AI workflow when no fixing is needed."""
+        publishing_requested = bool(options.publish or options.all)

-
+        final_success = self._determine_workflow_success(
+            testing_passed, comprehensive_passed, publishing_requested
+        )
+
+        self._show_partial_success_warning_if_needed(
+            publishing_requested, final_success, testing_passed, comprehensive_passed
+        )
+
+        if self._should_debug():
+            self.debugger.log_iteration_end(iteration, final_success)
+
+        return final_success
+
+    def _show_partial_success_warning_if_needed(
+        self,
+        publishing_requested: bool,
+        final_success: bool,
+        testing_passed: bool,
+        comprehensive_passed: bool,
+    ) -> None:
+        """Show security audit warning for partial success in publishing workflows."""
+        should_show_warning = (
+            publishing_requested
+            and final_success
+            and not (testing_passed and comprehensive_passed)
+        )
+
+        if should_show_warning:
+            self._show_security_audit_warning()
+
+    async def _handle_standard_workflow(
+        self,
+        options: OptionsProtocol,
+        iteration: int,
+        testing_passed: bool,
+        comprehensive_passed: bool,
+    ) -> bool:
+        # Check security gates for publishing operations
+        publishing_requested, security_blocks = (
+            self._check_security_gates_for_publishing(options)
+        )
+
+        if publishing_requested and security_blocks:
+            # Standard workflow cannot bypass security gates
+            return await self._handle_security_gate_failure(options)
+
+        # Determine success based on publishing requirements
+        success = self._determine_workflow_success(
+            testing_passed,
+            comprehensive_passed,
+            publishing_requested,
+        )
+
+        # Show security audit warning for partial success in publishing workflows
+        if (
+            publishing_requested
+            and success
+            and not (testing_passed and comprehensive_passed)
+        ):
+            self._show_security_audit_warning()
+        elif publishing_requested and not success:
             self.console.print(
-
+                "[red]❌ Quality checks failed - cannot proceed to publishing[/red]"
             )
-
-
-
-
-        if not comprehensive_passed:
-            self.console.print(
-                "[yellow] → Comprehensive hooks reported failure despite appearing successful[/yellow]"
-            )
+
+        # Show verbose failure details if requested
+        if not success and getattr(options, "verbose", False):
+            self._show_verbose_failure_details(testing_passed, comprehensive_passed)

         if options.ai_agent and self._should_debug():
             self.debugger.log_iteration_end(iteration, success)
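Both the AI-agent and standard paths share one invariant: if the security status cannot be determined, publishing is blocked (fail closed). Reduced to a standalone sketch of just that decision:

```python
import typing as t


def publishing_allowed(
    publishing_requested: bool,
    has_critical_failures: t.Callable[[], bool],
) -> bool:
    """Fail-closed gate: an unverifiable security status blocks publishing."""
    if not publishing_requested:
        return True  # nothing to gate
    try:
        return not has_critical_failures()
    except Exception:
        # The audit itself failed, so safety cannot be proven: block.
        return False


def broken_audit() -> bool:
    raise OSError("session tracker unavailable")


assert publishing_allowed(False, broken_audit) is True  # no publish, no gate
assert publishing_allowed(True, broken_audit) is False  # unknown status blocks
```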
@@ -434,36 +679,67 @@
             self._mcp_state_manager.update_stage_status("comprehensive", "completed")

     async def _run_ai_agent_fixing_phase(self, options: OptionsProtocol) -> bool:
-        self._update_mcp_status("ai_fixing", "running")
-        self.logger.info("Starting AI agent fixing phase")
-        self._log_debug_phase_start()
+        self._initialize_ai_fixing_phase(options)

         try:
-            #
-
-            if getattr(options, "clean", False) and not self._has_code_cleaning_run():
-                self.console.print(
-                    "\n[bold yellow]🤖 AI agents recommend running code cleaning first for better results...[/bold yellow]"
-                )
-                if self._run_code_cleaning_phase(options):
-                    # Run fast hooks sanity check after cleaning
-                    self._run_post_cleaning_fast_hooks(options)
-                    self._mark_code_cleaning_complete()
+            # Prepare environment for AI agents
+            self._prepare_ai_fixing_environment(options)

-
-            issues = await self._collect_issues_from_failures()
+            # Setup coordinator and collect issues
+            agent_coordinator, issues = await self._setup_ai_fixing_workflow()

             if not issues:
                 return self._handle_no_issues_found()

-
-
-
-            return await self._process_fix_results(options, fix_result)
+            # Execute AI fixing
+            return await self._execute_ai_fixes(options, agent_coordinator, issues)

         except Exception as e:
             return self._handle_fixing_phase_error(e)

+    def _initialize_ai_fixing_phase(self, options: OptionsProtocol) -> None:
+        """Initialize the AI fixing phase with status updates and logging."""
+        self._update_mcp_status("ai_fixing", "running")
+        self.logger.info("Starting AI agent fixing phase")
+        self._log_debug_phase_start()
+
+    def _prepare_ai_fixing_environment(self, options: OptionsProtocol) -> None:
+        """Prepare the environment for AI agents by running optional code cleaning."""
+        should_run_cleaning = (
+            getattr(options, "clean", False) and not self._has_code_cleaning_run()
+        )
+
+        if not should_run_cleaning:
+            return
+
+        self.console.print(
+            "\n[bold yellow]🤖 AI agents recommend running code cleaning first for better results...[/bold yellow]"
+        )
+
+        if self._run_code_cleaning_phase(options):
+            # Run fast hooks sanity check after cleaning
+            self._run_post_cleaning_fast_hooks(options)
+            self._mark_code_cleaning_complete()
+
+    async def _setup_ai_fixing_workflow(
+        self,
+    ) -> tuple[AgentCoordinator, list[t.Any]]:
+        """Setup agent coordinator and collect issues to fix."""
+        agent_coordinator = self._setup_agent_coordinator()
+        issues = await self._collect_issues_from_failures()
+        return agent_coordinator, issues
+
+    async def _execute_ai_fixes(
+        self,
+        options: OptionsProtocol,
+        agent_coordinator: AgentCoordinator,
+        issues: list[t.Any],
+    ) -> bool:
+        """Execute AI fixes and process results."""
+        self.logger.info(f"AI agents will attempt to fix {len(issues)} issues")
+        fix_result = await agent_coordinator.handle_issues(issues)
+        return await self._process_fix_results(options, fix_result)
+
     def _log_debug_phase_start(self) -> None:
         if self._should_debug():
             self.debugger.log_workflow_phase(
@@ -995,6 +1271,381 @@
             self.debugger.log_test_failures(test_count)
             self.debugger.log_hook_failures(hook_count)

+    def _check_security_gates_for_publishing(
+        self, options: OptionsProtocol
+    ) -> tuple[bool, bool]:
+        """Check if publishing is requested and if security gates block it.
+
+        Returns:
+            tuple[bool, bool]: (publishing_requested, security_blocks_publishing)
+        """
+        publishing_requested = bool(options.publish or options.all or options.commit)
+
+        if not publishing_requested:
+            return False, False
+
+        # Check security gates for publishing operations
+        try:
+            security_blocks_publishing = self._check_security_critical_failures()
+            return publishing_requested, security_blocks_publishing
+        except Exception as e:
+            # Fail securely if security check fails
+            self.logger.warning(f"Security check failed: {e} - blocking publishing")
+            self.console.print(
+                "[red]🔒 SECURITY CHECK FAILED: Unable to verify security status - publishing BLOCKED[/red]"
+            )
+            # Return True for security_blocks to fail securely
+            return publishing_requested, True
+
+    async def _handle_security_gate_failure(
+        self, options: OptionsProtocol, allow_ai_fixing: bool = False
+    ) -> bool:
+        """Handle security gate failures with optional AI fixing.
+
+        Args:
+            options: Workflow options
+            allow_ai_fixing: Whether AI fixing is allowed for security issues
+
+        Returns:
+            bool: True if security issues resolved, False if still blocked
+        """
+        self.console.print(
+            "[red]🔒 SECURITY GATE: Critical security checks failed[/red]"
+        )
+
+        if allow_ai_fixing:
+            self.console.print(
+                "[red]Security-critical hooks (bandit, pyright, gitleaks) must pass before publishing[/red]"
+            )
+            self.console.print(
+                "[yellow]🤖 Attempting AI-assisted security issue resolution...[/yellow]"
+            )
+
+            # Try AI fixing for security issues
+            ai_fix_success = await self._run_ai_agent_fixing_phase(options)
+            if ai_fix_success:
+                # Re-check security after AI fixing
+                try:
+                    security_still_blocks = self._check_security_critical_failures()
+                    if not security_still_blocks:
+                        self.console.print(
+                            "[green]✅ AI agents resolved security issues - publishing allowed[/green]"
+                        )
+                        return True
+                    else:
+                        self.console.print(
+                            "[red]🔒 Security issues persist after AI fixing - publishing still BLOCKED[/red]"
+                        )
+                        return False
+                except Exception as e:
+                    self.logger.warning(
+                        f"Security re-check failed: {e} - blocking publishing"
+                    )
+                    return False
+            return False
+        else:
+            # Standard workflow cannot bypass security gates
+            self.console.print(
+                "[red]Security-critical hooks (bandit, pyright, gitleaks) must pass before publishing[/red]"
+            )
+            return False
+
+    def _determine_ai_fixing_needed(
+        self,
+        testing_passed: bool,
+        comprehensive_passed: bool,
+        publishing_requested: bool,
+    ) -> bool:
+        """Determine if AI fixing is needed based on test results and publishing requirements."""
+        if publishing_requested:
+            # For publish/commit workflows, trigger AI fixing if either fails (since both must pass)
+            return not testing_passed or not comprehensive_passed
+        # For regular workflows, trigger AI fixing if either fails
+        return not testing_passed or not comprehensive_passed
+
+    def _determine_workflow_success(
+        self,
+        testing_passed: bool,
+        comprehensive_passed: bool,
+        publishing_requested: bool,
+    ) -> bool:
+        """Determine workflow success based on test results and workflow type."""
+        if publishing_requested:
+            # For publishing workflows, ALL quality checks (tests AND comprehensive hooks) must pass
+            return testing_passed and comprehensive_passed
+        # For regular workflows, both must pass as well
+        return testing_passed and comprehensive_passed
+
+    def _show_verbose_failure_details(
+        self, testing_passed: bool, comprehensive_passed: bool
+    ) -> None:
+        """Show detailed failure information in verbose mode."""
+        self.console.print(
+            f"[yellow]⚠️ Quality phase results - testing_passed: {testing_passed}, comprehensive_passed: {comprehensive_passed}[/yellow]"
+        )
+        if not testing_passed:
+            self.console.print("[yellow]  → Tests reported failure[/yellow]")
+        if not comprehensive_passed:
+            self.console.print(
+                "[yellow]  → Comprehensive hooks reported failure[/yellow]"
+            )
+
+    def _check_security_critical_failures(self) -> bool:
+        """Check if any security-critical hooks have failed.
+
+        Returns:
+            True if security-critical hooks failed and block publishing
+        """
+        try:
+            from crackerjack.security.audit import SecurityAuditor
+
+            auditor = SecurityAuditor()
+
+            # Get hook results - we need to be careful not to re-run hooks
+            # Instead, check the session tracker for recent failures
+            fast_results = self._get_recent_fast_hook_results()
+            comprehensive_results = self._get_recent_comprehensive_hook_results()
+
+            # Generate security audit report
+            audit_report = auditor.audit_hook_results(
+                fast_results, comprehensive_results
+            )
+
+            # Store audit report for later use
+            self._last_security_audit = audit_report
+
+            # Block publishing if critical failures exist
+            return audit_report.has_critical_failures
+
+        except Exception as e:
+            # Fail securely - if we can't determine security status, block publishing
+            self.logger.warning(f"Security audit failed: {e} - failing securely")
+            # Re-raise the exception so it can be caught by the calling method
+            raise
+
+    def _get_recent_fast_hook_results(self) -> list[t.Any]:
+        """Get recent fast hook results from session tracker."""
+        # Try to get results from session tracker
+        results = self._extract_hook_results_from_session("fast_hooks")
+
+        # If no results from session, create mock failed results for critical hooks
+        if not results:
+            results = self._create_mock_hook_results(["gitleaks"])
+
+        return results
+
+    def _extract_hook_results_from_session(self, hook_type: str) -> list[t.Any]:
+        """Extract hook results from session tracker for given hook type."""
+        results = []
+
+        session_tracker = self._get_session_tracker()
+        if not session_tracker:
+            return results
+
+        for task_id, task_data in session_tracker.tasks.items():
+            if task_id == hook_type and hasattr(task_data, "hook_results"):
+                if task_data.hook_results:
+                    results.extend(task_data.hook_results)
+
+        return results
+
+    def _get_session_tracker(self) -> t.Any | None:
+        """Get session tracker if available."""
+        return (
+            getattr(self.session, "session_tracker", None)
+            if hasattr(self.session, "session_tracker")
+            else None
+        )
+
+    def _create_mock_hook_results(self, critical_hooks: list[str]) -> list[t.Any]:
+        """Create mock failed results for critical hooks to fail securely."""
+        results = []
+
+        for hook_name in critical_hooks:
+            mock_result = self._create_mock_hook_result(hook_name)
+            results.append(mock_result)
+
+        return results
+
+    def _create_mock_hook_result(self, hook_name: str) -> t.Any:
+        """Create a mock result that appears to have failed for security purposes."""
+        return type(
+            "MockResult",
+            (),
+            {
+                "name": hook_name,
+                "status": "unknown",  # Unknown status = fail securely
+                "output": "Unable to determine hook status",
+            },
+        )()
+
+    def _get_recent_comprehensive_hook_results(self) -> list[t.Any]:
+        """Get recent comprehensive hook results from session tracker."""
+        # Try to get results from session tracker
+        results = self._extract_hook_results_from_session("comprehensive_hooks")
+
+        # If no results from session, create mock failed results for critical hooks
+        if not results:
+            results = self._create_mock_hook_results(["bandit", "pyright"])
+
+        return results
+
+    def _is_security_critical_failure(self, result: t.Any) -> bool:
+        """Check if a hook result represents a security-critical failure."""
+
+        # List of security-critical hook names (fail-safe approach)
+        security_critical_hooks = {
+            "bandit",  # Security vulnerability scanning
+            "pyright",  # Type safety prevents security holes
+            "gitleaks",  # Secret detection
+        }
+
+        hook_name = getattr(result, "name", "").lower()
+        is_failed = getattr(result, "status", "unknown") in (
+            "failed",
+            "error",
+            "timeout",
+        )
+
+        return hook_name in security_critical_hooks and is_failed
+
+    def _show_security_audit_warning(self) -> None:
+        """Show security audit warning when proceeding with partial success."""
+        # Use stored audit report if available
+        audit_report = getattr(self, "_last_security_audit", None)
+
+        if audit_report:
+            self.console.print(
+                "[yellow]⚠️ SECURITY AUDIT: Proceeding with partial quality success[/yellow]"
+            )
+
+            # Show security status
+            for warning in audit_report.security_warnings:
+                if "CRITICAL" in warning:
+                    # This shouldn't happen if we're showing warnings, but fail-safe
+                    self.console.print(f"[red]{warning}[/red]")
+                elif "HIGH" in warning:
+                    self.console.print(f"[yellow]{warning}[/yellow]")
+                else:
+                    self.console.print(f"[blue]{warning}[/blue]")
+
+            # Show recommendations
+            if audit_report.recommendations:
+                self.console.print("[bold]Security Recommendations:[/bold]")
+                for rec in audit_report.recommendations[:3]:  # Show top 3
+                    self.console.print(f"[dim]{rec}[/dim]")
+        else:
+            # Fallback if no audit report available
+            self.console.print(
+                "[yellow]⚠️ SECURITY AUDIT: Proceeding with partial quality success[/yellow]"
+            )
+            self.console.print(
+                "[yellow]✅ Security-critical checks (bandit, pyright, gitleaks) have passed[/yellow]"
+            )
+            self.console.print(
+                "[yellow]⚠️ Some non-critical quality checks failed - consider reviewing before production deployment[/yellow]"
+            )
+
+    # Performance-optimized async methods
+    async def _run_initial_fast_hooks_async(
+        self, options: OptionsProtocol, iteration: int, monitor: t.Any
+    ) -> bool:
+        """Run initial fast hooks asynchronously with monitoring."""
+        monitor.record_sequential_op()  # Fast hooks run sequentially for safety
+        fast_hooks_passed = self._run_fast_hooks_phase(options)
+        if not fast_hooks_passed:
+            if options.ai_agent and self._should_debug():
+                self.debugger.log_iteration_end(iteration, False)
+            return False
+        return True
+
+    async def _run_fast_hooks_phase_monitored(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> bool:
+        """Run fast hooks phase with performance monitoring."""
+        with phase_monitor(workflow_id, "fast_hooks") as monitor:
+            monitor.record_sequential_op()
+            return self._run_fast_hooks_phase(options)
+
+    async def _run_comprehensive_hooks_phase_monitored(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> bool:
+        """Run comprehensive hooks phase with performance monitoring."""
+        with phase_monitor(workflow_id, "comprehensive_hooks") as monitor:
+            monitor.record_sequential_op()
+            return self._run_comprehensive_hooks_phase(options)
+
+    async def _run_testing_phase_async(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> bool:
+        """Run testing phase asynchronously with monitoring."""
+        with phase_monitor(workflow_id, "testing") as monitor:
+            monitor.record_sequential_op()
+            return self._run_testing_phase(options)
+
+    async def _execute_standard_hooks_workflow_monitored(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> bool:
+        """Execute standard hooks workflow with performance monitoring."""
+        with phase_monitor(workflow_id, "hooks") as monitor:
+            self._update_hooks_status_running()
+
+            # Execute fast hooks phase
+            fast_hooks_success = self._execute_monitored_fast_hooks_phase(
+                options, monitor
+            )
+            if not fast_hooks_success:
+                self._handle_hooks_completion(False)
+                return False
+
+            # Execute optional cleaning phase
+            if not self._execute_monitored_cleaning_phase(options):
+                self._handle_hooks_completion(False)
+                return False
+
+            # Execute comprehensive hooks phase
+            comprehensive_success = self._execute_monitored_comprehensive_phase(
+                options, monitor
+            )
+
+            # Complete workflow
+            hooks_success = fast_hooks_success and comprehensive_success
+            self._handle_hooks_completion(hooks_success)
+            return hooks_success
+
+    def _execute_monitored_fast_hooks_phase(
+        self, options: OptionsProtocol, monitor: t.Any
+    ) -> bool:
+        """Execute fast hooks phase with monitoring."""
+        fast_hooks_success = self._run_fast_hooks_phase(options)
+        if fast_hooks_success:
+            monitor.record_sequential_op()
+        return fast_hooks_success
+
+    def _execute_monitored_cleaning_phase(self, options: OptionsProtocol) -> bool:
+        """Execute optional code cleaning phase."""
+        if not getattr(options, "clean", False):
+            return True
+
+        if not self._run_code_cleaning_phase(options):
+            return False
+
+        # Run fast hooks again after cleaning for sanity check
+        if not self._run_post_cleaning_fast_hooks(options):
+            return False
+
+        self._mark_code_cleaning_complete()
+        return True
+
+    def _execute_monitored_comprehensive_phase(
+        self, options: OptionsProtocol, monitor: t.Any
+    ) -> bool:
+        """Execute comprehensive hooks phase with monitoring."""
+        comprehensive_success = self._run_comprehensive_hooks_phase(options)
+        if comprehensive_success:
+            monitor.record_sequential_op()
+        return comprehensive_success
+

 class WorkflowOrchestrator:
     def __init__(
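One implementation detail from the block above: `_create_mock_hook_result` synthesizes a stand-in object with `type("MockResult", (), {...})()`. `types.SimpleNamespace` expresses the same idea with less machinery; an equivalent sketch:

```python
from types import SimpleNamespace


def make_mock_hook_result(hook_name: str) -> SimpleNamespace:
    # Same observable behavior as type("MockResult", (), {...})(): an object
    # whose "unknown" status is treated as a failure by the fail-secure audit.
    return SimpleNamespace(
        name=hook_name,
        status="unknown",
        output="Unable to determine hook status",
    )


result = make_mock_hook_result("gitleaks")
print(result.name, result.status)  # gitleaks unknown
```

The `type(...)` form sets class attributes while `SimpleNamespace` sets instance attributes, but `getattr`-based consumers like `_is_security_critical_failure` cannot tell the difference.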
@@ -1063,8 +1714,8 @@ class WorkflowOrchestrator:
         session_id = getattr(self, "web_job_id", None) or str(int(time.time()))[:8]
         debug_log_file = log_manager.create_debug_log_file(session_id)

-        # Set log level based on debug or verbose flags
-        log_level = "DEBUG" if self.debug or self.verbose else "INFO"
+        # Set log level based on debug flag only - verbose should not enable DEBUG logs
+        log_level = "DEBUG" if self.debug else "INFO"
         setup_structured_logging(
             level=log_level, json_output=False, log_file=debug_log_file
         )