crackerjack 0.31.18-py3-none-any.whl → 0.33.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crackerjack might be problematic.

Files changed (43)
  1. crackerjack/CLAUDE.md +71 -452
  2. crackerjack/__main__.py +1 -1
  3. crackerjack/agents/refactoring_agent.py +67 -46
  4. crackerjack/cli/handlers.py +7 -7
  5. crackerjack/config/hooks.py +36 -6
  6. crackerjack/core/async_workflow_orchestrator.py +2 -2
  7. crackerjack/core/enhanced_container.py +67 -0
  8. crackerjack/core/phase_coordinator.py +211 -44
  9. crackerjack/core/workflow_orchestrator.py +723 -72
  10. crackerjack/dynamic_config.py +1 -25
  11. crackerjack/managers/publish_manager.py +22 -5
  12. crackerjack/managers/test_command_builder.py +19 -13
  13. crackerjack/managers/test_manager.py +15 -4
  14. crackerjack/mcp/server_core.py +162 -34
  15. crackerjack/mcp/tools/core_tools.py +1 -1
  16. crackerjack/mcp/tools/execution_tools.py +16 -3
  17. crackerjack/mcp/tools/workflow_executor.py +130 -40
  18. crackerjack/mixins/__init__.py +5 -0
  19. crackerjack/mixins/error_handling.py +214 -0
  20. crackerjack/models/config.py +9 -0
  21. crackerjack/models/protocols.py +114 -0
  22. crackerjack/models/task.py +3 -0
  23. crackerjack/security/__init__.py +1 -0
  24. crackerjack/security/audit.py +226 -0
  25. crackerjack/services/config.py +3 -2
  26. crackerjack/services/config_merge.py +11 -5
  27. crackerjack/services/coverage_ratchet.py +22 -0
  28. crackerjack/services/git.py +121 -22
  29. crackerjack/services/initialization.py +25 -9
  30. crackerjack/services/memory_optimizer.py +477 -0
  31. crackerjack/services/parallel_executor.py +474 -0
  32. crackerjack/services/performance_benchmarks.py +292 -577
  33. crackerjack/services/performance_cache.py +443 -0
  34. crackerjack/services/performance_monitor.py +633 -0
  35. crackerjack/services/security.py +63 -0
  36. crackerjack/services/security_logger.py +9 -1
  37. crackerjack/services/terminal_utils.py +0 -0
  38. crackerjack/tools/validate_regex_patterns.py +14 -0
  39. {crackerjack-0.31.18.dist-info → crackerjack-0.33.0.dist-info}/METADATA +2 -2
  40. {crackerjack-0.31.18.dist-info → crackerjack-0.33.0.dist-info}/RECORD +43 -34
  41. {crackerjack-0.31.18.dist-info → crackerjack-0.33.0.dist-info}/WHEEL +0 -0
  42. {crackerjack-0.31.18.dist-info → crackerjack-0.33.0.dist-info}/entry_points.txt +0 -0
  43. {crackerjack-0.31.18.dist-info → crackerjack-0.33.0.dist-info}/licenses/LICENSE +0 -0
crackerjack/mcp/tools/workflow_executor.py

@@ -13,8 +13,24 @@ async def execute_crackerjack_workflow(
 ) -> dict[str, t.Any]:
     job_id = str(uuid.uuid4())[:8]

+    # Configure extended timeout for long-running test operations
+    execution_timeout = kwargs.get("execution_timeout", 900)  # 15 minutes default
+    if kwargs.get("test", False) or kwargs.get("testing", False):
+        execution_timeout = max(execution_timeout, 1200)  # 20 minutes for test runs
+
     try:
-        return await _execute_crackerjack_sync(job_id, args, kwargs, get_context())
+        # Add overall execution timeout with keep-alive
+        return await asyncio.wait_for(
+            _execute_crackerjack_sync(job_id, args, kwargs, get_context()),
+            timeout=execution_timeout,
+        )
+    except TimeoutError:
+        return {
+            "job_id": job_id,
+            "status": "timeout",
+            "error": f"Execution timed out after {execution_timeout} seconds",
+            "timestamp": time.time(),
+        }
     except Exception as e:
         import traceback

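The new wrapper leans on asyncio.wait_for, which cancels the awaited coroutine and raises TimeoutError once the deadline passes (the bare "except TimeoutError" works because asyncio.TimeoutError has been an alias of the builtin since Python 3.11). A minimal, self-contained sketch of the same pattern; the slow_job coroutine and the short timeout are illustrative stand-ins, not crackerjack code:

    import asyncio
    import time


    async def slow_job() -> dict[str, object]:
        # Stand-in for _execute_crackerjack_sync(...)
        await asyncio.sleep(5)
        return {"status": "success"}


    async def main() -> dict[str, object]:
        try:
            # wait_for cancels slow_job and raises TimeoutError after 0.1s
            return await asyncio.wait_for(slow_job(), timeout=0.1)
        except TimeoutError:
            return {"status": "timeout", "timestamp": time.time()}


    print(asyncio.run(main()))  # -> {'status': 'timeout', 'timestamp': ...}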
@@ -143,9 +159,9 @@ async def _create_advanced_orchestrator(
     from pathlib import Path

     from crackerjack.core.async_workflow_orchestrator import AsyncWorkflowOrchestrator
-    from crackerjack.core.enhanced_container import EnhancedContainer
+    from crackerjack.core.enhanced_container import EnhancedDependencyContainer

-    container = EnhancedContainer()
+    container = EnhancedDependencyContainer()

     await _register_core_services(container, Path(working_dir))

@@ -171,41 +187,37 @@ def _create_standard_orchestrator(
 async def _register_core_services(container: t.Any, working_dir: t.Any) -> None:
     from rich.console import Console

-    from crackerjack.core.enhanced_container import ServiceLifetime
     from crackerjack.managers.hook_manager import AsyncHookManager
-    from crackerjack.managers.publish_manager import PublishManager
-    from crackerjack.managers.test_manager import TestManager
+    from crackerjack.managers.publish_manager import PublishManagerImpl
+    from crackerjack.managers.test_manager import TestManagementImpl
     from crackerjack.models.protocols import (
-        HookManagerProtocol,
-        PublishManagerProtocol,
+        FileSystemInterface,
+        HookManager,
+        PublishManager,
         TestManagerProtocol,
     )
     from crackerjack.services.enhanced_filesystem import EnhancedFileSystemService

     console = Console()

-    container.register_service(
-        HookManagerProtocol,
-        AsyncHookManager(console, working_dir),
-        ServiceLifetime.SINGLETON,
+    container.register_singleton(
+        HookManager,
+        factory=lambda: AsyncHookManager(console, working_dir),
     )

-    container.register_service(
+    container.register_singleton(
         TestManagerProtocol,
-        TestManager(console, working_dir),
-        ServiceLifetime.SINGLETON,
+        factory=lambda: TestManagementImpl(console, working_dir),
     )

-    container.register_service(
-        PublishManagerProtocol,
-        PublishManager(console, working_dir),
-        ServiceLifetime.SINGLETON,
+    container.register_singleton(
+        PublishManager,
+        factory=lambda: PublishManagerImpl(console, working_dir),
     )

-    container.register_service(
-        EnhancedFileSystemService,
-        EnhancedFileSystemService(),
-        ServiceLifetime.SINGLETON,
+    container.register_singleton(
+        FileSystemInterface,
+        factory=EnhancedFileSystemService,
     )

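Note the shift from eagerly constructed instances plus a ServiceLifetime enum to register_singleton with a factory= callable, which defers construction until first resolution. A rough sketch of a container with that shape (illustrative only; the real EnhancedDependencyContainer in crackerjack/core/enhanced_container.py may differ):

    import typing as t


    class Container:
        """Illustrative singleton container; not crackerjack's implementation."""

        def __init__(self) -> None:
            self._factories: dict[type, t.Callable[[], t.Any]] = {}
            self._instances: dict[type, t.Any] = {}

        def register_singleton(
            self, interface: type, factory: t.Callable[[], t.Any]
        ) -> None:
            # Store the factory; defer construction until first use.
            self._factories[interface] = factory

        def get(self, interface: type) -> t.Any:
            if interface not in self._instances:
                self._instances[interface] = self._factories[interface]()
            return self._instances[interface]

The factory indirection is what lets the lambdas in the diff close over console and working_dir without building any manager at registration time.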
@@ -218,17 +230,29 @@ async def _run_workflow_iterations(
     options = _create_workflow_options(kwargs)
     max_iterations = kwargs.get("max_iterations", 10)

-    for iteration in range(max_iterations):
-        _update_progress(
-            job_id,
-            {
-                "type": "iteration",
-                "iteration": iteration + 1,
-                "max_iterations": max_iterations,
-                "status": "running",
-            },
-            context,
+    # Start keep-alive task to prevent TCP timeouts
+    keep_alive_task = asyncio.create_task(_keep_alive_heartbeat(job_id, context))
+
+    try:
+        result = await _execute_iterations_loop(
+            job_id, orchestrator, options, kwargs, max_iterations, context
         )
+        return result
+    finally:
+        await _cleanup_keep_alive_task(keep_alive_task)
+
+
+async def _execute_iterations_loop(
+    job_id: str,
+    orchestrator: t.Any,
+    options: t.Any,
+    kwargs: dict[str, t.Any],
+    max_iterations: int,
+    context: t.Any,
+) -> dict[str, t.Any]:
+    """Execute the main iterations loop."""
+    for iteration in range(max_iterations):
+        _update_iteration_progress(job_id, iteration, max_iterations, context)

         try:
             success = await _execute_single_iteration(
@@ -236,13 +260,8 @@ async def _run_workflow_iterations(
             )

             if success:
-                coverage_result = None
-                if kwargs.get("boost_coverage", False):
-                    coverage_result = await _attempt_coverage_improvement(
-                        job_id, orchestrator, context
-                    )
-                return _create_success_result(
-                    job_id, iteration + 1, context, coverage_result
+                return await _handle_iteration_success(
+                    job_id, iteration, orchestrator, kwargs, context
                 )

             if iteration < max_iterations - 1:
@@ -254,6 +273,77 @@ async def _run_workflow_iterations(
     return _create_failure_result(job_id, max_iterations, context)


+def _update_iteration_progress(
+    job_id: str, iteration: int, max_iterations: int, context: t.Any
+) -> None:
+    """Update progress for current iteration."""
+    _update_progress(
+        job_id,
+        {
+            "type": "iteration",
+            "iteration": iteration + 1,
+            "max_iterations": max_iterations,
+            "status": "running",
+        },
+        context,
+    )
+
+
+async def _handle_iteration_success(
+    job_id: str,
+    iteration: int,
+    orchestrator: t.Any,
+    kwargs: dict[str, t.Any],
+    context: t.Any,
+) -> dict[str, t.Any]:
+    """Handle successful iteration."""
+    coverage_result = None
+    if kwargs.get("boost_coverage", False):
+        coverage_result = await _attempt_coverage_improvement(
+            job_id, orchestrator, context
+        )
+    return _create_success_result(job_id, iteration + 1, context, coverage_result)
+
+
+async def _cleanup_keep_alive_task(keep_alive_task: asyncio.Task[t.Any]) -> None:
+    """Clean up the keep-alive task."""
+    if not keep_alive_task.cancelled():
+        keep_alive_task.cancel()
+        try:
+            await keep_alive_task
+        except asyncio.CancelledError:
+            pass
+
+
+async def _keep_alive_heartbeat(job_id: str, context: t.Any) -> None:
+    """Send periodic keep-alive messages to prevent TCP timeouts."""
+    try:
+        while True:
+            # Send heartbeat every 60 seconds (well under 2-minute TCP timeout)
+            await asyncio.sleep(60)
+            _update_progress(
+                job_id,
+                {
+                    "type": "keep_alive",
+                    "status": "heartbeat",
+                    "timestamp": time.time(),
+                    "message": "Keep-alive heartbeat to prevent connection timeout",
+                },
+                context,
+            )
+    except asyncio.CancelledError:
+        # Task was cancelled, cleanup
+        _update_progress(
+            job_id,
+            {
+                "type": "keep_alive",
+                "status": "cancelled",
+                "timestamp": time.time(),
+            },
+            context,
+        )
+
+
 def _create_workflow_options(kwargs: dict[str, t.Any]) -> t.Any:
     from types import SimpleNamespace

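The heartbeat runs as a sibling task created with asyncio.create_task, emits progress every 60 seconds (comfortably under a typical 2-minute idle TCP cutoff), and is cancelled in a finally block so it never outlives the workflow. The same shape as a runnable sketch, with intervals shortened and print standing in for _update_progress:

    import asyncio


    async def heartbeat(interval: float) -> None:
        try:
            while True:
                await asyncio.sleep(interval)
                print("heartbeat")  # stand-in for _update_progress(...)
        except asyncio.CancelledError:
            print("heartbeat cancelled")  # the real code sends a final update here
            raise


    async def main() -> None:
        task = asyncio.create_task(heartbeat(0.2))
        try:
            await asyncio.sleep(0.7)  # stand-in for the iterations loop
        finally:
            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                pass  # expected: the task was cancelled deliberately


    asyncio.run(main())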
crackerjack/mixins/__init__.py (new file)

@@ -0,0 +1,5 @@
+"""Common mixins for crackerjack components."""
+
+from .error_handling import ErrorHandlingMixin
+
+__all__ = ["ErrorHandlingMixin"]
crackerjack/mixins/error_handling.py (new file)

@@ -0,0 +1,214 @@
+"""Common error handling patterns for crackerjack components."""
+
+import subprocess
+import typing as t
+from pathlib import Path
+
+from rich.console import Console
+
+
+class ErrorHandlingMixin:
+    """Mixin providing common error handling patterns for crackerjack components."""
+
+    def __init__(self) -> None:
+        # These attributes should be provided by the class using the mixin
+        self.console: Console
+        self.logger: t.Any  # Logger instance
+
+    def handle_subprocess_error(
+        self,
+        error: Exception,
+        command: list[str],
+        operation_name: str,
+        critical: bool = False,
+    ) -> bool:
+        """Handle subprocess errors with consistent logging and user feedback.
+
+        Args:
+            error: The exception that occurred
+            command: The command that failed
+            operation_name: Human-readable name of the operation
+            critical: Whether this is a critical error that should stop execution
+
+        Returns:
+            False to indicate failure
+        """
+        error_msg = f"{operation_name} failed: {error}"
+
+        # Log the error
+        if hasattr(self, "logger") and self.logger:
+            self.logger.error(
+                error_msg,
+                command=" ".join(command),
+                error_type=type(error).__name__,
+                critical=critical,
+            )
+
+        # Display user-friendly error message
+        if critical:
+            self.console.print(f"[red]🚨 CRITICAL: {error_msg}[/red]")
+        else:
+            self.console.print(f"[red]❌ {error_msg}[/red]")
+
+        return False
+
+    def handle_file_operation_error(
+        self,
+        error: Exception,
+        file_path: Path,
+        operation: str,
+        critical: bool = False,
+    ) -> bool:
+        """Handle file operation errors with consistent logging and user feedback.
+
+        Args:
+            error: The exception that occurred
+            file_path: The file that caused the error
+            operation: The operation that failed (e.g., "read", "write", "delete")
+            critical: Whether this is a critical error that should stop execution
+
+        Returns:
+            False to indicate failure
+        """
+        error_msg = f"Failed to {operation} {file_path}: {error}"
+
+        # Log the error
+        if hasattr(self, "logger") and self.logger:
+            self.logger.error(
+                error_msg,
+                file_path=str(file_path),
+                operation=operation,
+                error_type=type(error).__name__,
+                critical=critical,
+            )
+
+        # Display user-friendly error message
+        if critical:
+            self.console.print(f"[red]🚨 CRITICAL: {error_msg}[/red]")
+        else:
+            self.console.print(f"[red]❌ {error_msg}[/red]")
+
+        return False
+
+    def handle_timeout_error(
+        self,
+        operation_name: str,
+        timeout_seconds: float,
+        command: list[str] | None = None,
+    ) -> bool:
+        """Handle timeout errors with consistent logging and user feedback.
+
+        Args:
+            operation_name: Human-readable name of the operation
+            timeout_seconds: The timeout that was exceeded
+            command: Optional command that timed out
+
+        Returns:
+            False to indicate failure
+        """
+        error_msg = f"{operation_name} timed out after {timeout_seconds}s"
+
+        # Log the error
+        if hasattr(self, "logger") and self.logger:
+            self.logger.warning(
+                error_msg,
+                timeout=timeout_seconds,
+                command=" ".join(command) if command else None,
+            )
+
+        # Display user-friendly error message
+        self.console.print(f"[yellow]⏰ {error_msg}[/yellow]")
+
+        return False
+
+    def log_operation_success(
+        self,
+        operation_name: str,
+        details: dict[str, t.Any] | None = None,
+    ) -> None:
+        """Log successful operations with consistent formatting.
+
+        Args:
+            operation_name: Human-readable name of the operation
+            details: Optional additional details to log
+        """
+        if hasattr(self, "logger") and self.logger:
+            self.logger.info(
+                f"{operation_name} completed successfully", **(details or {})
+            )
+
+    def validate_required_tools(
+        self,
+        tools: dict[str, str],
+        operation_name: str,
+    ) -> bool:
+        """Validate that required external tools are available.
+
+        Args:
+            tools: Dict mapping tool names to their expected commands
+            operation_name: Name of operation requiring the tools
+
+        Returns:
+            True if all tools are available, False otherwise
+        """
+        missing_tools = []
+
+        for tool_name, command in tools.items():
+            try:
+                subprocess.run(
+                    [command, "--version"],
+                    capture_output=True,
+                    check=True,
+                    timeout=5,
+                )
+            except (
+                subprocess.CalledProcessError,
+                subprocess.TimeoutExpired,
+                FileNotFoundError,
+            ):
+                missing_tools.append(tool_name)
+
+        if missing_tools:
+            error_msg = f"Missing required tools for {operation_name}: {', '.join(missing_tools)}"
+
+            if hasattr(self, "logger") and self.logger:
+                self.logger.error(
+                    error_msg,
+                    missing_tools=missing_tools,
+                    operation=operation_name,
+                )
+
+            self.console.print(f"[red]❌ {error_msg}[/red]")
+            return False
+
+        return True
+
+    def safe_get_attribute(
+        self,
+        obj: t.Any,
+        attribute: str,
+        default: t.Any = None,
+        operation_name: str = "attribute access",
+    ) -> t.Any:
+        """Safely get an attribute with error handling.
+
+        Args:
+            obj: Object to get attribute from
+            attribute: Name of attribute to get
+            default: Default value if attribute doesn't exist
+            operation_name: Name of operation for error logging
+
+        Returns:
+            The attribute value or default
+        """
+        try:
+            return getattr(obj, attribute, default)
+        except Exception as e:
+            if hasattr(self, "logger") and self.logger:
+                self.logger.warning(
+                    f"Error accessing {attribute} during {operation_name}: {e}",
+                    attribute=attribute,
+                    operation=operation_name,
+                    error_type=type(e).__name__,
+                )
+            return default
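
The mixin's __init__ only annotates console and logger; the host class is expected to assign real instances, and every method guards with hasattr(self, "logger") so a missing logger degrades to console-only output. The keyword arguments passed to self.logger.error(...) imply a structlog-style logger rather than the stdlib logging API. A hypothetical consumer (the ToolRunner class below is illustrative, not part of crackerjack):

    import subprocess

    from rich.console import Console

    from crackerjack.mixins import ErrorHandlingMixin


    class ToolRunner(ErrorHandlingMixin):
        def __init__(self) -> None:
            super().__init__()
            self.console = Console()
            self.logger = None  # or a structlog-style logger accepting kwargs

        def run(self, command: list[str]) -> bool:
            if not self.validate_required_tools({"git": "git"}, "tool run"):
                return False
            try:
                subprocess.run(command, check=True, capture_output=True, timeout=30)
            except subprocess.TimeoutExpired:
                return self.handle_timeout_error("tool run", 30, command)
            except subprocess.CalledProcessError as e:
                return self.handle_subprocess_error(e, command, "tool run")
            self.log_operation_success("tool run", {"command": " ".join(command)})
            return True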
crackerjack/models/config.py

@@ -79,6 +79,14 @@ class EnterpriseConfig:
     organization: str | None = None


+@dataclass
+class MCPServerConfig:
+    http_port: int = 8676
+    http_host: str = "127.0.0.1"
+    websocket_port: int = 8675
+    http_enabled: bool = False
+
+
 @dataclass
 class WorkflowOptions:
     cleaning: CleaningConfig = field(default_factory=CleaningConfig)
@@ -91,3 +99,4 @@ class WorkflowOptions:
     progress: ProgressConfig = field(default_factory=ProgressConfig)
     cleanup: CleanupConfig = field(default_factory=CleanupConfig)
     enterprise: EnterpriseConfig = field(default_factory=EnterpriseConfig)
+    mcp_server: MCPServerConfig = field(default_factory=MCPServerConfig)
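
Because the new field uses default_factory, every WorkflowOptions instance gets its own MCPServerConfig with these defaults and no call sites need to change. A quick check, assuming the dataclasses exactly as shown above:

    from crackerjack.models.config import WorkflowOptions

    options = WorkflowOptions()
    # Defaults come straight from MCPServerConfig's field definitions.
    assert options.mcp_server.http_host == "127.0.0.1"
    assert options.mcp_server.http_port == 8676
    assert options.mcp_server.websocket_port == 8675
    assert options.mcp_server.http_enabled is False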
crackerjack/models/protocols.py

@@ -83,8 +83,12 @@ class GitInterface(t.Protocol):

     def add_files(self, files: list[str]) -> bool: ...

+    def add_all_files(self) -> bool: ...
+
     def get_commit_message_suggestions(self, changed_files: list[str]) -> list[str]: ...

+    def get_unpushed_commit_count(self) -> int: ...
+

 @t.runtime_checkable
 class HookManager(t.Protocol):
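
get_unpushed_commit_count presumably counts commits on the current branch that its upstream lacks. One plausible way to satisfy that contract with git rev-list --count (a sketch of the idea, not necessarily what crackerjack/services/git.py actually does):

    import subprocess


    def get_unpushed_commit_count() -> int:
        # "@{u}..HEAD" selects commits reachable from HEAD but not from the
        # configured upstream branch.
        result = subprocess.run(
            ["git", "rev-list", "--count", "@{u}..HEAD"],
            capture_output=True,
            text=True,
        )
        if result.returncode != 0:  # e.g. detached HEAD or no upstream set
            return 0
        return int(result.stdout.strip())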
@@ -99,6 +103,116 @@ class HookManager(t.Protocol):
     def get_hook_summary(self, results: t.Any) -> t.Any: ...


+@t.runtime_checkable
+class SecurityAwareHookManager(HookManager, t.Protocol):
+    """Security-aware hook manager that tracks security-critical failures."""
+
+    def get_security_critical_failures(self, results: list[t.Any]) -> list[t.Any]:
+        """Extract security-critical failures from hook results.
+
+        Args:
+            results: List of hook results from run_fast_hooks or run_comprehensive_hooks
+
+        Returns:
+            List of results that are security-critical and failed
+        """
+        ...
+
+    def has_security_critical_failures(self, results: list[t.Any]) -> bool:
+        """Check if any security-critical hooks failed.
+
+        Args:
+            results: List of hook results
+
+        Returns:
+            True if any CRITICAL security level hooks failed
+        """
+        ...
+
+    def get_security_audit_report(
+        self, fast_results: list[t.Any], comprehensive_results: list[t.Any]
+    ) -> dict[str, t.Any]:
+        """Generate security audit report for publishing decisions.
+
+        Args:
+            fast_results: Results from fast hooks
+            comprehensive_results: Results from comprehensive hooks
+
+        Returns:
+            Dict containing security status, failed critical checks, and recommendations
+        """
+        ...
+
+
+@t.runtime_checkable
+class CoverageRatchetProtocol(t.Protocol):
+    """Protocol for coverage ratchet service."""
+
+    def get_baseline_coverage(self) -> float: ...
+
+    def update_baseline_coverage(self, new_coverage: float) -> bool: ...
+
+    def is_coverage_regression(self, current_coverage: float) -> bool: ...
+
+    def get_coverage_improvement_needed(self) -> float: ...
+
+    def get_status_report(self) -> dict[str, t.Any]: ...
+
+    def get_coverage_report(self) -> str | None: ...
+
+    def check_and_update_coverage(self) -> dict[str, t.Any]: ...
+
+
+@t.runtime_checkable
+class ConfigurationServiceProtocol(t.Protocol):
+    """Protocol for configuration service."""
+
+    def update_precommit_config(self, options: OptionsProtocol) -> bool: ...
+
+    def update_pyproject_config(self, options: OptionsProtocol) -> bool: ...
+
+    def get_temp_config_path(self) -> str | None: ...
+
+
+@t.runtime_checkable
+class SecurityServiceProtocol(t.Protocol):
+    """Protocol for security service."""
+
+    def validate_file_safety(self, path: str | Path) -> bool: ...
+
+    def check_hardcoded_secrets(self, content: str) -> list[dict[str, t.Any]]: ...
+
+    def is_safe_subprocess_call(self, cmd: list[str]) -> bool: ...
+
+    def create_secure_command_env(self) -> dict[str, str]: ...
+
+    def mask_tokens(self, text: str) -> str: ...
+
+    def validate_token_format(self, token: str, token_type: str) -> bool: ...
+
+
+@t.runtime_checkable
+class InitializationServiceProtocol(t.Protocol):
+    """Protocol for initialization service."""
+
+    def initialize_project(self, project_path: str | Path) -> bool: ...
+
+    def validate_project_structure(self) -> bool: ...
+
+    def setup_git_hooks(self) -> bool: ...
+
+
+@t.runtime_checkable
+class UnifiedConfigurationServiceProtocol(t.Protocol):
+    """Protocol for unified configuration service."""
+
+    def merge_configurations(self) -> dict[str, t.Any]: ...
+
+    def validate_configuration(self, config: dict[str, t.Any]) -> bool: ...
+
+    def get_merged_config(self) -> dict[str, t.Any]: ...
+
+
 @t.runtime_checkable
 class TestManagerProtocol(t.Protocol):
     def run_tests(self, options: OptionsProtocol) -> bool: ...
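
All of these protocols are @t.runtime_checkable, so isinstance verifies that the listed methods exist (by name only, not signature), letting any structurally conforming class satisfy the protocol without inheriting from it. A small demonstration against a two-method copy of CoverageRatchetProtocol, trimmed here only to keep the example self-contained:

    import typing as t


    @t.runtime_checkable
    class RatchetLike(t.Protocol):
        def get_baseline_coverage(self) -> float: ...
        def is_coverage_regression(self, current_coverage: float) -> bool: ...


    class FakeRatchet:  # never mentions RatchetLike
        def get_baseline_coverage(self) -> float:
            return 42.0

        def is_coverage_regression(self, current_coverage: float) -> bool:
            return current_coverage < self.get_baseline_coverage()


    assert isinstance(FakeRatchet(), RatchetLike)  # structural match
    assert not isinstance(object(), RatchetLike)   # missing methods fail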
crackerjack/models/task.py

@@ -56,10 +56,13 @@ class TaskStatusData:
     details: str | None = None
     error_message: str | None = None
     files_changed: list[str] | None = None
+    hook_results: list[t.Any] | None = None

     def __post_init__(self) -> None:
         if self.files_changed is None:
             self.files_changed = []
+        if self.hook_results is None:
+            self.hook_results = []
         if self.start_time is not None and self.end_time is not None:
             self.duration = self.end_time - self.start_time

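With the new field, __post_init__ normalizes both optional lists to fresh empty lists per instance (sidestepping the shared mutable default problem) and still derives duration from the timestamps. A trimmed stand-in showing that behavior; only fields visible in this hunk are included:

    import typing as t
    from dataclasses import dataclass


    @dataclass
    class StatusDemo:
        start_time: float | None = None
        end_time: float | None = None
        duration: float | None = None
        files_changed: list[str] | None = None
        hook_results: list[t.Any] | None = None

        def __post_init__(self) -> None:
            if self.files_changed is None:
                self.files_changed = []
            if self.hook_results is None:
                self.hook_results = []
            if self.start_time is not None and self.end_time is not None:
                self.duration = self.end_time - self.start_time


    demo = StatusDemo(start_time=10.0, end_time=12.5)
    assert demo.duration == 2.5
    assert demo.hook_results == []  # fresh list per instance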
crackerjack/security/__init__.py (new file)

@@ -0,0 +1 @@
+"""Security utilities for Crackerjack."""