crackerjack 0.37.9__py3-none-any.whl → 0.38.1__py3-none-any.whl

This diff compares publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.

Potentially problematic release.

@@ -485,44 +485,74 @@ class ReferenceGenerator:
         Returns:
             Enhanced commands with workflow info
         """
-        # Define common workflow patterns
-        workflow_patterns = {
+        workflow_patterns = self._get_workflow_patterns()
+
+        for command in commands.values():
+            self._assign_command_workflows(command, workflow_patterns)
+            self._add_ai_context_to_command(command)
+
+        return commands
+
+    def _get_workflow_patterns(self) -> dict[str, list[str]]:
+        """Get workflow patterns for command categorization.
+
+        Returns:
+            Dictionary mapping workflow names to pattern lists
+        """
+        return {
             "development": ["test", "format", "lint", "type-check"],
             "release": ["version", "build", "publish", "tag"],
             "maintenance": ["clean", "update", "optimize", "backup"],
             "monitoring": ["status", "health", "metrics", "logs"],
         }
 
-        for command in commands.values():
-            # Assign workflows based on command name patterns
-            for workflow, patterns in workflow_patterns.items():
-                if any(pattern in command.name for pattern in patterns):
-                    command.common_workflows.append(workflow)
-
-            # Add AI context based on command purpose
-            if "test" in command.name:
-                command.ai_context.update(
-                    {
-                        "purpose": "quality_assurance",
-                        "automation_level": "high",
-                        "ai_agent_compatible": True,
-                    }
-                )
-                command.success_patterns.append("All tests passed")
-                command.failure_patterns.append("Test failures detected")
-
-            elif "format" in command.name or "lint" in command.name:
-                command.ai_context.update(
-                    {
-                        "purpose": "code_quality",
-                        "automation_level": "high",
-                        "ai_agent_compatible": True,
-                    }
-                )
-                command.success_patterns.append("No formatting issues")
-                command.failure_patterns.append("Style violations found")
+    def _assign_command_workflows(
+        self, command: CommandInfo, workflow_patterns: dict[str, list[str]]
+    ) -> None:
+        """Assign workflows to a command based on name patterns.
 
-        return commands
+        Args:
+            command: Command to assign workflows to
+            workflow_patterns: Workflow patterns to match against
+        """
+        for workflow, patterns in workflow_patterns.items():
+            if any(pattern in command.name for pattern in patterns):
+                command.common_workflows.append(workflow)
+
+    def _add_ai_context_to_command(self, command: CommandInfo) -> None:
+        """Add AI context to a command based on its purpose.
+
+        Args:
+            command: Command to enhance with AI context
+        """
+        if "test" in command.name:
+            self._add_test_ai_context(command)
+        elif "format" in command.name or "lint" in command.name:
+            self._add_quality_ai_context(command)
+
+    def _add_test_ai_context(self, command: CommandInfo) -> None:
+        """Add AI context for test-related commands."""
+        command.ai_context.update(
+            {
+                "purpose": "quality_assurance",
+                "automation_level": "high",
+                "ai_agent_compatible": True,
+            }
+        )
+        command.success_patterns.append("All tests passed")
+        command.failure_patterns.append("Test failures detected")
+
+    def _add_quality_ai_context(self, command: CommandInfo) -> None:
+        """Add AI context for code quality commands."""
+        command.ai_context.update(
+            {
+                "purpose": "code_quality",
+                "automation_level": "high",
+                "ai_agent_compatible": True,
+            }
+        )
+        command.success_patterns.append("No formatting issues")
+        command.failure_patterns.append("Style violations found")
 
     def _categorize_commands(
         self, commands: dict[str, CommandInfo]
@@ -536,8 +566,18 @@ class ReferenceGenerator:
             Dictionary of category to command names
         """
         categories: dict[str, list[str]] = {}
+        category_patterns = self._get_category_patterns()
+
+        for command in commands.values():
+            category = self._determine_command_category(command, category_patterns)
+            command.category = category
+            self._add_command_to_category(categories, category, command.name)
+
+        return categories
 
-        category_patterns = {
+    def _get_category_patterns(self) -> dict[str, list[str]]:
+        """Get category patterns for command classification."""
+        return {
             "development": ["test", "format", "lint", "check", "run"],
             "server": ["server", "start", "stop", "restart", "monitor"],
             "release": ["version", "bump", "publish", "build", "tag"],
@@ -545,27 +585,22 @@ class ReferenceGenerator:
             "utilities": ["clean", "help", "info", "status"],
         }
 
-        for command in commands.values():
-            assigned = False
-
-            # Assign based on patterns
-            for category, patterns in category_patterns.items():
-                if any(pattern in command.name for pattern in patterns):
-                    command.category = category
-                    if category not in categories:
-                        categories[category] = []
-                    categories[category].append(command.name)
-                    assigned = True
-                    break
-
-            # Default category
-            if not assigned:
-                command.category = "general"
-                if "general" not in categories:
-                    categories["general"] = []
-                categories["general"].append(command.name)
-
-        return categories
+    def _determine_command_category(
+        self, command: CommandInfo, category_patterns: dict[str, list[str]]
+    ) -> str:
+        """Determine the category for a command based on patterns."""
+        for category, patterns in category_patterns.items():
+            if any(pattern in command.name for pattern in patterns):
+                return category
+        return "general"
+
+    def _add_command_to_category(
+        self, categories: dict[str, list[str]], category: str, command_name: str
+    ) -> None:
+        """Add command to the specified category."""
+        if category not in categories:
+            categories[category] = []
+        categories[category].append(command_name)
 
     def _generate_workflows(
         self, commands: dict[str, CommandInfo]
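The refactor above preserves the original behaviour: a command picks up every workflow whose pattern list contains a substring of its name, and its category is the first match, falling back to "general". A minimal, self-contained sketch of that matching, using a hypothetical CommandInfo stand-in rather than crackerjack's real model:

# Illustrative sketch only; CommandInfo here is a hypothetical stand-in for
# crackerjack's own command model.
from dataclasses import dataclass, field


@dataclass
class CommandInfo:
    name: str
    category: str = "general"
    common_workflows: list[str] = field(default_factory=list)


CATEGORY_PATTERNS: dict[str, list[str]] = {
    "development": ["test", "format", "lint", "check", "run"],
    "release": ["version", "bump", "publish", "build", "tag"],
}


def determine_category(command: CommandInfo, patterns: dict[str, list[str]]) -> str:
    # First category whose pattern occurs as a substring of the name wins.
    for category, names in patterns.items():
        if any(pattern in command.name for pattern in names):
            return category
    return "general"


for name in ("run-tests", "bump-version", "docs"):
    cmd = CommandInfo(name=name)
    cmd.category = determine_category(cmd, CATEGORY_PATTERNS)
    print(f"{name} -> {cmd.category}")
# run-tests -> development, bump-version -> release, docs -> general

Because matching is substring-based, a name such as run-tests is classified without any explicit registration.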
@@ -663,7 +698,9 @@ class ReferenceGenerator:
         """Render command categories for markdown."""
         category_lines = []
         for category, command_names in reference.categories.items():
-            category_section = self._render_markdown_category(category, reference.commands, command_names)
+            category_section = self._render_markdown_category(
+                category, reference.commands, command_names
+            )
             category_lines.extend(category_section)
         return category_lines
 
@@ -733,7 +770,9 @@ class ReferenceGenerator:
 
         # Add related commands section
         if command.related_commands:
-            related_lines = self._render_command_related_markdown(command.related_commands)
+            related_lines = self._render_command_related_markdown(
+                command.related_commands
+            )
             lines.extend(related_lines)
 
         return lines
@@ -813,9 +852,11 @@ class ReferenceGenerator:
     def _render_html(self, reference: CommandReference) -> str:
         """Render reference as HTML."""
        html_parts = [
-            self._render_html_header(reference.generated_at.strftime("%Y-%m-%d %H:%M:%S")),
+            self._render_html_header(
+                reference.generated_at.strftime("%Y-%m-%d %H:%M:%S")
+            ),
             self._render_html_commands(reference),
-            "</body></html>"
+            "</body></html>",
         ]
         return "".join(html_parts)
 
@@ -842,7 +883,9 @@ class ReferenceGenerator:
         """Render HTML commands by category."""
         html_parts = []
         for category, command_names in reference.categories.items():
-            category_html = self._render_html_category(category, reference.commands, command_names)
+            category_html = self._render_html_category(
+                category, reference.commands, command_names
+            )
             html_parts.append(category_html)
         return "".join(html_parts)
 
@@ -918,7 +961,9 @@ class ReferenceGenerator:
             "aliases": command.aliases,
         }
 
-    def _serialize_parameters(self, parameters: list[ParameterInfo]) -> list[dict[str, t.Any]]:
+    def _serialize_parameters(
+        self, parameters: list[ParameterInfo]
+    ) -> list[dict[str, t.Any]]:
         """Serialize parameters for JSON output."""
         return [self._serialize_parameter(param) for param in parameters]
 
@@ -192,7 +192,7 @@ HOOKS_REGISTRY: dict[str, list[HookMetadata]] = {
             "stages": ["pre-push", "manual"],
             "args": ["-c", "pyproject.toml", "-r", "-ll"],
             "files": "^crackerjack/.*\\.py$",
-            "exclude": None,
+            "exclude": r"^tests/",
             "additional_dependencies": None,
             "types_or": None,
             "language": None,
@@ -284,9 +284,9 @@ HOOKS_REGISTRY: dict[str, list[HookMetadata]] = {
             "tier": 3,
             "time_estimate": 0.1,
             "stages": ["pre-push", "manual"],
-            "args": ["crackerjack"],
+            "args": ["crackerjack", "--exclude", "tests"],
             "files": None,
-            "exclude": None,
+            "exclude": r"^tests/",
             "additional_dependencies": None,
             "types_or": None,
             "language": "system",
@@ -338,9 +338,9 @@ HOOKS_REGISTRY: dict[str, list[HookMetadata]] = {
             "tier": 3,
             "time_estimate": 3.0,
             "stages": ["pre-push", "manual"],
-            "args": ["--ignore", "FURB184", "--ignore", "FURB120"],
+            "args": [],
             "files": "^crackerjack/.*\\.py$",
-            "exclude": r"^tests/.*\.py$",
+            "exclude": r"^tests/",
             "additional_dependencies": None,
             "types_or": None,
             "language": None,
@@ -358,7 +358,7 @@ HOOKS_REGISTRY: dict[str, list[HookMetadata]] = {
             "stages": ["pre-push", "manual"],
             "args": ["--config-file", "mypy.ini", "./crackerjack"],
             "files": None,
-            "exclude": None,
+            "exclude": r"^tests/",
             "additional_dependencies": None,
             "types_or": None,
             "language": "system",
@@ -544,7 +544,7 @@ class DynamicConfigGenerator:
         """Update hook configuration to use the detected package directory."""
         # Update skylos hook
         if hook["id"] == "skylos" and hook["args"]:
-            hook["args"] = [self.package_directory]
+            hook["args"] = [self.package_directory, "--exclude", "tests"]
 
         # Update zuban hook
         elif hook["id"] == "zuban" and hook["args"]:
@@ -566,12 +566,24 @@ class DynamicConfigGenerator:
                 "crackerjack", self.package_directory
             )
 
-        # Ensure hooks exclude src directories to avoid JavaScript conflicts
+        # Ensure hooks exclude src directories to avoid JavaScript conflicts and tests
         if hook["exclude"]:
+            # Add src exclusion if not present
            if "src/" not in hook["exclude"]:
                 hook["exclude"] = f"{hook['exclude']}|^src/"
         else:
-            hook["exclude"] = "^src/"
+            # If no exclusion, add both tests and src
+            if hook["id"] in (
+                "skylos",
+                "zuban",
+                "bandit",
+                "refurb",
+                "complexipy",
+                "pyright",
+            ):
+                hook["exclude"] = r"^tests/|^src/"
+            else:
+                hook["exclude"] = "^src/"
 
         return hook
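Expressed as a standalone helper (a hypothetical function for illustration, not the package's actual API), the exclusion rule above merges patterns like this:

# Hypothetical helper mirroring the merge rule above, not the package's API.
TOOLS_EXCLUDING_TESTS = {"skylos", "zuban", "bandit", "refurb", "complexipy", "pyright"}


def merged_exclude(hook_id: str, existing: str | None) -> str:
    if existing:
        # Keep the existing pattern, appending the src exclusion if missing.
        return existing if "src/" in existing else f"{existing}|^src/"
    if hook_id in TOOLS_EXCLUDING_TESTS:
        return r"^tests/|^src/"
    return "^src/"


print(merged_exclude("bandit", None))        # ^tests/|^src/
print(merged_exclude("codespell", None))     # ^src/
print(merged_exclude("refurb", r"^tests/"))  # ^tests/|^src/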
 
@@ -24,7 +24,7 @@ class TaskStatus(Enum):
 
 
 @dataclass
-class WorkflowOptions:
+class InteractiveWorkflowOptions:
     clean: bool = False
     test: bool = False
     publish: str | None = None
@@ -35,7 +35,7 @@ class WorkflowOptions:
     dry_run: bool = False
 
     @classmethod
-    def from_args(cls, args: t.Any) -> "WorkflowOptions":
+    def from_args(cls, args: t.Any) -> "InteractiveWorkflowOptions":
         return cls(
             clean=getattr(args, "clean", False),
             test=getattr(args, "test", False),
@@ -399,7 +399,7 @@ class InteractiveCLI:
 
         self.logger = logging.getLogger("crackerjack.interactive.cli")
 
-    def create_dynamic_workflow(self, options: WorkflowOptions) -> None:
+    def create_dynamic_workflow(self, options: InteractiveWorkflowOptions) -> None:
         builder = WorkflowBuilder(self.console)
 
         workflow_steps = [
@@ -581,7 +581,7 @@ class InteractiveCLI:
             or last_task
         )
 
-    def run_interactive_workflow(self, options: WorkflowOptions) -> bool:
+    def run_interactive_workflow(self, options: InteractiveWorkflowOptions) -> bool:
         self.logger.info(
             f"Starting interactive workflow with options: {options.__dict__}",
         )
@@ -682,7 +682,9 @@ def launch_interactive_cli(version: str, options: t.Any = None) -> None:
     console.print()
 
     workflow_options = (
-        WorkflowOptions.from_args(options) if options else WorkflowOptions()
+        InteractiveWorkflowOptions.from_args(options)
+        if options
+        else InteractiveWorkflowOptions()
    )
     cli.create_dynamic_workflow(workflow_options)
     cli.run_interactive_workflow(workflow_options)
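The rename from WorkflowOptions to InteractiveWorkflowOptions is mechanical; from_args still builds options from any attribute-bearing object, with getattr defaults covering missing flags. A rough, self-contained sketch of that pattern, reduced to the fields visible in this diff (argparse.Namespace is only an example input):

# Self-contained sketch of the from_args pattern; only the fields visible in
# this diff are modelled, and argparse.Namespace is just an example input.
import typing as t
from argparse import Namespace
from dataclasses import dataclass


@dataclass
class InteractiveWorkflowOptions:
    clean: bool = False
    test: bool = False
    publish: str | None = None
    dry_run: bool = False

    @classmethod
    def from_args(cls, args: t.Any) -> "InteractiveWorkflowOptions":
        # Missing attributes silently fall back to the dataclass defaults.
        return cls(
            clean=getattr(args, "clean", False),
            test=getattr(args, "test", False),
            publish=getattr(args, "publish", None),
            dry_run=getattr(args, "dry_run", False),
        )


print(InteractiveWorkflowOptions.from_args(Namespace(test=True)))
# InteractiveWorkflowOptions(clean=False, test=True, publish=None, dry_run=False)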
@@ -156,7 +156,7 @@ class PublishManagerImpl:
             self.console.print(f"[red]❌[/ red] Version bump failed: {e}")
             raise
 
-    def _prompt_for_version_type(self, recommendation=None) -> str:
+    def _prompt_for_version_type(self, recommendation: t.Any = None) -> str:
         try:
             from rich.prompt import Prompt
 
@@ -178,7 +178,7 @@ class PublishManagerImpl:
             )
             return "patch"
 
-    def _get_version_recommendation(self):
+    def _get_version_recommendation(self) -> t.Any:
         """Get AI-powered version bump recommendation based on git history."""
         try:
             import asyncio
@@ -217,7 +217,7 @@ class PublishManagerImpl:
             self.console.print(f"[yellow]⚠️[/yellow] Version analysis failed: {e}")
             return None
 
-    def _display_version_analysis(self, recommendation):
+    def _display_version_analysis(self, recommendation: t.Any) -> None:
         """Display version analysis in a compact format."""
         if not recommendation:
             return
@@ -132,21 +132,54 @@ class TestManager:
         try:
             status = self.coverage_ratchet.get_status_report()
 
-            if status.get("status") == "not_initialized":
+            # Check if we have actual coverage data from coverage.json even if ratchet is not initialized
+            coverage_json_path = self.pkg_path / "coverage.json"
+            direct_coverage = None
+
+            if coverage_json_path.exists():
+                try:
+                    import json
+
+                    with coverage_json_path.open() as f:
+                        data = json.load(f)
+                        direct_coverage = data.get("totals", {}).get("percent_covered")
+                except (json.JSONDecodeError, KeyError):
+                    pass  # Fall back to ratchet data
+
+            # If ratchet is not initialized but we have direct coverage data, use it
+            if (
+                not status or status.get("status") == "not_initialized"
+            ) and direct_coverage is not None:
+                return {
+                    "status": "coverage_available",
+                    "coverage_percent": direct_coverage,
+                    "message": "Coverage data available from coverage.json",
+                    "source": "coverage.json",
+                }
+
+            # If ratchet is not initialized and no direct coverage, return not initialized
+            if not status or status.get("status") == "not_initialized":
                 return {
                     "status": "not_initialized",
                     "coverage_percent": 0.0,
                     "message": "Coverage ratchet not initialized",
                 }
 
+            # Use ratchet data, but prefer direct coverage if available and different
+            ratchet_coverage = status.get("current_coverage", 0.0)
+            final_coverage = (
+                direct_coverage if direct_coverage is not None else ratchet_coverage
+            )
+
             return {
                 "status": "active",
-                "coverage_percent": status.get("current_coverage", 0.0),
+                "coverage_percent": final_coverage,
                 "target_coverage": status.get("target_coverage", 100.0),
                 "next_milestone": status.get("next_milestone"),
                 "progress_percent": status.get("progress_percent", 0.0),
                 "last_updated": status.get("last_updated"),
                 "milestones_achieved": status.get("milestones_achieved", []),
+                "source": "coverage.json" if direct_coverage is not None else "ratchet",
             }
         except Exception as e:
             return {
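The new direct path reads the overall percentage from coverage.py's JSON report, which stores it under totals.percent_covered. A minimal sketch of that lookup, with the file location assumed for the example:

# Minimal sketch of the coverage.json lookup; the path is an assumption for
# the example, and a missing or malformed file simply yields None.
import json
from pathlib import Path


def read_percent_covered(path: Path) -> float | None:
    if not path.exists():
        return None
    try:
        data = json.loads(path.read_text())
        return data.get("totals", {}).get("percent_covered")
    except (json.JSONDecodeError, AttributeError):
        return None  # fall back to other coverage sources


print(read_percent_covered(Path("coverage.json")))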
@@ -235,25 +268,64 @@ class TestManager:
             current_coverage = None
             coverage_json_path = self.pkg_path / "coverage.json"
 
+            # Primary: Try to extract from coverage.json
             if coverage_json_path.exists():
-                with coverage_json_path.open() as f:
-                    data = json.load(f)
-                    current_coverage = data.get("totals", {}).get("percent_covered")
+                try:
+                    with coverage_json_path.open() as f:
+                        data = json.load(f)
+                        current_coverage = data.get("totals", {}).get("percent_covered")
+                        if current_coverage is not None:
+                            self.console.print(
+                                f"[dim]📊 Coverage extracted from coverage.json: {current_coverage:.2f}%[/dim]"
+                            )
+                except (json.JSONDecodeError, KeyError) as e:
+                    self.console.print(
+                        f"[yellow]⚠️[/yellow] Failed to parse coverage.json: {e}"
+                    )
 
-            # Fallback to ratchet result if coverage.json not available
+            # Secondary: Try ratchet result if coverage.json failed
             if current_coverage is None:
                 current_coverage = ratchet_result.get("current_coverage")
+                if current_coverage is not None:
+                    self.console.print(
+                        f"[dim]📊 Coverage from ratchet result: {current_coverage:.2f}%[/dim]"
+                    )
 
-            # Final fallback to coverage service
+            # Tertiary: Try coverage service, but only accept non-zero values
             if current_coverage is None:
                 coverage_info = self.get_coverage()
-                current_coverage = coverage_info.get("coverage_percent")
+                fallback_coverage = coverage_info.get("coverage_percent")
+                # Only use fallback if it's meaningful (>0) or if no coverage.json exists
+                if fallback_coverage and (
+                    fallback_coverage > 0 or not coverage_json_path.exists()
+                ):
+                    current_coverage = fallback_coverage
+                    self.console.print(
+                        f"[dim]📊 Coverage from service fallback: {current_coverage:.2f}%[/dim]"
+                    )
+                else:
+                    self.console.print(
+                        "[yellow]⚠️[/yellow] Skipping 0.0% fallback when coverage.json exists"
+                    )
 
-            if current_coverage is not None:
+            # Only update badge if we have valid coverage data
+            if current_coverage is not None and current_coverage >= 0:
                 if self._coverage_badge_service.should_update_badge(current_coverage):
                     self._coverage_badge_service.update_readme_coverage_badge(
                         current_coverage
                     )
+                    self.console.print(
+                        f"[green]✅[/green] Badge updated to {current_coverage:.2f}%"
+                    )
+                else:
+                    self.console.print(
+                        f"[dim]📊 Badge unchanged (current: {current_coverage:.2f}%)[/dim]"
+                    )
+            else:
+                self.console.print(
+                    "[yellow]⚠️[/yellow] No valid coverage data found for badge update"
+                )
+
         except Exception as e:
             # Don't fail the test process if badge update fails
             self.console.print(f"[yellow]⚠️[/yellow] Badge update failed: {e}")
@@ -5,10 +5,20 @@ import typing as t
 from collections.abc import Callable
 from pathlib import Path
 
+# Type aliases for watchdog types
+FileSystemEvent: t.Any
+FileSystemEventHandler: t.Any
+Observer: t.Any
+WATCHDOG_AVAILABLE: bool
+
 try:
-    from watchdog.events import FileSystemEvent, FileSystemEventHandler
-    from watchdog.observers import Observer
+    from watchdog.events import FileSystemEvent as WatchdogFileSystemEvent
+    from watchdog.events import FileSystemEventHandler as WatchdogFileSystemEventHandler
+    from watchdog.observers import Observer as WatchdogObserver
 
+    FileSystemEvent = WatchdogFileSystemEvent
+    FileSystemEventHandler = WatchdogFileSystemEventHandler
+    Observer = WatchdogObserver
     WATCHDOG_AVAILABLE = True
 except ImportError:
     # Type stubs for when watchdog is not available
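The watchdog change applies the usual optional-dependency pattern: declare the module-level names up front, bind them to the real classes when the import succeeds, and leave stubs plus an availability flag otherwise. A generic sketch, using a plain object stub instead of crackerjack's actual fallback types:

# Generic optional-import sketch; the except branch uses a plain object stub
# as a placeholder rather than crackerjack's actual fallback definitions.
import typing as t

Observer: t.Any
WATCHDOG_AVAILABLE: bool

try:
    from watchdog.observers import Observer as WatchdogObserver

    Observer = WatchdogObserver
    WATCHDOG_AVAILABLE = True
except ImportError:
    Observer = object  # stub so the name always exists
    WATCHDOG_AVAILABLE = False

print(f"watchdog available: {WATCHDOG_AVAILABLE}")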
@@ -35,14 +35,18 @@ class RateLimiter:
         self.requests_per_hour = requests_per_hour
 
         self.minute_windows: dict[str, deque[float]] = defaultdict(
-            lambda: deque(maxlen=requests_per_minute),  # type: ignore[misc]
+            lambda: deque[float](maxlen=requests_per_minute),  # type: ignore[arg-type,misc]
         )
         self.hour_windows: dict[str, deque[float]] = defaultdict(
-            lambda: deque(maxlen=requests_per_hour),  # type: ignore[misc]
+            lambda: deque[float](maxlen=requests_per_hour),  # type: ignore[arg-type,misc]
         )
 
-        self.global_minute_window: deque[float] = deque(maxlen=requests_per_minute * 10)
-        self.global_hour_window: deque[float] = deque(maxlen=requests_per_hour * 10)
+        self.global_minute_window: deque[float] = deque[float](
+            maxlen=requests_per_minute * 10
+        )
+        self.global_hour_window: deque[float] = deque[float](
+            maxlen=requests_per_hour * 10
+        )
 
         self._lock = asyncio.Lock()
 
@@ -126,7 +130,7 @@ class RateLimiter:
         self._remove_expired_entries(self.global_minute_window, minute_cutoff)
         self._remove_expired_entries(self.global_hour_window, hour_cutoff)
 
-    def _remove_expired_entries(self, window: deque, cutoff: float) -> None:
+    def _remove_expired_entries(self, window: deque[float], cutoff: float) -> None:
         while window and window[0] < cutoff:
             window.popleft()
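deque[float](...) is simply the subscripted constructor (valid at runtime on Python 3.9+) and behaves exactly like deque(...); the change exists for the type checker. A simplified sliding-window sketch in the same style, not the RateLimiter class itself:

# Simplified sliding-window sketch; the pruning loop mirrors the idea behind
# _remove_expired_entries, but this is not crackerjack's RateLimiter.
import time
from collections import deque


def allow_request(window: deque[float], limit: int, period: float = 60.0) -> bool:
    now = time.monotonic()
    # Drop timestamps that have fallen out of the window, oldest first.
    while window and window[0] < now - period:
        window.popleft()
    if len(window) >= limit:
        return False
    window.append(now)
    return True


window: deque[float] = deque[float](maxlen=10)
print([allow_request(window, limit=3) for _ in range(5)])
# [True, True, True, False, False]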
 
@@ -3,6 +3,7 @@ import socket
 import subprocess
 import sys
 import time
+from contextlib import suppress
 from typing import Any
 
 import aiohttp
@@ -34,7 +34,7 @@ class ServiceConfig:
         self.max_restarts = max_restarts
         self.restart_window = restart_window
 
-        self.process: subprocess.Popen[bytes] | None = None
+        self.process: subprocess.Popen[str] | None = None
         self.restart_count = 0
         self.restart_timestamps: list[float] = []
         self.last_health_check = 0.0
@@ -125,7 +126,7 @@ class ServiceWatchdog:
         return False
 
     async def _launch_service_process(self, service: ServiceConfig) -> bool:
-        service.process = subprocess.Popen(
+        service.process = subprocess.Popen[str](
             service.command,
             stdout=subprocess.PIPE,
             stderr=subprocess.PIPE,
@@ -137,6 +138,8 @@
         return await self._check_process_startup_success(service)
 
     async def _check_process_startup_success(self, service: ServiceConfig) -> bool:
+        if service.process is None:
+            return False
         exit_code = service.process.poll()
         if exit_code is not None:
             return await self._handle_process_died(service, exit_code)
@@ -147,6 +150,8 @@
         service: ServiceConfig,
         exit_code: int,
     ) -> bool:
+        if service.process is None:
+            return False
         stdout, stderr = service.process.communicate()
         error_msg = f"Process died (exit: {exit_code})"
         if stderr and stderr.strip():
@@ -441,8 +446,6 @@ class ServiceWatchdog:
         message: str,
     ) -> None:
         if self.event_queue:
-            from contextlib import suppress
-
             with suppress(Exception):
                 event = {
                     "type": event_type,
@@ -147,7 +147,7 @@ async def get_smart_agent_recommendation(
         context=task_context,
     )
 
-    response = {
+    response: dict[str, t.Any] = {
         "task_description": task_description,
         "context_type": context_type,
     }
@@ -119,7 +119,7 @@ def _create_validation_results(file_path: str) -> dict[str, t.Any]:
 
 
 def _create_pattern_suggestions(problem_context: str) -> dict[str, t.Any]:
-    pattern_suggestions = {
+    pattern_suggestions: dict[str, t.Any] = {
         "context": problem_context,
         "recommended_patterns": [],
         "implementation_guidance": [],
@@ -262,10 +262,10 @@ class OrchestrationPlanner:
             hook_plans=hook_plans,
             test_plan=test_plan,
             ai_plan=ai_plan,
-            estimated_total_duration=sum(
-                int(plan["estimated_duration"]) for plan in hook_plans
+            estimated_total_duration=float(
+                sum(int(float(str(plan["estimated_duration"]))) for plan in hook_plans)
             )
-            + test_plan["estimated_duration"],
+            + float(test_plan["estimated_duration"]),
         )
 
     def _estimate_strategy_duration(self, strategy: HookStrategy) -> float:
@@ -131,9 +131,10 @@ class CustomHookPlugin(HookPluginBase):
         try:
             if hook_def.command is None:
                 return HookResult(
+                    id=f"hook_{hook_def.name}",
                     name=hook_def.name,
                     status="failed",
-                    message="Hook command is None",
+                    issues_found=["Hook command is None"],
                     duration=0.0,
                 )
             cmd = hook_def.command.copy()