specfact-cli 0.4.2__py3-none-any.whl → 0.6.8__py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions exactly as they appear in the public registry.
Files changed (66)
  1. specfact_cli/__init__.py +1 -1
  2. specfact_cli/agents/analyze_agent.py +2 -3
  3. specfact_cli/analyzers/__init__.py +2 -1
  4. specfact_cli/analyzers/ambiguity_scanner.py +601 -0
  5. specfact_cli/analyzers/code_analyzer.py +462 -30
  6. specfact_cli/analyzers/constitution_evidence_extractor.py +491 -0
  7. specfact_cli/analyzers/contract_extractor.py +419 -0
  8. specfact_cli/analyzers/control_flow_analyzer.py +281 -0
  9. specfact_cli/analyzers/requirement_extractor.py +337 -0
  10. specfact_cli/analyzers/test_pattern_extractor.py +330 -0
  11. specfact_cli/cli.py +151 -206
  12. specfact_cli/commands/constitution.py +281 -0
  13. specfact_cli/commands/enforce.py +42 -34
  14. specfact_cli/commands/import_cmd.py +481 -152
  15. specfact_cli/commands/init.py +224 -55
  16. specfact_cli/commands/plan.py +2133 -547
  17. specfact_cli/commands/repro.py +100 -78
  18. specfact_cli/commands/sync.py +701 -186
  19. specfact_cli/enrichers/constitution_enricher.py +765 -0
  20. specfact_cli/enrichers/plan_enricher.py +294 -0
  21. specfact_cli/importers/speckit_converter.py +364 -48
  22. specfact_cli/importers/speckit_scanner.py +65 -0
  23. specfact_cli/models/plan.py +42 -0
  24. specfact_cli/resources/mappings/node-async.yaml +49 -0
  25. specfact_cli/resources/mappings/python-async.yaml +47 -0
  26. specfact_cli/resources/mappings/speckit-default.yaml +82 -0
  27. specfact_cli/resources/prompts/specfact-enforce.md +185 -0
  28. specfact_cli/resources/prompts/specfact-import-from-code.md +626 -0
  29. specfact_cli/resources/prompts/specfact-plan-add-feature.md +188 -0
  30. specfact_cli/resources/prompts/specfact-plan-add-story.md +212 -0
  31. specfact_cli/resources/prompts/specfact-plan-compare.md +571 -0
  32. specfact_cli/resources/prompts/specfact-plan-init.md +531 -0
  33. specfact_cli/resources/prompts/specfact-plan-promote.md +352 -0
  34. specfact_cli/resources/prompts/specfact-plan-review.md +1276 -0
  35. specfact_cli/resources/prompts/specfact-plan-select.md +401 -0
  36. specfact_cli/resources/prompts/specfact-plan-update-feature.md +242 -0
  37. specfact_cli/resources/prompts/specfact-plan-update-idea.md +211 -0
  38. specfact_cli/resources/prompts/specfact-repro.md +268 -0
  39. specfact_cli/resources/prompts/specfact-sync.md +497 -0
  40. specfact_cli/resources/schemas/deviation.schema.json +61 -0
  41. specfact_cli/resources/schemas/plan.schema.json +204 -0
  42. specfact_cli/resources/schemas/protocol.schema.json +53 -0
  43. specfact_cli/resources/templates/github-action.yml.j2 +140 -0
  44. specfact_cli/resources/templates/plan.bundle.yaml.j2 +141 -0
  45. specfact_cli/resources/templates/pr-template.md.j2 +58 -0
  46. specfact_cli/resources/templates/protocol.yaml.j2 +24 -0
  47. specfact_cli/resources/templates/telemetry.yaml.example +35 -0
  48. specfact_cli/sync/__init__.py +10 -1
  49. specfact_cli/sync/watcher.py +268 -0
  50. specfact_cli/telemetry.py +440 -0
  51. specfact_cli/utils/acceptance_criteria.py +127 -0
  52. specfact_cli/utils/enrichment_parser.py +445 -0
  53. specfact_cli/utils/feature_keys.py +12 -3
  54. specfact_cli/utils/ide_setup.py +170 -0
  55. specfact_cli/utils/structure.py +179 -2
  56. specfact_cli/utils/yaml_utils.py +33 -0
  57. specfact_cli/validators/repro_checker.py +22 -1
  58. specfact_cli/validators/schema.py +15 -4
  59. specfact_cli-0.6.8.dist-info/METADATA +456 -0
  60. specfact_cli-0.6.8.dist-info/RECORD +99 -0
  61. {specfact_cli-0.4.2.dist-info → specfact_cli-0.6.8.dist-info}/entry_points.txt +1 -0
  62. specfact_cli-0.6.8.dist-info/licenses/LICENSE.md +202 -0
  63. specfact_cli-0.4.2.dist-info/METADATA +0 -370
  64. specfact_cli-0.4.2.dist-info/RECORD +0 -62
  65. specfact_cli-0.4.2.dist-info/licenses/LICENSE.md +0 -61
  66. {specfact_cli-0.4.2.dist-info → specfact_cli-0.6.8.dist-info}/WHEEL +0 -0
@@ -6,15 +6,25 @@ import ast
  import re
  from collections import defaultdict
  from pathlib import Path
+ from typing import Any

  import networkx as nx
  from beartype import beartype
  from icontract import ensure, require
+ from rich.console import Console
+ from rich.progress import BarColumn, Progress, SpinnerColumn, TextColumn, TimeElapsedColumn

+ from specfact_cli.analyzers.contract_extractor import ContractExtractor
+ from specfact_cli.analyzers.control_flow_analyzer import ControlFlowAnalyzer
+ from specfact_cli.analyzers.requirement_extractor import RequirementExtractor
+ from specfact_cli.analyzers.test_pattern_extractor import TestPatternExtractor
  from specfact_cli.models.plan import Feature, Idea, Metadata, PlanBundle, Product, Story
  from specfact_cli.utils.feature_keys import to_classname_key, to_sequential_key


+ console = Console()
+
+
  class CodeAnalyzer:
      """
      Analyzes Python code to auto-derive plan bundles.
@@ -30,12 +40,17 @@ class CodeAnalyzer:
      @require(lambda repo_path: repo_path is not None and isinstance(repo_path, Path), "Repo path must be Path")
      @require(lambda confidence_threshold: 0.0 <= confidence_threshold <= 1.0, "Confidence threshold must be 0.0-1.0")
      @require(lambda plan_name: plan_name is None or isinstance(plan_name, str), "Plan name must be None or str")
+     @require(
+         lambda entry_point: entry_point is None or isinstance(entry_point, Path),
+         "Entry point must be None or Path",
+     )
      def __init__(
          self,
          repo_path: Path,
          confidence_threshold: float = 0.5,
          key_format: str = "classname",
          plan_name: str | None = None,
+         entry_point: Path | None = None,
      ) -> None:
          """
          Initialize code analyzer.
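Note: the new @require guard follows the same icontract design-by-contract pattern as the existing ones: @require checks arguments before the call, @ensure checks the result afterwards. A minimal standalone sketch of that pattern (the function and conditions are illustrative, not from the package):

    from icontract import ensure, require

    @require(lambda n: n >= 0, "n must be non-negative")            # precondition on arguments
    @ensure(lambda result: isinstance(result, int), "returns int")  # postcondition on the result
    def floor_sqrt(n: int) -> int:
        return int(n ** 0.5)

    floor_sqrt(9)   # -> 3
    floor_sqrt(-1)  # raises icontract.ViolationError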
@@ -45,17 +60,37 @@ class CodeAnalyzer:
              confidence_threshold: Minimum confidence score (0.0-1.0)
              key_format: Feature key format ('classname' or 'sequential', default: 'classname')
              plan_name: Custom plan name (will be used for idea.title, optional)
+             entry_point: Optional entry point path for partial analysis (relative to repo_path)
          """
-         self.repo_path = Path(repo_path)
+         self.repo_path = Path(repo_path).resolve()
          self.confidence_threshold = confidence_threshold
          self.key_format = key_format
          self.plan_name = plan_name
+         self.entry_point: Path | None = None
+         if entry_point is not None:
+             # Resolve entry point relative to repo_path
+             if entry_point.is_absolute():
+                 self.entry_point = entry_point
+             else:
+                 self.entry_point = (self.repo_path / entry_point).resolve()
+             # Validate entry point exists and is within repo
+             if not self.entry_point.exists():
+                 raise ValueError(f"Entry point does not exist: {self.entry_point}")
+             if not str(self.entry_point).startswith(str(self.repo_path)):
+                 raise ValueError(f"Entry point must be within repository: {self.entry_point}")
          self.features: list[Feature] = []
          self.themes: set[str] = set()
          self.dependency_graph: nx.DiGraph[str] = nx.DiGraph()  # Module dependency graph
          self.type_hints: dict[str, dict[str, str]] = {}  # Module -> {function: type_hint}
          self.async_patterns: dict[str, list[str]] = {}  # Module -> [async_methods]
          self.commit_bounds: dict[str, tuple[str, str]] = {}  # Feature -> (first_commit, last_commit)
+         self.external_dependencies: set[str] = set()  # External modules imported from outside entry point
+         # Use entry_point for test extractor if provided, otherwise repo_path
+         test_extractor_path = self.entry_point if self.entry_point else self.repo_path
+         self.test_extractor = TestPatternExtractor(test_extractor_path)
+         self.control_flow_analyzer = ControlFlowAnalyzer()
+         self.requirement_extractor = RequirementExtractor()
+         self.contract_extractor = ContractExtractor()

      @beartype
      @ensure(lambda result: isinstance(result, PlanBundle), "Must return PlanBundle")
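Taken together, the constructor changes make partial analysis opt-in: a relative entry_point is resolved against repo_path, then checked for existence and containment. A minimal usage sketch (paths are illustrative; only the signature comes from the diff, and the import path is assumed from the file list above):

    from pathlib import Path
    from specfact_cli.analyzers.code_analyzer import CodeAnalyzer

    analyzer = CodeAnalyzer(
        repo_path=Path("/work/myrepo"),
        entry_point=Path("src/billing"),  # resolved to /work/myrepo/src/billing
    )

    # Raises ValueError in __init__: a nonexistent path, or an existing path outside the repo
    CodeAnalyzer(repo_path=Path("/work/myrepo"), entry_point=Path("no/such/dir"))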
@@ -74,24 +109,69 @@ class CodeAnalyzer:
          Returns:
              Generated PlanBundle from code analysis
          """
-         # Find all Python files
-         python_files = list(self.repo_path.rglob("*.py"))
-
-         # Build module dependency graph first
-         self._build_dependency_graph(python_files)
-
-         # Analyze each file
-         for file_path in python_files:
-             if self._should_skip_file(file_path):
-                 continue
-
-             self._analyze_file(file_path)
-
-         # Analyze commit history for feature boundaries
-         self._analyze_commit_history()
+         with Progress(
+             SpinnerColumn(),
+             TextColumn("[progress.description]{task.description}"),
+             BarColumn(),
+             TimeElapsedColumn(),
+             console=console,
+         ) as progress:
+             # Phase 1: Discover Python files
+             task1 = progress.add_task("[cyan]Phase 1: Discovering Python files...", total=None)
+             if self.entry_point:
+                 # Scope analysis to entry point directory
+                 python_files = list(self.entry_point.rglob("*.py"))
+                 entry_point_rel = self.entry_point.relative_to(self.repo_path)
+                 progress.update(
+                     task1,
+                     description=f"[green]✓ Found {len(python_files)} Python files in {entry_point_rel}",
+                 )
+             else:
+                 # Full repository analysis
+                 python_files = list(self.repo_path.rglob("*.py"))
+                 progress.update(task1, description=f"[green]✓ Found {len(python_files)} Python files")
+             progress.remove_task(task1)
+
+             # Phase 2: Build dependency graph
+             task2 = progress.add_task("[cyan]Phase 2: Building dependency graph...", total=None)
+             self._build_dependency_graph(python_files)
+             progress.update(task2, description="[green]✓ Dependency graph built")
+             progress.remove_task(task2)
+
+             # Phase 3: Analyze files and extract features
+             task3 = progress.add_task(
+                 "[cyan]Phase 3: Analyzing files and extracting features...", total=len(python_files)
+             )
+             for file_path in python_files:
+                 if self._should_skip_file(file_path):
+                     progress.advance(task3)
+                     continue

-         # Enhance features with dependency information
-         self._enhance_features_with_dependencies()
+                 self._analyze_file(file_path)
+                 progress.advance(task3)
+             progress.update(
+                 task3,
+                 description=f"[green]✓ Analyzed {len(python_files)} files, extracted {len(self.features)} features",
+             )
+             progress.remove_task(task3)
+
+             # Phase 4: Analyze commit history
+             task4 = progress.add_task("[cyan]Phase 4: Analyzing commit history...", total=None)
+             self._analyze_commit_history()
+             progress.update(task4, description="[green]✓ Commit history analyzed")
+             progress.remove_task(task4)
+
+             # Phase 5: Enhance features with dependencies
+             task5 = progress.add_task("[cyan]Phase 5: Enhancing features with dependency information...", total=None)
+             self._enhance_features_with_dependencies()
+             progress.update(task5, description="[green]✓ Features enhanced")
+             progress.remove_task(task5)
+
+             # Phase 6: Extract technology stack
+             task6 = progress.add_task("[cyan]Phase 6: Extracting technology stack...", total=None)
+             technology_constraints = self._extract_technology_stack_from_dependencies()
+             progress.update(task6, description="[green]✓ Technology stack extracted")
+             progress.remove_task(task6)

          # If sequential format, update all keys now that we know the total count
          if self.key_format == "sequential":
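Each phase uses the same rich progress idiom: add an indeterminate task (total=None), rewrite its description with a check mark when done, then remove the task so the next phase starts fresh. A standalone sketch of that idiom (the work items are illustrative):

    from rich.progress import BarColumn, Progress, SpinnerColumn, TextColumn, TimeElapsedColumn

    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        BarColumn(),
        TimeElapsedColumn(),
    ) as progress:
        task = progress.add_task("[cyan]Phase 1: Discovering Python files...", total=None)
        files = ["a.py", "b.py"]  # stand-in for the real rglob() result
        progress.update(task, description=f"[green]✓ Found {len(files)} Python files")
        progress.remove_task(task)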
@@ -99,17 +179,27 @@ class CodeAnalyzer:
                  feature.key = to_sequential_key(feature.key, idx)

          # Generate plan bundle
-         # Use plan_name if provided, otherwise use repo name, otherwise fallback
+         # Use plan_name if provided, otherwise use entry point name or repo name
          if self.plan_name:
              # Use the plan name (already sanitized, but humanize for title)
              title = self.plan_name.replace("_", " ").replace("-", " ").title()
+         elif self.entry_point:
+             # Use entry point name for partial analysis
+             entry_point_name = self.entry_point.name or self.entry_point.relative_to(self.repo_path).as_posix()
+             title = f"{self._humanize_name(entry_point_name)} Module"
          else:
              repo_name = self.repo_path.name or "Unknown Project"
              title = self._humanize_name(repo_name)

+         narrative = f"Auto-derived plan from brownfield analysis of {title}"
+         if self.entry_point:
+             entry_point_rel = self.entry_point.relative_to(self.repo_path)
+             narrative += f" (scoped to {entry_point_rel})"
+
          idea = Idea(
              title=title,
-             narrative=f"Auto-derived plan from brownfield analysis of {title}",
+             narrative=narrative,
+             constraints=technology_constraints,
              metrics=None,
          )

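For a concrete plan name, the title derivation above works out as follows (the name is illustrative):

    plan_name = "billing_service-v2"
    title = plan_name.replace("_", " ").replace("-", " ").title()
    # -> "Billing Service V2"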
@@ -118,13 +208,24 @@ class CodeAnalyzer:
              releases=[],
          )

+         # Build metadata with scope information
+         metadata = Metadata(
+             stage="draft",
+             promoted_at=None,
+             promoted_by=None,
+             analysis_scope="partial" if self.entry_point else "full",
+             entry_point=str(self.entry_point.relative_to(self.repo_path)) if self.entry_point else None,
+             external_dependencies=sorted(self.external_dependencies),
+         )
+
          return PlanBundle(
              version="1.0",
              idea=idea,
              business=None,
              product=product,
              features=self.features,
-             metadata=Metadata(stage="draft", promoted_at=None, promoted_by=None),
+             metadata=metadata,
+             clarifications=None,
          )

      def _should_skip_file(self, file_path: Path) -> bool:
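With an entry point set, the new metadata distinguishes a scoped run from a full one. An illustrative partial-analysis value (field names are from the diff; the concrete values are made up):

    Metadata(
        stage="draft",
        promoted_at=None,
        promoted_by=None,
        analysis_scope="partial",            # "full" when no entry_point was given
        entry_point="src/billing",           # stored relative to repo_path
        external_dependencies=["requests", "sqlalchemy"],  # sorted on the way in
    )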
@@ -242,11 +343,23 @@ class CodeAnalyzer:
          if not stories:
              return None

+         # Extract complete requirements (Step 1.3)
+         complete_requirement = self.requirement_extractor.extract_complete_requirement(node)
+         acceptance_criteria = (
+             [complete_requirement] if complete_requirement else [f"{node.name} class provides documented functionality"]
+         )
+
+         # Extract NFRs from code patterns (Step 1.3)
+         nfrs = self.requirement_extractor.extract_nfrs(node)
+         # Add NFRs as constraints
+         constraints = nfrs if nfrs else []
+
          return Feature(
              key=feature_key,
              title=self._humanize_name(node.name),
              outcomes=outcomes,
-             acceptance=[f"{node.name} class provides documented functionality"],
+             acceptance=acceptance_criteria,
+             constraints=constraints,
              stories=stories,
              confidence=round(confidence, 2),
          )
@@ -344,25 +457,70 @@ class CodeAnalyzer:
          # Create user-centric title based on group
          title = self._generate_story_title(group_name, class_name)

-         # Extract acceptance criteria from docstrings
+         # Extract testable acceptance criteria using test patterns
          acceptance: list[str] = []
          tasks: list[str] = []

+         # Try to extract test patterns from existing tests
+         test_patterns = self.test_extractor.extract_test_patterns_for_class(class_name)
+
+         # If test patterns found, use them
+         if test_patterns:
+             acceptance.extend(test_patterns)
+
+         # Also extract from code patterns (for methods without tests)
          for method in methods:
              # Add method as task
              tasks.append(f"{method.name}()")

-             # Extract acceptance from docstring
+             # Extract test patterns from code if no test file patterns found
+             if not test_patterns:
+                 code_patterns = self.test_extractor.infer_from_code_patterns(method, class_name)
+                 acceptance.extend(code_patterns)
+
+             # Also check docstrings for additional context
              docstring = ast.get_docstring(method)
              if docstring:
-                 # Take first line as acceptance criterion
-                 first_line = docstring.split("\n")[0].strip()
-                 if first_line and first_line not in acceptance:
-                     acceptance.append(first_line)
+                 # Check if docstring contains Given/When/Then format
+                 if "Given" in docstring and "When" in docstring and "Then" in docstring:
+                     # Extract Given/When/Then from docstring
+                     gwt_match = re.search(
+                         r"Given\s+(.+?),\s*When\s+(.+?),\s*Then\s+(.+?)(?:\.|$)", docstring, re.IGNORECASE
+                     )
+                     if gwt_match:
+                         acceptance.append(
+                             f"Given {gwt_match.group(1)}, When {gwt_match.group(2)}, Then {gwt_match.group(3)}"
+                         )
+                 else:
+                     # Use first line as fallback (will be converted to Given/When/Then later)
+                     first_line = docstring.split("\n")[0].strip()
+                     if first_line and first_line not in acceptance:
+                         # Convert to Given/When/Then format
+                         acceptance.append(self._convert_to_gwt_format(first_line, method.name, class_name))

-         # Add default acceptance if none found
+         # Add default testable acceptance if none found
          if not acceptance:
-             acceptance.append(f"{group_name} functionality works as expected")
+             acceptance.append(
+                 f"Given {class_name} instance, When {group_name.lower()} is performed, Then operation completes successfully"
+             )
+
+         # Extract scenarios from control flow (Step 1.2)
+         scenarios: dict[str, list[str]] | None = None
+         if methods:
+             # Extract scenarios from the first method (representative of the group)
+             # In the future, we could merge scenarios from all methods in the group
+             primary_method = methods[0]
+             scenarios = self.control_flow_analyzer.extract_scenarios_from_method(
+                 primary_method, class_name, primary_method.name
+             )
+
+         # Extract contracts from function signatures (Step 2.1)
+         contracts: dict[str, Any] | None = None
+         if methods:
+             # Extract contracts from the first method (representative of the group)
+             # In the future, we could merge contracts from all methods in the group
+             primary_method = methods[0]
+             contracts = self.contract_extractor.extract_function_contracts(primary_method)

          # Calculate story points (complexity) based on number of methods and their size
          story_points = self._calculate_story_points(methods)
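The Given/When/Then regex above expects comma-separated clauses with an optional trailing period. A docstring it matches, and the criterion that results (the input is illustrative):

    import re

    docstring = "Given a valid cart, When checkout is called, Then an order is created."
    m = re.search(r"Given\s+(.+?),\s*When\s+(.+?),\s*Then\s+(.+?)(?:\.|$)", docstring, re.IGNORECASE)
    # m.group(1) == "a valid cart", m.group(2) == "checkout is called", m.group(3) == "an order is created"
    # appended criterion: "Given a valid cart, When checkout is called, Then an order is created"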
@@ -378,6 +536,8 @@ class CodeAnalyzer:
              value_points=value_points,
              tasks=tasks,
              confidence=0.8 if len(methods) > 1 else 0.6,
+             scenarios=scenarios,
+             contracts=contracts,
          )

      def _generate_story_title(self, group_name: str, class_name: str) -> str:
@@ -533,6 +693,14 @@ class CodeAnalyzer:
                              break
                      if matching_module:
                          self.dependency_graph.add_edge(module_name, matching_module)
+                     elif self.entry_point and not any(
+                         imported_module.startswith(prefix) for prefix in ["src.", "lib.", "app.", "main.", "core."]
+                     ):
+                         # Track external dependencies when using entry point
+                         # Check if it's a standard library or third-party import
+                         # (heuristic: if it doesn't start with known repo patterns)
+                         self.external_dependencies.add(imported_module)
              except (SyntaxError, UnicodeDecodeError):
                  # Skip files that can't be parsed
                  continue
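Note: the prefix heuristic above fires only for scoped runs, and classifies an import that matched no repo module and starts with none of the known repo prefixes as external. Illustratively:

    prefixes = ["src.", "lib.", "app.", "main.", "core."]
    for mod in ["requests", "sqlalchemy.orm", "src.billing.models"]:
        print(mod, "external" if not any(mod.startswith(p) for p in prefixes) else "internal")
    # requests external / sqlalchemy.orm external / src.billing.models internal

The hunk below adds _extract_technology_stack_from_dependencies, whose detection tables reduce dependency declarations to human-readable constraints. A worked example against an illustrative requirements.txt containing fastapi>=0.100, psycopg2-binary==2.9.9, pytest~=8.0, and pydantic:

    line = "fastapi>=0.100"
    package = line.split("==")[0].split(">=")[0].split(">")[0].split("<=")[0].split("<")[0].split("~=")[0].strip()
    # package == "fastapi"; the four packages above map to:
    # ["FastAPI framework", "PostgreSQL database", "pytest for testing", "Pydantic for data validation"]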
@@ -788,6 +956,270 @@ class CodeAnalyzer:
          # This is simplified - would need to track which module each feature comes from
          pass

+     @beartype
+     @ensure(lambda result: isinstance(result, list), "Must return list")
+     def _extract_technology_stack_from_dependencies(self) -> list[str]:
+         """
+         Extract technology stack from dependency files (requirements.txt, pyproject.toml).
+
+         Returns:
+             List of technology constraints extracted from dependency files
+         """
+         constraints: list[str] = []
+
+         # Try to read requirements.txt
+         requirements_file = self.repo_path / "requirements.txt"
+         if requirements_file.exists():
+             try:
+                 content = requirements_file.read_text(encoding="utf-8")
+                 # Parse requirements.txt format: package==version or package>=version
+                 for line in content.splitlines():
+                     line = line.strip()
+                     # Skip comments and empty lines
+                     if not line or line.startswith("#"):
+                         continue
+
+                     # Remove version specifiers for framework detection
+                     package = (
+                         line.split("==")[0]
+                         .split(">=")[0]
+                         .split(">")[0]
+                         .split("<=")[0]
+                         .split("<")[0]
+                         .split("~=")[0]
+                         .strip()
+                     )
+                     package_lower = package.lower()
+
+                     # Detect Python version requirement
+                     if package_lower == "python":
+                         # Extract version from line
+                         if ">=" in line:
+                             version = line.split(">=")[1].split(",")[0].strip()
+                             constraints.append(f"Python {version}+")
+                         elif "==" in line:
+                             version = line.split("==")[1].split(",")[0].strip()
+                             constraints.append(f"Python {version}")
+
+                     # Detect frameworks
+                     framework_map = {
+                         "fastapi": "FastAPI framework",
+                         "django": "Django framework",
+                         "flask": "Flask framework",
+                         "typer": "Typer for CLI",
+                         "tornado": "Tornado framework",
+                         "bottle": "Bottle framework",
+                     }
+
+                     if package_lower in framework_map:
+                         constraints.append(framework_map[package_lower])
+
+                     # Detect databases
+                     db_map = {
+                         "psycopg2": "PostgreSQL database",
+                         "psycopg2-binary": "PostgreSQL database",
+                         "mysql-connector-python": "MySQL database",
+                         "pymongo": "MongoDB database",
+                         "redis": "Redis database",
+                         "sqlalchemy": "SQLAlchemy ORM",
+                     }
+
+                     if package_lower in db_map:
+                         constraints.append(db_map[package_lower])
+
+                     # Detect testing tools
+                     test_map = {
+                         "pytest": "pytest for testing",
+                         "unittest": "unittest for testing",
+                         "nose": "nose for testing",
+                         "tox": "tox for testing",
+                     }
+
+                     if package_lower in test_map:
+                         constraints.append(test_map[package_lower])
+
+                     # Detect deployment tools
+                     deploy_map = {
+                         "docker": "Docker for containerization",
+                         "kubernetes": "Kubernetes for orchestration",
+                     }
+
+                     if package_lower in deploy_map:
+                         constraints.append(deploy_map[package_lower])
+
+                     # Detect data validation
+                     if package_lower == "pydantic":
+                         constraints.append("Pydantic for data validation")
+             except Exception:
+                 # If reading fails, continue silently
+                 pass
+
+         # Try to read pyproject.toml
+         pyproject_file = self.repo_path / "pyproject.toml"
+         if pyproject_file.exists():
+             try:
+                 import tomli  # type: ignore[import-untyped]
+
+                 content = pyproject_file.read_text(encoding="utf-8")
+                 data = tomli.loads(content)
+
+                 # Extract Python version requirement
+                 if "project" in data and "requires-python" in data["project"]:
+                     python_req = data["project"]["requires-python"]
+                     if python_req:
+                         constraints.append(f"Python {python_req}")
+
+                 # Extract dependencies
+                 if "project" in data and "dependencies" in data["project"]:
+                     deps = data["project"]["dependencies"]
+                     for dep in deps:
+                         # Similar parsing as requirements.txt
+                         package = (
+                             dep.split("==")[0]
+                             .split(">=")[0]
+                             .split(">")[0]
+                             .split("<=")[0]
+                             .split("<")[0]
+                             .split("~=")[0]
+                             .strip()
+                         )
+                         package_lower = package.lower()
+
+                         # Apply same mapping as requirements.txt
+                         framework_map = {
+                             "fastapi": "FastAPI framework",
+                             "django": "Django framework",
+                             "flask": "Flask framework",
+                             "typer": "Typer for CLI",
+                             "tornado": "Tornado framework",
+                             "bottle": "Bottle framework",
+                         }
+
+                         if package_lower in framework_map:
+                             constraints.append(framework_map[package_lower])
+
+                         db_map = {
+                             "psycopg2": "PostgreSQL database",
+                             "psycopg2-binary": "PostgreSQL database",
+                             "mysql-connector-python": "MySQL database",
+                             "pymongo": "MongoDB database",
+                             "redis": "Redis database",
+                             "sqlalchemy": "SQLAlchemy ORM",
+                         }
+
+                         if package_lower in db_map:
+                             constraints.append(db_map[package_lower])
+
+                         if package_lower == "pydantic":
+                             constraints.append("Pydantic for data validation")
+             except ImportError:
+                 # tomli not available, try tomllib (Python 3.11+)
+                 try:
+                     import tomllib  # type: ignore[import-untyped]
+
+                     # tomllib.load() takes a file object opened in binary mode
+                     with pyproject_file.open("rb") as f:
+                         data = tomllib.load(f)
+
+                     # Extract Python version requirement
+                     if "project" in data and "requires-python" in data["project"]:
+                         python_req = data["project"]["requires-python"]
+                         if python_req:
+                             constraints.append(f"Python {python_req}")
+
+                     # Extract dependencies
+                     if "project" in data and "dependencies" in data["project"]:
+                         deps = data["project"]["dependencies"]
+                         for dep in deps:
+                             package = (
+                                 dep.split("==")[0]
+                                 .split(">=")[0]
+                                 .split(">")[0]
+                                 .split("<=")[0]
+                                 .split("<")[0]
+                                 .split("~=")[0]
+                                 .strip()
+                             )
+                             package_lower = package.lower()
+
+                             framework_map = {
+                                 "fastapi": "FastAPI framework",
+                                 "django": "Django framework",
+                                 "flask": "Flask framework",
+                                 "typer": "Typer for CLI",
+                                 "tornado": "Tornado framework",
+                                 "bottle": "Bottle framework",
+                             }
+
+                             if package_lower in framework_map:
+                                 constraints.append(framework_map[package_lower])
+
+                             db_map = {
+                                 "psycopg2": "PostgreSQL database",
+                                 "psycopg2-binary": "PostgreSQL database",
+                                 "mysql-connector-python": "MySQL database",
+                                 "pymongo": "MongoDB database",
+                                 "redis": "Redis database",
+                                 "sqlalchemy": "SQLAlchemy ORM",
+                             }
+
+                             if package_lower in db_map:
+                                 constraints.append(db_map[package_lower])
+
+                             if package_lower == "pydantic":
+                                 constraints.append("Pydantic for data validation")
+                 except ImportError:
+                     # Neither tomli nor tomllib available, skip
+                     pass
+             except Exception:
+                 # If parsing fails, continue silently
+                 pass
+
+         # Remove duplicates while preserving order
+         seen: set[str] = set()
+         unique_constraints: list[str] = []
+         for constraint in constraints:
+             if constraint not in seen:
+                 seen.add(constraint)
+                 unique_constraints.append(constraint)
+
+         # Default fallback if nothing extracted
+         if not unique_constraints:
+             unique_constraints = ["Python 3.11+", "Typer for CLI", "Pydantic for data validation"]
+
+         return unique_constraints
+
+     @beartype
+     def _convert_to_gwt_format(self, text: str, method_name: str, class_name: str) -> str:
+         """
+         Convert a text description to Given/When/Then format.
+
+         Args:
+             text: Original text description
+             method_name: Name of the method
+             class_name: Name of the class
+
+         Returns:
+             Acceptance criterion in Given/When/Then format
+         """
+         # If already in Given/When/Then format, return as-is
+         if "Given" in text and "When" in text and "Then" in text:
+             return text
+
+         # Try to extract action and outcome from text
+         text_lower = text.lower()
+
+         # Common patterns
+         if "must" in text_lower or "should" in text_lower:
+             # Extract action after modal verb
+             action_match = re.search(r"(?:must|should)\s+(.+?)(?:\.|$)", text_lower)
+             if action_match:
+                 action = action_match.group(1).strip()
+                 return f"Given {class_name} instance, When {method_name} is called, Then {action}"
+
+         # Default conversion
+         return f"Given {class_name} instance, When {method_name} is called, Then {text}"
+
      def _get_module_dependencies(self, module_name: str) -> list[str]:
          """Get list of modules that the given module depends on."""