adversarial-workflow 0.6.1-py3-none-any.whl → 0.6.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -12,7 +12,7 @@ Usage:
     adversarial validate "pytest"
 """
 
-__version__ = "0.6.1"
+__version__ = "0.6.2"
 __author__ = "Fredrik Matheson"
 __license__ = "MIT"
 
@@ -27,9 +27,9 @@ from pathlib import Path
 from typing import Dict, List, Optional, Tuple
 
 import yaml
-from dotenv import load_dotenv, dotenv_values
+from dotenv import dotenv_values, load_dotenv
 
-__version__ = "0.6.1"
+__version__ = "0.6.2"
 
 # ANSI color codes for better output
 RESET = "\033[0m"
@@ -806,15 +806,14 @@ def check() -> int:
 
     if env_file.exists():
         try:
-            # Load .env into environment (idempotent - safe to call again after main())
-            load_dotenv(env_file)
-            # Use dotenv_values() to count variables directly from file
-            # This gives accurate count regardless of what was already in environment
+            # Count variables by reading file directly (works even if already loaded)
            env_vars = dotenv_values(env_file)
+            var_count = len([k for k, v in env_vars.items() if v is not None])
+
+            # Still load to ensure environment is set
+            load_dotenv(env_file)
             env_loaded = True
-            good_checks.append(
-                f".env file found ({len(env_vars)} variables configured)"
-            )
+            good_checks.append(f".env file found and loaded ({var_count} variables)")
         except (FileNotFoundError, PermissionError) as e:
             # File access errors
             issues.append(
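
Note on the hunk above: `dotenv_values()` parses the `.env` file into a plain dict without touching the process environment, so the count stays accurate even when `load_dotenv()` has already run (which, by default, does not override variables that are already set). A minimal standalone sketch of the same pattern; the `.env` path and example keys are assumptions:

```python
# Sketch of the counting pattern from the hunk above; ".env" path assumed.
from dotenv import dotenv_values, load_dotenv

env_vars = dotenv_values(".env")  # e.g. {"OPENAI_API_KEY": "sk-...", "EMPTY_VAR": None}
# Keys declared without a value parse as None, so filter them out of the count.
var_count = len([k for k, v in env_vars.items() if v is not None])

load_dotenv(".env")  # still load; existing environment variables win by default
print(f".env file found and loaded ({var_count} variables)")
```
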
@@ -2097,10 +2096,6 @@ def evaluate(task_file: str) -> int:
     return 0
 
 
-
-
-
-
 def review() -> int:
     """Run Phase 3: Code review."""
 
@@ -2739,54 +2734,61 @@ def agent_onboard(project_path: str = ".") -> int:
     return 0
 
 
-def split(task_file: str, strategy: str = "sections", max_lines: int = 500, dry_run: bool = False):
+def split(
+    task_file: str,
+    strategy: str = "sections",
+    max_lines: int = 500,
+    dry_run: bool = False,
+):
     """Split large task files into smaller evaluable chunks.
-
+
     Args:
         task_file: Path to the task file to split
         strategy: Split strategy ('sections', 'phases', or 'manual')
         max_lines: Maximum lines per split (default: 500)
         dry_run: Preview splits without creating files
-
+
     Returns:
         Exit code (0 for success, 1 for error)
     """
     from .utils.file_splitter import (
-        analyze_task_file,
-        split_by_sections,
-        split_by_phases,
-        generate_split_files
+        analyze_task_file,
+        generate_split_files,
+        split_by_phases,
+        split_by_sections,
     )
-
+
     try:
         print_box("File Splitting Utility", CYAN)
-
+
         # Validate file exists
         if not os.path.exists(task_file):
             print(f"{RED}Error: File not found: {task_file}{RESET}")
             return 1
-
+
         # Analyze file
         print(f"📄 Analyzing task file: {task_file}")
         analysis = analyze_task_file(task_file)
-
-        lines = analysis['total_lines']
-        tokens = analysis['estimated_tokens']
+
+        lines = analysis["total_lines"]
+        tokens = analysis["estimated_tokens"]
         print(f" Lines: {lines}")
         print(f" Estimated tokens: ~{tokens:,}")
-
+
         # Check if splitting is recommended
         if lines <= max_lines:
-            print(f"{GREEN}✅ File is under recommended limit ({max_lines} lines){RESET}")
+            print(
+                f"{GREEN}✅ File is under recommended limit ({max_lines} lines){RESET}"
+            )
             print("No splitting needed.")
             return 0
-
+
         print(f"{YELLOW}⚠️ File exceeds recommended limit ({max_lines} lines){RESET}")
-
+
         # Read file content for splitting
-        with open(task_file, 'r', encoding='utf-8') as f:
+        with open(task_file, "r", encoding="utf-8") as f:
             content = f.read()
-
+
         # Apply split strategy
         if strategy == "sections":
             splits = split_by_sections(content, max_lines=max_lines)
@@ -2795,42 +2797,44 @@ def split(task_file: str, strategy: str = "sections", max_lines: int = 500, dry_
             splits = split_by_phases(content)
             print(f"\n💡 Suggested splits (by phases):")
         else:
-            print(f"{RED}Error: Unknown strategy '{strategy}'. Use 'sections' or 'phases'.{RESET}")
+            print(
+                f"{RED}Error: Unknown strategy '{strategy}'. Use 'sections' or 'phases'.{RESET}"
+            )
             return 1
-
+
         # Display split preview
         for i, split in enumerate(splits, 1):
             filename = f"{Path(task_file).stem}-part{i}{Path(task_file).suffix}"
             print(f" - {filename} ({split['line_count']} lines)")
-
+
         # Dry run mode
         if dry_run:
             print(f"\n{CYAN}📋 Dry run mode - no files created{RESET}")
             return 0
-
+
         # Prompt user for confirmation
         create_files = prompt_user(f"\nCreate {len(splits)} files?", default="n")
-
-        if create_files.lower() in ['y', 'yes']:
+
+        if create_files.lower() in ["y", "yes"]:
             # Create output directory
             output_dir = os.path.join(os.path.dirname(task_file), "splits")
-
+
             # Generate split files
             created_files = generate_split_files(task_file, splits, output_dir)
-
+
             print(f"{GREEN}✅ Created {len(created_files)} files:{RESET}")
             for file_path in created_files:
                 print(f" {file_path}")
-
+
             print(f"\n{CYAN}💡 Tip: Evaluate each split file independently:{RESET}")
             for file_path in created_files:
                 rel_path = os.path.relpath(file_path)
                 print(f" adversarial evaluate {rel_path}")
         else:
             print("Cancelled - no files created.")
-
+
         return 0
-
+
     except Exception as e:
         print(f"{RED}Error during file splitting: {e}{RESET}")
         return 1
@@ -2876,6 +2880,7 @@ def list_evaluators() -> int:
 
     return 0
 
+
 def main():
     """Main CLI entry point."""
     import logging
@@ -2888,10 +2893,20 @@ def main():
     except Exception as e:
         print(f"Warning: Could not load .env file: {e}", file=sys.stderr)
 
+    # Load .env file before any commands run
+    # Use explicit path to ensure we find .env in current working directory
+    # (load_dotenv() without args can fail to find .env in some contexts)
+    env_file = Path.cwd() / ".env"
+    if env_file.exists():
+        try:
+            load_dotenv(env_file)
+        except (OSError, UnicodeDecodeError) as e:
+            print(f"Warning: Could not load .env file: {e}", file=sys.stderr)
+
     from adversarial_workflow.evaluators import (
+        BUILTIN_EVALUATORS,
         get_all_evaluators,
         run_evaluator,
-        BUILTIN_EVALUATORS,
     )
 
     logger = logging.getLogger(__name__)
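
The block added above changes when `.env` is loaded in `main()`: instead of relying on `load_dotenv()`'s default file discovery, it resolves the path explicitly against the current working directory. A self-contained sketch of the same behavior, lifted directly from the diff:

```python
# Sketch of the explicit-path .env loading added to main().
import sys
from pathlib import Path

from dotenv import load_dotenv

env_file = Path.cwd() / ".env"  # explicit path; no reliance on default discovery
if env_file.exists():
    try:
        load_dotenv(env_file)
    except (OSError, UnicodeDecodeError) as e:
        # Mirror the diff: warn on stderr but keep the CLI usable.
        print(f"Warning: Could not load .env file: {e}", file=sys.stderr)
```
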
@@ -2899,8 +2914,16 @@ def main():
     # Commands that cannot be overridden by evaluators
     # Note: 'review' is special - it reviews git changes without a file argument
     STATIC_COMMANDS = {
-        "init", "check", "doctor", "health", "quickstart",
-        "agent", "split", "validate", "review", "list-evaluators"
+        "init",
+        "check",
+        "doctor",
+        "health",
+        "quickstart",
+        "agent",
+        "split",
+        "validate",
+        "review",
+        "list-evaluators",
     }
 
     parser = argparse.ArgumentParser(
@@ -2989,16 +3012,21 @@ For more information: https://github.com/movito/adversarial-workflow
     )
     split_parser.add_argument("task_file", help="Task file to split")
     split_parser.add_argument(
-        "--strategy", "-s", choices=["sections", "phases"], default="sections",
-        help="Split strategy: 'sections' (default) or 'phases'"
+        "--strategy",
+        "-s",
+        choices=["sections", "phases"],
+        default="sections",
+        help="Split strategy: 'sections' (default) or 'phases'",
     )
     split_parser.add_argument(
-        "--max-lines", "-m", type=int, default=500,
-        help="Maximum lines per split (default: 500)"
+        "--max-lines",
+        "-m",
+        type=int,
+        default=500,
+        help="Maximum lines per split (default: 500)",
     )
     split_parser.add_argument(
-        "--dry-run", action="store_true",
-        help="Preview splits without creating files"
+        "--dry-run", action="store_true", help="Preview splits without creating files"
     )
 
     # list-evaluators command
@@ -3019,7 +3047,12 @@ For more information: https://github.com/movito/adversarial-workflow
     for name, config in evaluators.items():
         # Skip if name conflicts with static command
         if name in STATIC_COMMANDS:
-            logger.warning("Evaluator '%s' conflicts with CLI command; skipping", name)
+            # Only warn for user-defined evaluators, not built-ins
+            # Built-in conflicts are intentional (e.g., 'review' command vs 'review' evaluator)
+            if getattr(config, "source", None) != "builtin":
+                logger.warning(
+                    "Evaluator '%s' conflicts with CLI command; skipping", name
+                )
             # Mark as registered to prevent alias re-registration attempts
             registered_configs.add(id(config))
             continue
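
The rewritten conflict check above only warns when a colliding evaluator is user-defined; built-in collisions (such as a `review` evaluator shadowed by the `review` command) stay silent but are still skipped. A small sketch of that rule; the `EvaluatorConfig` layout and the `"local"` source value here are assumptions for illustration, implied by the diff's `getattr(config, "source", None)` check:

```python
# Hypothetical EvaluatorConfig with a "source" attribute; field names assumed.
import logging
from dataclasses import dataclass

logging.basicConfig()
logger = logging.getLogger(__name__)

STATIC_COMMANDS = {"init", "check", "review", "split"}

@dataclass
class EvaluatorConfig:
    name: str
    source: str  # "builtin" or e.g. "local"

for config in (EvaluatorConfig("review", "builtin"), EvaluatorConfig("check", "local")):
    if config.name in STATIC_COMMANDS:
        # Both are skipped; only the user-defined one is worth a warning.
        if getattr(config, "source", None) != "builtin":
            logger.warning("Evaluator '%s' conflicts with CLI command; skipping", config.name)
        continue
```
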
@@ -3046,10 +3079,11 @@ For more information: https://github.com/movito/adversarial-workflow
         )
         eval_parser.add_argument("file", help="File to evaluate")
         eval_parser.add_argument(
-            "--timeout", "-t",
+            "--timeout",
+            "-t",
             type=int,
             default=180,
-            help="Timeout in seconds (default: 180)"
+            help="Timeout in seconds (default: 180)",
         )
         # Store config for later execution
         eval_parser.set_defaults(evaluator_config=config)
@@ -3097,7 +3131,7 @@ For more information: https://github.com/movito/adversarial-workflow
             args.task_file,
             strategy=args.strategy,
             max_lines=args.max_lines,
-            dry_run=args.dry_run
+            dry_run=args.dry_run,
         )
     elif args.command == "list-evaluators":
         return list_evaluators()
@@ -1,13 +1,13 @@
 """Evaluators module for adversarial-workflow plugin architecture."""
 
+from .builtins import BUILTIN_EVALUATORS
 from .config import EvaluatorConfig
 from .discovery import (
+    EvaluatorParseError,
     discover_local_evaluators,
     parse_evaluator_yaml,
-    EvaluatorParseError,
 )
 from .runner import run_evaluator
-from .builtins import BUILTIN_EVALUATORS
 
 
 def get_all_evaluators() -> dict[str, EvaluatorConfig]:
@@ -17,6 +17,7 @@ def get_all_evaluators() -> dict[str, EvaluatorConfig]:
     Aliases from local evaluators are also included in the returned dictionary.
     """
     import logging
+
     logger = logging.getLogger(__name__)
 
     evaluators: dict[str, EvaluatorConfig] = {}
@@ -40,9 +40,7 @@ def parse_evaluator_yaml(yml_file: Path) -> EvaluatorConfig:
     try:
         content = yml_file.read_text(encoding="utf-8")
     except UnicodeDecodeError as e:
-        raise EvaluatorParseError(
-            f"File encoding error (not UTF-8): {yml_file}"
-        ) from e
+        raise EvaluatorParseError(f"File encoding error (not UTF-8): {yml_file}") from e
 
     # Parse YAML
     data = yaml.safe_load(content)
@@ -58,7 +56,14 @@ def parse_evaluator_yaml(yml_file: Path) -> EvaluatorConfig:
         )
 
     # Validate required fields exist
-    required = ["name", "description", "model", "api_key_env", "prompt", "output_suffix"]
+    required = [
+        "name",
+        "description",
+        "model",
+        "api_key_env",
+        "prompt",
+        "output_suffix",
+    ]
     missing = [f for f in required if f not in data]
     if missing:
         raise EvaluatorParseError(f"Missing required fields: {', '.join(missing)}")
@@ -10,10 +10,10 @@ import tempfile
 from datetime import datetime, timezone
 from pathlib import Path
 
-from .config import EvaluatorConfig
-from ..utils.colors import RESET, BOLD, GREEN, YELLOW, RED
+from ..utils.colors import BOLD, GREEN, RED, RESET, YELLOW
 from ..utils.config import load_config
 from ..utils.validation import validate_evaluation_output
+from .config import EvaluatorConfig
 
 
 def run_evaluator(config: EvaluatorConfig, file_path: str, timeout: int = 180) -> int:
@@ -124,7 +124,7 @@ def _run_custom_evaluator(
     """
 
     # Create temp file for prompt
-    with tempfile.NamedTemporaryFile(mode='w', suffix='.md', delete=False) as f:
+    with tempfile.NamedTemporaryFile(mode="w", suffix=".md", delete=False) as f:
         f.write(full_prompt)
         prompt_file = f.name
 
@@ -136,12 +136,15 @@ def _run_custom_evaluator(
     # Build aider command
     cmd = [
         "aider",
-        "--model", config.model,
+        "--model",
+        config.model,
         "--yes",
         "--no-git",
         "--no-auto-commits",
-        "--message-file", prompt_file,
-        "--read", file_path,
+        "--message-file",
+        prompt_file,
+        "--read",
+        file_path,
     ]
 
     result = subprocess.run(
@@ -224,7 +227,10 @@ def _execute_script(
 
     # Validate output
     file_basename = Path(file_path).stem
-    log_file = Path(project_config["log_directory"]) / f"{file_basename}-{config.output_suffix}.md"
+    log_file = (
+        Path(project_config["log_directory"])
+        / f"{file_basename}-{config.output_suffix}.md"
+    )
 
     is_valid, verdict, message = validate_evaluation_output(str(log_file))
 
@@ -235,7 +241,9 @@ def _execute_script(
     return _report_verdict(verdict, log_file, config)
 
 
-def _report_verdict(verdict: str | None, log_file: Path, config: EvaluatorConfig) -> int:
+def _report_verdict(
+    verdict: str | None, log_file: Path, config: EvaluatorConfig
+) -> int:
     """Report the evaluation verdict to terminal."""
     print()
     if verdict == "APPROVED":
@@ -4,360 +4,392 @@ This module provides functionality to split large markdown files into smaller,
 independently evaluable chunks to work around OpenAI's rate limits.
 """
 
-import re
 import os
+import re
 from pathlib import Path
-from typing import List, Dict, Any
+from typing import Any, Dict, List
 
 
 def analyze_task_file(file_path: str) -> Dict[str, Any]:
     """Analyze file structure and suggest split points.
-
+
     Args:
         file_path: Path to the markdown file to analyze
-
+
     Returns:
         Dict containing:
         - total_lines: Total number of lines
         - sections: List of detected sections with metadata
         - estimated_tokens: Rough token estimate (lines * 4)
         - suggested_splits: List of suggested split points
-
+
     Raises:
         FileNotFoundError: If file doesn't exist
         ValueError: If file is empty or too small
     """
     if not os.path.exists(file_path):
         raise FileNotFoundError(f"File not found: {file_path}")
-
-    with open(file_path, 'r', encoding='utf-8') as f:
+
+    with open(file_path, "r", encoding="utf-8") as f:
         content = f.read()
-
+
     if not content.strip():
         raise ValueError("File is empty or too small")
-
-    lines = content.split('\n')
+
+    lines = content.split("\n")
     total_lines = len(lines)
-
+
     # Detect markdown sections
     sections = []
     current_section = None
     current_start = 1
-
+
     for i, line in enumerate(lines, 1):
         # Check for markdown headings (# or ##)
-        if re.match(r'^#+\s+', line.strip()):
+        if re.match(r"^#+\s+", line.strip()):
             # Close previous section
             if current_section is not None:
-                current_section['end_line'] = i - 1
-                current_section['line_count'] = current_section['end_line'] - current_section['start_line'] + 1
+                current_section["end_line"] = i - 1
+                current_section["line_count"] = (
+                    current_section["end_line"] - current_section["start_line"] + 1
+                )
                 sections.append(current_section)
-
+
             # Start new section
             heading_level = len(line.lstrip().split()[0])  # Count # characters
-            title = re.sub(r'^#+\s+', '', line.strip())
+            title = re.sub(r"^#+\s+", "", line.strip())
             current_section = {
-                'title': title,
-                'heading_level': heading_level,
-                'start_line': i,
-                'end_line': None,
-                'line_count': 0
+                "title": title,
+                "heading_level": heading_level,
+                "start_line": i,
+                "end_line": None,
+                "line_count": 0,
             }
             current_start = i
-
+
     # Close final section
     if current_section is not None:
-        current_section['end_line'] = total_lines
-        current_section['line_count'] = current_section['end_line'] - current_section['start_line'] + 1
+        current_section["end_line"] = total_lines
+        current_section["line_count"] = (
+            current_section["end_line"] - current_section["start_line"] + 1
+        )
         sections.append(current_section)
-
+
     # If no sections found, treat entire file as one section
     if not sections:
-        sections = [{
-            'title': 'Full Document',
-            'heading_level': 1,
-            'start_line': 1,
-            'end_line': total_lines,
-            'line_count': total_lines
-        }]
-
+        sections = [
+            {
+                "title": "Full Document",
+                "heading_level": 1,
+                "start_line": 1,
+                "end_line": total_lines,
+                "line_count": total_lines,
+            }
+        ]
+
     # Estimate tokens (rough approximation: 1 line ≈ 4 tokens)
     estimated_tokens = total_lines * 4
-
+
     # Suggest splits if file is large
     suggested_splits = []
     if total_lines > 500:
         # Suggest section-based splits
         suggested_splits = _suggest_section_splits(sections, max_lines=500)
-
+
     return {
-        'total_lines': total_lines,
-        'sections': sections,
-        'estimated_tokens': estimated_tokens,
-        'suggested_splits': suggested_splits
+        "total_lines": total_lines,
+        "sections": sections,
+        "estimated_tokens": estimated_tokens,
+        "suggested_splits": suggested_splits,
     }
 
 
 def split_by_sections(content: str, max_lines: int = 500) -> List[Dict[str, Any]]:
     """Split file by markdown sections.
-
+
     Args:
         content: The markdown content to split
         max_lines: Maximum lines per split
-
+
     Returns:
         List of split dictionaries with metadata
     """
-    lines = content.split('\n')
+    lines = content.split("\n")
     total_lines = len(lines)
-
+
     if total_lines <= max_lines:
-        return [{
-            'content': content,
-            'title': 'Full Document',
-            'start_line': 1,
-            'end_line': total_lines,
-            'line_count': total_lines
-        }]
-
+        return [
+            {
+                "content": content,
+                "title": "Full Document",
+                "start_line": 1,
+                "end_line": total_lines,
+                "line_count": total_lines,
+            }
+        ]
+
     splits = []
     current_split_lines = []
     current_start = 1
     current_title = "Part"
     split_count = 1
-
+
     for i, line in enumerate(lines, 1):
         current_split_lines.append(line)
-
+
         # Check if we hit a section boundary and are near limit
-        is_section_boundary = re.match(r'^#+\s+', line.strip())
+        is_section_boundary = re.match(r"^#+\s+", line.strip())
         approaching_limit = len(current_split_lines) >= max_lines * 0.8
-
-        if len(current_split_lines) >= max_lines or (is_section_boundary and approaching_limit):
+
+        if len(current_split_lines) >= max_lines or (
+            is_section_boundary and approaching_limit
+        ):
             # Create split
-            split_content = '\n'.join(current_split_lines)
-            splits.append({
-                'content': split_content,
-                'title': f"Part {split_count}",
-                'start_line': current_start,
-                'end_line': i,
-                'line_count': len(current_split_lines)
-            })
-
+            split_content = "\n".join(current_split_lines)
+            splits.append(
+                {
+                    "content": split_content,
+                    "title": f"Part {split_count}",
+                    "start_line": current_start,
+                    "end_line": i,
+                    "line_count": len(current_split_lines),
+                }
+            )
+
             # Reset for next split
             current_split_lines = []
             current_start = i + 1
             split_count += 1
-
+
     # Handle remaining lines
     if current_split_lines:
-        split_content = '\n'.join(current_split_lines)
-        splits.append({
-            'content': split_content,
-            'title': f"Part {split_count}",
-            'start_line': current_start,
-            'end_line': total_lines,
-            'line_count': len(current_split_lines)
-        })
-
+        split_content = "\n".join(current_split_lines)
+        splits.append(
+            {
+                "content": split_content,
+                "title": f"Part {split_count}",
+                "start_line": current_start,
+                "end_line": total_lines,
+                "line_count": len(current_split_lines),
+            }
+        )
+
     return splits
 
 
 def split_by_phases(content: str) -> List[Dict[str, Any]]:
     """Split file by implementation phases.
-
+
     Args:
         content: The markdown content to split
-
+
     Returns:
         List of split dictionaries, one per phase
     """
-    lines = content.split('\n')
+    lines = content.split("\n")
     splits = []
     current_split_lines = []
     current_phase = None
     current_start = 1
-
+
     for i, line in enumerate(lines, 1):
         # Check for phase markers
-        phase_match = re.search(r'#+\s+Phase\s+(\d+)', line, re.IGNORECASE)
-
+        phase_match = re.search(r"#+\s+Phase\s+(\d+)", line, re.IGNORECASE)
+
         if phase_match:
             # Close previous split
             if current_split_lines:
-                split_content = '\n'.join(current_split_lines)
+                split_content = "\n".join(current_split_lines)
                 title = f"Phase {current_phase}" if current_phase else "Overview"
-                splits.append({
-                    'content': split_content,
-                    'title': title,
-                    'phase_number': current_phase,
-                    'start_line': current_start,
-                    'end_line': i - 1,
-                    'line_count': len(current_split_lines)
-                })
-
+                splits.append(
+                    {
+                        "content": split_content,
+                        "title": title,
+                        "phase_number": current_phase,
+                        "start_line": current_start,
+                        "end_line": i - 1,
+                        "line_count": len(current_split_lines),
+                    }
+                )
+
             # Start new split
             current_phase = int(phase_match.group(1))
             current_split_lines = [line]
             current_start = i
         else:
             current_split_lines.append(line)
-
+
     # Handle final split
     if current_split_lines:
-        split_content = '\n'.join(current_split_lines)
+        split_content = "\n".join(current_split_lines)
         title = f"Phase {current_phase}" if current_phase else "Full Document"
-        phase_info = {'phase_number': current_phase} if current_phase else {}
-        splits.append({
-            'content': split_content,
-            'title': title,
-            'start_line': current_start,
-            'end_line': len(lines),
-            'line_count': len(current_split_lines),
-            **phase_info
-        })
-
+        phase_info = {"phase_number": current_phase} if current_phase else {}
+        splits.append(
+            {
+                "content": split_content,
+                "title": title,
+                "start_line": current_start,
+                "end_line": len(lines),
+                "line_count": len(current_split_lines),
+                **phase_info,
+            }
+        )
+
     # If no phases found, return entire content
     if not splits:
-        splits = [{
-            'content': content,
-            'title': 'Full Document',
-            'start_line': 1,
-            'end_line': len(lines),
-            'line_count': len(lines)
-        }]
-
+        splits = [
+            {
+                "content": content,
+                "title": "Full Document",
+                "start_line": 1,
+                "end_line": len(lines),
+                "line_count": len(lines),
+            }
+        ]
+
     return splits
 
 
 def split_at_lines(content: str, line_numbers: List[int]) -> List[Dict[str, Any]]:
     """Split at specified line numbers.
-
+
     Args:
         content: The content to split
         line_numbers: Line numbers where splits should occur
-
+
     Returns:
         List of split dictionaries
     """
-    lines = content.split('\n')
+    lines = content.split("\n")
     total_lines = len(lines)
-
+
     if not line_numbers:
-        return [{
-            'content': content,
-            'title': 'Full Document',
-            'start_line': 1,
-            'end_line': total_lines,
-            'line_count': total_lines
-        }]
-
+        return [
+            {
+                "content": content,
+                "title": "Full Document",
+                "start_line": 1,
+                "end_line": total_lines,
+                "line_count": total_lines,
+            }
+        ]
+
     # Sort and deduplicate line numbers
     split_points = sorted(set(line_numbers))
-
+
     splits = []
     current_start = 1
-
+
     for split_line in split_points:
         if split_line >= total_lines:
             continue
-
+
         # Create split from current_start to split_line
-        split_lines = lines[current_start - 1:split_line]
-        split_content = '\n'.join(split_lines)
-
-        splits.append({
-            'content': split_content,
-            'title': f"Lines {current_start}-{split_line}",
-            'start_line': current_start,
-            'end_line': split_line,
-            'line_count': len(split_lines)
-        })
-
+        split_lines = lines[current_start - 1 : split_line]
+        split_content = "\n".join(split_lines)
+
+        splits.append(
+            {
+                "content": split_content,
+                "title": f"Lines {current_start}-{split_line}",
+                "start_line": current_start,
+                "end_line": split_line,
+                "line_count": len(split_lines),
+            }
+        )
+
         current_start = split_line + 1
-
+
     # Handle remaining lines after final split
     if current_start <= total_lines:
-        remaining_lines = lines[current_start - 1:]
-        split_content = '\n'.join(remaining_lines)
-
-        splits.append({
-            'content': split_content,
-            'title': f"Lines {current_start}-{total_lines}",
-            'start_line': current_start,
-            'end_line': total_lines,
-            'line_count': len(remaining_lines)
-        })
-
+        remaining_lines = lines[current_start - 1 :]
+        split_content = "\n".join(remaining_lines)
+
+        splits.append(
+            {
+                "content": split_content,
+                "title": f"Lines {current_start}-{total_lines}",
+                "start_line": current_start,
+                "end_line": total_lines,
+                "line_count": len(remaining_lines),
+            }
+        )
+
     return splits
 
 
-def generate_split_files(original: str, splits: List[Dict[str, Any]], output_dir: str) -> List[str]:
+def generate_split_files(
+    original: str, splits: List[Dict[str, Any]], output_dir: str
+) -> List[str]:
     """Generate split files with metadata and cross-references.
-
+
     Args:
         original: Original filename
         splits: List of split dictionaries
         output_dir: Directory to write split files
-
+
     Returns:
         List of created file paths
     """
     os.makedirs(output_dir, exist_ok=True)
-
+
     created_files = []
     original_name = Path(original).stem
     original_ext = Path(original).suffix
-
+
     for i, split in enumerate(splits, 1):
         # Generate filename
         filename = f"{original_name}-part{i}{original_ext}"
         file_path = os.path.join(output_dir, filename)
-
+
         # Create content with metadata header
         metadata_header = f"""<!-- Split from {original} -->
 <!-- Part {i} of {len(splits)} -->
 <!-- Lines {split['start_line']}-{split['end_line']} ({split['line_count']} lines) -->
 
 """
-
-        full_content = metadata_header + split['content']
-
+
+        full_content = metadata_header + split["content"]
+
         # Write file
-        with open(file_path, 'w', encoding='utf-8') as f:
+        with open(file_path, "w", encoding="utf-8") as f:
             f.write(full_content)
-
+
         created_files.append(file_path)
-
+
     return created_files
 
 
-def _suggest_section_splits(sections: List[Dict[str, Any]], max_lines: int = 500) -> List[Dict[str, Any]]:
+def _suggest_section_splits(
+    sections: List[Dict[str, Any]], max_lines: int = 500
+) -> List[Dict[str, Any]]:
     """Suggest optimal split points based on sections.
-
+
     Args:
         sections: List of section metadata
         max_lines: Maximum lines per split
-
+
     Returns:
         List of suggested split configurations
     """
     suggestions = []
     current_chunk_lines = 0
     current_chunk_sections = []
-
+
     for section in sections:
-        section_lines = section['line_count']
-
+        section_lines = section["line_count"]
+
         # If adding this section would exceed limit, finish current chunk
         if current_chunk_lines + section_lines > max_lines and current_chunk_sections:
-            suggestions.append({
-                'sections': current_chunk_sections.copy(),
-                'total_lines': current_chunk_lines,
-                'start_line': current_chunk_sections[0]['start_line'],
-                'end_line': current_chunk_sections[-1]['end_line']
-            })
-
+            suggestions.append(
+                {
+                    "sections": current_chunk_sections.copy(),
+                    "total_lines": current_chunk_lines,
+                    "start_line": current_chunk_sections[0]["start_line"],
+                    "end_line": current_chunk_sections[-1]["end_line"],
+                }
+            )
+
             # Start new chunk
             current_chunk_sections = [section]
             current_chunk_lines = section_lines
@@ -365,14 +397,16 @@ def _suggest_section_splits(sections: List[Dict[str, Any]], max_lines: int = 500
         # Add section to current chunk
         current_chunk_sections.append(section)
         current_chunk_lines += section_lines
-
+
     # Add final chunk
     if current_chunk_sections:
-        suggestions.append({
-            'sections': current_chunk_sections,
-            'total_lines': current_chunk_lines,
-            'start_line': current_chunk_sections[0]['start_line'],
-            'end_line': current_chunk_sections[-1]['end_line']
-        })
-
-    return suggestions
+        suggestions.append(
+            {
+                "sections": current_chunk_sections,
+                "total_lines": current_chunk_lines,
+                "start_line": current_chunk_sections[0]["start_line"],
+                "end_line": current_chunk_sections[-1]["end_line"],
+            }
+        )
+
+    return suggestions
@@ -47,7 +47,9 @@ def validate_evaluation_output(
         "concerns",
     ]
 
-    has_evaluation_content = any(marker in content_lower for marker in evaluation_markers)
+    has_evaluation_content = any(
+        marker in content_lower for marker in evaluation_markers
+    )
     if not has_evaluation_content:
         return (
             False,
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: adversarial-workflow
-Version: 0.6.1
+Version: 0.6.2
 Summary: Multi-stage AI code review system preventing phantom work - Author/Evaluator pattern
 Author: Fredrik Matheson
 License: MIT
@@ -1,11 +1,11 @@
-adversarial_workflow/__init__.py,sha256=0AhyTbjRHXfTskqMpd272ayiGdU4rfQqAcf9_oJScnA,596
+adversarial_workflow/__init__.py,sha256=moTEp6nKU5F4B1YnJaSBmwhptkDP0ST5n--2hak9PRc,596
 adversarial_workflow/__main__.py,sha256=Ibb0CngDCh4mpCe8Zxnf3kyKnMddBxQy2JAk_kfTUMQ,119
-adversarial_workflow/cli.py,sha256=ckrJNqJiX6Okg5f7UcahaB2-qx8PrnQg5MA6OymKrpY,110065
-adversarial_workflow/evaluators/__init__.py,sha256=vB4gGaoP46a-ZLOeoVKjR6WohAsgeif4JMhaak9AIPo,1266
+adversarial_workflow/cli.py,sha256=I9LM6MnfW-m1kXbF202l50-SeidLCyyF0Sk0sR-UFqk,110743
+adversarial_workflow/evaluators/__init__.py,sha256=A9ZKUmjSMfyvEu6jDzYAFLxfkt_OQ4RGA10Bv_eO2i4,1267
 adversarial_workflow/evaluators/builtins.py,sha256=u5LokYLe8ruEW2tunhOQaNSkpcZ9Ee2IeTkaC0dZDSY,1102
 adversarial_workflow/evaluators/config.py,sha256=05qYPIiIpCxXBVJzs70WQQLxi8I7MedfhE_oydXEcq0,1520
-adversarial_workflow/evaluators/discovery.py,sha256=V5vyFLKfh3Q9MVEipWMdD0tzsW3xC3RttVS_oEeWIb8,6801
-adversarial_workflow/evaluators/runner.py,sha256=27fdz49wdkNyBfdjr9VcZZA53rw270XYQWp_2y5s0PU,9244
+adversarial_workflow/evaluators/discovery.py,sha256=a8qTUsuJRdPUcVbt1zzEgLixmvJbHA7WfnYSNm8V5OY,6834
+adversarial_workflow/evaluators/runner.py,sha256=JPVeigjGF2fRDVJLcGyDEuy9pCIp-LjmVAZyucMbdCU,9310
 adversarial_workflow/templates/.aider.conf.yml.template,sha256=jT2jWIgsnmS3HLhoQWMTO3GV07bUcsT2keYw60jqiDw,183
 adversarial_workflow/templates/.env.example.template,sha256=TmTlcgz44uZqIbqgXqdfHMl-0vVn96F_EGNohClFkb8,1821
 adversarial_workflow/templates/README.template,sha256=FQAMPO99eIt_kgQfwhGHcrK736rm_MEvWSbPnqBSjAE,1349
@@ -23,11 +23,11 @@ adversarial_workflow/templates/agent-context/current-state.json.template,sha256=
 adversarial_workflow/utils/__init__.py,sha256=Pnm-a_jqoMVOxHdvVWXeVrL0IKI-zkY7EAdbQmZAkSI,352
 adversarial_workflow/utils/colors.py,sha256=uRrG6KfIDBLo0F5_vPwms9NCm9-x8YXBiyZ4naCr868,160
 adversarial_workflow/utils/config.py,sha256=NBoC_-YYukEVo6BgpX2cDyeqV-3tnn_sHNU9L1AuSLQ,1341
-adversarial_workflow/utils/file_splitter.py,sha256=rVRMHJgzJ7uNiytimqbBY8PAr-SevXdRqUpr4xf6LdM,12061
-adversarial_workflow/utils/validation.py,sha256=0QfuRd-kurcadUCd9XQvO-N8RsmLp6ONQnc0vaQTUBA,2188
-adversarial_workflow-0.6.1.dist-info/licenses/LICENSE,sha256=M-dOQlre-NmicyPa55hYOJUW8roGpCKEgtq-z0z1KCA,1073
-adversarial_workflow-0.6.1.dist-info/METADATA,sha256=ItR4yn7PWdP_AsFqDLMQhAkwuxhit76LxkQYHsVDXlo,29955
-adversarial_workflow-0.6.1.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
-adversarial_workflow-0.6.1.dist-info/entry_points.txt,sha256=9H-iZ-yF1uKZ8P0G1suc6kWR0NvK7uPZJbhN7nvt1sE,62
-adversarial_workflow-0.6.1.dist-info/top_level.txt,sha256=8irutNxLRjUbTlzfAibIpz7_ovkkF2h8ES69NQpv24c,21
-adversarial_workflow-0.6.1.dist-info/RECORD,,
+adversarial_workflow/utils/file_splitter.py,sha256=-zSWgAZ71DfX6dBu15Y4M84NBbJzq-0ENktbBEp9zvQ,12409
+adversarial_workflow/utils/validation.py,sha256=ZiJxtm03kJXicfFTt0QZwpc9V_D8PkDOVYrJEDsafQI,2202
+adversarial_workflow-0.6.2.dist-info/licenses/LICENSE,sha256=M-dOQlre-NmicyPa55hYOJUW8roGpCKEgtq-z0z1KCA,1073
+adversarial_workflow-0.6.2.dist-info/METADATA,sha256=zdFc4h-9XPcgg6mn0SCnJNg3VFhYl7XyxI0egYhU2fY,29955
+adversarial_workflow-0.6.2.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+adversarial_workflow-0.6.2.dist-info/entry_points.txt,sha256=9H-iZ-yF1uKZ8P0G1suc6kWR0NvK7uPZJbhN7nvt1sE,62
+adversarial_workflow-0.6.2.dist-info/top_level.txt,sha256=8irutNxLRjUbTlzfAibIpz7_ovkkF2h8ES69NQpv24c,21
+adversarial_workflow-0.6.2.dist-info/RECORD,,
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (80.10.1)
+Generator: setuptools (80.10.2)
 Root-Is-Purelib: true
 Tag: py3-none-any
 