borisxdave 0.3.0__tar.gz → 0.3.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,2 @@
1
+ include boris_prompt.md
2
+ include requirements.txt
@@ -1,5 +1,5 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: borisxdave
3
- Version: 0.3.0
3
+ Version: 0.3.1
4
4
  Summary: Boris - Autonomous Project Orchestrator
5
5
  Requires-Python: >=3.8
@@ -28,6 +28,123 @@ DEFAULT_MAX_ITERATIONS = 15
28
28
  MAX_CORRECTIONS = 2
29
29
  MAX_RETRIES = 1
30
30
 
31
# Named swarm-mode configuration bundles, ordered from most cautious to least.
# Keys mirror the corresponding CLI flags (--swarm-budget, --swarm-depth, ...).


def _swarm_preset(budget: int, depth: int, isolation: str, no_converge: bool) -> dict:
    """Build a single swarm preset entry with the standard key set."""
    return {
        "swarm_budget": budget,
        "swarm_depth": depth,
        "isolation": isolation,
        "no_converge": no_converge,
    }


SWARM_PRESETS = {
    "conservative": _swarm_preset(3, 1, "worktree", False),
    "balanced": _swarm_preset(5, 1, "worktree", False),
    "aggressive": _swarm_preset(10, 2, "worktree", False),
    # Closest to the original emergent behavior: effectively unlimited
    # sub-agents, no isolation, no convergence pass.
    "yolo": _swarm_preset(999, 2, "none", True),
}
58
+
59
+
60
class TokenEstimator:
    """Estimates token usage for Boris orchestration.

    Claude Code runs on a token-based subscription model, not pay-per-API-call.
    This estimator helps users understand token consumption before committing to
    a task, and allows setting token budgets to limit usage.

    Token estimates are based on realistic DaveLoop iteration profiles:
    - Base iteration: ~4000 input tokens (prompt) + ~2000 output tokens (response)
    - Context growth: each iteration adds ~1500 tokens of accumulated context
    - Sub-agent spawn: ~3000 tokens each
    - Convergence run: ~5000 tokens (reads multiple files + analysis)
    """

    BASE_INPUT_TOKENS = 4000  # Base input tokens per DaveLoop iteration
    BASE_OUTPUT_TOKENS = 2000  # Base output tokens per DaveLoop iteration
    CONTEXT_GROWTH_PER_ITER = 1500  # Additional context tokens accumulated per iteration
    TOKENS_PER_SUB_AGENT = 3000  # Tokens per sub-agent spawn
    TOKENS_PER_CONVERGENCE = 5000  # Tokens per convergence run

    def __init__(self, max_tokens: "int | None" = None):
        """Initialize the estimator.

        Args:
            max_tokens: Optional token budget; None means unlimited.
        """
        self.max_tokens = max_tokens
        self.estimated_tokens = 0  # Running total of tokens recorded so far

    def _tokens_per_iteration(self, iteration_num: int) -> int:
        """Estimate tokens for a single DaveLoop iteration, accounting for context growth."""
        input_tokens = self.BASE_INPUT_TOKENS + (iteration_num * self.CONTEXT_GROWTH_PER_ITER)
        return input_tokens + self.BASE_OUTPUT_TOKENS

    def record_tokens(self, amount: int) -> None:
        """Record estimated token usage against the running total."""
        self.estimated_tokens += amount

    def estimate_batch(self, num_workers: int, avg_iterations: int,
                       avg_sub_agents: int) -> int:
        """Estimate tokens for a batch of parallel workers.

        Covers per-iteration worker tokens, sub-agent spawns, and one
        convergence run for the whole batch.
        """
        # Every worker follows the same iteration profile, so compute the
        # per-worker cost once and multiply (previous version re-summed the
        # identical series once per worker).
        per_worker = sum(self._tokens_per_iteration(i) for i in range(avg_iterations))
        worker_tokens = num_workers * per_worker
        sub_agent_tokens = num_workers * avg_sub_agents * self.TOKENS_PER_SUB_AGENT
        convergence_tokens = self.TOKENS_PER_CONVERGENCE
        return worker_tokens + sub_agent_tokens + convergence_tokens

    def estimate_task(self, plan) -> dict:
        """Estimate total tokens for a Boris plan.

        Args:
            plan: A plan object exposing a ``milestones`` sequence; only its
                length is used here.

        Returns a dict with breakdown: per-milestone estimates, sub-agent tokens,
        convergence tokens, and total.
        """
        milestones = plan.milestones
        num_milestones = len(milestones)
        avg_iterations = 8  # Typical DaveLoop iterations per milestone

        milestone_tokens = sum(self._tokens_per_iteration(i) for i in range(avg_iterations))
        total_milestone_tokens = milestone_tokens * num_milestones

        # Estimate convergence runs (one per parallel batch, assume ~ceil(milestones/3) batches)
        estimated_batches = max(1, (num_milestones + 2) // 3)
        convergence_tokens = estimated_batches * self.TOKENS_PER_CONVERGENCE

        total = total_milestone_tokens + convergence_tokens

        return {
            "num_milestones": num_milestones,
            "avg_iterations_per_milestone": avg_iterations,
            "tokens_per_milestone": milestone_tokens,
            "total_milestone_tokens": total_milestone_tokens,
            "convergence_tokens": convergence_tokens,
            "total_tokens": total,
        }

    def check_budget(self, estimated_additional: int) -> bool:
        """Check if estimated additional tokens fit within budget.

        Always True when no budget was set.
        """
        if self.max_tokens is None:
            return True
        return (self.estimated_tokens + estimated_additional) <= self.max_tokens

    def summary(self) -> dict:
        """Return token tracking summary.

        Bug fix: the previous version computed ``remaining`` with a truthiness
        test (``if self.max_tokens``), so an explicit budget of 0 reported
        ``remaining: None`` instead of the actual deficit. Use ``is not None``,
        consistent with check_budget().
        """
        remaining = None
        if self.max_tokens is not None:
            remaining = self.max_tokens - self.estimated_tokens
        return {
            "estimated_tokens": self.estimated_tokens,
            "max_tokens": self.max_tokens,
            "remaining": remaining,
        }
146
+ }
147
+
31
148
 
32
149
  def setup_logging() -> logging.Logger:
33
150
  """Set up logging to both console and file."""
@@ -82,6 +199,61 @@ def print_plan_summary(plan: state_module.Plan):
82
199
  print(flush=True)
83
200
 
84
201
 
202
def prompt_skip_milestones(plan: "state_module.Plan", logger: logging.Logger) -> set:
    """Ask the user which milestones, if any, to skip before execution begins.

    Marks the chosen milestones as skipped on the plan and returns the set of
    skipped milestone IDs (empty when nothing is skipped or stdin is closed).
    """
    known_ids = {m.id for m in plan.milestones}

    try:
        raw = input("Enter milestone IDs to skip (comma-separated), or press Enter to run all: ").strip()
    except EOFError:
        return set()  # Non-interactive environment, skip nothing

    if not raw:
        return set()

    # Normalize the comma-separated input to uppercase milestone IDs.
    requested = {token.strip().upper() for token in raw.split(",") if token.strip()}
    skip_ids = requested & known_ids
    unknown = requested - known_ids

    if unknown:
        print(f"[Boris] WARNING: Unknown milestone IDs ignored: {', '.join(sorted(unknown))}", flush=True)
        logger.warning("Unknown milestone IDs in skip request: %s", unknown)

    if not skip_ids:
        return set()

    # Warn when a surviving milestone depends on something we are about to skip.
    for milestone in plan.milestones:
        if milestone.id in skip_ids:
            continue
        broken_deps = [dep for dep in milestone.depends_on if dep in skip_ids]
        if broken_deps:
            print(
                f"[Boris] WARNING: {milestone.id} ({milestone.title}) depends on skipped milestone(s): "
                f"{', '.join(broken_deps)}",
                flush=True,
            )
            logger.warning(
                "Milestone %s depends on skipped milestone(s): %s",
                milestone.id, broken_deps,
            )

    # Flag each selected milestone as skipped on the plan itself.
    for milestone in plan.milestones:
        if milestone.id in skip_ids:
            milestone.status = "skipped"
            print(f"[Boris] Skipping milestone {milestone.id}: {milestone.title}", flush=True)
            logger.info("User skipped milestone %s: %s", milestone.id, milestone.title)

    return skip_ids
255
+
256
+
85
257
  def generate_summary(plan: state_module.Plan, project_dir: str, start_time: datetime) -> str:
86
258
  """Generate a summary markdown file and return its path."""
87
259
  os.makedirs(PLANS_DIR, exist_ok=True)
@@ -245,6 +417,41 @@ def parse_args() -> argparse.Namespace:
245
417
  help="Stop execution after completing this milestone ID (e.g. M4). Implies --incremental."
246
418
  )
247
419
 
420
+ # Swarm mode flags
421
+ parser.add_argument(
422
+ "--swarm", action="store_true",
423
+ help="Enable swarm mode: DaveLoops can spawn sub-agents via Task tool"
424
+ )
425
+ parser.add_argument(
426
+ "--swarm-budget", type=int, default=5,
427
+ help="Max sub-agents per DaveLoop worker in swarm mode (default: 5)"
428
+ )
429
+ parser.add_argument(
430
+ "--swarm-depth", type=int, default=1, choices=[1, 2],
431
+ help="Max sub-agent depth in swarm mode (default: 1, no recursive spawning)"
432
+ )
433
+ parser.add_argument(
434
+ "--preset", choices=["conservative", "balanced", "aggressive", "yolo"],
435
+ help="Apply a swarm configuration preset (implies --swarm)"
436
+ )
437
+ parser.add_argument(
438
+ "--isolation", choices=["none", "worktree"], default="none",
439
+ help="Isolation strategy for parallel workers (default: none)"
440
+ )
441
+ parser.add_argument(
442
+ "--no-converge", action="store_true", dest="no_converge",
443
+ help="Skip convergence phase after each swarm batch"
444
+ )
445
+ parser.add_argument(
446
+ "--max-tokens", type=int, default=None, dest="max_tokens",
447
+ help="Maximum estimated token budget for swarm/turbo mode execution (e.g. 500000)"
448
+ )
449
+ parser.add_argument(
450
+ "--estimate", metavar="TASK", nargs="?", const="__FROM_TASK__", dest="estimate",
451
+ help="Generate plan and print token estimate WITHOUT executing. "
452
+ "Use alone (boris --estimate 'task') or with positional task."
453
+ )
454
+
248
455
  return parser.parse_args()
249
456
 
250
457
 
@@ -516,16 +723,177 @@ def _process_milestone_verdict(verdict_result, result, milestone, plan, st, proj
516
723
  state_module.save(st)
517
724
 
518
725
 
726
def validate_turbo_batch(ready: list, plan: "state_module.Plan", logger: logging.Logger) -> list:
    """Filter out milestones from a turbo batch whose dependencies were skipped.

    A milestone whose dependency was skipped cannot succeed without that
    dependency's output, so it is dropped (deferred) from this batch with a
    warning. NOTE: foundation-first ordering (first batch limited to
    no-dependency milestones) is enforced by the caller, not here — the old
    docstring incorrectly claimed this function did it.

    Args:
        ready: Milestones eligible to run in this batch.
        plan: The full plan; used to collect skipped milestone IDs.
        logger: Logger for deferral warnings.

    Returns:
        The subset of ``ready`` with no skipped dependencies, order preserved.
    """
    skipped_ids = {m.id for m in plan.milestones if m.status == "skipped"}
    valid = []
    for m in ready:
        skipped_deps = [d for d in m.depends_on if d in skipped_ids]
        if skipped_deps:
            logger.warning(
                "Milestone %s depends on skipped milestone(s) %s - deferring from turbo batch",
                m.id, skipped_deps
            )
            print(f"[Boris] WARNING: {m.id} skipped from turbo batch - depends on skipped: {skipped_deps}", flush=True)
        else:
            valid.append(m)
    return valid
745
+
746
+
747
def _run_convergence(st, plan, project_dir, batch_milestones, logger):
    """Run a convergence agent after a parallel batch to resolve conflicts.

    Scans the milestones completed in this batch for files that more than one
    milestone planned to touch, and if any overlap is found, runs a short
    DaveLoop whose only job is to reconcile those files.

    NOTE(review): ``st`` and ``plan`` are accepted but not used in this body —
    presumably kept for signature symmetry with other phase runners; confirm.
    """
    completed_in_batch = [m for m in batch_milestones if m.status == "completed"]
    if len(completed_in_batch) < 2:
        return  # No conflicts possible with 0-1 completed milestones

    # Collect all files touched by this batch and check for overlaps.
    # file_owners maps path -> last milestone seen touching it; each time a
    # path reappears we record a (path, previous owner, current owner) pair,
    # so with 3+ writers only consecutive pairs are reported.
    file_owners = {}
    conflicts = []
    for m in completed_in_batch:
        # files_to_create / files_to_modify may be None; treat as empty.
        for f in (m.files_to_create or []) + (m.files_to_modify or []):
            if f in file_owners:
                conflicts.append((f, file_owners[f], m.id))
            file_owners[f] = m.id

    if not conflicts:
        logger.info("Convergence: No file conflicts detected in batch")
        print("[Boris] Convergence: No conflicts detected. Skipping.", flush=True)
        return

    print(f"[Boris] Convergence: {len(conflicts)} potential conflict(s) detected", flush=True)
    for filepath, owner1, owner2 in conflicts:
        print(f" [Boris] {filepath}: written by {owner1} and {owner2}", flush=True)

    # Build convergence prompt listing every conflicted file and its writers.
    conflict_text = "\n".join(
        f"- {f}: modified by {o1} and {o2}" for f, o1, o2 in conflicts
    )

    # chr(10) is '\n' — used because backslashes are not allowed inside
    # f-string expressions on the Python versions this package supports (>=3.8).
    convergence_prompt = f"""# Convergence Task

Multiple parallel agents modified overlapping files. Resolve any conflicts.

## Conflicts Detected
{conflict_text}

## Milestones Completed in This Batch
{chr(10).join(f'- {m.id}: {m.title}' for m in completed_in_batch)}

## Instructions
1. Read each conflicted file
2. Check for type mismatches, duplicate definitions, incompatible interfaces
3. Reconcile into a consistent state
4. Run `tsc --noEmit` (TypeScript) or equivalent type checker
5. Fix any remaining build errors
6. Do NOT add new features - only resolve conflicts between existing code

When all conflicts are resolved and the build is clean, output [DAVELOOP:RESOLVED].
"""

    # Run convergence via DaveLoop (without Task tool - no swarm for convergence)
    result = engine.run(convergence_prompt, project_dir, max_iterations=5)
    if result.resolved:
        print("[Boris] Convergence: All conflicts resolved", flush=True)
        logger.info("Convergence phase completed successfully")
    else:
        # Best-effort: failure here is logged but does not abort the run.
        print("[Boris] WARNING: Convergence could not resolve all conflicts", flush=True)
        logger.warning("Convergence phase did not fully resolve")
805
+
806
+
807
def _print_swarm_dashboard(project_dir: str, batch_num: int, batch_milestones: list):
    """Render a one-shot swarm status dashboard for the current batch (B7).

    Reads the per-worker status records written by the engine and prints a
    summary line per milestone, plus any held file locks when the optional
    file_lock module is importable.
    """
    worker_states = engine.read_worker_statuses(project_dir)
    if not worker_states:
        return

    active_count = sum(1 for info in worker_states.values() if info.get("state") in ("starting", "working"))
    action_total = sum(info.get("actions", 0) for info in worker_states.values())

    print(flush=True)
    print("=== BORIS SWARM STATUS ===", flush=True)
    print(f"Batch {batch_num} | Workers: {active_count} active / {len(worker_states)} total | Actions: {action_total}", flush=True)
    print(flush=True)

    for milestone in batch_milestones:
        info = worker_states.get(milestone.id, {})
        state = info.get("state", "unknown")
        actions = info.get("actions", 0)
        reasoning = info.get("reasoning_blocks", 0)
        interrupts = info.get("interrupts", 0)
        last = info.get("last_action", "")

        # Crude progress bar: one '#' per reasoning block, capped at 10.
        filled = min(reasoning, 10)
        bar = "#" * filled + "-" * (10 - filled)

        state_str = state.upper()
        interrupt_str = f" | {interrupts} interrupts" if interrupts > 0 else ""
        print(f"[{milestone.id}] {milestone.title[:30]:<30} [{bar}] {state_str} | {actions} actions{interrupt_str}", flush=True)
        if last:
            print(f" Last: {last}", flush=True)

    # Best-effort extra: show held file locks if file_lock.py is available.
    try:
        from file_lock import FileLockManager
        lock_manager = FileLockManager(project_dir)
        held = lock_manager.get_locked_files()
        if held:
            lock_strs = [f"{os.path.basename(f)} ({owner})" for f, owner in held.items()]
            print(f"\nFile locks: {', '.join(lock_strs)}", flush=True)
    except ImportError:
        pass

    print("===========================", flush=True)
    print(flush=True)
852
+
853
+
854
def _apply_preset(args):
    """Apply a swarm preset to args if specified. Preset implies --swarm.

    Flags still at their argparse defaults are replaced by the preset's
    values; anything else is assumed user-set and kept. NOTE(review): a flag
    explicitly set to its default value is indistinguishable from the default
    here, so the preset will override it — confirm whether that is acceptable.
    """
    if not args.preset:
        return
    args.swarm = True
    chosen = SWARM_PRESETS[args.preset]
    # Only override flags the user left at their argparse defaults.
    for attr, default in (("swarm_budget", 5), ("swarm_depth", 1)):
        if getattr(args, attr) == default:
            setattr(args, attr, chosen[attr])
    if args.isolation == "none":  # argparse default for --isolation
        args.isolation = chosen.get("isolation", "none")
    if not args.no_converge:
        args.no_converge = chosen["no_converge"]
869
+
870
+
519
871
  def main():
520
872
  """Main Boris orchestration loop."""
521
873
  args = parse_args()
522
874
 
875
+ # Apply swarm preset if specified (implies --swarm)
876
+ _apply_preset(args)
877
+
523
878
  # --stop-at implies incremental mode
524
879
  if args.stop_at:
525
880
  args.incremental = True
526
881
 
882
+ # Handle --estimate: resolve the task description from either the flag or positional arg
883
+ if args.estimate is not None:
884
+ estimate_task = args.estimate if args.estimate != "__FROM_TASK__" else args.task
885
+ if not estimate_task:
886
+ print("[Boris] Error: --estimate requires a task description", flush=True)
887
+ sys.exit(1)
888
+ args.task = estimate_task
889
+
527
890
  logger = setup_logging()
528
891
 
892
+ # Initialize token estimator for swarm/turbo mode
893
+ token_estimator = None
894
+ if getattr(args, 'swarm', False) or getattr(args, 'max_tokens', None) or getattr(args, 'turbo', False):
895
+ token_estimator = TokenEstimator(max_tokens=args.max_tokens)
896
+
529
897
  # Create required dirs
530
898
  os.makedirs(PLANS_DIR, exist_ok=True)
531
899
  os.makedirs(LOGS_DIR, exist_ok=True)
@@ -567,6 +935,9 @@ def main():
567
935
 
568
936
  print_plan_summary(plan)
569
937
 
938
+ # Prompt user to skip milestones before execution
939
+ prompt_skip_milestones(plan, logger)
940
+
570
941
  # Create initial state from the loaded plan
571
942
  st = state_module.State(
572
943
  plan=plan,
@@ -625,10 +996,32 @@ def main():
625
996
 
626
997
  print_plan_summary(plan)
627
998
 
999
+ # --estimate mode: print token estimate and exit without executing
1000
+ if args.estimate is not None:
1001
+ estimator = TokenEstimator()
1002
+ estimate = estimator.estimate_task(plan)
1003
+ print("=" * 60, flush=True)
1004
+ print(" BORIS - Token Estimate", flush=True)
1005
+ print("=" * 60, flush=True)
1006
+ print(f" Milestones: {estimate['num_milestones']}", flush=True)
1007
+ print(f" Avg iterations each: {estimate['avg_iterations_per_milestone']}", flush=True)
1008
+ print(f" Tokens/milestone: {estimate['tokens_per_milestone']:,}", flush=True)
1009
+ print(f" Milestone tokens: {estimate['total_milestone_tokens']:,}", flush=True)
1010
+ print(f" Convergence tokens: {estimate['convergence_tokens']:,}", flush=True)
1011
+ print(f" ----------------------------------------", flush=True)
1012
+ print(f" TOTAL ESTIMATED: {estimate['total_tokens']:,} tokens", flush=True)
1013
+ print("=" * 60, flush=True)
1014
+ print(flush=True)
1015
+ print("[Boris] Estimate-only mode. No execution performed.", flush=True)
1016
+ return
1017
+
628
1018
  if args.plan_only:
629
1019
  print("[Boris] Plan-only mode. Exiting.", flush=True)
630
1020
  return
631
1021
 
1022
+ # Prompt user to skip milestones before execution
1023
+ prompt_skip_milestones(plan, logger)
1024
+
632
1025
  # Create initial state
633
1026
  st = state_module.State(
634
1027
  plan=plan,
@@ -686,6 +1079,27 @@ def main():
686
1079
  state_module.save(st)
687
1080
  break
688
1081
 
1082
+ # Dependency-aware batch validation: filter milestones with skipped deps
1083
+ ready = validate_turbo_batch(ready, plan, logger)
1084
+ if not ready:
1085
+ logger.warning("All ready milestones filtered out by turbo batch validation")
1086
+ break
1087
+
1088
+ # Foundation-first: first batch only runs milestones with no dependencies
1089
+ if batch_num == 0:
1090
+ foundation = [m for m in ready if not m.depends_on]
1091
+ if foundation:
1092
+ ready = foundation[:1] # Only one foundation milestone at a time
1093
+
1094
+ # Token budget check (swarm/turbo mode)
1095
+ if token_estimator:
1096
+ estimated = token_estimator.estimate_batch(len(ready), args.max_iter, args.swarm_budget if getattr(args, 'swarm', False) else 0)
1097
+ if not token_estimator.check_budget(estimated):
1098
+ print(f"[Boris] Token budget exceeded. Estimated: {token_estimator.estimated_tokens + estimated:,}, Budget: {token_estimator.max_tokens:,} tokens", flush=True)
1099
+ logger.warning("Token budget exceeded, stopping execution")
1100
+ break
1101
+ token_estimator.record_tokens(estimated)
1102
+
689
1103
  batch_num += 1
690
1104
  batch_ids = [m.id for m in ready]
691
1105
  print(flush=True)
@@ -711,7 +1125,12 @@ def main():
711
1125
  prompt_map[m.id] = prompt
712
1126
 
713
1127
  # Run all DaveLoops in parallel
714
- parallel_results = engine.run_parallel(tasks, project_dir, args.max_iter)
1128
+ isolation = getattr(args, 'isolation', 'none') if getattr(args, 'swarm', False) else 'none'
1129
+ parallel_results = engine.run_parallel(tasks, project_dir, args.max_iter, isolation=isolation)
1130
+
1131
+ # Print swarm dashboard after parallel run completes (B7)
1132
+ _print_swarm_dashboard(project_dir, batch_num, ready)
1133
+ engine.clear_worker_statuses(project_dir)
715
1134
 
716
1135
  # Process verdicts sequentially (corrections/retries run sequentially)
717
1136
  batch_summary = {}
@@ -731,6 +1150,13 @@ def main():
731
1150
  )
732
1151
  batch_summary[milestone.id] = milestone.status.upper()
733
1152
 
1153
+ # Convergence phase: reconcile type conflicts from parallel workers
1154
+ if not getattr(args, 'no_converge', False):
1155
+ completed_in_batch = [m for m, r in parallel_results if m.status == "completed"]
1156
+ if len(completed_in_batch) > 1:
1157
+ print(f"[Boris] TURBO: Running convergence phase for batch {batch_num}...", flush=True)
1158
+ _run_convergence(st, plan, project_dir, completed_in_batch, logger)
1159
+
734
1160
  # Git commits sequentially after entire batch
735
1161
  if not st.no_git:
736
1162
  for milestone, result in parallel_results: