borisxdave 0.3.4__py3-none-any.whl → 0.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
boris.py CHANGED
@@ -14,6 +14,8 @@ if hasattr(sys.stdout, "reconfigure"):
         pass
 os.environ["PYTHONUNBUFFERED"] = "1"
 
+import threading
+import time as _time
 import engine
 import git_manager
 import prompts
@@ -811,30 +813,39 @@ def _print_swarm_dashboard(project_dir: str, batch_num: int, batch_milestones: l
         return
 
     active = sum(1 for s in statuses.values() if s.get("state") in ("starting", "working"))
+    done = sum(1 for s in statuses.values() if s.get("state") == "done")
     total_actions = sum(s.get("actions", 0) for s in statuses.values())
 
+    elapsed = ""
+    started_times = [s.get("started_at", 0) for s in statuses.values() if s.get("started_at")]
+    if started_times:
+        elapsed_sec = int(_time.time() - min(started_times))
+        elapsed = f" | Elapsed: {elapsed_sec // 60}m {elapsed_sec % 60}s"
+
     print(flush=True)
-    print("=== BORIS SWARM STATUS ===", flush=True)
-    print(f"Batch {batch_num} | Workers: {active} active / {len(statuses)} total | Actions: {total_actions}", flush=True)
-    print(flush=True)
+    print("╔══════════════════════════════════════════════════════════════════╗", flush=True)
+    print(f"║ BORIS SWARM DASHBOARD │ Batch {batch_num} │ {active} active / {done} done / {len(statuses)} total{elapsed}", flush=True)
+    print("╠══════════════════════════════════════════════════════════════════╣", flush=True)
 
     for m in batch_milestones:
         status = statuses.get(m.id, {})
-        state = status.get("state", "unknown")
+        state = status.get("state", "waiting")
         actions = status.get("actions", 0)
         reasoning = status.get("reasoning_blocks", 0)
         interrupts = status.get("interrupts", 0)
         last = status.get("last_action", "")
 
         # Progress indicator based on reasoning blocks (rough proxy)
-        bar_len = min(reasoning, 10)
-        bar = "#" * bar_len + "-" * (10 - bar_len)
+        bar_len = min(reasoning, 20)
+        bar = "█" * bar_len + "░" * (20 - bar_len)
 
+        state_icons = {"starting": "⏳", "working": "⚙️", "done": "✅", "failed": "❌"}
+        icon = state_icons.get(state, "⏸️")
         state_str = state.upper()
-        interrupt_str = f" | {interrupts} interrupts" if interrupts > 0 else ""
-        print(f"[{m.id}] {m.title[:30]:<30} [{bar}] {state_str} | {actions} actions{interrupt_str}", flush=True)
+        interrupt_str = f" {interrupts} interrupts" if interrupts > 0 else ""
+        print(f"║ {icon} [{m.id}] {m.title[:28]:<28} [{bar}] {state_str:>8} {actions:>3} actions{interrupt_str}", flush=True)
         if last:
-            print(f" Last: {last}", flush=True)
+            print(f"║ └─ {last[:60]}", flush=True)
 
     # Show file locks if file_lock.py is available
     try:
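The dashboard rows above are rendered purely from the per-worker status dicts (state, actions, reasoning_blocks, interrupts, last_action, started_at). Below is a minimal standalone sketch of that row-rendering logic; the sample statuses data and the render_row helper are hypothetical, introduced only to show how the 20-character bar and the state icon are derived.

import time

# Hypothetical sample of the per-worker status data the dashboard reads.
statuses = {
    "M1": {"state": "working", "actions": 7, "reasoning_blocks": 12,
           "interrupts": 0, "last_action": "edit engine.py", "started_at": time.time() - 95},
    "M2": {"state": "done", "actions": 15, "reasoning_blocks": 20,
           "interrupts": 1, "last_action": "run tests", "started_at": time.time() - 200},
}

STATE_ICONS = {"starting": "⏳", "working": "⚙️", "done": "✅", "failed": "❌"}

def render_row(milestone_id: str, title: str, status: dict) -> str:
    """Build one dashboard row: icon, title, 20-char progress bar, state, action count."""
    state = status.get("state", "waiting")
    reasoning = status.get("reasoning_blocks", 0)
    bar_len = min(reasoning, 20)                  # reasoning blocks as a rough progress proxy
    bar = "█" * bar_len + "░" * (20 - bar_len)
    icon = STATE_ICONS.get(state, "⏸️")
    return f"║ {icon} [{milestone_id}] {title[:28]:<28} [{bar}] {state.upper():>8} {status.get('actions', 0):>3} actions"

for mid, st in statuses.items():
    print(render_row(mid, f"Milestone {mid}", st))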
@@ -843,14 +854,29 @@ def _print_swarm_dashboard(project_dir: str, batch_num: int, batch_milestones: l
         locks = flm.get_locked_files()
         if locks:
             lock_strs = [f"{os.path.basename(f)} ({owner})" for f, owner in locks.items()]
-            print(f"\nFile locks: {', '.join(lock_strs)}", flush=True)
+            print(f"║ 🔒 Locks: {', '.join(lock_strs)}", flush=True)
     except ImportError:
         pass
 
-    print("===========================", flush=True)
+    print("╚══════════════════════════════════════════════════════════════════╝", flush=True)
     print(flush=True)
 
 
+def _start_live_dashboard(project_dir: str, batch_num: int, batch_milestones: list, interval: float = 10.0):
+    """Start a background thread that prints the swarm dashboard periodically."""
+    stop_event = threading.Event()
+
+    def _loop():
+        while not stop_event.is_set():
+            stop_event.wait(interval)
+            if not stop_event.is_set():
+                _print_swarm_dashboard(project_dir, batch_num, batch_milestones)
+
+    t = threading.Thread(target=_loop, daemon=True)
+    t.start()
+    return stop_event
+
+
 def _apply_preset(args):
     """Apply a swarm preset to args if specified. Preset implies --swarm."""
     if not args.preset:
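The new _start_live_dashboard helper follows a standard periodic-refresh pattern: the background thread sleeps in stop_event.wait(interval), so stop_event.set() both ends the loop and wakes the thread immediately rather than waiting out the interval, and daemon=True keeps the thread from blocking interpreter exit. A minimal self-contained sketch of the same pattern, with a stand-in callback instead of the real dashboard printer:

import threading
import time

def start_periodic(callback, interval: float = 2.0) -> threading.Event:
    """Run callback() every `interval` seconds in a daemon thread until the returned Event is set."""
    stop_event = threading.Event()

    def _loop():
        while not stop_event.is_set():
            # wait() returns early once the event is set, so shutdown is immediate
            stop_event.wait(interval)
            if not stop_event.is_set():
                callback()

    threading.Thread(target=_loop, daemon=True).start()
    return stop_event

# Usage: refresh while some long-running work happens, then stop.
stop = start_periodic(lambda: print("tick", time.strftime("%H:%M:%S")), interval=1.0)
time.sleep(3.5)   # stand-in for the long-running parallel batch
stop.set()        # stops the refresher and wakes it immediately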
@@ -1141,32 +1167,50 @@ def main():
             tasks.append((prompt, m))
             prompt_map[m.id] = prompt
 
-        # Run all DaveLoops in parallel
+        # Run all DaveLoops in parallel with live dashboard
         isolation = getattr(args, 'isolation', 'none') if getattr(args, 'swarm', False) else 'none'
+        _print_swarm_dashboard(project_dir, batch_num, ready)
+        dashboard_stop = _start_live_dashboard(project_dir, batch_num, ready, interval=15.0)
         parallel_results = engine.run_parallel(tasks, project_dir, args.max_iter, isolation=isolation)
+        dashboard_stop.set()
 
-        # Print swarm dashboard after parallel run completes (B7)
+        # Print final swarm dashboard after parallel run completes (B7)
         _print_swarm_dashboard(project_dir, batch_num, ready)
         engine.clear_worker_statuses(project_dir)
 
         # Process verdicts sequentially (corrections/retries run sequentially)
         batch_summary = {}
         for milestone, result in parallel_results:
-            print(f"[Boris] TURBO: Checking verdict for {milestone.id}...", flush=True)
-            verdict_result = engine.check(result, milestone)
-            logger.info(
-                "TURBO milestone %s verdict: %s - %s",
-                milestone.id,
-                verdict_result.verdict.value,
-                verdict_result.reason,
-            )
-
-            _process_milestone_verdict(
-                verdict_result, result, milestone, plan, st,
-                project_dir, args, logger, prompt_map[milestone.id]
-            )
+            try:
+                print(f"[Boris] TURBO: Checking verdict for {milestone.id}...", flush=True)
+                verdict_result = engine.check(result, milestone)
+                logger.info(
+                    "TURBO milestone %s verdict: %s - %s",
+                    milestone.id,
+                    verdict_result.verdict.value,
+                    verdict_result.reason,
+                )
+
+                _process_milestone_verdict(
+                    verdict_result, result, milestone, plan, st,
+                    project_dir, args, logger, prompt_map[milestone.id]
+                )
+            except Exception as e:
+                print(f"[Boris] TURBO: Error processing verdict for {milestone.id}: {e}", flush=True)
+                logger.error("Verdict processing error for %s: %s", milestone.id, e)
+                milestone.status = "skipped"
+                state_module.save(st)
             batch_summary[milestone.id] = milestone.status.upper()
 
+        # Safety net: any milestone still in_progress after batch must be marked skipped
+        for m in ready:
+            if m.status == "in_progress":
+                print(f"[Boris] TURBO: {m.id} still in_progress after batch - marking skipped", flush=True)
+                logger.warning("Milestone %s stuck in_progress after batch, marking skipped", m.id)
+                m.status = "skipped"
+                batch_summary[m.id] = "SKIPPED"
+                state_module.save(st)
+
         # Convergence phase: reconcile type conflicts from parallel workers
         if not getattr(args, 'no_converge', False):
            completed_in_batch = [m for m, r in parallel_results if m.status == "completed"]
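The verdict loop above gains two layers of failure handling: each milestone's verdict is processed inside its own try/except so one bad result cannot abort the rest of the batch, and a final sweep marks anything still in_progress as skipped. A rough standalone sketch of that structure, using a hypothetical Milestone class and process() function in place of engine.check and _process_milestone_verdict:

from dataclasses import dataclass

@dataclass
class Milestone:                  # hypothetical stand-in for Boris's milestone objects
    id: str
    status: str = "in_progress"

def process(m: Milestone) -> None:  # stand-in for the verdict check and handling
    if m.id == "M2":
        raise RuntimeError("verdict parse error")
    m.status = "completed"

batch = [Milestone("M1"), Milestone("M2"), Milestone("M3")]
summary = {}

for m in batch:
    try:
        process(m)
    except Exception as e:
        print(f"error processing {m.id}: {e} - marking skipped")
        m.status = "skipped"         # failure is contained; the loop continues
    summary[m.id] = m.status.upper()

# Safety net: nothing should leave the batch still in_progress.
for m in batch:
    if m.status == "in_progress":
        m.status = "skipped"
        summary[m.id] = "SKIPPED"

print(summary)   # {'M1': 'COMPLETED', 'M2': 'SKIPPED', 'M3': 'COMPLETED'}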
borisxdave-0.3.4.dist-info/METADATA → borisxdave-0.3.6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: borisxdave
-Version: 0.3.4
+Version: 0.3.6
 Summary: Boris - Autonomous Project Orchestrator
 Requires-Python: >=3.8
 Dynamic: requires-python
borisxdave-0.3.4.dist-info/RECORD → borisxdave-0.3.6.dist-info/RECORD CHANGED
@@ -1,14 +1,14 @@
-boris.py,sha256=x758NOe-Gl-57nYlfgMyEyPNfqf0RdfSSjeNVZFak9I,63834
+boris.py,sha256=bBQtt-_sPC7dYX99knZR7GmDZVojqPUlQM87M3xcom4,66784
 boris_prompt_data.py,sha256=ZBvWMrQOBrl07cNFzgeGumJ54cYg0Be9RSSnK6a3YQY,7940
 config.py,sha256=KfFKyCGasdm1yBvIRFv-ykzA_oRo-zu1Euu9YC7V1Cg,324
-engine.py,sha256=Pdu0i4XrNxiU246EV8MjXvYp9CBvuJWGLA18QMIYvFM,37468
+engine.py,sha256=QGwPdbaekYkT2XuANO26eWMO7bDuJn-5olayooR-hkI,38577
 file_lock.py,sha256=1YriAAayVy8YFe7JFuGIloiJWWvN2FSY0Ry1sB043Sc,4823
 git_manager.py,sha256=BuuTT4naPb5-jLhOik1xHM2ztzuKvJ_bnecZmlYgwFs,8493
 planner.py,sha256=UrU--kBvzvyD1gOVxIn-kdbJiu8tt4rcowsln66WkGw,5670
 prompts.py,sha256=-eSwZ-oTBR12Wx4Md57sVF816T9vHEFlMsvT4zMkwOg,35187
 state.py,sha256=2DCPlcM7SBlCkwWvcnIabltcduv74W46FZ7DxKurWkw,5752
-borisxdave-0.3.4.dist-info/METADATA,sha256=2xBFOjfdS4w_Tlb5MrdRqFs6rJ4YBWCBvnXhitrWC9I,175
-borisxdave-0.3.4.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-borisxdave-0.3.4.dist-info/entry_points.txt,sha256=a6FLWgxiQjGMJIRSV5sDxaaaaQchunm04ZuzX8N7-6I,61
-borisxdave-0.3.4.dist-info/top_level.txt,sha256=C3fTm1vt0QEQyJtvSZiFiOvmR4d0hWmmr6hujJqFrQE,82
-borisxdave-0.3.4.dist-info/RECORD,,
+borisxdave-0.3.6.dist-info/METADATA,sha256=NmCdAb7IuHdrY71AXiQ1_AhNYNPdXClzYRvJBhOVqJU,175
+borisxdave-0.3.6.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+borisxdave-0.3.6.dist-info/entry_points.txt,sha256=a6FLWgxiQjGMJIRSV5sDxaaaaQchunm04ZuzX8N7-6I,61
+borisxdave-0.3.6.dist-info/top_level.txt,sha256=C3fTm1vt0QEQyJtvSZiFiOvmR4d0hWmmr6hujJqFrQE,82
+borisxdave-0.3.6.dist-info/RECORD,,
engine.py CHANGED
@@ -609,19 +609,27 @@ def run_parallel(tasks: list, project_dir: str, max_iterations: int = None,
         with concurrent.futures.ThreadPoolExecutor(max_workers=len(tasks)) as executor:
             futures = {executor.submit(_run_one_worktree, t): t for t in tasks}
             for future in concurrent.futures.as_completed(futures):
-                milestone, result = future.result()
-                # Merge worktree back if it was used
-                wt_info = worktree_map.get(milestone.id)
-                if wt_info and result.resolved:
-                    wt_path, branch = wt_info
-                    merge_ok = _merge_worktree(project_dir, wt_path, branch, milestone.id)
-                    if not merge_ok:
-                        print(f" [Boris] WARNING: Merge conflict for {milestone.id} worktree", flush=True)
-                        logger.warning("Worktree merge conflict for %s", milestone.id)
-                elif wt_info:
-                    # Failed milestone - just clean up worktree
-                    _cleanup_worktree(project_dir, wt_info[0], wt_info[1])
-                results.append((milestone, result))
+                try:
+                    milestone, result = future.result()
+                    # Merge worktree back if it was used
+                    wt_info = worktree_map.get(milestone.id)
+                    if wt_info and result.resolved:
+                        wt_path, branch = wt_info
+                        merge_ok = _merge_worktree(project_dir, wt_path, branch, milestone.id)
+                        if not merge_ok:
+                            print(f" [Boris] WARNING: Merge conflict for {milestone.id} worktree", flush=True)
+                            logger.warning("Worktree merge conflict for %s", milestone.id)
+                    elif wt_info:
+                        # Failed milestone - just clean up worktree
+                        _cleanup_worktree(project_dir, wt_info[0], wt_info[1])
+                    results.append((milestone, result))
+                except Exception as e:
+                    # Worker crashed - return a failed result
+                    _, crashed_milestone = futures[future]
+                    print(f" [Boris] Worker for {crashed_milestone.id} crashed: {e}", flush=True)
+                    logger.error("Worker crashed for %s: %s", crashed_milestone.id, e)
+                    failed_result = ExecutionResult(output=f"Worker crashed: {e}", resolved=False, exit_code=1)
+                    results.append((crashed_milestone, failed_result))
 
     else:
         # No isolation or single task - original behavior
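The key change in run_parallel is that future.result() is no longer allowed to propagate: a worker exception is caught, mapped back to its task through the futures dict, and turned into a failed result so every task still appears in the batch accounting. A minimal sketch of that pattern with concurrent.futures; the task tuple and run_task function are illustrative, not Boris's actual API:

import concurrent.futures

def run_task(task):                      # illustrative worker; the real ones are _run_one / _run_one_worktree
    name, should_fail = task
    if should_fail:
        raise RuntimeError(f"{name} crashed")
    return name, "ok"

tasks = [("alpha", False), ("beta", True), ("gamma", False)]
results = []

with concurrent.futures.ThreadPoolExecutor(max_workers=len(tasks)) as executor:
    futures = {executor.submit(run_task, t): t for t in tasks}
    for future in concurrent.futures.as_completed(futures):
        try:
            results.append(future.result())
        except Exception as e:
            # Map the crashed future back to its task and record a failed result
            name, _ = futures[future]
            results.append((name, f"failed: {e}"))

print(sorted(results))   # every task appears exactly once, crashed or not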
@@ -633,7 +641,14 @@
         with concurrent.futures.ThreadPoolExecutor(max_workers=len(tasks)) as executor:
             futures = {executor.submit(_run_one, t): t for t in tasks}
             for future in concurrent.futures.as_completed(futures):
-                results.append(future.result())
+                try:
+                    results.append(future.result())
+                except Exception as e:
+                    _, crashed_milestone = futures[future]
+                    print(f" [Boris] Worker for {crashed_milestone.id} crashed: {e}", flush=True)
+                    logger.error("Worker crashed for %s: %s", crashed_milestone.id, e)
+                    failed_result = ExecutionResult(output=f"Worker crashed: {e}", resolved=False, exit_code=1)
+                    results.append((crashed_milestone, failed_result))
 
     return results