borisxdave 0.2.0__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: borisxdave
3
- Version: 0.2.0
3
+ Version: 0.3.1
4
4
  Summary: Boris - Autonomous Project Orchestrator
5
5
  Requires-Python: >=3.8
6
6
 
@@ -0,0 +1,14 @@
1
+ boris.py,sha256=VwVjetw6Tfs397btdCDRuM_iRlPI5XSzqfalqlwSLlA,62940
2
+ config.py,sha256=KfFKyCGasdm1yBvIRFv-ykzA_oRo-zu1Euu9YC7V1Cg,324
3
+ engine.py,sha256=Pdu0i4XrNxiU246EV8MjXvYp9CBvuJWGLA18QMIYvFM,37468
4
+ file_lock.py,sha256=1YriAAayVy8YFe7JFuGIloiJWWvN2FSY0Ry1sB043Sc,4823
5
+ git_manager.py,sha256=BuuTT4naPb5-jLhOik1xHM2ztzuKvJ_bnecZmlYgwFs,8493
6
+ planner.py,sha256=UrU--kBvzvyD1gOVxIn-kdbJiu8tt4rcowsln66WkGw,5670
7
+ prompts.py,sha256=Sln8ukCby2gWcs_U3ru4YSXCTWI5MgkI4WB4ONLIyWk,34779
8
+ state.py,sha256=2DCPlcM7SBlCkwWvcnIabltcduv74W46FZ7DxKurWkw,5752
9
+ Users/david/AppData/Local/Programs/Python/Python313/Lib/site-packages/boris_prompt.md,sha256=W8bQP4c-iLLtxSsscIxbjXI2PlWTNbOrq05UGp9mLWs,7839
10
+ borisxdave-0.3.1.dist-info/METADATA,sha256=1Q8uBCfA2BpHdmgK-6kwR58ESpChi2JcTsRoLbz2MoU,133
11
+ borisxdave-0.3.1.dist-info/WHEEL,sha256=hPN0AlP2dZM_3ZJZWP4WooepkmU9wzjGgCLCeFjkHLA,92
12
+ borisxdave-0.3.1.dist-info/entry_points.txt,sha256=a6FLWgxiQjGMJIRSV5sDxaaaaQchunm04ZuzX8N7-6I,61
13
+ borisxdave-0.3.1.dist-info/top_level.txt,sha256=GSKxzJ_M15C-hpRGaC1C5pusFxA1JIaxaSHYaLg4rQc,64
14
+ borisxdave-0.3.1.dist-info/RECORD,,
@@ -1,2 +1,3 @@
1
1
  [console_scripts]
2
2
  boris = boris:main
3
+ borisxdave = boris:main
@@ -1,6 +1,7 @@
1
1
  boris
2
2
  config
3
3
  engine
4
+ file_lock
4
5
  git_manager
5
6
  planner
6
7
  prompts
engine.py CHANGED
@@ -1,5 +1,7 @@
1
1
  """Boris engine - execution and monitoring (merged from executor + monitor)."""
2
+ import concurrent.futures
2
3
  import enum
4
+ import json
3
5
  import logging
4
6
  import os
5
7
  import re
@@ -7,8 +9,10 @@ import shutil
7
9
  import subprocess
8
10
  import sys
9
11
  import tempfile
12
+ import time
10
13
  from dataclasses import dataclass
11
14
  from datetime import datetime
15
+ from pathlib import Path
12
16
  from typing import Optional
13
17
 
14
18
  # Force unbuffered stdout for real-time output on Windows
@@ -61,6 +65,49 @@ def _clean_output(text: str) -> str:
61
65
  return text
62
66
 
63
67
 
68
+ # --- Worker Status (B7: Swarm Dashboard) ---
69
+
70
+
71
+ def _write_worker_status(project_dir: str, milestone_id: str, status: dict):
72
+ """Write worker status to .boris/workers/ for the swarm dashboard."""
73
+ try:
74
+ status_dir = Path(project_dir) / ".boris" / "workers"
75
+ status_dir.mkdir(parents=True, exist_ok=True)
76
+ status_file = status_dir / f"{milestone_id}.json"
77
+ status["updated_at"] = time.time()
78
+ status_file.write_text(json.dumps(status, indent=2), encoding="utf-8")
79
+ except OSError:
80
+ pass # Non-critical: dashboard is informational only
81
+
82
+
83
def read_worker_statuses(project_dir: str) -> dict:
    """Read all worker status files from .boris/workers/. Returns {milestone_id: status_dict}."""
    workers_dir = Path(project_dir) / ".boris" / "workers"
    if not workers_dir.exists():
        return {}
    result = {}
    for path in workers_dir.glob("*.json"):
        try:
            # Filename stem doubles as the milestone ID.
            result[path.stem] = json.loads(path.read_text(encoding="utf-8"))
        except (json.JSONDecodeError, OSError):
            # Best-effort read: skip partially-written or unreadable files.
            pass
    return result
97
+
98
+
99
def clear_worker_statuses(project_dir: str):
    """Remove all worker status files (call after a batch completes).

    Best-effort cleanup: files that vanish concurrently or cannot be removed
    are ignored, since the dashboard is informational only.
    """
    status_dir = Path(project_dir) / ".boris" / "workers"
    if not status_dir.exists():
        return
    for status_file in status_dir.glob("*.json"):
        try:
            status_file.unlink()
        except OSError:
            # FileNotFoundError is a subclass of OSError, so one handler
            # covers both a racing delete and permission errors.
            pass
109
+
110
+
64
111
  # --- Execution (from executor.py) ---
65
112
 
66
113
 
@@ -245,6 +292,17 @@ def run(prompt: str, project_dir: str, max_iterations: int = None,
245
292
 
246
293
  print(f" [Boris] Spawning DaveLoop: max_iter={max_iter}, project={project_dir}", flush=True)
247
294
  logger.info("Spawning DaveLoop: max_iter=%d, project=%s", max_iter, project_dir)
295
+
296
+ # Write initial worker status for dashboard (B7)
297
+ if milestone:
298
+ _write_worker_status(project_dir, milestone.id, {
299
+ "milestone_id": milestone.id,
300
+ "title": milestone.title,
301
+ "state": "starting",
302
+ "started_at": time.time(),
303
+ "actions": 0,
304
+ "interrupts": 0,
305
+ })
248
306
  logger.debug("Prompt length: %d chars", len(prompt))
249
307
 
250
308
  # Boris's own log for this execution
@@ -279,6 +337,14 @@ def run(prompt: str, project_dir: str, max_iterations: int = None,
279
337
  all_accomplishments = [] # cumulative for the whole run
280
338
  interrupt_count = 0
281
339
  MAX_INTERRUPTS = 3 # After 3 interrupts, let DaveLoop finish and fail at verdict
340
+ # Off-rail detection is suppressed during prompt echo phase.
341
+ # DaveLoop echoes/processes the prompt at startup, which contains sibling
342
+ # milestone IDs (from the PARALLEL EXECUTION WARNING section). Without this
343
+ # guard, _check_off_rail() fires false positives on the prompt's own text.
344
+ # We suppress until DaveLoop starts actual work (first reasoning block) or
345
+ # after a generous line threshold.
346
+ offrail_active = False
347
+ OFFRAIL_WARMUP_LINES = 80 # Lines before off-rail activates even without reasoning
282
348
 
283
349
  for raw_line in process.stdout:
284
350
  line = raw_line.decode("utf-8", errors="replace")
@@ -297,10 +363,17 @@ def run(prompt: str, project_dir: str, max_iterations: int = None,
297
363
  accomplishments.append(acc)
298
364
  all_accomplishments.append(acc)
299
365
 
366
+ # Activate off-rail detection after warmup threshold (prompt echo complete)
367
+ if not offrail_active and len(output_lines) >= OFFRAIL_WARMUP_LINES:
368
+ offrail_active = True
369
+
300
370
  # --- Reasoning block detection ---
301
371
  if "REASONING" in clean and ("===" in clean or "---" in clean or "KNOWN" in clean):
302
372
  in_reasoning = True
303
373
  reasoning_lines = []
374
+ # First reasoning block means DaveLoop is doing real work - activate off-rail
375
+ if not offrail_active:
376
+ offrail_active = True
304
377
  continue
305
378
 
306
379
  if in_reasoning:
@@ -316,14 +389,26 @@ def run(prompt: str, project_dir: str, max_iterations: int = None,
316
389
  reasoning[key] = rl.split(":", 1)[1].strip()
317
390
  if reasoning:
318
391
  _boris_commentary(reasoning, reasoning_count, accomplishments)
392
+ # Update worker status for dashboard (B7)
393
+ if milestone:
394
+ _write_worker_status(project_dir, milestone.id, {
395
+ "milestone_id": milestone.id,
396
+ "title": milestone.title,
397
+ "state": "working",
398
+ "started_at": time.time(),
399
+ "reasoning_blocks": reasoning_count,
400
+ "actions": len(all_accomplishments),
401
+ "interrupts": interrupt_count,
402
+ "last_action": all_accomplishments[-1] if all_accomplishments else None,
403
+ })
319
404
  # Reset per-block accomplishments, keep cumulative
320
405
  accomplishments = []
321
406
  reasoning_lines = []
322
407
  else:
323
408
  reasoning_lines.append(clean)
324
409
 
325
- # --- Off-rail detection ---
326
- if milestone and interrupt_count < MAX_INTERRUPTS:
410
+ # --- Off-rail detection (suppressed during prompt echo phase) ---
411
+ if milestone and interrupt_count < MAX_INTERRUPTS and offrail_active:
327
412
  interrupt_msg = _check_off_rail(clean, milestone)
328
413
  if interrupt_msg:
329
414
  _send_interrupt(process, interrupt_msg, boris_log)
@@ -331,12 +416,29 @@ def run(prompt: str, project_dir: str, max_iterations: int = None,
331
416
  if interrupt_count >= MAX_INTERRUPTS:
332
417
  warn = (
333
418
  f"[Boris] Sent {MAX_INTERRUPTS} interrupts. "
334
- f"DaveLoop keeps going off-rail. Will check at verdict."
419
+ f"DaveLoop keeps going off-rail. Terminating process."
335
420
  )
336
421
  print(f"\n {warn}\n", flush=True)
337
- logger.warning(warn)
422
+ logger.warning("Terminating DaveLoop process after %d ignored interrupts", MAX_INTERRUPTS)
338
423
  if boris_log:
339
424
  boris_log.write(f"\n{warn}\n")
425
+ # Hard kill: terminate the process since interrupts are being ignored
426
+ process.terminate()
427
+ try:
428
+ process.wait(timeout=10)
429
+ except subprocess.TimeoutExpired:
430
+ process.kill()
431
+ process.wait(timeout=5)
432
+ output = "".join(output_lines)
433
+ boris_log.write(f"\n=== DaveLoop FORCE KILLED after {MAX_INTERRUPTS} ignored interrupts ===\n")
434
+ boris_log.close()
435
+ boris_log = None # prevent double-close in finally
436
+ return ExecutionResult(
437
+ output=output,
438
+ exit_code=-1,
439
+ resolved=False,
440
+ log_path=log_path,
441
+ )
340
442
 
341
443
  process.wait()
342
444
  output = "".join(output_lines)
@@ -413,6 +515,129 @@ def _setup_log(project_dir: str) -> str:
413
515
  return os.path.join(_LOGS_DIR, f"boris_exec_{timestamp}.log")
414
516
 
415
517
 
518
def _create_worktree(project_dir: str, milestone_id: str) -> Optional[tuple]:
    """Create a git worktree for a milestone.

    Args:
        project_dir: Root of the git repository.
        milestone_id: Milestone identifier; used for both the worktree
            directory name and the branch name.

    Returns:
        (worktree_path, branch_name) on success, or None when the worktree
        cannot be created (branch exists, git missing, timeout, ...).
        Annotated Optional[tuple] because the failure path returns None.
    """
    worktree_path = os.path.join(project_dir, f".boris_worktree_{milestone_id}")
    branch_name = f"boris/{milestone_id}"
    try:
        result = subprocess.run(
            ["git", "worktree", "add", "-b", branch_name, worktree_path],
            cwd=project_dir, capture_output=True, timeout=30,
            encoding="utf-8", errors="replace",
        )
        if result.returncode == 0:
            logger.info("Created worktree for %s at %s", milestone_id, worktree_path)
            return (worktree_path, branch_name)
        logger.warning("Failed to create worktree for %s: %s", milestone_id, result.stderr.strip())
        return None
    except (subprocess.SubprocessError, OSError) as e:
        logger.warning("Worktree creation error for %s: %s", milestone_id, e)
        return None
537
+
538
+
539
def _merge_worktree(project_dir: str, worktree_path: str, branch_name: str, milestone_id: str) -> bool:
    """Merge a worktree branch back into the current branch and clean up. Returns success."""
    try:
        merged = subprocess.run(
            ["git", "merge", branch_name, "--no-edit", "-m",
             f"Merge boris/{milestone_id} worktree back"],
            cwd=project_dir, capture_output=True, timeout=60,
            encoding="utf-8", errors="replace",
        )
        if merged.returncode == 0:
            return True
        logger.warning("Merge failed for %s: %s", milestone_id, merged.stderr.strip())
        # A failed merge can leave the repo mid-merge; abort to restore it.
        subprocess.run(["git", "merge", "--abort"], cwd=project_dir,
                       capture_output=True, timeout=10)
        return False
    except (subprocess.SubprocessError, OSError) as e:
        logger.warning("Merge error for %s: %s", milestone_id, e)
        return False
    finally:
        # Whether the merge succeeded or not, the worktree and its branch
        # are always removed.
        _cleanup_worktree(project_dir, worktree_path, branch_name)
561
+
562
+
563
+ def _cleanup_worktree(project_dir: str, worktree_path: str, branch_name: str):
564
+ """Remove a git worktree and its branch."""
565
+ try:
566
+ subprocess.run(["git", "worktree", "remove", worktree_path, "--force"],
567
+ cwd=project_dir, capture_output=True, timeout=30)
568
+ except (subprocess.SubprocessError, OSError):
569
+ pass
570
+ try:
571
+ subprocess.run(["git", "branch", "-D", branch_name],
572
+ cwd=project_dir, capture_output=True, timeout=10)
573
+ except (subprocess.SubprocessError, OSError):
574
+ pass
575
+
576
+
577
def run_parallel(tasks: list, project_dir: str, max_iterations: int = None,
                 isolation: str = "none") -> list:
    """Run multiple DaveLoop instances in parallel using ThreadPoolExecutor.

    Args:
        tasks: List of (prompt, milestone) tuples.
        project_dir: Working directory for the project.
        max_iterations: Max DaveLoop iterations per milestone.
        isolation: Isolation strategy - "none" (shared dir), "worktree" (git worktrees).

    Returns:
        List of (milestone, ExecutionResult) tuples, one per input task.
    """
    # Guard: ThreadPoolExecutor raises ValueError for max_workers=0, so an
    # empty batch must short-circuit before any executor is created.
    if not tasks:
        return []

    results = []

    if isolation == "worktree" and len(tasks) > 1:
        # One worktree per task; tasks whose worktree creation fails fall
        # back to running in the shared project directory.
        worktree_map = {}  # milestone_id -> (worktree_path, branch_name)
        for prompt, milestone in tasks:
            wt = _create_worktree(project_dir, milestone.id)
            if wt:
                worktree_map[milestone.id] = wt
            else:
                logger.warning("Worktree failed for %s, falling back to shared dir", milestone.id)

        def _run_one_worktree(prompt_milestone):
            # Execute a single milestone in its worktree (or shared dir fallback).
            prompt, milestone = prompt_milestone
            wt_info = worktree_map.get(milestone.id)
            work_dir = wt_info[0] if wt_info else project_dir
            result = run(prompt, work_dir, max_iterations, milestone=milestone)
            return (milestone, result)

        with concurrent.futures.ThreadPoolExecutor(max_workers=len(tasks)) as executor:
            futures = {executor.submit(_run_one_worktree, t): t for t in tasks}
            for future in concurrent.futures.as_completed(futures):
                milestone, result = future.result()
                # Merge the worktree back only when the milestone resolved.
                wt_info = worktree_map.get(milestone.id)
                if wt_info and result.resolved:
                    wt_path, branch = wt_info
                    merge_ok = _merge_worktree(project_dir, wt_path, branch, milestone.id)
                    if not merge_ok:
                        print(f" [Boris] WARNING: Merge conflict for {milestone.id} worktree", flush=True)
                        logger.warning("Worktree merge conflict for %s", milestone.id)
                elif wt_info:
                    # Failed milestone - just clean up worktree, discard branch.
                    _cleanup_worktree(project_dir, wt_info[0], wt_info[1])
                results.append((milestone, result))

    else:
        # No isolation or single task - all workers share project_dir.
        def _run_one(prompt_milestone):
            prompt, milestone = prompt_milestone
            result = run(prompt, project_dir, max_iterations, milestone=milestone)
            return (milestone, result)

        with concurrent.futures.ThreadPoolExecutor(max_workers=len(tasks)) as executor:
            futures = {executor.submit(_run_one, t): t for t in tasks}
            for future in concurrent.futures.as_completed(futures):
                results.append(future.result())

    return results
639
+
640
+
416
641
  # --- Monitoring (from monitor.py) ---
417
642
 
418
643
 
file_lock.py ADDED
@@ -0,0 +1,123 @@
1
+ """File-level locking for parallel swarm workers.
2
+
3
+ Prevents parallel DaveLoop agents from corrupting shared files by providing
4
+ file-level locks via atomic file creation. Works on both Windows and Unix.
5
+ """
6
+ import json
7
+ import os
8
+ import time
9
+ from contextlib import contextmanager
10
+ from pathlib import Path
11
+
12
+
13
class FileLockManager:
    """Manages file-level locks for parallel swarm workers.

    Lock state is stored in .boris/locks/ in the project directory.
    Each lock is an atomic file recording: owner (milestone ID), timestamp, file path.
    """

    def __init__(self, project_dir: str):
        # All lock files live in a single flat directory under the project root.
        self.lock_dir = Path(project_dir) / ".boris" / "locks"
        self.lock_dir.mkdir(parents=True, exist_ok=True)

    def _lock_path(self, filepath: str) -> Path:
        """Get the lock file path for a given source file.

        NOTE(review): flattening separators to underscores can collide
        (e.g. "a/b.py" and "a_b.py" map to the same lock name); acceptable
        for advisory locking, but worth confirming with callers.
        """
        normalized = os.path.normpath(filepath)
        # Replace path separators with underscores for flat lock directory
        safe_name = normalized.replace(os.sep, "_").replace("/", "_").replace("\\", "_")
        return self.lock_dir / f"{safe_name}.lock"

    @contextmanager
    def lock_file(self, filepath: str, owner: str, timeout: int = 30):
        """Acquire a lock on a file. Blocks until available or timeout.

        Args:
            filepath: The file to lock (relative or absolute path).
            owner: Identifier for the lock owner (e.g. milestone ID).
            timeout: Max seconds to wait for the lock.

        Raises:
            TimeoutError: If the lock cannot be acquired within timeout.
        """
        lock_path = self._lock_path(filepath)
        start = time.time()

        while True:
            try:
                # Atomic create-or-fail: 'x' mode fails if the file exists.
                # A context manager guarantees the descriptor is closed even
                # if the write itself raises (the bare open/close leaked it).
                with open(lock_path, "x", encoding="utf-8") as fd:
                    fd.write(json.dumps({
                        "owner": owner,
                        "file": filepath,
                        "time": time.time(),
                    }))
                break
            except FileExistsError:
                if time.time() - start > timeout:
                    # Read who holds the lock for better error messages
                    try:
                        holder = json.loads(lock_path.read_text(encoding="utf-8"))
                        holder_info = f" (held by {holder.get('owner', 'unknown')})"
                    except Exception:
                        holder_info = ""
                    # The FileExistsError is an implementation detail; suppress
                    # it so callers see a clean TimeoutError.
                    raise TimeoutError(
                        f"Could not acquire lock on {filepath}{holder_info} "
                        f"after {timeout}s"
                    ) from None
                time.sleep(0.5)

        try:
            yield
        finally:
            # Always release, even if the guarded block raised. The lock may
            # already be gone if release_all()/cleanup() raced us.
            try:
                lock_path.unlink()
            except FileNotFoundError:
                pass

    def get_locked_files(self) -> dict:
        """Return dict of currently locked files and their owners."""
        locks = {}
        for lock_file in self.lock_dir.glob("*.lock"):
            try:
                data = json.loads(lock_file.read_text(encoding="utf-8"))
                # Fall back to reconstructing the path from the lock name when
                # the payload lacks the original file path.
                original_file = data.get("file", lock_file.stem.replace("_", os.sep))
                locks[original_file] = data.get("owner", "unknown")
            except (json.JSONDecodeError, OSError):
                # dict.get never raises KeyError, so only decode/IO errors
                # are possible here; skip unreadable lock files.
                pass
        return locks

    def is_locked(self, filepath: str) -> bool:
        """Check if a file is currently locked."""
        return self._lock_path(filepath).exists()

    def lock_owner(self, filepath: str) -> str:
        """Return the owner of the lock on a file, or None if unlocked."""
        lock_path = self._lock_path(filepath)
        if not lock_path.exists():
            return None
        try:
            return json.loads(lock_path.read_text(encoding="utf-8")).get("owner")
        except (json.JSONDecodeError, OSError):
            return None

    def release_all(self, owner: str):
        """Release all locks held by a specific owner (milestone cleanup)."""
        for lock_file in self.lock_dir.glob("*.lock"):
            try:
                data = json.loads(lock_file.read_text(encoding="utf-8"))
                if data.get("owner") == owner:
                    lock_file.unlink()
            except (json.JSONDecodeError, OSError):
                # FileNotFoundError is an OSError subclass; one handler suffices.
                pass

    def cleanup(self):
        """Remove all lock files (use after all workers complete)."""
        for lock_file in self.lock_dir.glob("*.lock"):
            try:
                lock_file.unlink()
            except OSError:
                # FileNotFoundError is an OSError subclass; one handler suffices.
                pass
planner.py CHANGED
@@ -43,7 +43,7 @@ def create_plan(task: str, project_dir: str) -> Plan:
43
43
 
44
44
  try:
45
45
  result = subprocess.run(
46
- [config.CLAUDE_CMD, "-p"],
46
+ [config.CLAUDE_CMD, "-p", "--output-format", "text"],
47
47
  input=prompt,
48
48
  capture_output=True,
49
49
  text=True,
@@ -63,6 +63,14 @@ def create_plan(task: str, project_dir: str) -> Plan:
63
63
  response = re.sub(r"\n?```\s*$", "", response)
64
64
  response = response.strip()
65
65
 
66
+ # Try to extract JSON array from response if it contains extra text
67
+ if not response.startswith("["):
68
+ match = re.search(r"\[.*\]", response, re.DOTALL)
69
+ if match:
70
+ response = match.group(0)
71
+ else:
72
+ logger.error("Claude response (no JSON found): %s", response[:500])
73
+
66
74
  milestones_data = json.loads(response)
67
75
 
68
76
  milestones = [