monoco-toolkit 0.3.1__py3-none-any.whl → 0.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. monoco/core/config.py +35 -0
  2. monoco/core/integrations.py +0 -6
  3. monoco/core/resources/en/AGENTS.md +25 -0
  4. monoco/core/resources/en/SKILL.md +32 -1
  5. monoco/core/resources/zh/AGENTS.md +25 -0
  6. monoco/core/resources/zh/SKILL.md +32 -0
  7. monoco/core/sync.py +6 -19
  8. monoco/features/i18n/core.py +31 -11
  9. monoco/features/issue/commands.py +24 -1
  10. monoco/features/issue/core.py +90 -39
  11. monoco/features/issue/domain/models.py +1 -0
  12. monoco/features/issue/domain_commands.py +47 -0
  13. monoco/features/issue/domain_service.py +69 -0
  14. monoco/features/issue/linter.py +119 -11
  15. monoco/features/issue/validator.py +47 -0
  16. monoco/features/scheduler/__init__.py +19 -0
  17. monoco/features/scheduler/cli.py +204 -0
  18. monoco/features/scheduler/config.py +32 -0
  19. monoco/features/scheduler/defaults.py +54 -0
  20. monoco/features/scheduler/manager.py +49 -0
  21. monoco/features/scheduler/models.py +24 -0
  22. monoco/features/scheduler/reliability.py +99 -0
  23. monoco/features/scheduler/session.py +87 -0
  24. monoco/features/scheduler/worker.py +129 -0
  25. monoco/main.py +4 -0
  26. {monoco_toolkit-0.3.1.dist-info → monoco_toolkit-0.3.3.dist-info}/METADATA +1 -1
  27. {monoco_toolkit-0.3.1.dist-info → monoco_toolkit-0.3.3.dist-info}/RECORD +30 -24
  28. monoco/core/agent/__init__.py +0 -3
  29. monoco/core/agent/action.py +0 -168
  30. monoco/core/agent/adapters.py +0 -133
  31. monoco/core/agent/protocol.py +0 -32
  32. monoco/core/agent/state.py +0 -106
  33. {monoco_toolkit-0.3.1.dist-info → monoco_toolkit-0.3.3.dist-info}/WHEEL +0 -0
  34. {monoco_toolkit-0.3.1.dist-info → monoco_toolkit-0.3.3.dist-info}/entry_points.txt +0 -0
  35. {monoco_toolkit-0.3.1.dist-info → monoco_toolkit-0.3.3.dist-info}/licenses/LICENSE +0 -0
monoco/features/issue/domain_service.py
@@ -0,0 +1,69 @@
+ from typing import Dict, Optional, Set
+ from monoco.core.config import get_config, DomainConfig
+
+
+ class DomainService:
+     """
+     Service for managing domain ontology, aliases, and validation.
+     """
+
+     def __init__(self, config: Optional[DomainConfig] = None):
+         self.config = config or get_config().domains
+         self._alias_map: Dict[str, str] = {}
+         self._canonical_domains: Set[str] = set()
+         self._build_index()
+
+     def _build_index(self):
+         self._alias_map.clear()
+         self._canonical_domains.clear()
+
+         for item in self.config.items:
+             self._canonical_domains.add(item.name)
+             for alias in item.aliases:
+                 self._alias_map[alias] = item.name
+
+     def reload(self):
+         """Re-fetch the configuration and rebuild the alias index."""
+         # get_config() typically returns a cached singleton, so the config stored in
+         # __init__ may be stale; fetch it again before rebuilding the index.
+         self.config = get_config().domains
+         self._build_index()
+
+     def is_defined(self, domain: str) -> bool:
+         """Check if domain is known (canonical or alias)."""
+         return domain in self._canonical_domains or domain in self._alias_map
+
+     def is_canonical(self, domain: str) -> bool:
+         """Check if domain is a canonical name."""
+         return domain in self._canonical_domains
+
+     def is_alias(self, domain: str) -> bool:
+         """Check if domain is a known alias."""
+         return domain in self._alias_map
+
+     def get_canonical(self, domain: str) -> Optional[str]:
+         """
+         Resolve an alias to its canonical name.
+         Returns the canonical name if the domain is a known alias.
+         Returns None if it is not an alias (it may be canonical or unknown).
+         """
+         return self._alias_map.get(domain)
+
+     def normalize(self, domain: str) -> str:
+         """
+         Normalize a domain: return the canonical name if it is an alias, otherwise return the original.
+         """
+         return self._alias_map.get(domain, domain)
+
+     def suggest_correction(self, domain: str) -> Optional[str]:
+         """
+         Suggest a correction for an unknown domain (fuzzy matching).
+         """
+         # Fuzzy matching (substring checks or Levenshtein distance) is not implemented
+         # yet, since it would require an extra dependency or non-trivial logic.
+         return None
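
For context on how the new DomainService is meant to be consumed by the linter and validator changes below, here is a minimal usage sketch. The ontology values ('frontend', 'fe') are invented for illustration; only the method names and the items/aliases config shape come from the code above.

    from monoco.features.issue.domain_service import DomainService

    service = DomainService()  # defaults to get_config().domains

    # Assuming an ontology where 'frontend' is canonical and 'fe' is one of its aliases:
    service.is_defined("fe")       # True (known alias)
    service.is_canonical("fe")     # False
    service.get_canonical("fe")    # "frontend"; returns None for canonical or unknown names
    service.normalize("fe")        # "frontend"
    service.normalize("frontend")  # "frontend" (already canonical, returned unchanged)
    service.normalize("misc")      # "misc" (unknown names pass through unchanged)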
monoco/features/issue/linter.py
@@ -7,7 +7,7 @@ import re
  from monoco.core import git
  from . import core
  from .validator import IssueValidator
- from monoco.core.lsp import Diagnostic, DiagnosticSeverity
+ from monoco.core.lsp import Diagnostic, DiagnosticSeverity, Range, Position

  console = Console()

@@ -68,15 +68,29 @@ def check_integrity(issues_root: Path, recursive: bool = False) -> List[Diagnostic]:
          files.extend(status_dir.rglob("*.md"))

      for f in files:
-         meta = core.parse_issue(f)
-         if meta:
-             local_id = meta.id
-             full_id = f"{project_name}::{local_id}"
-
-             all_issue_ids.add(local_id)
-             all_issue_ids.add(full_id)
-
-             project_issues.append((f, meta))
+         try:
+             meta = core.parse_issue(f, raise_error=True)
+             if meta:
+                 local_id = meta.id
+                 full_id = f"{project_name}::{local_id}"
+
+                 all_issue_ids.add(local_id)
+                 all_issue_ids.add(full_id)
+
+                 project_issues.append((f, meta))
+         except Exception as e:
+             # Report parsing failure as diagnostic
+             d = Diagnostic(
+                 range=Range(
+                     start=Position(line=0, character=0),
+                     end=Position(line=0, character=0),
+                 ),
+                 message=f"Schema Error: {str(e)}",
+                 severity=DiagnosticSeverity.Error,
+                 source="System",
+             )
+             d.data = {"path": f}
+             diagnostics.append(d)
      return project_issues

  from monoco.core.config import get_config
@@ -193,7 +207,7 @@ def run_lint(

      # Parse and validate file
      try:
-         meta = core.parse_issue(file)
+         meta = core.parse_issue(file, raise_error=True)
          if not meta:
              console.print(
                  f"[yellow]Warning:[/yellow] Failed to parse issue metadata from {file_path}. Skipping."
@@ -315,6 +329,36 @@ def run_lint(
                  new_content = "\n".join(lines) + "\n"
                  has_changes = True

+             if (
+                 "Hierarchy Violation" in d.message
+                 and "Epics must have a parent" in d.message
+             ):
+                 try:
+                     fm_match = re.search(
+                         r"^---(.*?)---", new_content, re.DOTALL | re.MULTILINE
+                     )
+                     if fm_match:
+                         import yaml
+
+                         fm_text = fm_match.group(1)
+                         data = yaml.safe_load(fm_text) or {}
+
+                         # Default to EPIC-0000
+                         data["parent"] = "EPIC-0000"
+
+                         new_fm_text = yaml.dump(
+                             data, sort_keys=False, allow_unicode=True
+                         )
+                         # Replace FM block
+                         new_content = new_content.replace(
+                             fm_match.group(1), "\n" + new_fm_text
+                         )
+                         has_changes = True
+                 except Exception as ex:
+                     console.print(
+                         f"[red]Failed to fix parent hierarchy: {ex}[/red]"
+                     )
+
              if "Tag Check: Missing required context tags" in d.message:
                  # Extract missing tags from message
                  # Message format: "Tag Check: Missing required context tags: #TAG1, #TAG2"
@@ -426,6 +470,70 @@ def run_lint(
          except Exception as e:
              console.print(f"[red]Failed to fix domains for {path.name}: {e}[/red]")

+         # Domain Alias Fix
+         try:
+             alias_fixes = [
+                 d for d in current_file_diags if "Domain Alias:" in d.message
+             ]
+             if alias_fixes:
+                 fm_match = re.search(
+                     r"^---(.*?)---", new_content, re.DOTALL | re.MULTILINE
+                 )
+                 if fm_match:
+                     import yaml
+
+                     fm_text = fm_match.group(1)
+                     data = yaml.safe_load(fm_text) or {}
+
+                     domain_changed = False
+                     if "domains" in data and isinstance(data["domains"], list):
+                         domains = data["domains"]
+                         for d in alias_fixes:
+                             # Parse message: Domain Alias: 'alias' is an alias for 'canonical'.
+                             m = re.search(
+                                 r"Domain Alias: '([^']+)' is an alias for '([^']+)'",
+                                 d.message,
+                             )
+                             if m:
+                                 old_d = m.group(1)
+                                 new_d = m.group(2)
+
+                                 if old_d in domains:
+                                     domains = [
+                                         new_d if x == old_d else x for x in domains
+                                     ]
+                                     domain_changed = True
+
+                     if domain_changed:
+                         data["domains"] = domains
+                         new_fm_text = yaml.dump(
+                             data, sort_keys=False, allow_unicode=True
+                         )
+                         new_content = new_content.replace(
+                             fm_match.group(1), "\n" + new_fm_text
+                         )
+                         has_changes = True
+
+                         # The missing-domains fix above writes its changes inside its own
+                         # block, so persist the alias rewrite to disk here as well.
+                         path.write_text(new_content)
+                         if not any(path == p for p in processed_paths):
+                             fixed_count += 1
+                         processed_paths.add(path)
+                         console.print(
+                             f"[dim]Fixed (Domain Alias): {path.name}[/dim]"
+                         )
+
+         except Exception as e:
+             console.print(
+                 f"[red]Failed to fix domain aliases for {path.name}: {e}[/red]"
+             )
+
      console.print(f"[green]Applied auto-fixes to {fixed_count} files.[/green]")

      # Re-run validation to verify
monoco/features/issue/validator.py
@@ -444,6 +444,17 @@ class IssueValidator:
          if not all_ids:
              return diagnostics

+         # Logic: Epics must have a parent (unless it is the Sink Root EPIC-0000)
+         if meta.type == "epic" and meta.id != "EPIC-0000" and not meta.parent:
+             line = self._get_field_line(content, "parent")
+             diagnostics.append(
+                 self._create_diagnostic(
+                     "Hierarchy Violation: Epics must have a parent (e.g., 'EPIC-0000').",
+                     DiagnosticSeverity.Error,
+                     line=line,
+                 )
+             )
+
          if (
              meta.parent
              and meta.parent not in all_ids
@@ -575,6 +586,7 @@ class IssueValidator:
          has_domains_field = False
          lines = content.splitlines()
          in_fm = False
+         field_line = 0
          for i, line_content in enumerate(lines):
              stripped = line_content.strip()
              if stripped == "---":
@@ -585,6 +597,7 @@ class IssueValidator:
              elif in_fm:
                  if stripped.startswith("domains:"):
                      has_domains_field = True
+                     field_line = i
                      break

          # Governance Maturity Check
@@ -607,6 +620,40 @@ class IssueValidator:
                  )
              )

+         # Domain Content Validation
+         from .domain_service import DomainService
+
+         service = DomainService()
+
+         if hasattr(meta, "domains") and meta.domains:
+             for domain in meta.domains:
+                 if service.is_alias(domain):
+                     canonical = service.get_canonical(domain)
+                     diagnostics.append(
+                         self._create_diagnostic(
+                             f"Domain Alias: '{domain}' is an alias for '{canonical}'. Preference: Canonical.",
+                             DiagnosticSeverity.Warning,
+                             line=field_line,
+                         )
+                     )
+                 elif not service.is_defined(domain):
+                     if service.config.strict:
+                         diagnostics.append(
+                             self._create_diagnostic(
+                                 f"Unknown Domain: '{domain}' is not defined in domain ontology.",
+                                 DiagnosticSeverity.Error,
+                                 line=field_line,
+                             )
+                         )
+                     else:
+                         diagnostics.append(
+                             self._create_diagnostic(
+                                 f"Unknown Domain: '{domain}' is not defined in domain ontology.",
+                                 DiagnosticSeverity.Warning,
+                                 line=field_line,
+                             )
+                         )
+
          return diagnostics

      def _validate_checkbox_logic_blocks(
monoco/features/scheduler/__init__.py
@@ -0,0 +1,19 @@
+ from .models import RoleTemplate, SchedulerConfig
+ from .worker import Worker
+ from .config import load_scheduler_config
+ from .defaults import DEFAULT_ROLES
+ from .session import Session, RuntimeSession
+ from .manager import SessionManager
+ from .reliability import ApoptosisManager
+
+ __all__ = [
+     "RoleTemplate",
+     "SchedulerConfig",
+     "Worker",
+     "load_scheduler_config",
+     "DEFAULT_ROLES",
+     "Session",
+     "RuntimeSession",
+     "SessionManager",
+     "ApoptosisManager",
+ ]
monoco/features/scheduler/cli.py
@@ -0,0 +1,204 @@
+ import typer
+ import time
+ from pathlib import Path
+ from typing import Optional
+ from monoco.core.output import print_output
+ from monoco.core.config import get_config
+ from monoco.features.scheduler import SessionManager, load_scheduler_config
+
+ app = typer.Typer(name="agent", help="Manage agent sessions")
+
+
+ @app.command()
+ def run(
+     target: str = typer.Argument(
+         ..., help="Issue ID (e.g. FEAT-101) or a Task Description in quotes."
+     ),
+     role: Optional[str] = typer.Option(
+         None,
+         help="Specific role to use (crafter/builder/auditor). Default: intelligent selection.",
+     ),
+     detach: bool = typer.Option(
+         False, "--detach", "-d", help="Run in background (Daemon)"
+     ),
+     fail: bool = typer.Option(
+         False, "--fail", help="Simulate a crash for testing Apoptosis."
+     ),
+ ):
+     """
+     Start an agent session.
+     - If TARGET is an Issue ID: Work on that issue.
+     - If TARGET is a text description: Create a new issue (Crafter).
+     """
+     settings = get_config()
+     project_root = Path(settings.paths.root).resolve()
+
+     # 1. Smart Intent Recognition
+     import re
+
+     is_id = re.match(r"^[a-zA-Z]+-\d+$", target)
+
+     if is_id:
+         issue_id = target.upper()
+         role_name = role or "builder"
+         description = None
+     else:
+         issue_id = "NEW_TASK"
+         role_name = role or "crafter"
+         description = target
+
+     # 2. Load Roles
+     roles = load_scheduler_config(project_root)
+     selected_role = roles.get(role_name)
+
+     if not selected_role:
+         from monoco.core.output import print_error
+
+         print_error(f"Role '{role_name}' not found. Available: {list(roles.keys())}")
+         raise typer.Exit(code=1)
+
+     print_output(
+         f"Starting Agent Session for '{target}' as {role_name}...",
+         title="Agent Scheduler",
+     )
+
+     # 3. Initialize Session
+     manager = SessionManager()
+     session = manager.create_session(issue_id, selected_role)
+
+     if detach:
+         print_output(
+             "Background mode not fully implemented yet. Running in foreground."
+         )
+
+     try:
+         # Pass description if it's a new task
+         context = {"description": description} if description else None
+
+         if fail:
+             from monoco.core.output import rprint
+
+             rprint("[bold yellow]DEBUG: Simulating immediate crash...[/bold yellow]")
+             session.model.status = "failed"
+         else:
+             session.start(context=context)
+
+         # Monitoring Loop
+         while session.model.status == "running":
+             time.sleep(1)
+
+         if session.model.status == "failed":
+             from monoco.core.output import print_error
+
+             print_error(
+                 f"Session {session.model.id} FAILED. Use 'monoco agent autopsy {session.model.id}' for analysis."
+             )
+         else:
+             print_output(
+                 f"Session finished with status: {session.model.status}",
+                 title="Agent Scheduler",
+             )
+
+     except KeyboardInterrupt:
+         print("\nStopping...")
+         session.terminate()
+         print_output("Session terminated.")
+
+
+ @app.command()
+ def kill(session_id: str):
+     """
+     Terminate a session.
+     """
+     manager = SessionManager()
+     session = manager.get_session(session_id)
+     if session:
+         session.terminate()
+         print_output(f"Session {session_id} terminated.")
+     else:
+         print_output(f"Session {session_id} not found.", style="red")
+
+
+ @app.command()
+ def autopsy(
+     target: str = typer.Argument(..., help="Session ID or Issue ID to analyze."),
+ ):
+     """
+     Execute Post-Mortem analysis on a failed session or target Issue.
+     """
+     from .reliability import ApoptosisManager
+
+     manager = SessionManager()
+
+     print_output(f"Initiating Autopsy for '{target}'...", title="Coroner")
+
+     # Try to find session
+     session = manager.get_session(target)
+     if not session:
+         # Fallback: Treat target as Issue ID and create a dummy failed session context
+         import re
+
+         if re.match(r"^[a-zA-Z]+-\d+$", target):
+             print_output(f"Session not in memory. Analyzing Issue {target} directly.")
+             # We create a transient session just to trigger the coroner
+             from .defaults import DEFAULT_ROLES
+
+             builder_role = next(r for r in DEFAULT_ROLES if r.name == "builder")
+             session = manager.create_session(target.upper(), builder_role)
+             session.model.status = "failed"
+         else:
+             print_output(
+                 f"Could not find session or valid Issue ID for '{target}'", style="red"
+             )
+             raise typer.Exit(code=1)
+
+     apoptosis = ApoptosisManager(manager)
+     apoptosis.trigger_apoptosis(session.model.id)
+
+
+ @app.command(name="list")
+ def list_sessions():
+     """
+     List active agent sessions.
+     """
+     manager = SessionManager()
+     sessions = manager.list_sessions()
+
+     output = []
+     for s in sessions:
+         output.append(
+             {
+                 "id": s.model.id,
+                 "issue": s.model.issue_id,
+                 "role": s.model.role_name,
+                 "status": s.model.status,
+                 "branch": s.model.branch_name,
+             }
+         )
+
+     print_output(
+         output
+         or "No active sessions found (Note: Persistence not implemented in CLI list yet).",
+         title="Active Sessions",
+     )
+
+
+ @app.command()
+ def logs(session_id: str):
+     """
+     Stream logs for a session.
+     """
+     print_output(f"Streaming logs for {session_id}...", title="Session Logs")
+     # Placeholder
+     print("[12:00:00] Session started")
+     print("[12:00:01] Worker initialized")
+
+
+ @app.command()
+ def kill(session_id: str):
+     """
+     Terminate a session.
+     """
+     print_output(f"Killing session {session_id}...", title="Kill Session")
+     # Placeholder
+     print("Signal sent.")
monoco/features/scheduler/config.py
@@ -0,0 +1,32 @@
+ import yaml
+ from pathlib import Path
+ from typing import Dict
+ from .models import RoleTemplate, SchedulerConfig
+ from .defaults import DEFAULT_ROLES
+
+
+ def load_scheduler_config(project_root: Path) -> Dict[str, RoleTemplate]:
+     """
+     Load scheduler configuration from .monoco/scheduler.yaml
+     and merge it with the default roles.
+     """
+     roles = {role.name: role for role in DEFAULT_ROLES}
+
+     config_path = project_root / ".monoco" / "scheduler.yaml"
+     if config_path.exists():
+         try:
+             with open(config_path, "r") as f:
+                 data = yaml.safe_load(f) or {}
+
+             # The file is expected to provide a 'roles' key; validate its entries
+             # with the SchedulerConfig model before merging them over the defaults.
+             if "roles" in data:
+                 config = SchedulerConfig(roles=data["roles"])
+                 for role in config.roles:
+                     roles[role.name] = role
+         except Exception as e:
+             # Ideally this would go through a logger; print a warning for now.
+             print(f"Warning: Failed to load scheduler config: {e}")
+
+     return roles
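
A minimal sketch of how load_scheduler_config merges a project override with the built-in roles. The scheduler.yaml schema beyond the 'roles' key is an assumption here; the field names are simply the ones DEFAULT_ROLES passes to RoleTemplate.

    from pathlib import Path
    from monoco.features.scheduler import load_scheduler_config

    project_root = Path(".")
    (project_root / ".monoco").mkdir(exist_ok=True)
    (project_root / ".monoco" / "scheduler.yaml").write_text(
        "roles:\n"
        "  - name: builder\n"          # same name as a default role -> overrides it
        "    description: Stricter builder\n"
        "    trigger: design.approved\n"
        "    goal: Implement code and tests\n"
        "    tools: [read_file, write_to_file, run_command]\n"
        "    system_prompt: You are a stricter Builder.\n"
        "    engine: gemini\n"
    )

    roles = load_scheduler_config(project_root)
    # Defaults load first, so the custom 'builder' replaces the built-in entry while
    # 'crafter', 'auditor', and 'coroner' keep their defaults.
    print(sorted(roles))  # ['auditor', 'builder', 'coroner', 'crafter']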
monoco/features/scheduler/defaults.py
@@ -0,0 +1,54 @@
+ from .models import RoleTemplate
+
+ DEFAULT_ROLES = [
+     RoleTemplate(
+         name="crafter",
+         description="Responsible for initial design, research, and drafting issues from descriptions.",
+         trigger="task.received",
+         goal="Produce a structured Issue file and/or detailed design document.",
+         tools=[
+             "create_issue_file",
+             "read_file",
+             "search_web",
+             "view_file_outline",
+             "write_to_file",
+         ],
+         system_prompt=(
+             "You are a Crafter agent. Your goal is to turn vague ideas into structured engineering plans.\n"
+             "If the user provides a description, use 'monoco issue create' and 'monoco issue update' to build the task.\n"
+             "If the user provides an existing Issue, analyze the context and provide a detailed design or implementation plan."
+         ),
+         engine="gemini",
+     ),
+     RoleTemplate(
+         name="builder",
+         description="Responsible for implementation.",
+         trigger="design.approved",
+         goal="Implement code and tests",
+         tools=["read_file", "write_to_file", "run_command", "git"],
+         system_prompt="You are a Builder agent. Your job is to implement the code based on the design.",
+         engine="gemini",
+     ),
+     RoleTemplate(
+         name="auditor",
+         description="Responsible for code review.",
+         trigger="implementation.submitted",
+         goal="Review code and provide feedback",
+         tools=[
+             "read_file",
+             "read_terminal",
+             "run_command",
+         ],  # Assumed read_diff and lint are via run_command
+         system_prompt="You are an Auditor agent. Your job is to review the code for quality and correctness.",
+         engine="gemini",
+     ),
+     RoleTemplate(
+         name="coroner",
+         description="Responsible for analyzing failure root causes (Autopsy).",
+         trigger="session.crashed",
+         goal="Produce a post-mortem report",
+         tools=["read_file", "read_terminal", "git_log"],
+         system_prompt="You are a Coroner agent. Your job is to analyze why the previous session failed and write a post-mortem report.",
+         engine="gemini",
+     ),
+ ]
monoco/features/scheduler/manager.py
@@ -0,0 +1,49 @@
+ from typing import Dict, List, Optional
+ import uuid
+ from .models import RoleTemplate
+ from .worker import Worker
+ from .session import Session, RuntimeSession
+
+
+ class SessionManager:
+     """
+     Manages the lifecycle of sessions.
+     Responsible for creating, tracking, and retrieving sessions.
+     """
+
+     def __init__(self):
+         # In-memory storage for now. In prod, this might be a DB or file-backed.
+         self._sessions: Dict[str, RuntimeSession] = {}
+
+     def create_session(self, issue_id: str, role: RoleTemplate) -> RuntimeSession:
+         session_id = str(uuid.uuid4())
+         branch_name = (
+             f"agent/{issue_id}/{session_id[:8]}"  # Simple branch naming strategy
+         )
+
+         session_model = Session(
+             id=session_id,
+             issue_id=issue_id,
+             role_name=role.name,
+             branch_name=branch_name,
+         )
+
+         worker = Worker(role, issue_id)
+         runtime = RuntimeSession(session_model, worker)
+         self._sessions[session_id] = runtime
+         return runtime
+
+     def get_session(self, session_id: str) -> Optional[RuntimeSession]:
+         return self._sessions.get(session_id)
+
+     def list_sessions(self, issue_id: Optional[str] = None) -> List[RuntimeSession]:
+         if issue_id:
+             return [s for s in self._sessions.values() if s.model.issue_id == issue_id]
+         return list(self._sessions.values())
+
+     def terminate_session(self, session_id: str):
+         session = self.get_session(session_id)
+         if session:
+             session.terminate()
+             # We might want to keep the record for a while, so don't delete immediately
+             # del self._sessions[session_id]
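
Finally, a short sketch of the SessionManager lifecycle as exported from monoco/features/scheduler/__init__.py. It assumes RuntimeSession.terminate() (defined in session.py, not shown in this diff) tolerates a session that was never started.

    from monoco.features.scheduler import DEFAULT_ROLES, SessionManager

    manager = SessionManager()
    builder = next(r for r in DEFAULT_ROLES if r.name == "builder")

    runtime = manager.create_session("FEAT-101", builder)
    print(runtime.model.branch_name)               # e.g. agent/FEAT-101/1a2b3c4d

    print(len(manager.list_sessions("FEAT-101")))  # 1 (filtered by issue id)

    manager.terminate_session(runtime.model.id)    # the record stays in memory after termination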