monoco-toolkit 0.1.5 (monoco_toolkit-0.1.5-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- monoco/core/__init__.py +0 -0
- monoco/core/config.py +113 -0
- monoco/core/git.py +184 -0
- monoco/core/output.py +97 -0
- monoco/core/setup.py +285 -0
- monoco/core/telemetry.py +89 -0
- monoco/core/workspace.py +40 -0
- monoco/daemon/__init__.py +0 -0
- monoco/daemon/app.py +378 -0
- monoco/daemon/commands.py +36 -0
- monoco/daemon/models.py +24 -0
- monoco/daemon/reproduce_stats.py +41 -0
- monoco/daemon/services.py +265 -0
- monoco/daemon/stats.py +124 -0
- monoco/features/__init__.py +0 -0
- monoco/features/config/commands.py +70 -0
- monoco/features/i18n/__init__.py +0 -0
- monoco/features/i18n/commands.py +121 -0
- monoco/features/i18n/core.py +178 -0
- monoco/features/issue/commands.py +710 -0
- monoco/features/issue/core.py +1183 -0
- monoco/features/issue/linter.py +172 -0
- monoco/features/issue/models.py +157 -0
- monoco/features/pty/core.py +185 -0
- monoco/features/pty/router.py +138 -0
- monoco/features/pty/server.py +56 -0
- monoco/features/skills/__init__.py +1 -0
- monoco/features/skills/core.py +96 -0
- monoco/features/spike/commands.py +110 -0
- monoco/features/spike/core.py +154 -0
- monoco/main.py +110 -0
- monoco_toolkit-0.1.5.dist-info/METADATA +93 -0
- monoco_toolkit-0.1.5.dist-info/RECORD +36 -0
- monoco_toolkit-0.1.5.dist-info/WHEEL +4 -0
- monoco_toolkit-0.1.5.dist-info/entry_points.txt +2 -0
- monoco_toolkit-0.1.5.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,172 @@
+from typing import List, Optional, Set
+from pathlib import Path
+from rich.console import Console
+from rich.table import Table
+import typer
+
+from . import core
+from .models import IssueStatus, IssueStage
+
+console = Console()
+
+
+def validate_issue(path: Path, meta: core.IssueMetadata, all_issue_ids: Optional[Set[str]] = None, issues_root: Optional[Path] = None) -> List[str]:
+    """
+    Validate a single issue's integrity.
+    """
+    errors = []
+
+    # A. Directory/Status Consistency is checked by check_integrity() for files
+    # in the tree. A "Safe Edit" may validate a temp file outside the tree, so
+    # this function only validates content/metadata integrity.
+
+    # B. Solution Compliance
+    if meta.status == IssueStatus.CLOSED and not meta.solution:
+        errors.append(f"[red]Solution Missing:[/red] {meta.id} is closed but has no [dim]solution[/dim] field.")
+
+    # C. Link Integrity
+    if meta.parent:
+        if all_issue_ids and meta.parent not in all_issue_ids:
+            # Check workspace (fallback)
+            found = False
+            if issues_root:
+                if core.find_issue_path(issues_root, meta.parent):
+                    found = True
+
+            if not found:
+                errors.append(f"[red]Broken Link:[/red] {meta.id} refers to non-existent parent [bold]{meta.parent}[/bold].")
+
+    # D. Lifecycle Guard (Backlog)
+    if meta.status == IssueStatus.BACKLOG and meta.stage != IssueStage.FREEZED:
+        errors.append(f"[red]Lifecycle Error:[/red] {meta.id} is backlog but stage is not [bold]freezed[/bold] (found: {meta.stage}).")
+
+    return errors
+
+
+def check_integrity(issues_root: Path, recursive: bool = False) -> List[str]:
+    """
+    Verify the integrity of the Issues directory.
+    Returns a list of error messages.
+
+    If recursive=True, performs workspace-level validation including:
+    - Cross-project ID collision detection
+    - Cross-project UID collision detection
+    """
+    errors = []
+    all_issue_ids = set()  # For parent reference validation (includes namespaced IDs)
+    id_to_projects = {}    # local_id -> [(project_name, meta, file)]
+    all_uids = {}          # uid -> (project, issue_id)
+    all_issues = []
+
+    # Helper to collect issues from a project
+    def collect_project_issues(project_issues_root: Path, project_name: str = "local"):
+        project_issues = []
+        for subdir in ["Epics", "Features", "Chores", "Fixes"]:
+            d = project_issues_root / subdir
+            if d.exists():
+                files = []
+                for status in ["open", "closed", "backlog"]:
+                    status_dir = d / status
+                    if status_dir.exists():
+                        files.extend(status_dir.rglob("*.md"))
+
+                for f in files:
+                    meta = core.parse_issue(f)
+                    if meta:
+                        local_id = meta.id
+                        full_id = f"{project_name}::{local_id}" if project_name != "local" else local_id
+
+                        # Track ID occurrences per project
+                        if local_id not in id_to_projects:
+                            id_to_projects[local_id] = []
+                        id_to_projects[local_id].append((project_name, meta, f))
+
+                        # Add IDs for reference validation
+                        all_issue_ids.add(local_id)  # Local ID
+                        if project_name != "local":
+                            all_issue_ids.add(full_id)  # Namespaced ID
+
+                        # Check UID collision (if a UID exists)
+                        if meta.uid:
+                            if meta.uid in all_uids:
+                                existing_project, existing_id = all_uids[meta.uid]
+                                errors.append(
+                                    f"[red]UID Collision:[/red] UID {meta.uid} is duplicated.\n"
+                                    f" - {existing_project}::{existing_id}\n"
+                                    f" - {project_name}::{local_id}"
+                                )
+                            else:
+                                all_uids[meta.uid] = (project_name, local_id)
+
+                        project_issues.append((f, meta, project_name))
+        return project_issues
+
+    # 1. Collect local issues
+    all_issues.extend(collect_project_issues(issues_root, "local"))
+
+    # 2. If recursive, collect workspace member issues
+    if recursive:
+        try:
+            from monoco.core.config import get_config
+            project_root = issues_root.parent
+            conf = get_config(str(project_root))
+
+            for member_name, rel_path in conf.project.members.items():
+                member_root = (project_root / rel_path).resolve()
+                member_issues_dir = member_root / "Issues"
+
+                if member_issues_dir.exists():
+                    all_issues.extend(collect_project_issues(member_issues_dir, member_name))
+        except Exception:
+            # Fail gracefully if the workspace config is missing
+            pass
+
+    # 3. Check for ID collisions within the same project
+    for local_id, occurrences in id_to_projects.items():
+        # Group by project
+        projects_with_id = {}
+        for project_name, meta, f in occurrences:
+            if project_name not in projects_with_id:
+                projects_with_id[project_name] = []
+            projects_with_id[project_name].append((meta, f))
+
+        # Check for duplicates within the same project
+        for project_name, metas in projects_with_id.items():
+            if len(metas) > 1:
+                # The same ID appears multiple times in one project - this is an error
+                error_msg = f"[red]ID Collision:[/red] {local_id} appears {len(metas)} times in project '{project_name}':\n"
+                for idx, (meta, f) in enumerate(metas, 1):
+                    error_msg += f" {idx}. uid: {meta.uid or 'N/A'} | created: {meta.created_at} | stage: {meta.stage} | status: {meta.status.value}\n"
+                error_msg += " [yellow]→ Action:[/yellow] Remove the duplicate or use 'monoco issue move --to <target> --renumber' to resolve."
+                errors.append(error_msg)
+
+    # 4. Validation
+    for path, meta, project_name in all_issues:
+        # A. Directory/Status Consistency (only checked for files in the tree)
+        expected_status = meta.status.value
+        path_parts = path.parts
+        if expected_status not in path_parts:
+            errors.append(f"[yellow]Placement Error:[/yellow] {meta.id} has status [cyan]{expected_status}[/cyan] but is not under a [dim]{expected_status}/[/dim] directory.")
+
+        # Reuse common logic
+        errors.extend(validate_issue(path, meta, all_issue_ids, issues_root))
+
+    return errors
+
+
+def run_lint(issues_root: Path, recursive: bool = False):
+    errors = check_integrity(issues_root, recursive)
+
+    if not errors:
+        console.print("[green]✔[/green] Issue integrity check passed. No integrity errors found.")
+    else:
+        table = Table(title="Issue Integrity Issues", show_header=False, border_style="red")
+        for err in errors:
+            table.add_row(err)
+        console.print(table)
+        raise typer.Exit(code=1)
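`run_lint` is the CLI-facing wrapper: it prints a green check mark when `check_integrity` returns no errors, otherwise it renders every error in a red-bordered table and exits with code 1. A minimal sketch of invoking it directly; the `Issues/` layout in the comments is an assumption inferred from the directories `collect_project_issues` scans:

```python
from pathlib import Path
from monoco.features.issue.linter import run_lint

# Assumed layout, mirroring what collect_project_issues() scans:
#   Issues/Features/open/FEAT-0001.md     <- frontmatter status must be "open"
#   Issues/Features/closed/FEAT-0002.md   <- closed issues need a solution field
#   Issues/Features/backlog/FEAT-0003.md  <- backlog issues must be stage "freezed"
run_lint(Path("Issues"), recursive=False)  # raises typer.Exit(code=1) on errors
```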
@@ -0,0 +1,157 @@
+from enum import Enum
+from typing import List, Optional, Any
+from pydantic import BaseModel, Field, model_validator
+from datetime import datetime
+import hashlib
+import secrets
+
+
+class IssueID:
+    """
+    Helper for parsing Issue IDs that might be namespaced (e.g. 'toolkit::FEAT-0001').
+    """
+    def __init__(self, raw: str):
+        self.raw = raw
+        if "::" in raw:
+            self.namespace, self.local_id = raw.split("::", 1)
+        else:
+            self.namespace = None
+            self.local_id = raw
+
+    def __str__(self):
+        if self.namespace:
+            return f"{self.namespace}::{self.local_id}"
+        return self.local_id
+
+    def __repr__(self):
+        return f"IssueID({self.raw})"
+
+    @property
+    def is_local(self) -> bool:
+        return self.namespace is None
+
+    def matches(self, other_id: str) -> bool:
+        """Check if this ID matches another ID string."""
+        return str(self) == other_id or (self.is_local and self.local_id == other_id)
+
+
+def current_time() -> datetime:
+    return datetime.now().replace(microsecond=0)
+
+
+def generate_uid() -> str:
+    """
+    Generate a globally unique 6-character short hash for issue identity.
+    Uses timestamp + random bytes to ensure uniqueness across projects.
+    """
+    timestamp = str(datetime.now().timestamp()).encode()
+    random_bytes = secrets.token_bytes(8)
+    combined = timestamp + random_bytes
+    hash_digest = hashlib.sha256(combined).hexdigest()
+    return hash_digest[:6]
+
+
+class IssueType(str, Enum):
+    EPIC = "epic"
+    FEATURE = "feature"
+    CHORE = "chore"
+    FIX = "fix"
+
+
+class IssueStatus(str, Enum):
+    OPEN = "open"
+    CLOSED = "closed"
+    BACKLOG = "backlog"
+
+
+class IssueStage(str, Enum):
+    TODO = "todo"
+    DOING = "doing"
+    REVIEW = "review"
+    DONE = "done"
+    FREEZED = "freezed"
+
+
+class IssueSolution(str, Enum):
+    IMPLEMENTED = "implemented"
+    CANCELLED = "cancelled"
+    WONTFIX = "wontfix"
+    DUPLICATE = "duplicate"
+
+
+class IsolationType(str, Enum):
+    BRANCH = "branch"
+    WORKTREE = "worktree"
+
+
+class IssueIsolation(BaseModel):
+    type: IsolationType
+    ref: str                    # Git branch name
+    path: Optional[str] = None  # Worktree path (relative to repo root or absolute)
+    created_at: datetime = Field(default_factory=current_time)
+
+
+class IssueMetadata(BaseModel):
+    model_config = {"extra": "allow"}
+
+    id: str
+    uid: Optional[str] = None  # Global unique identifier for cross-project identity
+    type: IssueType
+    status: IssueStatus = IssueStatus.OPEN
+    stage: Optional[IssueStage] = None
+    title: str
+
+    # Time Anchors
+    created_at: datetime = Field(default_factory=current_time)
+    opened_at: Optional[datetime] = None
+    updated_at: datetime = Field(default_factory=current_time)
+    closed_at: Optional[datetime] = None
+
+    parent: Optional[str] = None
+    sprint: Optional[str] = None
+    solution: Optional[IssueSolution] = None
+    isolation: Optional[IssueIsolation] = None
+    dependencies: List[str] = []
+    related: List[str] = []
+    tags: List[str] = []
+    path: Optional[str] = None  # Absolute path to the issue file
+
+    @model_validator(mode='before')
+    @classmethod
+    def normalize_fields(cls, v: Any) -> Any:
+        if isinstance(v, dict):
+            # Normalize type, status, solution and stage to lowercase for compatibility
+            if "type" in v and isinstance(v["type"], str):
+                v["type"] = v["type"].lower()
+            if "status" in v and isinstance(v["status"], str):
+                v["status"] = v["status"].lower()
+            if "solution" in v and isinstance(v["solution"], str):
+                v["solution"] = v["solution"].lower()
+            if "stage" in v and isinstance(v["stage"], str):
+                v["stage"] = v["stage"].lower()
+        return v
+
+    @model_validator(mode='after')
+    def validate_lifecycle(self) -> 'IssueMetadata':
+        # Logic Definition:
+        # status: backlog -> stage: freezed
+        # status: closed  -> stage: done
+        # status: open    -> stage: todo | doing | review (default todo)
+
+        if self.status == IssueStatus.BACKLOG:
+            self.stage = IssueStage.FREEZED
+
+        elif self.status == IssueStatus.CLOSED:
+            # Enforce stage=done for closed issues
+            if self.stage != IssueStage.DONE:
+                self.stage = IssueStage.DONE
+            # Auto-fill closed_at if missing
+            if not self.closed_at:
+                self.closed_at = current_time()
+
+        elif self.status == IssueStatus.OPEN:
+            # Ensure a valid stage for open status
+            if self.stage is None or self.stage == IssueStage.DONE:
+                self.stage = IssueStage.TODO
+
+        return self
+
+
+class IssueDetail(IssueMetadata):
+    body: str = ""
+    raw_content: Optional[str] = None  # Full file content including frontmatter for editing
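The pair of validators makes frontmatter parsing forgiving: `normalize_fields` lowercases the enum-backed fields before validation, and `validate_lifecycle` then coerces `stage` to match `status`. A small sketch of that behavior, using only the fields defined above:

```python
from monoco.features.issue.models import IssueMetadata

# "Closed"/"Feature" are lowercased by normalize_fields; validate_lifecycle
# then forces stage=done and back-fills closed_at for closed issues.
issue = IssueMetadata(
    id="FEAT-0001",
    type="Feature",
    status="Closed",
    title="Example issue",
    solution="implemented",
)
assert issue.stage.value == "done"
assert issue.closed_at is not None

# An open issue with no stage defaults to todo.
draft = IssueMetadata(id="FIX-0002", type="fix", status="open", title="Draft")
assert draft.stage.value == "todo"
```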
@@ -0,0 +1,185 @@
+
+import asyncio
+import os
+import pty
+import signal
+import struct
+import subprocess
+import fcntl
+import termios
+import logging
+from typing import Dict, Optional
+
+logger = logging.getLogger("monoco.pty")
+
+
+class PTYSession:
+    """
+    Manages a single PTY session connected to a subprocess (shell).
+    """
+    def __init__(self, session_id: str, cmd: list[str], env: Optional[Dict[str, str]] = None, cwd: Optional[str] = None):
+        self.session_id = session_id
+        self.cmd = cmd
+        self.env = env or os.environ.copy()
+        self.cwd = cwd or os.getcwd()
+
+        self.fd: Optional[int] = None
+        self.pid: Optional[int] = None
+        self.proc: Optional[subprocess.Popen] = None
+        self.running = False
+        self.loop = asyncio.get_running_loop()
+
+    def start(self, cols: int = 80, rows: int = 24):
+        """
+        Spawn a subprocess connected to a new PTY using subprocess.Popen.
+        This provides better safety in threaded/asyncio environments than pty.fork().
+        """
+        # 1. Open the PTY pair
+        master_fd, slave_fd = pty.openpty()
+
+        # 2. Set the initial size
+        self._set_winsize(master_fd, rows, cols)
+
+        try:
+            # 3. Spawn the process
+            # start_new_session=True executes setsid()
+            self.proc = subprocess.Popen(
+                self.cmd,
+                stdin=slave_fd,
+                stdout=slave_fd,
+                stderr=slave_fd,
+                cwd=self.cwd,
+                env=self.env,
+                start_new_session=True,
+                close_fds=True  # Important: close other FDs in the child
+            )
+
+            self.pid = self.proc.pid
+            self.fd = master_fd
+            self.running = True
+
+            # 4. Close the slave fd in the parent (the child holds it open now)
+            os.close(slave_fd)
+
+            logger.info(f"Started session {self.session_id} (PID: {self.pid})")
+
+        except Exception as e:
+            logger.error(f"Failed to spawn process: {e}")
+            # Ensure we clean up the fds if the spawn fails
+            try:
+                os.close(master_fd)
+            except OSError:
+                pass
+            try:
+                os.close(slave_fd)
+            except OSError:
+                pass
+            raise
+
+    def resize(self, cols: int, rows: int):
+        """
+        Resize the PTY.
+        """
+        if self.fd and self.running:
+            self._set_winsize(self.fd, rows, cols)
+
+    def write(self, data: bytes):
+        """
+        Write input data (from the websocket) to the PTY master fd.
+        """
+        if self.fd and self.running:
+            os.write(self.fd, data)
+
+    async def read(self) -> bytes:
+        """
+        Read output data from the PTY master fd (to forward to the websocket).
+        """
+        if not self.fd or not self.running:
+            return b""
+
+        try:
+            # os.read on the PTY blocks, so run it in an executor
+            # to avoid blocking the event loop.
+            return await self.loop.run_in_executor(None, self._read_blocking)
+        except OSError:
+            return b""
+
+    def _read_blocking(self) -> bytes:
+        try:
+            return os.read(self.fd, 1024)
+        except OSError:
+            return b""
+
+    def terminate(self):
+        """
+        Terminate the process and close the PTY.
+        """
+        self.running = False
+
+        # Use the Popen object if available
+        if self.proc:
+            try:
+                self.proc.terminate()
+                try:
+                    self.proc.wait(timeout=1.0)
+                except subprocess.TimeoutExpired:
+                    # Force kill if it did not terminate
+                    self.proc.kill()
+                    self.proc.wait()
+            except Exception as e:
+                logger.error(f"Error terminating process: {e}")
+            self.proc = None
+            self.pid = None
+        elif self.pid:
+            # Fallback if the Popen object was lost
+            try:
+                os.kill(self.pid, signal.SIGTERM)
+                os.waitpid(self.pid, 0)  # Reap the zombie
+            except OSError:
+                pass
+            self.pid = None
+
+        if self.fd:
+            try:
+                os.close(self.fd)
+            except OSError:
+                pass
+            self.fd = None
+        logger.info(f"Terminated session {self.session_id}")
+
+    def _set_winsize(self, fd: int, row: int, col: int, xpix: int = 0, ypix: int = 0):
+        winsize = struct.pack("HHHH", row, col, xpix, ypix)
+        fcntl.ioctl(fd, termios.TIOCSWINSZ, winsize)
+
+
+class PTYManager:
+    """
+    Singleton to manage multiple PTY sessions.
+    """
+    def __init__(self):
+        self.sessions: Dict[str, PTYSession] = {}
+
+    def create_session(self, session_id: str, cwd: str, cmd: Optional[list[str]] = None, env: Optional[Dict[str, str]] = None) -> PTYSession:
+        if session_id in self.sessions:
+            # Kill and recreate rather than attaching to the existing session
+            self.close_session(session_id)
+
+        session = PTYSession(session_id, cmd or ["/bin/zsh"], env, cwd)
+        self.sessions[session_id] = session
+        return session
+
+    def get_session(self, session_id: str) -> Optional[PTYSession]:
+        return self.sessions.get(session_id)
+
+    def close_session(self, session_id: str):
+        if session_id in self.sessions:
+            self.sessions[session_id].terminate()
+            del self.sessions[session_id]
+
+    def close_all_sessions(self):
+        """
+        Terminate all active PTY sessions.
+        """
+        for session_id in list(self.sessions.keys()):
+            self.close_session(session_id)
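Outside the WebSocket plumbing, the session API is small: create, start, write, read, terminate. A minimal asyncio sketch of driving it directly; the `/bin/sh` command and the sleep interval are arbitrary choices, and since `PTYSession.__init__` calls `asyncio.get_running_loop()`, sessions must be created inside a running event loop (POSIX only):

```python
import asyncio
from monoco.features.pty.core import PTYManager

async def main():
    manager = PTYManager()
    session = manager.create_session("demo", cwd="/tmp", cmd=["/bin/sh"])
    session.start(cols=80, rows=24)

    session.write(b"echo hello\n")  # keystrokes go to the PTY master fd
    await asyncio.sleep(0.2)        # give the shell a moment to respond
    output = await session.read()   # blocking os.read, run in an executor
    print(output.decode(errors="replace"))

    manager.close_session("demo")   # terminates the process, closes the fd

asyncio.run(main())
```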
@@ -0,0 +1,138 @@
+
+from fastapi import APIRouter, WebSocket, WebSocketDisconnect, Query
+from typing import Optional
+import json
+import asyncio
+import logging
+import os
+from pathlib import Path
+from monoco.features.pty.core import PTYManager
+
+# A module-level singleton for now; ideally this would be attached
+# to app state via dependency injection.
+pty_manager = PTYManager()
+
+router = APIRouter(prefix="/api/v1/pty", tags=["pty"])
+
+logger = logging.getLogger("monoco.pty")
+
+
+@router.websocket("/ws/{session_id}")
+async def websocket_pty_endpoint(
+    websocket: WebSocket,
+    session_id: str,
+    cwd: Optional[str] = Query(None),
+    cols: int = Query(80),
+    rows: int = Query(24),
+    env: Optional[str] = Query(None)  # JSON-encoded env vars
+):
+    await websocket.accept()
+
+    # Determine the working directory: an explicit cwd from the query wins,
+    # otherwise fall back to the process CWD. Since monoco pty runs as a
+    # separate service, we expect cwd to be passed; the default is wherever
+    # monoco pty was started.
+    working_dir = cwd if cwd else os.getcwd()
+
+    # Prepare the environment
+    env_vars = os.environ.copy()
+    env_vars["TERM"] = "xterm-256color"
+    env_vars["COLORTERM"] = "truecolor"
+    if "SHELL" not in env_vars:
+        env_vars["SHELL"] = "/bin/zsh"
+    if "HOME" not in env_vars:
+        env_vars["HOME"] = str(Path.home())
+
+    # Filter out Trae/Gemini specific variables to avoid shell integration conflicts.
+    # This prevents the shell from trying to write to IDE-specific logs, which causes EPERM.
+    keys_to_remove = [k for k in env_vars.keys() if k.startswith("TRAE_") or k.startswith("GEMINI_") or k == "AI_AGENT"]
+    for k in keys_to_remove:
+        del env_vars[k]
+
+    if env:
+        try:
+            custom_env = json.loads(env)
+            env_vars.update(custom_env)
+        except (json.JSONDecodeError, TypeError, ValueError):
+            logger.warning("Failed to parse custom env vars")
+
+    # Start the session
+    try:
+        session = pty_manager.create_session(
+            session_id=session_id,
+            cwd=working_dir,
+            cmd=["/bin/zsh", "-l"],  # Login shell to ensure the full user environment
+            env=env_vars
+        )
+        session.start(cols, rows)
+    except Exception as e:
+        logger.error(f"Failed to start session: {e}")
+        await websocket.close(code=1011)
+        return
+
+    # Pipe loop
+    reader_task = None
+    try:
+        # Task to read from the PTY and send to the WebSocket
+        async def pty_reader():
+            while session.running:
+                data = await session.read()
+                if not data:
+                    break
+                # xterm.js accepts string or binary frames;
+                # binary is safer for control codes.
+                await websocket.send_bytes(data)
+
+            # If the PTY exits, close the WS
+            await websocket.close()
+
+        reader_task = asyncio.create_task(pty_reader())
+
+        # Main loop: read from the WebSocket and write to the PTY
+        try:
+            while True:
+                # A message from the client (xterm.js) is either raw input
+                # or a JSON control command (e.g. resize).
+                message = await websocket.receive()
+
+                if message["type"] == "websocket.disconnect":
+                    raise WebSocketDisconnect(code=message.get("code", 1000))
+
+                if "text" in message:
+                    payload = message["text"]
+
+                    # Protocol: binary frames carry input; text frames carry
+                    # either JSON control messages or raw input.
+                    try:
+                        # Try parsing as a JSON control message
+                        cmd = json.loads(payload)
+                        if cmd.get("type") == "resize":
+                            session.resize(cmd["cols"], cmd["rows"])
+                            continue
+                    except (json.JSONDecodeError, AttributeError, KeyError):
+                        pass  # Not a control message; treat it as raw input
+
+                    session.write(payload.encode())
+
+                elif "bytes" in message:
+                    session.write(message["bytes"])
+        except RuntimeError:
+            # Handle "Cannot call 'receive' once a disconnect message has been received".
+            # This happens if Starlette/FastAPI already processed the disconnect
+            # internally but we called receive() again.
+            logger.info(f"Runtime disconnect for session {session_id}")
+
+    except WebSocketDisconnect:
+        logger.info(f"Client disconnected for session {session_id}")
+    except Exception as e:
+        logger.error(f"WebSocket error: {e}")
+    finally:
+        # Cleanup
+        pty_manager.close_session(session_id)
+        if reader_task and not reader_task.done():
+            reader_task.cancel()
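From the client's point of view the wire protocol is: binary (or plain text) frames for terminal I/O in both directions, plus JSON text frames for control messages such as resize. A hypothetical client sketch using the third-party `websockets` package; the host and port are assumptions, since the daemon's actual bind address is configured elsewhere:

```python
import asyncio
import json
import websockets  # third-party: pip install websockets

async def main():
    # Assumed address; the daemon's real host/port come from its own config.
    uri = "ws://127.0.0.1:8000/api/v1/pty/ws/demo?cols=120&rows=30"
    async with websockets.connect(uri) as ws:
        await ws.send(json.dumps({"type": "resize", "cols": 100, "rows": 40}))
        await ws.send("echo hello\n")  # raw input as a text frame
        data = await ws.recv()         # terminal output arrives as bytes
        print(data.decode(errors="replace") if isinstance(data, bytes) else data)

asyncio.run(main())
```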