rorchestra 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rorchestra-0.1.0/PKG-INFO +92 -0
- rorchestra-0.1.0/README.md +66 -0
- rorchestra-0.1.0/app/__init__.py +1 -0
- rorchestra-0.1.0/app/adapters/__init__.py +0 -0
- rorchestra-0.1.0/app/adapters/gemini_cli.py +272 -0
- rorchestra-0.1.0/app/adapters/luau_lsp.py +94 -0
- rorchestra-0.1.0/app/adapters/roblox_mcp.py +163 -0
- rorchestra-0.1.0/app/adapters/rojo.py +113 -0
- rorchestra-0.1.0/app/config.py +161 -0
- rorchestra-0.1.0/app/main.py +494 -0
- rorchestra-0.1.0/app/models/__init__.py +0 -0
- rorchestra-0.1.0/app/models/entities.py +357 -0
- rorchestra-0.1.0/app/models/schemas.py +244 -0
- rorchestra-0.1.0/app/policies/__init__.py +0 -0
- rorchestra-0.1.0/app/policies/safety.py +58 -0
- rorchestra-0.1.0/app/rochester.py +1577 -0
- rorchestra-0.1.0/app/rochester_main.py +5 -0
- rorchestra-0.1.0/app/services/__init__.py +0 -0
- rorchestra-0.1.0/app/services/agents/__init__.py +1 -0
- rorchestra-0.1.0/app/services/agents/large_change.py +148 -0
- rorchestra-0.1.0/app/services/agents/orchestrator.py +1727 -0
- rorchestra-0.1.0/app/services/agents/tools.py +305 -0
- rorchestra-0.1.0/app/services/graph/__init__.py +0 -0
- rorchestra-0.1.0/app/services/graph/builder.py +95 -0
- rorchestra-0.1.0/app/services/ingest/__init__.py +0 -0
- rorchestra-0.1.0/app/services/ingest/pipeline.py +235 -0
- rorchestra-0.1.0/app/services/mcp/__init__.py +0 -0
- rorchestra-0.1.0/app/services/mcp/capability_router.py +81 -0
- rorchestra-0.1.0/app/services/mcp/trigger_policy.py +51 -0
- rorchestra-0.1.0/app/services/mcp/validator.py +99 -0
- rorchestra-0.1.0/app/services/memory/__init__.py +0 -0
- rorchestra-0.1.0/app/services/memory/hierarchy.py +226 -0
- rorchestra-0.1.0/app/services/memory/refresh.py +115 -0
- rorchestra-0.1.0/app/services/memory/skill_loader.py +219 -0
- rorchestra-0.1.0/app/services/memory/store.py +169 -0
- rorchestra-0.1.0/app/services/packets/__init__.py +0 -0
- rorchestra-0.1.0/app/services/packets/assembler.py +196 -0
- rorchestra-0.1.0/app/services/patch_apply.py +230 -0
- rorchestra-0.1.0/app/services/summarization/__init__.py +0 -0
- rorchestra-0.1.0/app/services/summarization/summarizer.py +175 -0
- rorchestra-0.1.0/app/services/token_tracker.py +72 -0
- rorchestra-0.1.0/app/services/validation/__init__.py +0 -0
- rorchestra-0.1.0/app/services/validation/static.py +86 -0
- rorchestra-0.1.0/app/services/workers/__init__.py +0 -0
- rorchestra-0.1.0/app/services/workers/lifecycle.py +596 -0
- rorchestra-0.1.0/app/storage/__init__.py +0 -0
- rorchestra-0.1.0/app/storage/artifacts.py +44 -0
- rorchestra-0.1.0/app/storage/database.py +87 -0
- rorchestra-0.1.0/app/telemetry/__init__.py +0 -0
- rorchestra-0.1.0/app/telemetry/metrics.py +101 -0
- rorchestra-0.1.0/pyproject.toml +44 -0
- rorchestra-0.1.0/rorchestra.egg-info/PKG-INFO +92 -0
- rorchestra-0.1.0/rorchestra.egg-info/SOURCES.txt +56 -0
- rorchestra-0.1.0/rorchestra.egg-info/dependency_links.txt +1 -0
- rorchestra-0.1.0/rorchestra.egg-info/entry_points.txt +3 -0
- rorchestra-0.1.0/rorchestra.egg-info/requires.txt +13 -0
- rorchestra-0.1.0/rorchestra.egg-info/top_level.txt +1 -0
- rorchestra-0.1.0/setup.cfg +4 -0
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: rorchestra
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Roblox/Luau AI orchestration system for codebase understanding and scoped edit generation
|
|
5
|
+
License: MIT
|
|
6
|
+
Keywords: roblox,luau,ai,orchestration,gemini
|
|
7
|
+
Classifier: Programming Language :: Python :: 3
|
|
8
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
9
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
10
|
+
Classifier: Operating System :: OS Independent
|
|
11
|
+
Classifier: Topic :: Software Development :: Code Generators
|
|
12
|
+
Requires-Python: >=3.11
|
|
13
|
+
Description-Content-Type: text/markdown
|
|
14
|
+
Requires-Dist: typer[all]>=0.9.0
|
|
15
|
+
Requires-Dist: pydantic>=2.0
|
|
16
|
+
Requires-Dist: pydantic-settings>=2.0
|
|
17
|
+
Requires-Dist: sqlalchemy>=2.0
|
|
18
|
+
Requires-Dist: alembic>=1.12
|
|
19
|
+
Requires-Dist: tiktoken>=0.5
|
|
20
|
+
Requires-Dist: aiofiles>=23.0
|
|
21
|
+
Requires-Dist: rich>=13.0
|
|
22
|
+
Requires-Dist: prompt_toolkit>=3.0
|
|
23
|
+
Provides-Extra: dev
|
|
24
|
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
|
25
|
+
Requires-Dist: pytest-asyncio>=0.21; extra == "dev"
|
|
26
|
+
|
|
27
|
+
# Roblox/Luau AI Orchestration System
|
|
28
|
+
|
|
29
|
+
A Python orchestration layer that maintains hierarchical memory of a Rojo/Luau codebase, assembles compact context packets, and dispatches Gemini CLI workers for scoped code edits.
|
|
30
|
+
|
|
31
|
+
## Quick Start
|
|
32
|
+
|
|
33
|
+
```bash
|
|
34
|
+
cd orchestrator
|
|
35
|
+
pip install -e ".[dev]"
|
|
36
|
+
|
|
37
|
+
# Ingest a Rojo project
|
|
38
|
+
python -m app.main ingest C:\path\to\your\rojo\project
|
|
39
|
+
|
|
40
|
+
# Generate memory summaries
|
|
41
|
+
python -m app.main summarize --repo-id 1
|
|
42
|
+
|
|
43
|
+
# Request an edit
|
|
44
|
+
python -m app.main edit "Add error handling to the data save module" --repo-id 1 --scope DataManager --side server
|
|
45
|
+
|
|
46
|
+
# Validate a patch
|
|
47
|
+
python -m app.main validate --task-id 1
|
|
48
|
+
|
|
49
|
+
# Check live Studio state (uncertainty-triggered)
|
|
50
|
+
python -m app.main check ui_existence StarterGui.ScreenGui.MainHUD
|
|
51
|
+
|
|
52
|
+
# View system status
|
|
53
|
+
python -m app.main status
|
|
54
|
+
|
|
55
|
+
# Enter the interactive REPL (Rorchestra)
|
|
56
|
+
python -m app.rochester
|
|
57
|
+
# Inside the REPL, you can use:
|
|
58
|
+
# /edit "description" -> Propose an edit
|
|
59
|
+
# /apply <id> -> Apply a patch and rebuild memory cascade
|
|
60
|
+
# /mcp -> Check connected MCP servers and statuses
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
## Architecture
|
|
64
|
+
|
|
65
|
+
| Layer | Purpose |
|
|
66
|
+
|---|---|
|
|
67
|
+
| **Adapters** | Rojo, luau-lsp, Gemini CLI, MCP dispatcher |
|
|
68
|
+
| **Services** | Ingest, graph, memory, summarization, packets, workers, validation, MCP |
|
|
69
|
+
| **Policies** | Safety gates, MCP trigger policy |
|
|
70
|
+
| **Telemetry** | JSONL event log |
|
|
71
|
+
| **Storage** | SQLite (ORM), file-based artifact store |
|
|
72
|
+
|
|
73
|
+
## MCP Integration
|
|
74
|
+
|
|
75
|
+
Uses a **canonical capability dispatcher** that routes through:
|
|
76
|
+
- **Primary**: Official Roblox Studio MCP
|
|
77
|
+
- **Fallback**: Community robloxstudio-mcp (filtered toolset)
|
|
78
|
+
|
|
79
|
+
Raw MCP output is stored out-of-band in `artifacts/mcp_raw/` — never injected into planner context.
|
|
80
|
+
|
|
81
|
+
## Memory Model
|
|
82
|
+
|
|
83
|
+
Memory records are **invalidation-driven**, not time-based. A record is only stale when its source files change:
|
|
84
|
+
- Accepted patches trigger `invalidate_by_file()`
|
|
85
|
+
- Stale scopes are re-summarised on demand via `summarize`
|
|
86
|
+
|
|
87
|
+
## Requirements
|
|
88
|
+
|
|
89
|
+
- Python 3.11+
|
|
90
|
+
- Rojo (on PATH)
|
|
91
|
+
- luau-lsp (optional, for static validation)
|
|
92
|
+
- Gemini CLI (for worker invocations and summarisation)
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
# Roblox/Luau AI Orchestration System
|
|
2
|
+
|
|
3
|
+
A Python orchestration layer that maintains hierarchical memory of a Rojo/Luau codebase, assembles compact context packets, and dispatches Gemini CLI workers for scoped code edits.
|
|
4
|
+
|
|
5
|
+
## Quick Start
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
cd orchestrator
|
|
9
|
+
pip install -e ".[dev]"
|
|
10
|
+
|
|
11
|
+
# Ingest a Rojo project
|
|
12
|
+
python -m app.main ingest C:\path\to\your\rojo\project
|
|
13
|
+
|
|
14
|
+
# Generate memory summaries
|
|
15
|
+
python -m app.main summarize --repo-id 1
|
|
16
|
+
|
|
17
|
+
# Request an edit
|
|
18
|
+
python -m app.main edit "Add error handling to the data save module" --repo-id 1 --scope DataManager --side server
|
|
19
|
+
|
|
20
|
+
# Validate a patch
|
|
21
|
+
python -m app.main validate --task-id 1
|
|
22
|
+
|
|
23
|
+
# Check live Studio state (uncertainty-triggered)
|
|
24
|
+
python -m app.main check ui_existence StarterGui.ScreenGui.MainHUD
|
|
25
|
+
|
|
26
|
+
# View system status
|
|
27
|
+
python -m app.main status
|
|
28
|
+
|
|
29
|
+
# Enter the interactive REPL (Rorchestra)
|
|
30
|
+
python -m app.rochester
|
|
31
|
+
# Inside the REPL, you can use:
|
|
32
|
+
# /edit "description" -> Propose an edit
|
|
33
|
+
# /apply <id> -> Apply a patch and rebuild memory cascade
|
|
34
|
+
# /mcp -> Check connected MCP servers and statuses
|
|
35
|
+
```
|
|
36
|
+
|
|
37
|
+
## Architecture
|
|
38
|
+
|
|
39
|
+
| Layer | Purpose |
|
|
40
|
+
|---|---|
|
|
41
|
+
| **Adapters** | Rojo, luau-lsp, Gemini CLI, MCP dispatcher |
|
|
42
|
+
| **Services** | Ingest, graph, memory, summarization, packets, workers, validation, MCP |
|
|
43
|
+
| **Policies** | Safety gates, MCP trigger policy |
|
|
44
|
+
| **Telemetry** | JSONL event log |
|
|
45
|
+
| **Storage** | SQLite (ORM), file-based artifact store |
|
|
46
|
+
|
|
47
|
+
## MCP Integration
|
|
48
|
+
|
|
49
|
+
Uses a **canonical capability dispatcher** that routes through:
|
|
50
|
+
- **Primary**: Official Roblox Studio MCP
|
|
51
|
+
- **Fallback**: Community robloxstudio-mcp (filtered toolset)
|
|
52
|
+
|
|
53
|
+
Raw MCP output is stored out-of-band in `artifacts/mcp_raw/` — never injected into planner context.
|
|
54
|
+
|
|
55
|
+
## Memory Model
|
|
56
|
+
|
|
57
|
+
Memory records are **invalidation-driven**, not time-based. A record is only stale when its source files change:
|
|
58
|
+
- Accepted patches trigger `invalidate_by_file()`
|
|
59
|
+
- Stale scopes are re-summarised on demand via `summarize`
|
|
60
|
+
|
|
61
|
+
## Requirements
|
|
62
|
+
|
|
63
|
+
- Python 3.11+
|
|
64
|
+
- Rojo (on PATH)
|
|
65
|
+
- luau-lsp (optional, for static validation)
|
|
66
|
+
- Gemini CLI (for worker invocations and summarisation)
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# Roblox/Luau AI Orchestration System
|
|
File without changes
|
|
@@ -0,0 +1,272 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Gemini CLI adapter — subprocess wrapper for standalone and subagent invocations.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
import os
|
|
9
|
+
import subprocess
|
|
10
|
+
import sys
|
|
11
|
+
import time
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from typing import Any
|
|
14
|
+
|
|
15
|
+
from app.config import settings
|
|
16
|
+
from app.models.schemas import WorkerResult
|
|
17
|
+
from app.storage.artifacts import save_artifact
|
|
18
|
+
|
|
19
|
+
# On Windows, .cmd wrappers (e.g. gemini.cmd) need shell=True
# so the command processor can resolve the wrapper script on PATH.
_SHELL = sys.platform == "win32"

# Lines of noise that Gemini CLI appends to stdout.
# A line is dropped when its stripped form *starts with* one of these.
_NOISE_PREFIXES = [
    "MCP issues detected.",
    "Run /mcp list for status.",
    "Loaded cached credentials.",
    "ClearcutLogger:",
    "Error flushing log events:",
    "[MESSAGE_BUS]",
    "Flushing log events to Clearcut.",
]

# Patterns that indicate Node.js stack trace lines (from node-pty, etc.).
# A line is dropped when its stripped form *contains* one of these.
_NOISE_PATTERNS = [
    "at Module._compile",
    "at Object..js",
    "at Module.load",
    "at Function._load",
    "at TracingChannel.traceSync",
    "at wrapModuleLoad",
    "at Function.executeUserEntryPoint",
    "at node:internal/",
    "conpty_console_list_agent.js",
    "Node.js v",
]


# NOTE(review): `re` does not appear to be referenced anywhere in this
# module's visible code — confirm before removing this import.
import re
|
|
51
|
+
def _strip_cli_noise(text: str) -> str:
    """Remove known Gemini CLI noise lines from *text* and return the rest."""
    kept: list[str] = []
    for raw_line in text.split("\n"):
        candidate = raw_line.strip()
        # Drop lines matching a known noise prefix or a stack-trace pattern.
        prefix_hit = any(candidate.startswith(prefix) for prefix in _NOISE_PREFIXES)
        pattern_hit = any(pattern in candidate for pattern in _NOISE_PATTERNS)
        if not (prefix_hit or pattern_hit):
            kept.append(raw_line)
    return "\n".join(kept).strip()
|
|
64
|
+
|
|
65
|
+
def _parse_json_output(raw: str) -> tuple[str, int, int]:
|
|
66
|
+
"""
|
|
67
|
+
Parse Gemini CLI ``--output-format json`` output.
|
|
68
|
+
|
|
69
|
+
Handles multiple output formats:
|
|
70
|
+
1. Single JSON object: {"session_id": ..., "response": "...", "stats": {...}}
|
|
71
|
+
2. NDJSON (one JSON object per line) with candidates/parts
|
|
72
|
+
3. Mixed output with JSON embedded in noise lines
|
|
73
|
+
|
|
74
|
+
Returns (response_text, input_tokens, output_tokens).
|
|
75
|
+
"""
|
|
76
|
+
text_parts: list[str] = []
|
|
77
|
+
input_tokens = 0
|
|
78
|
+
output_tokens = 0
|
|
79
|
+
|
|
80
|
+
# --- Strategy 1: Try parsing the entire output as a single JSON object ---
|
|
81
|
+
stripped = raw.strip()
|
|
82
|
+
try:
|
|
83
|
+
obj = json.loads(stripped)
|
|
84
|
+
if isinstance(obj, dict):
|
|
85
|
+
# New format: {"session_id": ..., "response": "text", "stats": {...}}
|
|
86
|
+
if "response" in obj and isinstance(obj["response"], str):
|
|
87
|
+
text_parts.append(obj["response"])
|
|
88
|
+
|
|
89
|
+
# Extract tokens from stats.models
|
|
90
|
+
stats = obj.get("stats", {})
|
|
91
|
+
for model_info in stats.get("models", {}).values():
|
|
92
|
+
tokens = model_info.get("tokens", {})
|
|
93
|
+
input_tokens += tokens.get("input", 0) or tokens.get("prompt", 0)
|
|
94
|
+
output_tokens += tokens.get("candidates", 0)
|
|
95
|
+
|
|
96
|
+
return "".join(text_parts), input_tokens, output_tokens
|
|
97
|
+
|
|
98
|
+
# Older format: top-level "result" key
|
|
99
|
+
if "result" in obj and isinstance(obj["result"], str):
|
|
100
|
+
text_parts.append(obj["result"])
|
|
101
|
+
return "".join(text_parts), input_tokens, output_tokens
|
|
102
|
+
except (json.JSONDecodeError, ValueError):
|
|
103
|
+
pass
|
|
104
|
+
|
|
105
|
+
# --- Strategy 2: NDJSON (one JSON object per line) ---
|
|
106
|
+
for line in raw.splitlines():
|
|
107
|
+
line = line.strip()
|
|
108
|
+
if not line:
|
|
109
|
+
continue
|
|
110
|
+
try:
|
|
111
|
+
obj = json.loads(line)
|
|
112
|
+
except json.JSONDecodeError:
|
|
113
|
+
continue
|
|
114
|
+
|
|
115
|
+
if not isinstance(obj, dict):
|
|
116
|
+
continue
|
|
117
|
+
|
|
118
|
+
# Extract text from candidates/parts (Gemini API format)
|
|
119
|
+
for candidate in obj.get("candidates", []):
|
|
120
|
+
content = candidate.get("content", {})
|
|
121
|
+
for part in content.get("parts", []):
|
|
122
|
+
if "text" in part:
|
|
123
|
+
text_parts.append(part["text"])
|
|
124
|
+
|
|
125
|
+
# Check top-level text keys
|
|
126
|
+
if "response" in obj and isinstance(obj["response"], str):
|
|
127
|
+
text_parts.append(obj["response"])
|
|
128
|
+
elif "result" in obj and isinstance(obj["result"], str):
|
|
129
|
+
text_parts.append(obj["result"])
|
|
130
|
+
|
|
131
|
+
# Extract token counts
|
|
132
|
+
usage = obj.get("usageMetadata", {})
|
|
133
|
+
if usage:
|
|
134
|
+
input_tokens += usage.get("promptTokenCount", 0)
|
|
135
|
+
output_tokens += usage.get("candidatesTokenCount", 0)
|
|
136
|
+
|
|
137
|
+
model_usage = obj.get("modelUsage", {})
|
|
138
|
+
if model_usage:
|
|
139
|
+
input_tokens += model_usage.get("inputTokens", 0)
|
|
140
|
+
output_tokens += model_usage.get("outputTokens", 0)
|
|
141
|
+
|
|
142
|
+
# Stats block
|
|
143
|
+
stats = obj.get("stats", {})
|
|
144
|
+
for model_info in stats.get("models", {}).values():
|
|
145
|
+
tokens = model_info.get("tokens", {})
|
|
146
|
+
input_tokens += tokens.get("input", 0) or tokens.get("prompt", 0)
|
|
147
|
+
output_tokens += tokens.get("candidates", 0)
|
|
148
|
+
|
|
149
|
+
response_text = "".join(text_parts) if text_parts else raw
|
|
150
|
+
return response_text, input_tokens, output_tokens
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
def invoke_standalone(
    prompt: str,
    *,
    allowed_tools: list[str] | None = None,
    timeout: int | None = None,
    cwd: str | None = None,
    debug: bool = False,
    no_mcp: bool = False,
) -> WorkerResult:
    """
    Launch a fresh Gemini CLI process with a one-shot prompt.

    The prompt is passed via stdin so it can be arbitrarily long, and
    ``--output-format json`` is requested for structured token usage.
    NOTE(review): an earlier version of this docstring claimed ``-p``
    headless mode is used, but no ``-p`` flag is ever added to the
    command — confirm whether stdin alone triggers headless behavior.

    Args:
        prompt: Full prompt text, fed to the CLI on stdin.
        allowed_tools: When truthy, runs with ``--yolo`` (auto-approve
            tools) and blocks MCP startup. NOTE(review): the individual
            tool names are never forwarded to the CLI — the list acts
            only as an on/off switch; confirm that is intentional.
        timeout: Seconds before the subprocess is killed; defaults to
            ``settings.worker_timeout_secs``.
        cwd: Working directory for the CLI process.
        debug: Pass ``--debug`` to the CLI.
        no_mcp: If True, blocks MCP server initialization via
            --allowed-mcp-server-names (faster startup).

    Returns:
        WorkerResult carrying noise-stripped stdout, token counts, and a
        transcript artifact reference; on timeout, exit_code is -1 and
        no transcript is stored.
    """
    timeout = timeout or settings.worker_timeout_secs
    cmd = [
        settings.gemini_cli_bin,
        "--output-format", "json",
    ]

    if allowed_tools:
        # Auto-approve all tool usage in headless worker mode
        cmd += ["--yolo"]

    # Block MCP if requested or if using allowed_tools (workers don't need MCP)
    if no_mcp or allowed_tools:
        # "_no_mcp_" matches no configured server name, so none are started.
        cmd += ["--allowed-mcp-server-names", "_no_mcp_"]

    if debug:
        cmd += ["--debug"]

    t0 = time.monotonic()
    try:
        result = subprocess.run(
            cmd,
            input=prompt,
            capture_output=True,
            text=True,
            timeout=timeout,
            cwd=cwd,
            shell=_SHELL,  # Windows .cmd wrappers need the shell
            encoding="utf-8",
            env={**os.environ, "GEMINI_CLI_HEADLESS": "1"},
        )
        elapsed = time.monotonic() - t0

        # Parse JSON output for text + token counts
        response_text, in_tok, out_tok = _parse_json_output(result.stdout)

        # Store a truncated transcript out of band for later inspection
        transcript_ref = save_artifact(
            "transcripts",
            "gemini_standalone",
            {
                "prompt_preview": prompt[:500],
                "stdout": result.stdout[-2000:],
                "stderr": result.stderr[-500:],
                "exit_code": result.returncode,
                "input_tokens": in_tok,
                "output_tokens": out_tok,
            },
        )

        # Strip CLI noise from the parsed response
        clean_stdout = _strip_cli_noise(response_text)

        return WorkerResult(
            worker_type="gemini_standalone",
            exit_code=result.returncode,
            stdout=clean_stdout,
            stderr=result.stderr,
            transcript_ref=str(transcript_ref),
            elapsed_secs=elapsed,
            input_tokens=in_tok,
            output_tokens=out_tok,
        )

    except subprocess.TimeoutExpired:
        elapsed = time.monotonic() - t0
        return WorkerResult(
            worker_type="gemini_standalone",
            exit_code=-1,
            stderr=f"Timed out after {timeout}s",
            elapsed_secs=elapsed,
        )
+
|
|
244
|
+
|
|
245
|
+
def invoke_subagent(
    agent_name: str,
    context: str,
    *,
    agents_dir: Path | None = None,
    timeout: int | None = None,
    cwd: str | None = None,
    no_mcp: bool = True,
) -> WorkerResult:
    """
    Invoke a Gemini CLI subagent by name.

    The agent Markdown definition is expected at
    ``<agents_dir>/<agent_name>.md``.

    NOTE(review): ``agents_dir`` is accepted but never used or forwarded
    anywhere in this function — presumably the CLI resolves the
    ``@<agent_name>`` mention from its own configured agents directory;
    confirm the parameter is intentional or wire it through.

    Args:
        agent_name: Subagent name, referenced in the prompt as ``@<name>``.
        context: Free-form text appended after the agent mention.
        timeout: Worker timeout; defaults to ``settings.worker_timeout_secs``.
        cwd: Working directory for the CLI process.
        no_mcp: Blocks MCP server init by default for faster startup.

    Returns:
        The WorkerResult from the underlying ``invoke_standalone`` call.
    """
    timeout = timeout or settings.worker_timeout_secs

    # Build the prompt that tells Gemini to use the subagent
    prompt = f"@{agent_name} {context}"

    return invoke_standalone(
        prompt,
        timeout=timeout,
        cwd=cwd,
        no_mcp=no_mcp,
    )
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
"""
|
|
2
|
+
luau-lsp adapter — run standalone analysis for symbols, diagnostics,
|
|
3
|
+
and references using ``luau-lsp analyze``.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
import subprocess
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import Any
|
|
12
|
+
|
|
13
|
+
from app.config import settings
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def run_analyze(
    repo_root: Path,
    sourcemap_path: Path | None = None,
    *,
    target_files: list[Path] | None = None,
) -> dict[str, Any]:
    """
    Run ``luau-lsp analyze`` and parse its diagnostic output.

    Args:
        repo_root: Project root; used as the subprocess cwd and as the
            default analysis target when *target_files* is not given.
        sourcemap_path: Optional Rojo sourcemap, passed via ``--sourcemap``
            only when the file actually exists.
        target_files: Specific files to analyze instead of the whole repo.

    Returns::

        {
            "diagnostics": [ { "file": ..., "severity": ..., "message": ..., ... } ],
            "raw_stderr": "...",
            "exit_code": <int>,
        }
    """
    import re

    cmd = [settings.luau_lsp_bin, "analyze"]

    if sourcemap_path and sourcemap_path.exists():
        cmd += ["--sourcemap", str(sourcemap_path)]

    if target_files:
        cmd += [str(f) for f in target_files]
    else:
        cmd.append(str(repo_root))

    result = subprocess.run(cmd, cwd=str(repo_root), capture_output=True, text=True)

    diagnostics: list[dict[str, Any]] = []

    # Strategy 1: stdout may carry JSON diagnostics (one object per line,
    # e.g. when a JSON formatter is configured).
    for line in result.stdout.splitlines():
        line = line.strip()
        if not line:
            continue
        try:
            parsed = json.loads(line)
        except json.JSONDecodeError:
            continue
        # Only keep dict-shaped diagnostics: a bare scalar line such as
        # "0" parses as valid JSON and would otherwise be appended as a
        # bogus entry that breaks downstream key access.
        if isinstance(parsed, dict):
            diagnostics.append(parsed)

    # Strategy 2: stderr text format: file(line,col-col): severity: message
    diag_pattern = re.compile(
        r"^(.+?)\((\d+),(\d+)(?:-\d+)?\):\s*(Error|Warning|Information)\s*[:\-]\s*(.+)$",
        re.IGNORECASE,
    )
    for line in result.stderr.splitlines():
        m = diag_pattern.match(line.strip())
        if m:
            diagnostics.append({
                "file": m.group(1).strip(),
                "line": int(m.group(2)),
                "col": int(m.group(3)),
                "severity": m.group(4).capitalize(),
                "message": m.group(5).strip(),
            })

    return {
        "diagnostics": diagnostics,
        "raw_stderr": result.stderr,
        "exit_code": result.returncode,
    }
79
|
+
|
|
80
|
+
|
|
81
|
+
def check_patch(
    repo_root: Path,
    patched_file: Path,
    sourcemap_path: Path | None = None,
) -> list[dict[str, Any]]:
    """
    Run analysis on a single patched file and return its diagnostics.

    NOTE(review): an earlier docstring said "only *new* diagnostics", but
    no baseline comparison happens here — every diagnostic reported for
    *patched_file* is returned. Confirm whether pre-patch filtering was
    intended.
    """
    output = run_analyze(
        repo_root,
        sourcemap_path,
        target_files=[patched_file],
    )
    return output["diagnostics"]
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Canonical MCP capability dispatcher.
|
|
3
|
+
|
|
4
|
+
Implements the routing table from the design doc:
|
|
5
|
+
primary → Roblox_Studio tools
|
|
6
|
+
fallback → robloxstudio-mcp tools
|
|
7
|
+
|
|
8
|
+
Raw MCP output is stored in the artifact store.
|
|
9
|
+
Only compact ValidationArtifact-shaped dicts are returned.
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
from __future__ import annotations
|
|
13
|
+
|
|
14
|
+
import json
|
|
15
|
+
from typing import Any, Callable
|
|
16
|
+
|
|
17
|
+
from app.storage.artifacts import save_artifact
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
# ── Capability Routing Table ──────────────────────────────────────────────
|
|
21
|
+
|
|
22
|
+
# Each entry: canonical_name → (primary_tool, fallback_tool)
|
|
23
|
+
# Tool names match the MCP tool names from the design doc.
|
|
24
|
+
CAPABILITY_MAP: dict[str, tuple[str, str | None]] = {
|
|
25
|
+
"list_studios": ("Roblox_Studio.list_roblox_studios", None),
|
|
26
|
+
"set_active_studio": ("Roblox_Studio.set_active_studio", None),
|
|
27
|
+
"search_tree": (
|
|
28
|
+
"Roblox_Studio.search_game_tree",
|
|
29
|
+
"robloxstudio-mcp.get_project_structure",
|
|
30
|
+
),
|
|
31
|
+
"inspect_instance": (
|
|
32
|
+
"Roblox_Studio.inspect_instance",
|
|
33
|
+
"robloxstudio-mcp.get_instance_properties",
|
|
34
|
+
),
|
|
35
|
+
"read_script": (
|
|
36
|
+
"Roblox_Studio.script_read",
|
|
37
|
+
"robloxstudio-mcp.get_script_source",
|
|
38
|
+
),
|
|
39
|
+
"search_scripts": (
|
|
40
|
+
"Roblox_Studio.script_grep",
|
|
41
|
+
"robloxstudio-mcp.grep_scripts",
|
|
42
|
+
),
|
|
43
|
+
"playtest_control": (
|
|
44
|
+
"Roblox_Studio.start_stop_play",
|
|
45
|
+
"robloxstudio-mcp.start_playtest",
|
|
46
|
+
),
|
|
47
|
+
"playtest_logs": (
|
|
48
|
+
"Roblox_Studio.get_console_output",
|
|
49
|
+
"robloxstudio-mcp.get_playtest_output",
|
|
50
|
+
),
|
|
51
|
+
"edit_script_bounded": (
|
|
52
|
+
"Roblox_Studio.multi_edit",
|
|
53
|
+
"robloxstudio-mcp.edit_script_lines",
|
|
54
|
+
),
|
|
55
|
+
"execute_validation_luau": (
|
|
56
|
+
"Roblox_Studio.execute_luau",
|
|
57
|
+
None, # robloxstudio-mcp.execute_luau only if explicitly allowed
|
|
58
|
+
),
|
|
59
|
+
"get_class_metadata": (
|
|
60
|
+
"robloxstudio-mcp.get_class_info",
|
|
61
|
+
None, # could fall back to docs MCP
|
|
62
|
+
),
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
# ── Dispatcher ────────────────────────────────────────────────────────────
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
class MCPDispatcher:
|
|
70
|
+
"""
|
|
71
|
+
Routes canonical capability calls to the correct MCP tool,
|
|
72
|
+
with automatic fallback and raw-output isolation.
|
|
73
|
+
|
|
74
|
+
In the MVP, the actual MCP calls are delegated to callback functions
|
|
75
|
+
that the orchestrator registers. This keeps the dispatcher decoupled
|
|
76
|
+
from the transport (subprocess, HTTP, direct MCP client, etc.).
|
|
77
|
+
"""
|
|
78
|
+
|
|
79
|
+
def __init__(self) -> None:
|
|
80
|
+
# tool_name → callable that actually executes the MCP tool
|
|
81
|
+
self._executors: dict[str, Callable[..., Any]] = {}
|
|
82
|
+
|
|
83
|
+
def register_executor(self, tool_name: str, fn: Callable[..., Any]) -> None:
|
|
84
|
+
"""Register a function that can execute the given raw MCP tool."""
|
|
85
|
+
self._executors[tool_name] = fn
|
|
86
|
+
|
|
87
|
+
def call(
|
|
88
|
+
self,
|
|
89
|
+
capability: str,
|
|
90
|
+
params: dict[str, Any] | None = None,
|
|
91
|
+
*,
|
|
92
|
+
allow_fallback: bool = True,
|
|
93
|
+
) -> dict[str, Any]:
|
|
94
|
+
"""
|
|
95
|
+
Invoke a canonical capability.
|
|
96
|
+
|
|
97
|
+
1. Try the primary tool.
|
|
98
|
+
2. If it fails and a fallback exists, try the fallback.
|
|
99
|
+
3. Store raw output as an artifact.
|
|
100
|
+
4. Return a compact result dict.
|
|
101
|
+
"""
|
|
102
|
+
if capability not in CAPABILITY_MAP:
|
|
103
|
+
return {
|
|
104
|
+
"status": "error",
|
|
105
|
+
"error": f"Unknown capability: {capability}",
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
primary, fallback = CAPABILITY_MAP[capability]
|
|
109
|
+
params = params or {}
|
|
110
|
+
|
|
111
|
+
# Try primary
|
|
112
|
+
result = self._try_tool(primary, params)
|
|
113
|
+
if result is not None:
|
|
114
|
+
self._store_raw(capability, primary, result)
|
|
115
|
+
return self._compact(capability, result)
|
|
116
|
+
|
|
117
|
+
# Try fallback
|
|
118
|
+
if allow_fallback and fallback:
|
|
119
|
+
result = self._try_tool(fallback, params)
|
|
120
|
+
if result is not None:
|
|
121
|
+
self._store_raw(capability, fallback, result)
|
|
122
|
+
return self._compact(capability, result)
|
|
123
|
+
|
|
124
|
+
return {
|
|
125
|
+
"status": "fail",
|
|
126
|
+
"error": f"No executor available for {capability}",
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
def _try_tool(self, tool_name: str, params: dict[str, Any]) -> Any | None:
|
|
130
|
+
fn = self._executors.get(tool_name)
|
|
131
|
+
if fn is None:
|
|
132
|
+
return None
|
|
133
|
+
try:
|
|
134
|
+
return fn(**params)
|
|
135
|
+
except Exception as exc:
|
|
136
|
+
return None
|
|
137
|
+
|
|
138
|
+
def _store_raw(self, capability: str, tool_name: str, raw: Any) -> str:
|
|
139
|
+
"""Persist raw MCP output out of band."""
|
|
140
|
+
ref = save_artifact(
|
|
141
|
+
"mcp_raw",
|
|
142
|
+
f"{capability}__{tool_name.replace('.', '_')}",
|
|
143
|
+
json.dumps(raw, default=str) if not isinstance(raw, str) else raw,
|
|
144
|
+
)
|
|
145
|
+
return str(ref)
|
|
146
|
+
|
|
147
|
+
@staticmethod
|
|
148
|
+
def _compact(capability: str, raw: Any) -> dict[str, Any]:
|
|
149
|
+
"""
|
|
150
|
+
Reduce raw MCP output to a compact result.
|
|
151
|
+
|
|
152
|
+
The exact transformation is capability-specific;
|
|
153
|
+
this default just wraps the raw data.
|
|
154
|
+
"""
|
|
155
|
+
return {
|
|
156
|
+
"status": "pass",
|
|
157
|
+
"capability": capability,
|
|
158
|
+
"data": raw,
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
# Singleton
|
|
163
|
+
dispatcher = MCPDispatcher()
|