opencode-bridge 0.1.4__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- opencode_bridge/server.py +950 -39
- {opencode_bridge-0.1.4.dist-info → opencode_bridge-0.2.0.dist-info}/METADATA +1 -1
- opencode_bridge-0.2.0.dist-info/RECORD +7 -0
- opencode_bridge-0.1.4.dist-info/RECORD +0 -7
- {opencode_bridge-0.1.4.dist-info → opencode_bridge-0.2.0.dist-info}/WHEEL +0 -0
- {opencode_bridge-0.1.4.dist-info → opencode_bridge-0.2.0.dist-info}/entry_points.txt +0 -0
opencode_bridge/server.py
CHANGED
|
@@ -30,6 +30,798 @@ from mcp.server.stdio import stdio_server
|
|
|
30
30
|
from mcp.types import Tool, TextContent, ServerCapabilities, ToolsCapability
|
|
31
31
|
|
|
32
32
|
|
|
33
|
+
# File size thresholds (in lines). get_file_info() buckets each file into a
# category with these, and the prompt builders pick a review strategy from them.
SMALL_FILE = 500  # lines
MEDIUM_FILE = 1500  # lines
LARGE_FILE = 5000  # lines

# Language detection by extension: maps a lowercased file suffix to a display
# name. get_file_info() falls back to the uppercased bare extension (or
# "Unknown") for suffixes not listed here.
LANG_MAP = {
    ".py": "Python", ".js": "JavaScript", ".ts": "TypeScript", ".tsx": "TypeScript/React",
    ".jsx": "JavaScript/React", ".go": "Go", ".rs": "Rust", ".java": "Java",
    ".c": "C", ".cpp": "C++", ".h": "C/C++ Header", ".hpp": "C++ Header",
    ".cs": "C#", ".rb": "Ruby", ".php": "PHP", ".swift": "Swift",
    ".kt": "Kotlin", ".scala": "Scala", ".sh": "Shell", ".bash": "Bash",
    ".sql": "SQL", ".html": "HTML", ".css": "CSS", ".scss": "SCSS",
    ".yaml": "YAML", ".yml": "YAML", ".json": "JSON", ".toml": "TOML",
    ".xml": "XML", ".md": "Markdown", ".r": "R", ".lua": "Lua",
    ".zig": "Zig", ".nim": "Nim", ".ex": "Elixir", ".erl": "Erlang",
    ".clj": "Clojure", ".hs": "Haskell", ".ml": "OCaml", ".vue": "Vue",
    ".svelte": "Svelte", ".dart": "Dart", ".proto": "Protocol Buffers",
}

# Process-wide cache of get_file_info() results, keyed by resolved absolute path.
_file_info_cache: dict[str, dict] = {}

MAX_READ_SIZE = 10 * 1024 * 1024  # 10MB - above this, estimate lines from size
def get_file_info(filepath: str) -> dict:
    """Get metadata about a file: size, lines, language, etc.

    Results are cached per resolved path. Each cache entry carries a private
    ``"_stamp"`` of ``(st_mtime_ns, st_size)``; when the file changes on disk
    the entry is recomputed, so callers never see a stale line count (the
    previous implementation cached forever).

    Returns:
        A dict with path/name/size_bytes/size_human/lines/language/ext/category
        keys (plus the private ``"_stamp"``); ``{}`` for non-files; a minimal
        ``{"path", "name"}`` dict if the file cannot be stat'ed or read.
    """
    filepath = str(Path(filepath).resolve())
    p = Path(filepath)
    if not p.is_file():
        return {}
    try:
        stat = p.stat()
        stamp = (stat.st_mtime_ns, stat.st_size)

        # Serve from cache only while the file is unchanged since indexing.
        cached = _file_info_cache.get(filepath)
        if cached is not None and cached.get("_stamp") == stamp:
            return cached

        ext = p.suffix.lower()

        # Count lines efficiently: stream for large files, estimate for huge ones
        if stat.st_size > MAX_READ_SIZE:
            # Estimate: ~40 bytes per line for code files
            line_count = stat.st_size // 40
        else:
            # Stream line counting without loading full content into memory.
            # Pin utf-8 (with replacement) so the count doesn't depend on the
            # platform's locale encoding.
            line_count = 0
            with open(p, "r", encoding="utf-8", errors="replace") as f:
                for _ in f:
                    line_count += 1

        result = {
            "path": filepath,
            "name": p.name,
            "size_bytes": stat.st_size,
            "size_human": _human_size(stat.st_size),
            "lines": line_count,
            "language": LANG_MAP.get(ext, ext.lstrip(".").upper() if ext else "Unknown"),
            "ext": ext,
            "category": (
                "small" if line_count <= SMALL_FILE
                else "medium" if line_count <= MEDIUM_FILE
                else "large" if line_count <= LARGE_FILE
                else "very large"
            ),
            "_stamp": stamp,  # private freshness marker; not for display
        }
        _file_info_cache[filepath] = result
        return result
    except OSError:
        # Best-effort: the file vanished or became unreadable mid-read.
        return {"path": filepath, "name": p.name}
def _human_size(size_bytes: int) -> str:
|
|
105
|
+
"""Convert bytes to human-readable size."""
|
|
106
|
+
for unit in ("B", "KB", "MB", "GB"):
|
|
107
|
+
if size_bytes < 1024:
|
|
108
|
+
return f"{size_bytes:.0f}{unit}" if unit == "B" else f"{size_bytes:.1f}{unit}"
|
|
109
|
+
size_bytes /= 1024
|
|
110
|
+
return f"{size_bytes:.1f}TB"
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
def build_file_context(file_paths: list[str]) -> str:
    """Render a markdown block summarizing the attached files."""
    if not file_paths:
        return ""

    collected = []
    for path in file_paths:
        info = get_file_info(path)
        if info:
            collected.append(info)
    if not collected:
        return ""

    rendered = ["## Attached Files\n"]
    # Each bullet shows whichever metadata fields are present for the file.
    detail_specs = (
        ("language", "{}"),
        ("lines", "{} lines"),
        ("size_human", "{}"),
        ("category", "{}"),
    )
    for info in collected:
        bits = [tpl.format(info[key]) for key, tpl in detail_specs if key in info]
        entry = f"- **{info.get('name', '?')}**"
        if bits:
            entry = f"{entry} ({', '.join(bits)})"
        rendered.append(entry)

    total = sum(item.get("lines", 0) for item in collected)
    if total > LARGE_FILE:
        rendered.append(f"\n> Total: {total} lines across {len(collected)} file(s) — this is a large review.")
        rendered.append("> Focus on the most critical issues first. Use a structured, section-by-section approach.")

    return "\n".join(rendered)
def build_review_prompt(file_infos: list[dict], focus: str) -> str:
    """Build an adaptive review prompt based on file size and type."""
    total = sum(info.get("lines", 0) for info in file_infos)

    sections = [f"Please review the attached code, focusing on: **{focus}**\n"]

    if file_infos:
        sections.append("### Files to review:")
        sections.extend(
            f"- {info.get('name', '?')} ({info.get('language', '?')}, {info.get('lines', '?')} lines)"
            for info in file_infos
        )
        sections.append("")

    # Pick a strategy block sized to the total amount of code under review.
    if total > LARGE_FILE:
        strategy = """### Review Strategy (Large File)
This is a large codebase review. Use this structured approach:

1. **Architecture Overview**: Describe the overall structure, main components, and data flow
2. **Critical Issues**: Security vulnerabilities, bugs, race conditions, memory leaks
3. **Design Concerns**: Architectural problems, tight coupling, missing abstractions
4. **Code Quality**: Naming, duplication, complexity hotspots (focus on the worst areas)
5. **Key Recommendations**: Top 5 most impactful improvements, prioritized

Do NOT try to comment on every line. Focus on patterns and the most impactful findings."""
    elif total > MEDIUM_FILE:
        strategy = """### Review Strategy (Medium File)
Provide a structured review:

1. **Summary**: What does this code do? Overall assessment
2. **Issues Found**: Bugs, security concerns, edge cases, error handling gaps
3. **Design Feedback**: Structure, patterns, abstractions
4. **Specific Suggestions**: Concrete improvements with code examples where helpful"""
    else:
        strategy = """### Review Guidelines
Provide a thorough review covering:
- Correctness and edge cases
- Error handling
- Code clarity and naming
- Any security concerns
- Concrete suggestions for improvement"""
    sections.append(strategy)

    return "\n".join(sections)
def build_message_prompt(message: str, file_paths: list[str]) -> str:
    """Build a smart prompt that includes file context and instructions.

    NOTE: *message* is not inlined here — presumably the text travels in the
    "opencode_msg_" attachment referenced below; confirm against the caller.
    """
    segments = []

    # Bridge-generated message carriers are excluded from the "user files" set.
    user_files = []
    for path in file_paths:
        if not Path(path).name.startswith("opencode_msg_"):
            user_files.append(path)

    if user_files:
        context_block = build_file_context(user_files)
        if context_block:
            segments += [context_block, ""]

        combined = 0
        for path in user_files:
            combined += get_file_info(path).get("lines", 0)
        if combined > LARGE_FILE:
            segments += [
                "**Note:** Large file(s) attached. Read through the full content carefully before responding. "
                "If asked to analyze or review, use a structured section-by-section approach.",
                "",
            ]

    segments.append("## Request")
    segments.append("Respond to the user's request in the attached message file. "
                    "Read all attached files completely before responding.")

    return "\n".join(segments)
@dataclass
class DomainProfile:
    """Defines a domain of expertise with persona, frameworks, and approach.

    Profiles are matched against a user message by detect_domain(), which
    scores keywords +1, phrases +2, and file indicators +1.5 each.
    """
    id: str  # registry key (e.g. "debugging"); used as DOMAIN_REGISTRY key
    name: str  # human-readable domain title
    keywords: list[str]  # single words/stems matched as substrings of the lowercased message
    phrases: list[str]  # multi-word substrings; weighted higher because they are more specific
    file_indicators: list[str]  # file extensions or name patterns
    expert_persona: str  # "you are ..."-style persona description for this domain
    thinking_frameworks: list[str]  # named frameworks the expert leans on
    key_questions: list[str]  # questions the expert asks before answering
    structured_approach: list[str]  # ordered steps for tackling a task in this domain
    agent_hint: str  # suggested opencode agent
# Global registry of all known domains, keyed by DomainProfile.id.
DOMAIN_REGISTRY: dict[str, DomainProfile] = {}


def _register(*profiles: DomainProfile):
    """Add each profile to the registry; a repeated id overwrites the earlier entry."""
    DOMAIN_REGISTRY.update({profile.id: profile for profile in profiles})
# Built-in domain profiles. Registration order doubles as a tie-breaker:
# detect_domain() sorts scores with a stable sort, so among equal scores the
# earlier-registered domain ranks first. "general" carries no match terms and
# is only used as the fallback when nothing else scores.
_register(
    DomainProfile(
        id="architecture",
        name="Architecture & System Design",
        keywords=["architecture", "microservice", "monolith", "scalab", "distributed",
                  "component", "module", "layer", "decouple", "coupling", "cohesion",
                  "event", "queue", "broker", "gateway", "proxy", "load balancer"],
        phrases=["system design", "event driven", "event sourcing", "service mesh",
                 "domain driven", "hexagonal architecture", "clean architecture",
                 "micro frontend", "message bus", "data pipeline", "cqrs"],
        file_indicators=[".proto", ".yaml", ".yml", ".tf", ".hcl"],
        expert_persona=(
            "a senior distributed systems architect who has designed systems serving "
            "millions of users. You think in terms of components, boundaries, data flow, "
            "and failure modes. You've seen both over-engineered and under-engineered "
            "systems and know when each approach is appropriate."
        ),
        thinking_frameworks=["C4 model (context, containers, components, code)",
                             "CAP theorem", "DDD (bounded contexts, aggregates)",
                             "CQRS/Event Sourcing trade-offs",
                             "Twelve-Factor App principles"],
        key_questions=["What are the key quality attributes (latency, throughput, availability)?",
                       "Where are the domain boundaries?",
                       "What data consistency model fits here?",
                       "What happens when a component fails?",
                       "How will this evolve in 6-12 months?"],
        structured_approach=["Clarify requirements and constraints",
                             "Identify components and their responsibilities",
                             "Define interfaces and data flow",
                             "Analyze trade-offs and failure modes",
                             "Recommend with rationale"],
        agent_hint="plan",
    ),
    DomainProfile(
        id="debugging",
        name="Debugging & Troubleshooting",
        keywords=["bug", "error", "crash", "fail", "exception", "traceback",
                  "stacktrace", "debug", "breakpoint", "segfault", "panic",
                  "hang", "freeze", "corrupt", "unexpected", "wrong"],
        phrases=["root cause", "stack trace", "doesn't work", "stopped working",
                 "race condition", "deadlock", "memory leak", "null pointer",
                 "off by one", "regression", "flaky test", "intermittent failure"],
        file_indicators=[".log", ".dump", ".core"],
        expert_persona=(
            "a seasoned debugger who has tracked down the most elusive bugs — race "
            "conditions, heisenbugs, memory corruption, off-by-one errors hidden for "
            "years. You are methodical, hypothesis-driven, and never jump to conclusions."
        ),
        thinking_frameworks=["Five Whys (root cause analysis)",
                             "Scientific method (hypothesize, test, refine)",
                             "Binary search / bisection for isolating changes",
                             "Rubber duck debugging"],
        key_questions=["When did it start happening? What changed?",
                       "Is it reproducible? Under what conditions?",
                       "What are the exact symptoms vs. expected behavior?",
                       "Have we ruled out environment differences?",
                       "What is the minimal reproduction case?"],
        structured_approach=["Reproduce and isolate the issue",
                             "Form hypotheses ranked by likelihood",
                             "Gather evidence: logs, traces, state inspection",
                             "Narrow down via elimination",
                             "Fix, verify, and prevent regression"],
        agent_hint="build",
    ),
    DomainProfile(
        id="performance",
        name="Performance & Optimization",
        keywords=["performance", "optimize", "bottleneck", "latency", "throughput",
                  "cache", "profil", "benchmark", "slow", "fast", "speed",
                  "memory", "cpu", "io", "bandwidth", "concurren"],
        phrases=["cache miss", "hot path", "time complexity", "space complexity",
                 "p99 latency", "tail latency", "garbage collection", "connection pool",
                 "query plan", "flame graph", "load test"],
        file_indicators=[".perf", ".prof", ".bench"],
        expert_persona=(
            "a performance engineer who obsesses over microseconds and memory allocations. "
            "You profile before optimizing, know that premature optimization is the root of "
            "all evil, and always ask 'what does the data say?' before recommending changes."
        ),
        thinking_frameworks=["Amdahl's Law", "Little's Law",
                             "USE method (Utilization, Saturation, Errors)",
                             "Roofline model", "Big-O analysis with practical constants"],
        key_questions=["What is the actual bottleneck (CPU, memory, I/O, network)?",
                       "Do we have profiling data or benchmarks?",
                       "What's the target performance? Current baseline?",
                       "What are the hot paths?",
                       "What trade-offs are acceptable (memory vs speed, complexity vs perf)?"],
        structured_approach=["Measure current performance with profiling/benchmarks",
                             "Identify the bottleneck — do not guess",
                             "Propose targeted optimizations",
                             "Estimate impact and trade-offs",
                             "Measure again after changes"],
        agent_hint="build",
    ),
    DomainProfile(
        id="security",
        name="Security & Threat Modeling",
        keywords=["security", "vulnerab", "auth", "token", "encrypt", "hash",
                  "ssl", "tls", "cors", "csrf", "xss", "injection", "sanitiz",
                  "permission", "privilege", "secret", "credential"],
        phrases=["sql injection", "cross site", "threat model", "attack surface",
                 "zero trust", "defense in depth", "least privilege",
                 "owasp top 10", "security audit", "penetration test",
                 "access control", "input validation"],
        file_indicators=[".pem", ".key", ".cert", ".env"],
        expert_persona=(
            "a senior application security engineer who thinks like an attacker but "
            "builds like a defender. You know the OWASP Top 10 by heart, understand "
            "cryptographic primitives, and always consider the full threat model."
        ),
        thinking_frameworks=["STRIDE threat modeling",
                             "OWASP Top 10",
                             "Defense in depth",
                             "Zero trust architecture",
                             "Principle of least privilege"],
        key_questions=["What is the threat model? Who are the adversaries?",
                       "What data is sensitive and how is it protected?",
                       "Where are the trust boundaries?",
                       "What authentication and authorization model is in use?",
                       "Are there known CVEs in dependencies?"],
        structured_approach=["Identify assets and threat actors",
                             "Map the attack surface",
                             "Enumerate threats (STRIDE)",
                             "Assess risk (likelihood x impact)",
                             "Recommend mitigations prioritized by risk"],
        agent_hint="plan",
    ),
    DomainProfile(
        id="testing",
        name="Testing & Quality Assurance",
        keywords=["test", "assert", "mock", "stub", "fixture", "coverage",
                  "spec", "suite", "expect", "verify", "tdd", "bdd"],
        phrases=["unit test", "integration test", "end to end", "test coverage",
                 "test driven", "edge case", "boundary condition", "test pyramid",
                 "property based", "mutation testing", "snapshot test",
                 "regression test"],
        file_indicators=["_test.py", "_test.go", ".test.js", ".test.ts", ".spec.js",
                         ".spec.ts", "_spec.rb"],
        expert_persona=(
            "a testing specialist who believes tests are living documentation. You "
            "understand the test pyramid, know when to mock and when not to, and "
            "write tests that catch real bugs without being brittle."
        ),
        thinking_frameworks=["Test pyramid (unit → integration → e2e)",
                             "FIRST principles (Fast, Independent, Repeatable, Self-validating, Timely)",
                             "Arrange-Act-Assert pattern",
                             "Equivalence partitioning & boundary value analysis"],
        key_questions=["What behavior are we verifying?",
                       "What are the edge cases and boundary conditions?",
                       "Is this a unit, integration, or e2e concern?",
                       "What should we mock vs. use real implementations?",
                       "How will we know if this test is catching real bugs?"],
        structured_approach=["Identify what behavior to test",
                             "Determine test level (unit/integration/e2e)",
                             "Design test cases covering happy path and edge cases",
                             "Write clear, maintainable assertions",
                             "Review for brittleness and false confidence"],
        agent_hint="build",
    ),
    DomainProfile(
        id="devops",
        name="DevOps & Infrastructure",
        keywords=["deploy", "pipeline", "container", "docker", "kubernetes", "k8s",
                  "terraform", "ansible", "helm", "ci", "cd", "infra", "cloud",
                  "aws", "gcp", "azure", "monitoring", "alert", "observ"],
        phrases=["ci/cd pipeline", "infrastructure as code", "blue green deployment",
                 "canary release", "rolling update", "auto scaling",
                 "service discovery", "container orchestration",
                 "gitops", "platform engineering"],
        file_indicators=[".tf", ".hcl", "Dockerfile", ".yml", ".yaml",
                         "Jenkinsfile", ".github"],
        expert_persona=(
            "a senior DevOps/platform engineer who has managed production infrastructure "
            "at scale. You think in terms of reliability, repeatability, and observability. "
            "You know that every manual step is a future incident."
        ),
        thinking_frameworks=["DORA metrics (deployment frequency, lead time, MTTR, change failure rate)",
                             "Infrastructure as Code principles",
                             "SRE golden signals (latency, traffic, errors, saturation)",
                             "GitOps workflow"],
        key_questions=["What is the deployment target (cloud, on-prem, hybrid)?",
                       "What are the reliability requirements (SLOs)?",
                       "How do we roll back if something goes wrong?",
                       "What observability do we have?",
                       "What is the blast radius of a bad deploy?"],
        structured_approach=["Assess current infrastructure and deployment process",
                             "Identify gaps in reliability and automation",
                             "Design pipeline and infrastructure changes",
                             "Plan rollout with rollback strategy",
                             "Define success metrics and alerts"],
        agent_hint="plan",
    ),
    DomainProfile(
        id="database",
        name="Database & Data Modeling",
        keywords=["database", "schema", "table", "column", "index", "query",
                  "sql", "nosql", "migration", "join", "foreign key", "primary key",
                  "transaction", "acid", "normali", "partition", "shard", "replica"],
        phrases=["query optimization", "execution plan", "database migration",
                 "data model", "schema design", "query plan", "n+1 query",
                 "connection pool", "read replica", "write ahead log",
                 "eventual consistency"],
        file_indicators=[".sql", ".prisma", ".migration"],
        expert_persona=(
            "a database architect with deep expertise in both relational and NoSQL systems. "
            "You think about data access patterns first, schema second. You've tuned queries "
            "from minutes to milliseconds and know when denormalization is the right call."
        ),
        thinking_frameworks=["Normal forms (1NF through BCNF) and when to denormalize",
                             "ACID vs BASE trade-offs",
                             "Index design (B-tree, hash, composite, covering)",
                             "CAP theorem applied to data stores"],
        key_questions=["What are the primary access patterns (reads vs writes)?",
                       "What consistency guarantees are needed?",
                       "How much data and what growth rate?",
                       "What are the query performance requirements?",
                       "How will the schema evolve?"],
        structured_approach=["Understand access patterns and data relationships",
                             "Design schema to match access patterns",
                             "Plan indexing strategy",
                             "Consider partitioning/sharding needs",
                             "Design migration path from current state"],
        agent_hint="build",
    ),
    DomainProfile(
        id="api_design",
        name="API Design",
        keywords=["api", "endpoint", "rest", "graphql", "grpc", "webhook",
                  "pagination", "versioning", "rate limit", "openapi", "swagger",
                  "request", "response", "payload", "header", "status code"],
        phrases=["rest api", "api design", "api versioning", "breaking change",
                 "backward compatible", "content negotiation", "hateoas",
                 "api gateway", "graphql schema", "api contract"],
        file_indicators=[".openapi", ".swagger", ".graphql", ".gql", ".proto"],
        expert_persona=(
            "a senior API designer who has built APIs used by thousands of developers. "
            "You think about developer experience, consistency, evolvability, and "
            "backward compatibility. You know REST deeply but aren't dogmatic about it."
        ),
        thinking_frameworks=["REST maturity model (Richardson)",
                             "API-first design",
                             "Consumer-driven contracts",
                             "Robustness principle (be liberal in what you accept)"],
        key_questions=["Who are the API consumers (internal, external, both)?",
                       "What operations does the API need to support?",
                       "How will we handle versioning and breaking changes?",
                       "What authentication and rate limiting model?",
                       "What error format and status code conventions?"],
        structured_approach=["Identify resources and operations",
                             "Design URL structure and HTTP methods",
                             "Define request/response schemas",
                             "Plan versioning and error handling",
                             "Document with examples"],
        agent_hint="plan",
    ),
    DomainProfile(
        id="frontend",
        name="Frontend & UI",
        keywords=["react", "vue", "svelte", "angular", "component", "render",
                  "state", "hook", "prop", "css", "style", "dom", "browser",
                  "responsive", "animation", "accessibility", "a11y", "ssr"],
        phrases=["server side rendering", "client side rendering", "state management",
                 "component library", "design system", "web vitals",
                 "progressive enhancement", "single page app", "hydration",
                 "code splitting", "lazy loading"],
        file_indicators=[".tsx", ".jsx", ".vue", ".svelte", ".css", ".scss", ".less"],
        expert_persona=(
            "a senior frontend architect who cares deeply about user experience, "
            "accessibility, and performance. You've built design systems and know "
            "that the best code is the code that makes users productive and happy."
        ),
        thinking_frameworks=["Component composition patterns",
                             "Unidirectional data flow",
                             "Web Core Vitals (LCP, FID, CLS)",
                             "Progressive enhancement",
                             "WCAG accessibility guidelines"],
        key_questions=["What is the target user experience?",
                       "What rendering strategy fits (SSR, CSR, ISR, SSG)?",
                       "How will we manage state (local, global, server)?",
                       "What are the accessibility requirements?",
                       "What are the performance budgets?"],
        structured_approach=["Clarify UX requirements and constraints",
                             "Choose rendering and state management strategy",
                             "Design component hierarchy",
                             "Plan for accessibility and performance",
                             "Define testing approach (visual, interaction, a11y)"],
        agent_hint="build",
    ),
    DomainProfile(
        id="algorithms",
        name="Algorithms & Data Structures",
        keywords=["algorithm", "complexity", "sort", "search", "graph", "tree",
                  "heap", "hash", "array", "linked list", "stack", "queue",
                  "recursive", "dynamic", "greedy", "backtrack"],
        phrases=["time complexity", "space complexity", "dynamic programming",
                 "divide and conquer", "binary search", "breadth first",
                 "depth first", "shortest path", "minimum spanning",
                 "sliding window", "two pointer"],
        file_indicators=[],
        expert_persona=(
            "a computer scientist who loves elegant solutions and rigorous analysis. "
            "You think in terms of invariants, complexity classes, and correctness proofs. "
            "You know that the right data structure often matters more than the algorithm."
        ),
        thinking_frameworks=["Big-O analysis (time and space)",
                             "Problem reduction (what known problem does this map to?)",
                             "Invariant-based reasoning",
                             "Amortized analysis"],
        key_questions=["What are the input constraints (size, range, distribution)?",
                       "What are the performance requirements?",
                       "Is there a known algorithm or pattern that applies?",
                       "Can we trade space for time (or vice versa)?",
                       "What edge cases must we handle?"],
        structured_approach=["Understand the problem and constraints",
                             "Identify applicable patterns or known algorithms",
                             "Design solution with correctness argument",
                             "Analyze time and space complexity",
                             "Consider optimizations and edge cases"],
        agent_hint="build",
    ),
    DomainProfile(
        id="code_quality",
        name="Code Quality & Refactoring",
        keywords=["refactor", "clean", "readab", "maintainab", "solid", "dry",
                  "smell", "debt", "pattern", "antipattern", "principle",
                  "naming", "abstraction", "duplication"],
        phrases=["code smell", "technical debt", "design pattern", "code review",
                 "clean code", "single responsibility", "dependency injection",
                 "separation of concerns", "boy scout rule",
                 "strangler fig", "legacy code"],
        file_indicators=[],
        expert_persona=(
            "a pragmatic software craftsperson who values readability over cleverness. "
            "You refactor with purpose, not for its own sake. You know that good code "
            "is code your teammates can understand and modify with confidence."
        ),
        thinking_frameworks=["SOLID principles (applied pragmatically)",
                             "Refactoring patterns (Fowler)",
                             "Code smells catalog",
                             "Connascence (coupling analysis)"],
        key_questions=["What problem is the current design causing?",
                       "Is this refactoring worth the risk and effort?",
                       "What's the minimal change that improves the situation?",
                       "How do we refactor safely (tests as safety net)?",
                       "Will this be clearer to the next person reading it?"],
        structured_approach=["Identify the pain point or code smell",
                             "Ensure adequate test coverage before refactoring",
                             "Apply incremental, safe transformations",
                             "Verify behavior preservation after each step",
                             "Review for clarity and simplicity"],
        agent_hint="build",
    ),
    DomainProfile(
        id="planning",
        name="Project Planning & Product",
        keywords=["plan", "roadmap", "milestone", "sprint", "epic", "story",
                  "requirement", "scope", "prioriti", "estimate", "mvp",
                  "feature", "deadline", "backlog", "stakeholder"],
        phrases=["user story", "acceptance criteria", "definition of done",
                 "minimum viable", "project plan", "technical spec",
                 "request for comments", "design doc", "product requirement",
                 "scope creep"],
        file_indicators=[],
        expert_persona=(
            "a seasoned tech lead who bridges engineering and product. You break down "
            "ambiguous problems into concrete, shippable increments. You know that the "
            "best plan is one the team actually follows."
        ),
        thinking_frameworks=["User story mapping",
                             "RICE prioritization (Reach, Impact, Confidence, Effort)",
                             "MoSCoW prioritization",
                             "Incremental delivery (thin vertical slices)"],
        key_questions=["What is the user problem we're solving?",
                       "What is the smallest thing we can ship to learn?",
                       "What are the dependencies and risks?",
                       "How will we know this succeeded?",
                       "What can we defer without losing value?"],
        structured_approach=["Define the problem and success criteria",
                             "Break down into shippable increments",
                             "Identify dependencies, risks, and unknowns",
                             "Prioritize by value and effort",
                             "Define first concrete next steps"],
        agent_hint="plan",
    ),
    # Fallback profile: empty match lists mean it never scores; detect_domain
    # returns it directly when no other domain matches.
    DomainProfile(
        id="general",
        name="General Discussion",
        keywords=[],
        phrases=[],
        file_indicators=[],
        expert_persona=(
            "a knowledgeable senior engineer with broad experience across the stack. "
            "You think clearly, communicate precisely, and always consider the broader "
            "context before diving into details."
        ),
        thinking_frameworks=["First principles thinking",
                             "Trade-off analysis",
                             "Systems thinking"],
        key_questions=["What are we trying to achieve?",
                       "What are the constraints?",
                       "What are the trade-offs?"],
        structured_approach=["Understand the question and context",
                             "Consider multiple perspectives",
                             "Analyze trade-offs",
                             "Provide a clear recommendation"],
        agent_hint="plan",
    ),
)
@dataclass
class DomainDetection:
    """Result of domain detection.

    Produced by detect_domain() and consumed by build_companion_prompt()
    to frame the prompt around the best-matching domain profile.
    """
    primary: DomainProfile  # best-scoring profile; "general" when nothing matched
    confidence: int  # 0-100
    secondary: Optional[DomainProfile] = None  # cross-domain match scoring close to primary, if any
    secondary_confidence: int = 0  # 0-100; stays 0 when there is no secondary
|
|
660
|
+
|
|
661
|
+
|
|
662
|
+
def detect_domain(
    message: str,
    file_paths: Optional[list[str]] = None,
) -> DomainDetection:
    """Score message against all domains and return best match.

    Scoring rules:
    - keyword match: +1 per keyword found
    - phrase match: +2 per phrase found (phrases are more specific)
    - file indicator: +1.5 per matching file extension/pattern

    Args:
        message: User message; matched case-insensitively.
        file_paths: Optional attached file paths whose names/extensions
            also contribute to the score.

    Returns:
        DomainDetection for the best-scoring profile. Falls back to the
        "general" profile with confidence 50 when nothing matches.
    """
    text = message.lower()
    scores: dict[str, float] = {}

    for domain_id, profile in DOMAIN_REGISTRY.items():
        if domain_id == "general":
            continue  # general is the fallback, never scored directly
        score = 0.0

        score += sum(1 for kw in profile.keywords if kw in text)
        score += sum(2 for phrase in profile.phrases if phrase in text)

        if file_paths:
            for fp in file_paths:
                fp_lower = fp.lower()
                for indicator in profile.file_indicators:
                    # The substring test subsumes the former endswith and
                    # exact-basename checks (the lowercased basename is
                    # always a substring of the lowercased path), so a
                    # single membership test is equivalent.
                    if indicator.lower() in fp_lower:
                        score += 1.5

        if score > 0:
            scores[domain_id] = score

    if not scores:
        return DomainDetection(
            primary=DOMAIN_REGISTRY["general"],
            confidence=50,
        )

    ranked = sorted(scores.items(), key=lambda x: x[1], reverse=True)
    best_id, best_score = ranked[0]

    # Confidence: scale relative to number of matches.
    # A score of 5+ is very confident; 1 is low.
    confidence = min(99, int(40 + best_score * 12))

    result = DomainDetection(
        primary=DOMAIN_REGISTRY[best_id],
        confidence=confidence,
    )

    # Cross-domain detection: report a secondary domain when its score
    # reaches at least 60% of the primary's.
    if len(ranked) > 1:
        second_id, second_score = ranked[1]
        if second_score >= best_score * 0.6:
            result.secondary = DOMAIN_REGISTRY[second_id]
            result.secondary_confidence = min(99, int(40 + second_score * 12))

    return result
|
|
727
|
+
|
|
728
|
+
|
|
729
|
+
def build_companion_prompt(
    message: str,
    files: Optional[list[str]] = None,
    domain_override: Optional[str] = None,
    is_followup: bool = False,
) -> tuple[str, DomainDetection]:
    """Assemble a domain-aware companion prompt.

    Args:
        message: The user's question or discussion topic.
        files: Optional attached file paths; bridge-internal temp files
            (prefixed ``opencode_msg_``) are excluded from the context.
        domain_override: Force a specific domain id instead of detecting.
        is_followup: When True, emit a short continuation prompt instead
            of the full framing.

    Returns:
        (prompt_text, domain_detection).
    """
    # Detect or override domain
    if domain_override and domain_override in DOMAIN_REGISTRY:
        profile = DOMAIN_REGISTRY[domain_override]
        detection = DomainDetection(primary=profile, confidence=99)
    else:
        detection = detect_domain(message, files)
        profile = detection.primary

    # Follow-up: lightweight prompt, no re-framing of the whole setup
    if is_followup:
        parts = [
            "## Continuing Our Discussion",
            "",
            message,
            "",
            "Remember: challenge assumptions, consider alternatives, be explicit about trade-offs.",
        ]
        return "\n".join(parts), detection

    # --- Full initial prompt ---
    parts = []

    # File context (skip the bridge's own message temp files)
    user_files = [f for f in (files or []) if not Path(f).name.startswith("opencode_msg_")]
    if user_files:
        file_context = build_file_context(user_files)
        if file_context:
            parts.append("## Context")
            parts.append(file_context)
            parts.append("")

    # Cross-domain note
    cross = ""
    if detection.secondary:
        cross = f" This also touches on **{detection.secondary.name}**, so weave in that perspective where relevant."

    # Discussion setup
    parts.append("## Discussion Setup")
    parts.append(
        f"You are {profile.expert_persona}{cross}\n"
        f"I'm bringing you a question about **{profile.name}**, "
        "and I want us to think through it together as peers."
    )
    parts.append("")

    # Frameworks (plain strings: the former f-prefix had no placeholders)
    parts.append("### Analytical Toolkit")
    for fw in profile.thinking_frameworks:
        parts.append(f"- {fw}")
    parts.append("")

    # Key questions
    parts.append("### Key Questions to Consider")
    for q in profile.key_questions:
        parts.append(f"- {q}")
    parts.append("")

    # Collaborative ground rules
    parts.append("## Collaborative Ground Rules")
    parts.append("- Think out loud, share your reasoning")
    parts.append("- Challenge questionable assumptions — including mine")
    parts.append("- Lay out trade-offs explicitly: what we gain, what we lose")
    parts.append("- Propose at least one alternative I haven't considered")
    parts.append("")

    # Structured approach
    parts.append("## Approach")
    for i, step in enumerate(profile.structured_approach, 1):
        parts.append(f"{i}. {step}")
    parts.append("")

    # The question
    parts.append("## The Question")
    parts.append(message)
    parts.append("")

    # Synthesize
    parts.append("## Synthesize")
    parts.append("1. Your recommendation with rationale")
    parts.append("2. Key trade-offs")
    parts.append("3. Risks or blind spots")
    parts.append("4. Open questions worth exploring")

    return "\n".join(parts), detection
|
|
823
|
+
|
|
824
|
+
|
|
33
825
|
# Default configuration
|
|
34
826
|
DEFAULT_MODEL = "openai/gpt-5.2-codex"
|
|
35
827
|
DEFAULT_AGENT = "plan"
|
|
@@ -163,6 +955,10 @@ class OpenCodeBridge:
|
|
|
163
955
|
|
|
164
956
|
async def _run_opencode(self, *args, timeout: int = 300) -> tuple[str, int]:
|
|
165
957
|
"""Run opencode CLI command and return output (async)."""
|
|
958
|
+
global OPENCODE_BIN
|
|
959
|
+
# Lazy retry: if binary wasn't found at startup, try again
|
|
960
|
+
if not OPENCODE_BIN:
|
|
961
|
+
OPENCODE_BIN = find_opencode()
|
|
166
962
|
if not OPENCODE_BIN:
|
|
167
963
|
return "OpenCode not installed. Install from: https://opencode.ai", 1
|
|
168
964
|
|
|
@@ -177,12 +973,18 @@ class OpenCodeBridge:
|
|
|
177
973
|
proc.communicate(input=b''),
|
|
178
974
|
timeout=timeout
|
|
179
975
|
)
|
|
180
|
-
|
|
181
|
-
|
|
976
|
+
# Combine stdout+stderr so errors aren't silently lost
|
|
977
|
+
out = stdout.decode(errors="replace").strip()
|
|
978
|
+
err = stderr.decode(errors="replace").strip()
|
|
979
|
+
output = out if out else err
|
|
980
|
+
# If both exist and return code indicates error, include stderr
|
|
981
|
+
if out and err and proc.returncode:
|
|
982
|
+
output = f"{out}\n\nStderr:\n{err}"
|
|
983
|
+
return output, proc.returncode or 0
|
|
182
984
|
except asyncio.TimeoutError:
|
|
183
985
|
proc.kill()
|
|
184
986
|
await proc.wait()
|
|
185
|
-
return "Command timed out", 1
|
|
987
|
+
return f"Command timed out after {timeout}s", 1
|
|
186
988
|
except Exception as e:
|
|
187
989
|
return f"Error: {e}", 1
|
|
188
990
|
|
|
@@ -294,7 +1096,9 @@ Set via:
|
|
|
294
1096
|
self,
|
|
295
1097
|
message: str,
|
|
296
1098
|
session_id: Optional[str] = None,
|
|
297
|
-
files: Optional[list[str]] = None
|
|
1099
|
+
files: Optional[list[str]] = None,
|
|
1100
|
+
domain_override: Optional[str] = None,
|
|
1101
|
+
_raw: bool = False,
|
|
298
1102
|
) -> str:
|
|
299
1103
|
sid = session_id or self.active_session
|
|
300
1104
|
if not sid or sid not in self.sessions:
|
|
@@ -302,19 +1106,32 @@ Set via:
|
|
|
302
1106
|
|
|
303
1107
|
session = self.sessions[sid]
|
|
304
1108
|
session.add_message("user", message)
|
|
1109
|
+
# Save immediately so user messages aren't lost if OpenCode fails
|
|
1110
|
+
session.save(self.sessions_dir / f"{sid}.json")
|
|
305
1111
|
|
|
306
|
-
#
|
|
307
|
-
temp_file =
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
1112
|
+
# Always write message to temp file to avoid shell escaping issues
|
|
1113
|
+
temp_file = tempfile.NamedTemporaryFile(
|
|
1114
|
+
mode='w', suffix='.md', delete=False, prefix='opencode_msg_'
|
|
1115
|
+
)
|
|
1116
|
+
temp_file.write(message)
|
|
1117
|
+
temp_file.close()
|
|
1118
|
+
files = (files or []) + [temp_file.name]
|
|
1119
|
+
|
|
1120
|
+
# Build prompt: companion system unless _raw is set
|
|
1121
|
+
domain_info = ""
|
|
1122
|
+
if _raw:
|
|
1123
|
+
run_prompt = build_message_prompt(message, files)
|
|
316
1124
|
else:
|
|
317
|
-
|
|
1125
|
+
is_followup = len(session.messages) > 1
|
|
1126
|
+
run_prompt, detection = build_companion_prompt(
|
|
1127
|
+
message, files, domain_override=domain_override,
|
|
1128
|
+
is_followup=is_followup,
|
|
1129
|
+
)
|
|
1130
|
+
domain_info = f"[Domain: {detection.primary.name}] [Confidence: {detection.confidence}%]"
|
|
1131
|
+
if detection.secondary:
|
|
1132
|
+
domain_info += f" [Also: {detection.secondary.name} ({detection.secondary_confidence}%)]"
|
|
1133
|
+
|
|
1134
|
+
args = ["run", run_prompt]
|
|
318
1135
|
|
|
319
1136
|
args.extend(["--model", session.model])
|
|
320
1137
|
args.extend(["--agent", session.agent])
|
|
@@ -335,7 +1152,13 @@ Set via:
|
|
|
335
1152
|
# Use JSON format to get session ID
|
|
336
1153
|
args.extend(["--format", "json"])
|
|
337
1154
|
|
|
338
|
-
|
|
1155
|
+
# Scale timeout based on attached file size
|
|
1156
|
+
user_files = [f for f in files if not Path(f).name.startswith("opencode_msg_")]
|
|
1157
|
+
total_lines = sum(get_file_info(f).get("lines", 0) for f in user_files)
|
|
1158
|
+
# Base 300s, +60s per 1000 lines above threshold, capped at 900s
|
|
1159
|
+
timeout = min(900, 300 + max(0, (total_lines - MEDIUM_FILE) * 60 // 1000))
|
|
1160
|
+
|
|
1161
|
+
output, code = await self._run_opencode(*args, timeout=timeout)
|
|
339
1162
|
|
|
340
1163
|
# Cleanup temp file
|
|
341
1164
|
if temp_file:
|
|
@@ -371,7 +1194,10 @@ Set via:
|
|
|
371
1194
|
if reply or session.opencode_session_id:
|
|
372
1195
|
session.save(self.sessions_dir / f"{sid}.json")
|
|
373
1196
|
|
|
374
|
-
|
|
1197
|
+
response = reply or "No response received"
|
|
1198
|
+
if domain_info:
|
|
1199
|
+
response = f"{domain_info}\n\n{response}"
|
|
1200
|
+
return response
|
|
375
1201
|
|
|
376
1202
|
async def plan(
|
|
377
1203
|
self,
|
|
@@ -400,22 +1226,14 @@ Set via:
|
|
|
400
1226
|
topic: str,
|
|
401
1227
|
session_id: Optional[str] = None
|
|
402
1228
|
) -> str:
|
|
403
|
-
"""Open-ended brainstorming discussion."""
|
|
1229
|
+
"""Open-ended brainstorming discussion — routes through companion system."""
|
|
404
1230
|
sid = session_id or self.active_session
|
|
405
1231
|
|
|
406
1232
|
if not sid or sid not in self.sessions:
|
|
407
1233
|
sid = f"brainstorm-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
|
|
408
1234
|
await self.start_session(sid, agent="build")
|
|
409
1235
|
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
Please provide:
|
|
413
|
-
1. Key considerations and trade-offs
|
|
414
|
-
2. Multiple approaches or solutions
|
|
415
|
-
3. Pros and cons of each approach
|
|
416
|
-
4. Your recommended approach and why"""
|
|
417
|
-
|
|
418
|
-
return await self.send_message(prompt, sid)
|
|
1236
|
+
return await self.send_message(f"Let's brainstorm about: {topic}", sid)
|
|
419
1237
|
|
|
420
1238
|
async def review_code(
|
|
421
1239
|
self,
|
|
@@ -430,19 +1248,43 @@ Please provide:
|
|
|
430
1248
|
sid = f"review-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
|
|
431
1249
|
await self.start_session(sid, agent="build")
|
|
432
1250
|
|
|
433
|
-
# Check if it's a file path
|
|
1251
|
+
# Check if it's a file path (could be multiple, comma or space separated)
|
|
434
1252
|
files = None
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
|
|
1253
|
+
file_paths = []
|
|
1254
|
+
|
|
1255
|
+
# Try splitting by comma first, then check each part
|
|
1256
|
+
candidates = [c.strip() for c in code_or_file.replace(",", " ").split() if c.strip()]
|
|
1257
|
+
for candidate in candidates:
|
|
1258
|
+
if Path(candidate).is_file():
|
|
1259
|
+
file_paths.append(candidate)
|
|
1260
|
+
|
|
1261
|
+
if file_paths:
|
|
1262
|
+
files = file_paths
|
|
1263
|
+
file_infos = [get_file_info(f) for f in file_paths]
|
|
1264
|
+
file_infos = [i for i in file_infos if i]
|
|
1265
|
+
prompt = build_review_prompt(file_infos, focus)
|
|
1266
|
+
|
|
1267
|
+
# Increase timeout for large files
|
|
1268
|
+
total_lines = sum(i.get("lines", 0) for i in file_infos)
|
|
1269
|
+
if total_lines > LARGE_FILE:
|
|
1270
|
+
# Use variant=high for large reviews if not already high+
|
|
1271
|
+
session = self.sessions[sid]
|
|
1272
|
+
if session.variant in ("minimal", "low", "medium"):
|
|
1273
|
+
prompt += "\n\n> *Auto-escalated to thorough review due to file size.*"
|
|
438
1274
|
else:
|
|
439
|
-
|
|
1275
|
+
# Inline code snippet
|
|
1276
|
+
prompt = f"""Please review this code, focusing on: **{focus}**
|
|
440
1277
|
|
|
441
1278
|
```
|
|
442
1279
|
{code_or_file}
|
|
443
|
-
```
|
|
1280
|
+
```
|
|
1281
|
+
|
|
1282
|
+
Provide:
|
|
1283
|
+
- Issues found (bugs, edge cases, security)
|
|
1284
|
+
- Design feedback
|
|
1285
|
+
- Concrete improvement suggestions"""
|
|
444
1286
|
|
|
445
|
-
return await self.send_message(prompt, sid, files)
|
|
1287
|
+
return await self.send_message(prompt, sid, files, _raw=True)
|
|
446
1288
|
|
|
447
1289
|
def list_sessions(self) -> str:
|
|
448
1290
|
if not self.sessions:
|
|
@@ -531,6 +1373,44 @@ Please provide:
|
|
|
531
1373
|
|
|
532
1374
|
return f"Session '{sid}' ended."
|
|
533
1375
|
|
|
1376
|
+
def export_session(self, session_id: Optional[str] = None, format: str = "markdown") -> str:
    """Export a session transcript.

    Args:
        session_id: Session to export; defaults to the active session.
        format: "json" for a structured dump; anything else yields markdown.

    Returns:
        The serialized transcript, or an explanatory string when no
        matching session exists.
    """
    sid = session_id or self.active_session
    if not sid or sid not in self.sessions:
        return "No active session to export."

    session = self.sessions[sid]

    if format == "json":
        payload = {
            "id": session.id,
            "model": session.model,
            "agent": session.agent,
            "variant": session.variant,
            "created": session.created,
            "messages": [asdict(m) for m in session.messages]
        }
        return json.dumps(payload, indent=2)

    # Markdown: a header block, then one section per message.
    md = [
        f"# Session: {session.id}",
        f"**Model:** {session.model} | **Agent:** {session.agent} | **Variant:** {session.variant}",
        f"**Created:** {session.created}",
        f"**Messages:** {len(session.messages)}",
        "",
        "---",
        "",
    ]
    for msg in session.messages:
        heading = "User" if msg.role == "user" else "OpenCode"
        md.extend([f"## {heading}", f"*{msg.timestamp}*\n", msg.content, "\n---\n"])

    return "\n".join(md)
|
|
1413
|
+
|
|
534
1414
|
def health_check(self) -> dict:
|
|
535
1415
|
"""Return server health status."""
|
|
536
1416
|
uptime_seconds = int((datetime.now() - self.start_time).total_seconds())
|
|
@@ -595,7 +1475,9 @@ async def list_tools():
|
|
|
595
1475
|
),
|
|
596
1476
|
Tool(
|
|
597
1477
|
name="opencode_discuss",
|
|
598
|
-
description="Send a message to OpenCode. Use for code review, architecture, brainstorming."
|
|
1478
|
+
description="Send a message to OpenCode. Use for code review, architecture, brainstorming. "
|
|
1479
|
+
"Auto-detects discussion domain and frames OpenCode as a specialized expert. "
|
|
1480
|
+
"Use 'domain' to override detection.",
|
|
599
1481
|
inputSchema={
|
|
600
1482
|
"type": "object",
|
|
601
1483
|
"properties": {
|
|
@@ -607,6 +1489,14 @@ async def list_tools():
|
|
|
607
1489
|
"type": "array",
|
|
608
1490
|
"items": {"type": "string"},
|
|
609
1491
|
"description": "File paths to attach for context"
|
|
1492
|
+
},
|
|
1493
|
+
"domain": {
|
|
1494
|
+
"type": "string",
|
|
1495
|
+
"description": "Override auto-detected domain",
|
|
1496
|
+
"enum": ["architecture", "debugging", "performance", "security",
|
|
1497
|
+
"testing", "devops", "database", "api_design",
|
|
1498
|
+
"frontend", "algorithms", "code_quality", "planning",
|
|
1499
|
+
"general"]
|
|
610
1500
|
}
|
|
611
1501
|
},
|
|
612
1502
|
"required": ["message"]
|
|
@@ -647,13 +1537,13 @@ async def list_tools():
|
|
|
647
1537
|
),
|
|
648
1538
|
Tool(
|
|
649
1539
|
name="opencode_review",
|
|
650
|
-
description="Review code for issues and improvements",
|
|
1540
|
+
description="Review code for issues and improvements. Supports large files with adaptive review strategies. Can accept multiple file paths (space or comma separated).",
|
|
651
1541
|
inputSchema={
|
|
652
1542
|
"type": "object",
|
|
653
1543
|
"properties": {
|
|
654
1544
|
"code_or_file": {
|
|
655
1545
|
"type": "string",
|
|
656
|
-
"description": "Code snippet or file
|
|
1546
|
+
"description": "Code snippet, file path, or multiple file paths (space/comma separated)"
|
|
657
1547
|
},
|
|
658
1548
|
"focus": {
|
|
659
1549
|
"type": "string",
|
|
@@ -702,6 +1592,7 @@ async def list_tools():
|
|
|
702
1592
|
inputSchema={
|
|
703
1593
|
"type": "object",
|
|
704
1594
|
"properties": {
|
|
1595
|
+
"session_id": {"type": "string", "description": "Session ID (default: active session)"},
|
|
705
1596
|
"last_n": {"type": "integer", "description": "Number of messages (default: 20)"}
|
|
706
1597
|
}
|
|
707
1598
|
}
|
|
@@ -744,6 +1635,17 @@ async def list_tools():
|
|
|
744
1635
|
}
|
|
745
1636
|
}
|
|
746
1637
|
),
|
|
1638
|
+
Tool(
|
|
1639
|
+
name="opencode_export",
|
|
1640
|
+
description="Export a session transcript as markdown or JSON",
|
|
1641
|
+
inputSchema={
|
|
1642
|
+
"type": "object",
|
|
1643
|
+
"properties": {
|
|
1644
|
+
"session_id": {"type": "string", "description": "Session to export (default: active)"},
|
|
1645
|
+
"format": {"type": "string", "description": "Export format: markdown or json (default: markdown)", "enum": ["markdown", "json"]}
|
|
1646
|
+
}
|
|
1647
|
+
}
|
|
1648
|
+
),
|
|
747
1649
|
Tool(
|
|
748
1650
|
name="opencode_health",
|
|
749
1651
|
description="Health check: returns server status, session count, and uptime",
|
|
@@ -769,7 +1671,8 @@ async def call_tool(name: str, arguments: dict):
|
|
|
769
1671
|
elif name == "opencode_discuss":
|
|
770
1672
|
result = await bridge.send_message(
|
|
771
1673
|
message=arguments["message"],
|
|
772
|
-
files=arguments.get("files")
|
|
1674
|
+
files=arguments.get("files"),
|
|
1675
|
+
domain_override=arguments.get("domain"),
|
|
773
1676
|
)
|
|
774
1677
|
elif name == "opencode_plan":
|
|
775
1678
|
result = await bridge.plan(
|
|
@@ -790,7 +1693,10 @@ async def call_tool(name: str, arguments: dict):
|
|
|
790
1693
|
elif name == "opencode_variant":
|
|
791
1694
|
result = bridge.set_variant(arguments["variant"])
|
|
792
1695
|
elif name == "opencode_history":
|
|
793
|
-
result = bridge.get_history(
|
|
1696
|
+
result = bridge.get_history(
|
|
1697
|
+
session_id=arguments.get("session_id"),
|
|
1698
|
+
last_n=arguments.get("last_n", 20)
|
|
1699
|
+
)
|
|
794
1700
|
elif name == "opencode_sessions":
|
|
795
1701
|
result = bridge.list_sessions()
|
|
796
1702
|
elif name == "opencode_switch":
|
|
@@ -805,6 +1711,11 @@ async def call_tool(name: str, arguments: dict):
|
|
|
805
1711
|
agent=arguments.get("agent"),
|
|
806
1712
|
variant=arguments.get("variant")
|
|
807
1713
|
)
|
|
1714
|
+
elif name == "opencode_export":
|
|
1715
|
+
result = bridge.export_session(
|
|
1716
|
+
session_id=arguments.get("session_id"),
|
|
1717
|
+
format=arguments.get("format", "markdown")
|
|
1718
|
+
)
|
|
808
1719
|
elif name == "opencode_health":
|
|
809
1720
|
health = bridge.health_check()
|
|
810
1721
|
result = f"Status: {health['status']}\nSessions: {health['sessions']}\nUptime: {health['uptime']}s"
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
opencode_bridge/__init__.py,sha256=SkXVg907MuInd7UEYOjHjiiIIT46y4S2l20hE9cShKo,92
|
|
2
|
+
opencode_bridge/install.py,sha256=VOJNYUPxq88g0XizkHSQ9noM3Qcd3AfZxPUZInEKErk,1796
|
|
3
|
+
opencode_bridge/server.py,sha256=vNTN7IFIQi6IaEQ1VtBgoum7s6uPKyKDrC3_y1mbTPw,72490
|
|
4
|
+
opencode_bridge-0.2.0.dist-info/METADATA,sha256=s810ba1WjpaJ_Bt5Ag-hiLa1PY2msk5N_crtRdxffZg,3924
|
|
5
|
+
opencode_bridge-0.2.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
|
|
6
|
+
opencode_bridge-0.2.0.dist-info/entry_points.txt,sha256=8elAgeI-Sk7EPoV7kUr3CCgQyIAW2VfDj5ZXQ_9slCc,184
|
|
7
|
+
opencode_bridge-0.2.0.dist-info/RECORD,,
|
|
@@ -1,7 +0,0 @@
|
|
|
1
|
-
opencode_bridge/__init__.py,sha256=SkXVg907MuInd7UEYOjHjiiIIT46y4S2l20hE9cShKo,92
|
|
2
|
-
opencode_bridge/install.py,sha256=VOJNYUPxq88g0XizkHSQ9noM3Qcd3AfZxPUZInEKErk,1796
|
|
3
|
-
opencode_bridge/server.py,sha256=aufAiF37ZZY_PjA-vDGVp_1FhFDV5VWESQTH4qOChjI,29064
|
|
4
|
-
opencode_bridge-0.1.4.dist-info/METADATA,sha256=sXUUJpr2z_83exM4t0HJuCwEu4NgJTptbgsDDQx1ww8,3924
|
|
5
|
-
opencode_bridge-0.1.4.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
|
|
6
|
-
opencode_bridge-0.1.4.dist-info/entry_points.txt,sha256=8elAgeI-Sk7EPoV7kUr3CCgQyIAW2VfDj5ZXQ_9slCc,184
|
|
7
|
-
opencode_bridge-0.1.4.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|