opencode-bridge 0.1.5__tar.gz → 0.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: opencode-bridge
3
- Version: 0.1.5
3
+ Version: 0.3.0
4
4
  Summary: MCP server for continuous OpenCode discussion sessions
5
5
  Project-URL: Repository, https://github.com/genomewalker/opencode-bridge
6
6
  Author: Antonio Fernandez-Guerra
@@ -30,6 +30,280 @@ from mcp.server.stdio import stdio_server
30
30
  from mcp.types import Tool, TextContent, ServerCapabilities, ToolsCapability
31
31
 
32
32
 
33
# Line-count thresholds used to pick a review/prompt strategy.
SMALL_FILE = 500
MEDIUM_FILE = 1500
LARGE_FILE = 5000

# Language detection: lowercase file extension -> display name.
LANG_MAP = {
    ".py": "Python",
    ".js": "JavaScript",
    ".ts": "TypeScript",
    ".tsx": "TypeScript/React",
    ".jsx": "JavaScript/React",
    ".go": "Go",
    ".rs": "Rust",
    ".java": "Java",
    ".c": "C",
    ".cpp": "C++",
    ".h": "C/C++ Header",
    ".hpp": "C++ Header",
    ".cs": "C#",
    ".rb": "Ruby",
    ".php": "PHP",
    ".swift": "Swift",
    ".kt": "Kotlin",
    ".scala": "Scala",
    ".sh": "Shell",
    ".bash": "Bash",
    ".sql": "SQL",
    ".html": "HTML",
    ".css": "CSS",
    ".scss": "SCSS",
    ".yaml": "YAML",
    ".yml": "YAML",
    ".json": "JSON",
    ".toml": "TOML",
    ".xml": "XML",
    ".md": "Markdown",
    ".r": "R",
    ".lua": "Lua",
    ".zig": "Zig",
    ".nim": "Nim",
    ".ex": "Elixir",
    ".erl": "Erlang",
    ".clj": "Clojure",
    ".hs": "Haskell",
    ".ml": "OCaml",
    ".vue": "Vue",
    ".svelte": "Svelte",
    ".dart": "Dart",
    ".proto": "Protocol Buffers",
}


# Memoized results of get_file_info, keyed by resolved absolute path.
_file_info_cache: dict[str, dict] = {}

# Files larger than this are not read; line counts are estimated from size.
MAX_READ_SIZE = 10 * 1024 * 1024  # 10MB
57
+
58
+
59
def get_file_info(filepath: str) -> dict:
    """Return metadata for *filepath*: size, line count, language, category.

    Results are cached per resolved path. The cache entry is revalidated
    against the file's current size and mtime, so a file edited between
    calls is re-scanned instead of served stale.

    Returns {} if the path is not a regular file, and a minimal
    {"path", "name"} dict when the file cannot be read.
    """
    filepath = str(Path(filepath).resolve())

    p = Path(filepath)
    if not p.is_file():
        return {}
    try:
        stat = p.stat()

        # Cache entries are {"sig": (size, mtime_ns), "info": {...}}; the
        # signature check fixes the original unbounded-staleness bug where
        # a cached result was returned even after the file changed.
        signature = (stat.st_size, stat.st_mtime_ns)
        cached = _file_info_cache.get(filepath)
        if cached and cached.get("sig") == signature:
            return cached["info"]

        ext = p.suffix.lower()

        # Count lines efficiently: stream for large files, estimate for huge ones.
        if stat.st_size > MAX_READ_SIZE:
            # Estimate: ~40 bytes per line is typical for code files.
            line_count = stat.st_size // 40
        else:
            # Stream line counting without loading full content into memory.
            # Explicit utf-8 + errors="replace" keeps the count deterministic
            # across platforms and never raises on odd bytes.
            line_count = 0
            with open(p, "r", encoding="utf-8", errors="replace") as f:
                for _ in f:
                    line_count += 1

        result = {
            "path": filepath,
            "name": p.name,
            "size_bytes": stat.st_size,
            "size_human": _human_size(stat.st_size),
            "lines": line_count,
            "language": LANG_MAP.get(ext, ext.lstrip(".").upper() if ext else "Unknown"),
            "ext": ext,
            "category": (
                "small" if line_count <= SMALL_FILE
                else "medium" if line_count <= MEDIUM_FILE
                else "large" if line_count <= LARGE_FILE
                else "very large"
            ),
        }
        _file_info_cache[filepath] = {"sig": signature, "info": result}
        return result
    except Exception:
        # Best-effort fallback (permission/read errors): identity fields only.
        return {"path": filepath, "name": p.name}
102
+
103
+
104
def _human_size(size_bytes: int) -> str:
    """Format a byte count as a short human-readable string (e.g. '1.5KB')."""
    value = float(size_bytes)
    for unit in ("B", "KB", "MB", "GB"):
        if value < 1024:
            # Whole bytes need no decimal place; scaled units keep one.
            return f"{value:.0f}B" if unit == "B" else f"{value:.1f}{unit}"
        value /= 1024
    # Anything past GB is reported in terabytes.
    return f"{value:.1f}TB"
111
+
112
+
113
def build_file_context(file_paths: list[str]) -> str:
    """Render a markdown summary block for the attached files ('' if none)."""
    if not file_paths:
        return ""

    metas = []
    for path in file_paths:
        meta = get_file_info(path)
        if meta:
            metas.append(meta)
    if not metas:
        return ""

    rendered = ["## Attached Files\n"]
    for meta in metas:
        entry = f"- **{meta.get('name', '?')}**"
        extras = []
        # Display order: language, lines, size, category.
        for key, template in (
            ("language", "{}"),
            ("lines", "{} lines"),
            ("size_human", "{}"),
            ("category", "{}"),
        ):
            if key in meta:
                extras.append(template.format(meta[key]))
        if extras:
            entry += f" ({', '.join(extras)})"
        rendered.append(entry)

    # Flag very large reviews so the model paces itself.
    grand_total = sum(m.get("lines", 0) for m in metas)
    if grand_total > LARGE_FILE:
        rendered.append(f"\n> Total: {grand_total} lines across {len(metas)} file(s) — this is a large review.")
        rendered.append("> Focus on the most critical issues first. Use a structured, section-by-section approach.")

    return "\n".join(rendered)
143
+
144
+
145
def build_review_prompt(file_infos: list[dict], focus: str) -> str:
    """Compose a code-review prompt whose depth adapts to total file size.

    Large inputs get a pattern-level strategy, medium inputs a structured
    outline, and small inputs a line-level checklist.
    """
    combined_lines = sum(info.get("lines", 0) for info in file_infos)

    sections = [f"Please review the attached code, focusing on: **{focus}**\n"]

    # List the files under review with language and length for context.
    if file_infos:
        sections.append("### Files to review:")
        sections.extend(
            f"- {info.get('name', '?')} ({info.get('language', '?')}, {info.get('lines', '?')} lines)"
            for info in file_infos
        )
        sections.append("")

    # Choose the strategy text matching the amount of code.
    if combined_lines > LARGE_FILE:
        strategy = """### Review Strategy (Large File)
This is a large codebase review. Use this structured approach:

1. **Architecture Overview**: Describe the overall structure, main components, and data flow
2. **Critical Issues**: Security vulnerabilities, bugs, race conditions, memory leaks
3. **Design Concerns**: Architectural problems, tight coupling, missing abstractions
4. **Code Quality**: Naming, duplication, complexity hotspots (focus on the worst areas)
5. **Key Recommendations**: Top 5 most impactful improvements, prioritized

Do NOT try to comment on every line. Focus on patterns and the most impactful findings."""
    elif combined_lines > MEDIUM_FILE:
        strategy = """### Review Strategy (Medium File)
Provide a structured review:

1. **Summary**: What does this code do? Overall assessment
2. **Issues Found**: Bugs, security concerns, edge cases, error handling gaps
3. **Design Feedback**: Structure, patterns, abstractions
4. **Specific Suggestions**: Concrete improvements with code examples where helpful"""
    else:
        strategy = """### Review Guidelines
Provide a thorough review covering:
- Correctness and edge cases
- Error handling
- Code clarity and naming
- Any security concerns
- Concrete suggestions for improvement"""
    sections.append(strategy)

    return "\n".join(sections)
189
+
190
+
191
def build_message_prompt(message: str, file_paths: list[str]) -> str:
    """Compose the prompt sent to OpenCode, prefixed with attached-file context.

    NOTE(review): *message* itself travels inside an opencode_msg_* temp file
    listed in *file_paths*; this prompt only tells OpenCode how to handle
    the attachments.
    """
    segments: list[str] = []

    # Real user attachments only; internal opencode_msg_* temp files are skipped.
    attachments = [f for f in file_paths if not Path(f).name.startswith("opencode_msg_")]
    if attachments:
        ctx = build_file_context(attachments)
        if ctx:
            segments += [ctx, ""]

        combined = sum(get_file_info(f).get("lines", 0) for f in attachments)
        if combined > LARGE_FILE:
            segments += [
                "**Note:** Large file(s) attached. Read through the full content carefully before responding. "
                "If asked to analyze or review, use a structured section-by-section approach.",
                "",
            ]

    segments += [
        "## Request",
        "Respond to the user's request in the attached message file. "
        "Read all attached files completely before responding.",
    ]
    return "\n".join(segments)
214
+
215
+
216
+ # ---------------------------------------------------------------------------
217
+ # Companion System — Auto-Framing
218
+ # ---------------------------------------------------------------------------
219
+
220
+
221
def build_companion_prompt(
    message: str,
    files: Optional[list[str]] = None,
    domain_override: Optional[str] = None,
    is_followup: bool = False,
) -> str:
    """Assemble a companion prompt that auto-detects the domain.

    The LLM identifies the domain and adopts an appropriate expert persona.
    An optional *domain_override* hint biases the framing toward a specific
    field. Follow-ups get a short reminder instead of the full setup.
    """
    # Follow-up turns only need a lightweight reminder of the ground rules.
    if is_followup:
        return (
            "## Continuing Our Discussion"
            f"\n\n{message}\n\n"
            "Remember: challenge assumptions, consider alternatives, be explicit about trade-offs."
        )

    # --- Full initial prompt ---
    segments: list[str] = []

    # Attached-file context (internal opencode_msg_* temp files excluded).
    attachments = [f for f in (files or []) if not Path(f).name.startswith("opencode_msg_")]
    if attachments:
        ctx = build_file_context(attachments)
        if ctx:
            segments += ["## Context", ctx, ""]

    # Optional user-supplied domain hint, appended to the setup section.
    hint = (
        (
            f"\n\nNote: the user has indicated this is about **{domain_override}** — "
            "frame your expertise accordingly."
        )
        if domain_override
        else ""
    )

    segments += [
        "## Discussion Setup",
        "Determine the **specific domain of expertise** this question belongs to "
        "(e.g., distributed systems, metagenomics, compiler design, quantitative finance, "
        "DevOps, security, database design, or any other field).\n"
        "\n"
        "Then adopt the persona of a **senior practitioner with deep, hands-on "
        "experience** in that domain. You have:\n"
        "- Years of practical experience solving real problems in this field\n"
        "- Deep knowledge of the key frameworks, methods, and trade-offs\n"
        "- Strong opinions loosely held — you recommend but explain why\n"
        "\n"
        "Briefly state what domain you identified and what expert lens you're "
        f"applying (one line at the top is enough).{hint}",
        "",
        "## Collaborative Ground Rules",
        "- Think out loud, share your reasoning step by step",
        "- Challenge questionable assumptions — including mine",
        "- Lay out trade-offs explicitly: what we gain, what we lose",
        "- Name the key analytical frameworks or methods relevant to this domain",
        "- Propose at least one alternative I haven't considered",
        "",
        "## Your Approach",
        "1. Identify the domain and the core question",
        "2. Apply domain-specific frameworks and best practices",
        "3. Analyze trade-offs with concrete reasoning",
        "4. Provide a clear recommendation",
        "",
        "## The Question",
        message,
        "",
        "## Synthesize",
        "1. Your recommendation with rationale",
        "2. Key trade-offs",
        "3. Risks or blind spots",
        "4. Open questions worth exploring",
    ]

    return "\n".join(segments)
305
+
306
+
33
307
  # Default configuration
34
308
  DEFAULT_MODEL = "openai/gpt-5.2-codex"
35
309
  DEFAULT_AGENT = "plan"
@@ -163,6 +437,10 @@ class OpenCodeBridge:
163
437
 
164
438
  async def _run_opencode(self, *args, timeout: int = 300) -> tuple[str, int]:
165
439
  """Run opencode CLI command and return output (async)."""
440
+ global OPENCODE_BIN
441
+ # Lazy retry: if binary wasn't found at startup, try again
442
+ if not OPENCODE_BIN:
443
+ OPENCODE_BIN = find_opencode()
166
444
  if not OPENCODE_BIN:
167
445
  return "OpenCode not installed. Install from: https://opencode.ai", 1
168
446
 
@@ -177,12 +455,18 @@ class OpenCodeBridge:
177
455
  proc.communicate(input=b''),
178
456
  timeout=timeout
179
457
  )
180
- output = stdout.decode() or stderr.decode()
181
- return output.strip(), proc.returncode or 0
458
+ # Combine stdout+stderr so errors aren't silently lost
459
+ out = stdout.decode(errors="replace").strip()
460
+ err = stderr.decode(errors="replace").strip()
461
+ output = out if out else err
462
+ # If both exist and return code indicates error, include stderr
463
+ if out and err and proc.returncode:
464
+ output = f"{out}\n\nStderr:\n{err}"
465
+ return output, proc.returncode or 0
182
466
  except asyncio.TimeoutError:
183
467
  proc.kill()
184
468
  await proc.wait()
185
- return "Command timed out", 1
469
+ return f"Command timed out after {timeout}s", 1
186
470
  except Exception as e:
187
471
  return f"Error: {e}", 1
188
472
 
@@ -294,7 +578,9 @@ Set via:
294
578
  self,
295
579
  message: str,
296
580
  session_id: Optional[str] = None,
297
- files: Optional[list[str]] = None
581
+ files: Optional[list[str]] = None,
582
+ domain_override: Optional[str] = None,
583
+ _raw: bool = False,
298
584
  ) -> str:
299
585
  sid = session_id or self.active_session
300
586
  if not sid or sid not in self.sessions:
@@ -302,6 +588,8 @@ Set via:
302
588
 
303
589
  session = self.sessions[sid]
304
590
  session.add_message("user", message)
591
+ # Save immediately so user messages aren't lost if OpenCode fails
592
+ session.save(self.sessions_dir / f"{sid}.json")
305
593
 
306
594
  # Always write message to temp file to avoid shell escaping issues
307
595
  temp_file = tempfile.NamedTemporaryFile(
@@ -309,9 +597,20 @@ Set via:
309
597
  )
310
598
  temp_file.write(message)
311
599
  temp_file.close()
312
- args = ["run", "Respond to the request in the attached message file."]
313
600
  files = (files or []) + [temp_file.name]
314
601
 
602
+ # Build prompt: companion system unless _raw is set
603
+ if _raw:
604
+ run_prompt = build_message_prompt(message, files)
605
+ else:
606
+ is_followup = len(session.messages) > 1
607
+ run_prompt = build_companion_prompt(
608
+ message, files, domain_override=domain_override,
609
+ is_followup=is_followup,
610
+ )
611
+
612
+ args = ["run", run_prompt]
613
+
315
614
  args.extend(["--model", session.model])
316
615
  args.extend(["--agent", session.agent])
317
616
 
@@ -331,7 +630,13 @@ Set via:
331
630
  # Use JSON format to get session ID
332
631
  args.extend(["--format", "json"])
333
632
 
334
- output, code = await self._run_opencode(*args)
633
+ # Scale timeout based on attached file size
634
+ user_files = [f for f in files if not Path(f).name.startswith("opencode_msg_")]
635
+ total_lines = sum(get_file_info(f).get("lines", 0) for f in user_files)
636
+ # Base 300s, +60s per 1000 lines above threshold, capped at 900s
637
+ timeout = min(900, 300 + max(0, (total_lines - MEDIUM_FILE) * 60 // 1000))
638
+
639
+ output, code = await self._run_opencode(*args, timeout=timeout)
335
640
 
336
641
  # Cleanup temp file
337
642
  if temp_file:
@@ -396,22 +701,14 @@ Set via:
396
701
  topic: str,
397
702
  session_id: Optional[str] = None
398
703
  ) -> str:
399
- """Open-ended brainstorming discussion."""
704
+ """Open-ended brainstorming discussion — routes through companion system."""
400
705
  sid = session_id or self.active_session
401
706
 
402
707
  if not sid or sid not in self.sessions:
403
708
  sid = f"brainstorm-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
404
709
  await self.start_session(sid, agent="build")
405
710
 
406
- prompt = f"""Let's brainstorm about: {topic}
407
-
408
- Please provide:
409
- 1. Key considerations and trade-offs
410
- 2. Multiple approaches or solutions
411
- 3. Pros and cons of each approach
412
- 4. Your recommended approach and why"""
413
-
414
- return await self.send_message(prompt, sid)
711
+ return await self.send_message(f"Let's brainstorm about: {topic}", sid)
415
712
 
416
713
  async def review_code(
417
714
  self,
@@ -426,19 +723,43 @@ Please provide:
426
723
  sid = f"review-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
427
724
  await self.start_session(sid, agent="build")
428
725
 
429
- # Check if it's a file path
726
+ # Check if it's a file path (could be multiple, comma or space separated)
430
727
  files = None
431
- if Path(code_or_file).is_file():
432
- files = [code_or_file]
433
- prompt = f"Please review the attached code file, focusing on: {focus}"
728
+ file_paths = []
729
+
730
+ # Try splitting by comma first, then check each part
731
+ candidates = [c.strip() for c in code_or_file.replace(",", " ").split() if c.strip()]
732
+ for candidate in candidates:
733
+ if Path(candidate).is_file():
734
+ file_paths.append(candidate)
735
+
736
+ if file_paths:
737
+ files = file_paths
738
+ file_infos = [get_file_info(f) for f in file_paths]
739
+ file_infos = [i for i in file_infos if i]
740
+ prompt = build_review_prompt(file_infos, focus)
741
+
742
+ # Increase timeout for large files
743
+ total_lines = sum(i.get("lines", 0) for i in file_infos)
744
+ if total_lines > LARGE_FILE:
745
+ # Use variant=high for large reviews if not already high+
746
+ session = self.sessions[sid]
747
+ if session.variant in ("minimal", "low", "medium"):
748
+ prompt += "\n\n> *Auto-escalated to thorough review due to file size.*"
434
749
  else:
435
- prompt = f"""Please review this code, focusing on: {focus}
750
+ # Inline code snippet
751
+ prompt = f"""Please review this code, focusing on: **{focus}**
436
752
 
437
753
  ```
438
754
  {code_or_file}
439
- ```"""
755
+ ```
756
+
757
+ Provide:
758
+ - Issues found (bugs, edge cases, security)
759
+ - Design feedback
760
+ - Concrete improvement suggestions"""
440
761
 
441
- return await self.send_message(prompt, sid, files)
762
+ return await self.send_message(prompt, sid, files, _raw=True)
442
763
 
443
764
  def list_sessions(self) -> str:
444
765
  if not self.sessions:
@@ -527,6 +848,44 @@ Please provide:
527
848
 
528
849
  return f"Session '{sid}' ended."
529
850
 
851
+ def export_session(self, session_id: Optional[str] = None, format: str = "markdown") -> str:
852
+ """Export a session as markdown or JSON."""
853
+ sid = session_id or self.active_session
854
+ if not sid or sid not in self.sessions:
855
+ return "No active session to export."
856
+
857
+ session = self.sessions[sid]
858
+
859
+ if format == "json":
860
+ data = {
861
+ "id": session.id,
862
+ "model": session.model,
863
+ "agent": session.agent,
864
+ "variant": session.variant,
865
+ "created": session.created,
866
+ "messages": [asdict(m) for m in session.messages]
867
+ }
868
+ return json.dumps(data, indent=2)
869
+
870
+ # Markdown format
871
+ lines = [
872
+ f"# Session: {session.id}",
873
+ f"**Model:** {session.model} | **Agent:** {session.agent} | **Variant:** {session.variant}",
874
+ f"**Created:** {session.created}",
875
+ f"**Messages:** {len(session.messages)}",
876
+ "",
877
+ "---",
878
+ "",
879
+ ]
880
+ for msg in session.messages:
881
+ role = "User" if msg.role == "user" else "OpenCode"
882
+ lines.append(f"## {role}")
883
+ lines.append(f"*{msg.timestamp}*\n")
884
+ lines.append(msg.content)
885
+ lines.append("\n---\n")
886
+
887
+ return "\n".join(lines)
888
+
530
889
  def health_check(self) -> dict:
531
890
  """Return server health status."""
532
891
  uptime_seconds = int((datetime.now() - self.start_time).total_seconds())
@@ -591,7 +950,9 @@ async def list_tools():
591
950
  ),
592
951
  Tool(
593
952
  name="opencode_discuss",
594
- description="Send a message to OpenCode. Use for code review, architecture, brainstorming.",
953
+ description="Send a message to OpenCode. Use for code review, architecture, brainstorming. "
954
+ "Auto-detects discussion domain and frames OpenCode as a specialized expert. "
955
+ "Use 'domain' to override detection.",
595
956
  inputSchema={
596
957
  "type": "object",
597
958
  "properties": {
@@ -603,6 +964,10 @@ async def list_tools():
603
964
  "type": "array",
604
965
  "items": {"type": "string"},
605
966
  "description": "File paths to attach for context"
967
+ },
968
+ "domain": {
969
+ "type": "string",
970
+ "description": "Hint the domain of expertise (e.g., 'security', 'metagenomics', 'quantitative finance')"
606
971
  }
607
972
  },
608
973
  "required": ["message"]
@@ -643,13 +1008,13 @@ async def list_tools():
643
1008
  ),
644
1009
  Tool(
645
1010
  name="opencode_review",
646
- description="Review code for issues and improvements",
1011
+ description="Review code for issues and improvements. Supports large files with adaptive review strategies. Can accept multiple file paths (space or comma separated).",
647
1012
  inputSchema={
648
1013
  "type": "object",
649
1014
  "properties": {
650
1015
  "code_or_file": {
651
1016
  "type": "string",
652
- "description": "Code snippet or file path"
1017
+ "description": "Code snippet, file path, or multiple file paths (space/comma separated)"
653
1018
  },
654
1019
  "focus": {
655
1020
  "type": "string",
@@ -698,6 +1063,7 @@ async def list_tools():
698
1063
  inputSchema={
699
1064
  "type": "object",
700
1065
  "properties": {
1066
+ "session_id": {"type": "string", "description": "Session ID (default: active session)"},
701
1067
  "last_n": {"type": "integer", "description": "Number of messages (default: 20)"}
702
1068
  }
703
1069
  }
@@ -740,6 +1106,17 @@ async def list_tools():
740
1106
  }
741
1107
  }
742
1108
  ),
1109
+ Tool(
1110
+ name="opencode_export",
1111
+ description="Export a session transcript as markdown or JSON",
1112
+ inputSchema={
1113
+ "type": "object",
1114
+ "properties": {
1115
+ "session_id": {"type": "string", "description": "Session to export (default: active)"},
1116
+ "format": {"type": "string", "description": "Export format: markdown or json (default: markdown)", "enum": ["markdown", "json"]}
1117
+ }
1118
+ }
1119
+ ),
743
1120
  Tool(
744
1121
  name="opencode_health",
745
1122
  description="Health check: returns server status, session count, and uptime",
@@ -765,7 +1142,8 @@ async def call_tool(name: str, arguments: dict):
765
1142
  elif name == "opencode_discuss":
766
1143
  result = await bridge.send_message(
767
1144
  message=arguments["message"],
768
- files=arguments.get("files")
1145
+ files=arguments.get("files"),
1146
+ domain_override=arguments.get("domain"),
769
1147
  )
770
1148
  elif name == "opencode_plan":
771
1149
  result = await bridge.plan(
@@ -786,7 +1164,10 @@ async def call_tool(name: str, arguments: dict):
786
1164
  elif name == "opencode_variant":
787
1165
  result = bridge.set_variant(arguments["variant"])
788
1166
  elif name == "opencode_history":
789
- result = bridge.get_history(last_n=arguments.get("last_n", 20))
1167
+ result = bridge.get_history(
1168
+ session_id=arguments.get("session_id"),
1169
+ last_n=arguments.get("last_n", 20)
1170
+ )
790
1171
  elif name == "opencode_sessions":
791
1172
  result = bridge.list_sessions()
792
1173
  elif name == "opencode_switch":
@@ -801,6 +1182,11 @@ async def call_tool(name: str, arguments: dict):
801
1182
  agent=arguments.get("agent"),
802
1183
  variant=arguments.get("variant")
803
1184
  )
1185
+ elif name == "opencode_export":
1186
+ result = bridge.export_session(
1187
+ session_id=arguments.get("session_id"),
1188
+ format=arguments.get("format", "markdown")
1189
+ )
804
1190
  elif name == "opencode_health":
805
1191
  health = bridge.health_check()
806
1192
  result = f"Status: {health['status']}\nSessions: {health['sessions']}\nUptime: {health['uptime']}s"
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "opencode-bridge"
7
- version = "0.1.5"
7
+ version = "0.3.0"
8
8
  description = "MCP server for continuous OpenCode discussion sessions"
9
9
  readme = "README.md"
10
10
  license = "MIT"
@@ -1,6 +1,6 @@
1
1
  # OpenCode Discussion
2
2
 
3
- Collaborative discussion with OpenCode models (GPT-5, Claude, Gemini). Sessions persist across messages.
3
+ Collaborative discussion with OpenCode models (GPT-5, Claude, Gemini). Sessions persist across messages. Auto-detects discussion domains and frames OpenCode as a specialized expert.
4
4
 
5
5
  ## Usage
6
6
 
@@ -14,7 +14,7 @@ Collaborative discussion with OpenCode models (GPT-5, Claude, Gemini). Sessions
14
14
  |---------|-------------|
15
15
  | `/opencode` | Start/continue session |
16
16
  | `/opencode plan <task>` | Plan with plan agent |
17
- | `/opencode ask <question>` | Ask anything |
17
+ | `/opencode ask <question>` | Ask anything (auto-detects domain) |
18
18
  | `/opencode review <file>` | Review code |
19
19
  | `/opencode models` | List models |
20
20
  | `/opencode model <name>` | Switch model |
@@ -24,6 +24,21 @@ Collaborative discussion with OpenCode models (GPT-5, Claude, Gemini). Sessions
24
24
  | `/opencode set agent <name>` | Set default agent |
25
25
  | `/opencode end` | End session |
26
26
 
27
+ ## Auto-Framing Companion
28
+
29
+ When you send a message via `opencode_discuss`, the system automatically frames OpenCode as a domain expert. Rather than relying on hardcoded domain lists, the companion prompt instructs the LLM to:
30
+
31
+ 1. **Self-identify the domain** from the question content
32
+ 2. **Adopt a senior practitioner persona** with deep, hands-on experience
33
+ 3. **Apply relevant analytical frameworks** for that domain
34
+ 4. **Engage collaboratively** — challenging assumptions, proposing alternatives, laying out trade-offs
35
+
36
+ This works for any domain — software architecture, metagenomics, quantitative finance, linguistics, or anything else.
37
+
38
+ Optionally provide a `domain` hint to steer the framing: `opencode_discuss(message="...", domain="security")`.
39
+
40
+ Follow-up messages in an existing session get a lighter prompt that preserves the collaborative framing without repeating the full setup.
41
+
27
42
  ## Instructions
28
43
 
29
44
  ### Starting a Session
@@ -44,12 +59,16 @@ When user says `/opencode ask <question>`:
44
59
  1. Call `opencode_discuss(message=<question>)`
45
60
  2. Relay the response
46
61
 
62
+ To hint a specific domain: `opencode_discuss(message=<question>, domain="security")`
63
+
47
64
  ### Code Review
48
65
 
49
66
  When user says `/opencode review <file>`:
50
67
  1. Call `opencode_review(code_or_file=<file>)`
51
68
  2. Relay the findings
52
69
 
70
+ Note: Code review bypasses the companion system and uses specialized review prompts.
71
+
53
72
  ### Configuration
54
73
 
55
74
  When user says `/opencode config`:
@@ -71,7 +90,7 @@ After initial connection, messages like these should be sent as follow-ups:
71
90
  - "how would you implement..."
72
91
  - "can you explain..."
73
92
 
74
- Call `opencode_discuss(message=<user message>)` and relay response.
93
+ Call `opencode_discuss(message=<user message>)` and relay response. Follow-ups automatically get a lighter prompt.
75
94
 
76
95
  ### Session Management
77
96
 
@@ -86,17 +105,12 @@ Call `opencode_discuss(message=<user message>)` and relay response.
86
105
  User: /opencode
87
106
  Claude: Connected to OpenCode (openai/gpt-5.2-codex, plan agent). Ready.
88
107
 
89
- User: Let's plan an RLM-inspired hierarchical retrieval system
90
- Claude: [calls opencode_plan, relays response]
91
-
92
- User: What about the filtering stage?
93
- Claude: [calls opencode_discuss, relays response]
94
-
95
- User: /opencode model github-copilot/claude-opus-4.5
96
- Claude: Model changed to github-copilot/claude-opus-4.5
108
+ User: Should we use event sourcing for our order system?
109
+ Claude: [calls opencode_discuss]
110
+ [OpenCode responds as a distributed systems architect]
97
111
 
98
- User: /opencode set model openai/gpt-5.2-codex
99
- Claude: Default model set to openai/gpt-5.2-codex (persisted)
112
+ User: What about the security implications?
113
+ Claude: [calls opencode_discuss follow-up, lighter prompt]
100
114
 
101
115
  User: /opencode end
102
116
  Claude: Session ended.
File without changes
@@ -0,0 +1,114 @@
1
+ """Tests for the auto-framing companion prompt system."""
2
+
3
+ from opencode_bridge.server import build_companion_prompt
4
+
5
+
6
+ # ---------------------------------------------------------------------------
7
+ # Initial prompt structure
8
+ # ---------------------------------------------------------------------------
9
+
10
+ class TestInitialPrompt:
11
+ def test_has_all_sections(self):
12
+ prompt = build_companion_prompt("How should we handle auth?")
13
+ assert "## Discussion Setup" in prompt
14
+ assert "## Collaborative Ground Rules" in prompt
15
+ assert "## Your Approach" in prompt
16
+ assert "## The Question" in prompt
17
+ assert "## Synthesize" in prompt
18
+
19
+ def test_contains_message(self):
20
+ msg = "Should we use event sourcing for orders?"
21
+ prompt = build_companion_prompt(msg)
22
+ assert msg in prompt
23
+
24
+ def test_instructs_domain_identification(self):
25
+ prompt = build_companion_prompt("How do we price a barrier option?")
26
+ assert "specific domain of expertise" in prompt
27
+ assert "senior practitioner" in prompt
28
+
29
+ def test_instructs_trade_off_analysis(self):
30
+ prompt = build_companion_prompt("Should we use Redis or Memcached?")
31
+ assert "trade-offs" in prompt.lower()
32
+ assert "challenge" in prompt.lower()
33
+
34
+ def test_works_for_software_topics(self):
35
+ prompt = build_companion_prompt("Should we use microservices or a monolith?")
36
+ assert "## The Question" in prompt
37
+ assert "microservices" in prompt
38
+
39
+ def test_works_for_science_topics(self):
40
+ prompt = build_companion_prompt(
41
+ "Should we use co-assembly or per-sample binning for ancient DNA metagenomes?"
42
+ )
43
+ assert "## The Question" in prompt
44
+ assert "co-assembly" in prompt
45
+
46
+ def test_works_for_finance_topics(self):
47
+ prompt = build_companion_prompt(
48
+ "How should we price a European barrier option with jump diffusion?"
49
+ )
50
+ assert "## The Question" in prompt
51
+ assert "barrier option" in prompt
52
+
53
+
54
+ # ---------------------------------------------------------------------------
55
+ # Domain override hint
56
+ # ---------------------------------------------------------------------------
57
+
58
+ class TestDomainOverride:
59
+ def test_override_included_in_prompt(self):
60
+ prompt = build_companion_prompt("Tell me about caching", domain_override="security")
61
+ assert "security" in prompt.lower()
62
+
63
+ def test_override_free_form(self):
64
+ prompt = build_companion_prompt(
65
+ "How do we handle this?", domain_override="metagenomics"
66
+ )
67
+ assert "metagenomics" in prompt
68
+
69
+ def test_no_override_no_hint(self):
70
+ prompt = build_companion_prompt("Tell me about caching")
71
+ assert "user has indicated" not in prompt
72
+
73
+
74
+ # ---------------------------------------------------------------------------
75
+ # Follow-up prompts
76
+ # ---------------------------------------------------------------------------
77
+
78
+ class TestFollowup:
79
+ def test_followup_is_lightweight(self):
80
+ full = build_companion_prompt("How should we handle auth?")
81
+ followup = build_companion_prompt("What about JWT?", is_followup=True)
82
+ assert "Continuing Our Discussion" in followup
83
+ assert len(followup) < len(full)
84
+
85
+ def test_followup_does_not_have_full_sections(self):
86
+ followup = build_companion_prompt("What about JWT?", is_followup=True)
87
+ assert "## Discussion Setup" not in followup
88
+ assert "## Your Approach" not in followup
89
+
90
+ def test_followup_contains_message(self):
91
+ msg = "What about JWT vs sessions?"
92
+ followup = build_companion_prompt(msg, is_followup=True)
93
+ assert msg in followup
94
+
95
+ def test_followup_has_collaborative_reminder(self):
96
+ followup = build_companion_prompt("What next?", is_followup=True)
97
+ assert "challenge assumptions" in followup
98
+
99
+
100
+ # ---------------------------------------------------------------------------
101
+ # File context
102
+ # ---------------------------------------------------------------------------
103
+
104
+ class TestFileContext:
105
+ def test_no_crash_with_files(self):
106
+ prompt = build_companion_prompt("Review this", files=["/tmp/test.py"])
107
+ assert "## The Question" in prompt
108
+
109
+ def test_temp_files_excluded_from_context(self):
110
+ prompt = build_companion_prompt(
111
+ "Review this", files=["/tmp/opencode_msg_abc.md"]
112
+ )
113
+ # Temp message files should not appear in file context
114
+ assert "opencode_msg" not in prompt.split("## The Question")[0]
File without changes