@gopersonal/advisor 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3) hide show
  1. package/README.md +57 -0
  2. package/build/index.js +370 -0
  3. package/package.json +35 -0
package/README.md ADDED
@@ -0,0 +1,57 @@
1
+ # @gopersonal/advisor
2
+
3
+ An MCP server that gives AI agents access to a separate AI advisor via [OpenCode SDK](https://opencode.ai). Agents call the advisor only when they're genuinely stuck or need a second opinion — not for routine tasks.
4
+
5
+ ## Tools
6
+
7
+ - **ask_advisor** — Get help when stuck after multiple failed attempts
8
+ - **get_second_opinion** — Sanity-check a decision between approaches
9
+
10
+ ## Add to Claude Code
11
+
12
+ ```bash
13
+ claude mcp add advisor -- npx -y @gopersonal/advisor
14
+ ```
15
+
16
+ ### With a custom provider
17
+
18
+ ```bash
19
+ claude mcp add advisor \
20
+ -e ADVISOR_MODEL=anthropic/claude-sonnet-4-5 \
21
+ -e ADVISOR_API_KEY=sk-your-key \
22
+ -- npx -y @gopersonal/advisor
23
+ ```
24
+
25
+ ### With a proxy endpoint
26
+
27
+ ```bash
28
+ claude mcp add advisor \
29
+ -e ADVISOR_MODEL=openai/gpt-5.2-codex \
30
+ -e ADVISOR_API_KEY=none \
31
+ -e ADVISOR_BASE_URL=https://your-proxy.example.com \
32
+ -- npx -y @gopersonal/advisor
33
+ ```
34
+
35
+ ## Environment Variables
36
+
37
+ | Variable | Description | Example |
38
+ |---|---|---|
39
+ | `ADVISOR_MODEL` | Model in `provider/model` format | `anthropic/claude-sonnet-4-5` |
40
+ | `ADVISOR_API_KEY` | API key for the provider | `sk-...` |
41
+ | `ADVISOR_BASE_URL` | Custom base URL (proxies, etc.) | `https://proxy.example.com` |
42
+ | `ADVISOR_NPM` | AI SDK npm package (rare, for non-native providers) | `@ai-sdk/openai-compatible` |
43
+
44
+ If no env vars are set, the advisor connects to a running opencode instance or starts one using your default opencode config.
45
+
46
+ ## How it works
47
+
48
+ 1. Agent calls `ask_advisor` or `get_second_opinion` via MCP
49
+ 2. The server creates a temporary OpenCode session
50
+ 3. Sends the prompt asynchronously, polls for the response
51
+ 4. Auto-answers any interactive questions from the OpenCode agent
52
+ 5. Returns the advisor's response and cleans up the session
53
+
54
+ ## Requirements
55
+
56
+ - [OpenCode](https://opencode.ai) installed (`brew install sst/tap/opencode` or `npm i -g opencode`)
57
+ - A configured AI provider (Anthropic, OpenAI, MiniMax, or any of 75+ supported providers)
package/build/index.js ADDED
@@ -0,0 +1,370 @@
1
+ #!/usr/bin/env node
2
+ import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
3
+ import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
4
+ import { z } from "zod";
5
+ import { createOpencode, createOpencodeClient } from "@opencode-ai/sdk/v2";
6
+ // --- Environment-based configuration ---
7
+ //
8
+ // Pass these via the "env" field in the MCP config JSON.
9
+ //
10
+ // OpenCode natively supports 75+ providers (Anthropic, OpenAI, MiniMax, etc.)
11
+ // so most of the time you just need ADVISOR_MODEL and ADVISOR_API_KEY.
12
+ //
13
+ // ADVISOR_MODEL - model in "provider/model" format, e.g. "anthropic/claude-sonnet-4-5"
14
+ // or "openai/gpt-5.2-codex" or "minimax/minimax-m2.5"
15
+ // ADVISOR_API_KEY - API key for the provider (optional if already set in opencode)
16
+ // ADVISOR_BASE_URL - custom base URL override (optional, for proxies or custom endpoints)
17
+ // ADVISOR_NPM - AI SDK npm package (optional, only for non-native providers)
18
+ //
19
// Build the opencode config object from the ADVISOR_* environment variables.
// Returns an EMPTY object when ADVISOR_MODEL is unset, so the caller's
// `Object.keys(config).length > 0` check correctly falls back to the user's
// default opencode configuration. (Previously `small_model: undefined` was
// always present, which made that emptiness check always pass.)
function buildOpencodeConfig() {
    const model = process.env.ADVISOR_MODEL;
    const apiKey = process.env.ADVISOR_API_KEY;
    const baseURL = process.env.ADVISOR_BASE_URL;
    const npm = process.env.ADVISOR_NPM;
    const config = {};
    if (model) {
        config.model = model;
        // Point small_model at the same model: features like title generation
        // use small_model, which may not exist for custom providers.
        config.small_model = model;
        console.error(`[advisor] Model: ${model}`);
    }
    // Extract provider id and model name from "provider/model"
    // (e.g. "anthropic/claude-sonnet-4-5" -> "anthropic" + "claude-sonnet-4-5").
    const providerId = model?.split("/")[0];
    const modelId = model?.split("/").slice(1).join("/");
    if (providerId && (apiKey || baseURL || npm)) {
        const options = {};
        if (apiKey) {
            options.apiKey = apiKey;
        }
        if (baseURL) {
            options.baseURL = baseURL;
        }
        const providerConfig = { options };
        if (npm) {
            providerConfig.npm = npm;
        }
        // Register the model so opencode knows its capabilities.
        if (modelId) {
            providerConfig.models = {
                [modelId]: {
                    name: modelId,
                    tool_call: true,
                    attachment: false,
                    reasoning: false,
                    temperature: false, // many proxies don't support temperature
                    limit: { context: 200000, output: 16384 },
                },
            };
        }
        config.provider = { [providerId]: providerConfig };
        if (baseURL) {
            console.error(`[advisor] Custom base URL: ${baseURL}`);
        }
    }
    return config;
}
63
// --- OpenCode connection management ---
// Module-level cache: a single shared client, plus the server handle when we
// spawned opencode ourselves (so the shutdown handlers can close it).
let opcClient = null;
let opcServer = null;
66
// Probe a candidate opencode server: a healthy instance answers GET /project
// with an OK status and a JSON content type within 3 seconds. Any network
// error or timeout is treated as "not healthy".
async function checkServerHealth(baseUrl) {
    try {
        const response = await fetch(`${baseUrl}/project`, {
            signal: AbortSignal.timeout(3000),
            headers: { Accept: "application/json" },
        });
        if (!response.ok) {
            return false;
        }
        const contentType = response.headers.get("content-type") ?? "";
        return contentType.includes("json");
    }
    catch {
        return false;
    }
}
81
// Lazily resolve an opencode client: reuse the cached one, attach to an
// already-running local server if one responds on a default port, or spawn a
// fresh server configured from the ADVISOR_* environment variables.
async function getOpencodeClient() {
    if (opcClient) {
        return opcClient;
    }
    // Probe the default opencode ports for a live server to reuse.
    for (const port of [4096, 4097, 4098]) {
        const baseUrl = `http://127.0.0.1:${port}`;
        if (await checkServerHealth(baseUrl)) {
            opcClient = createOpencodeClient({ baseUrl });
            console.error(`[advisor] Connected to existing opencode server at ${baseUrl}`);
            return opcClient;
        }
    }
    // Nothing running — start our own server with env-derived provider config.
    console.error("[advisor] No existing server found, starting new one...");
    const config = buildOpencodeConfig();
    try {
        const hasConfig = Object.keys(config).length > 0;
        const { client, server } = await createOpencode({
            timeout: 30000,
            ...(hasConfig ? { config } : {}),
        });
        opcClient = client;
        opcServer = server;
        console.error(`[advisor] Started opencode server at ${server.url}`);
        return opcClient;
    }
    catch (err) {
        const msg = err instanceof Error ? err.message : String(err);
        throw new Error(`Failed to start opencode server: ${msg}. ` +
            `Make sure opencode is installed (https://opencode.ai) and configured with a provider.`);
    }
}
112
// Parse ADVISOR_MODEL ("provider/model") into the override shape that session
// prompts expect. Returns undefined when the variable is unset or contains no
// "/" separator. Everything after the FIRST slash is the model id, so model
// names containing slashes are preserved.
function getModelOverride() {
    const model = process.env.ADVISOR_MODEL;
    if (!model || !model.includes("/")) {
        return undefined;
    }
    const slash = model.indexOf("/");
    return {
        providerID: model.slice(0, slash),
        modelID: model.slice(slash + 1),
    };
}
120
// Resolve after roughly `ms` milliseconds (setTimeout-based delay helper).
function sleep(ms) {
    return new Promise((resolve) => {
        setTimeout(resolve, ms);
    });
}
123
// Run a one-shot advisor query: create a throwaway opencode session, submit
// the prompt asynchronously, then poll (every 2s, up to 90s) until the
// assistant's reply is complete — auto-answering any interactive questions
// along the way — and always delete the session afterwards.
async function askOpencode(prompt, systemPrompt) {
    const client = await getOpencodeClient();
    const created = await client.session.create({
        title: "Advisor Query",
    });
    if (!created.data) {
        throw new Error("Failed to create opencode session");
    }
    const sessionId = created.data.id;
    const modelOverride = getModelOverride();
    try {
        // promptAsync returns immediately (204) while the session processes.
        try {
            await client.session.promptAsync({
                sessionID: sessionId,
                system: systemPrompt,
                ...(modelOverride ? { model: modelOverride } : {}),
                parts: [{ type: "text", text: prompt }],
            });
            console.error(`[advisor] Prompt submitted async`);
        }
        catch (err) {
            const msg = err instanceof Error ? err.message : String(err);
            throw new Error(`Failed to submit prompt: ${msg}`);
        }
        const maxWaitMs = 90_000;
        const pollIntervalMs = 2000;
        const startTime = Date.now();
        let lastTextLength = 0;
        let stableCount = 0;
        const answeredQuestions = new Set();
        await sleep(3000);
        while (Date.now() - startTime < maxWaitMs) {
            // Auto-answer pending permission questions the opencode agent may ask.
            try {
                const questions = await client.question.list({});
                if (questions.data && Array.isArray(questions.data)) {
                    for (const q of questions.data) {
                        if (q.sessionID !== sessionId || answeredQuestions.has(q.id)) {
                            continue;
                        }
                        console.error(`[advisor] Auto-answering question: ${q.id}`);
                        // Pick the first option for each sub-question, or "yes"
                        // when no options are offered.
                        const answers = q.questions.map((qi) => {
                            const opts = qi.options;
                            return opts && opts.length > 0 ? [opts[0].label] : ["yes"];
                        });
                        await client.question.reply({
                            requestID: q.id,
                            answers,
                        });
                        answeredQuestions.add(q.id);
                    }
                }
            }
            catch {
                // question API may not be available, ignore
            }
            // Fetch the session transcript and look for the newest assistant turn.
            const messagesResult = await client.session.messages({
                sessionID: sessionId,
            });
            const messages = messagesResult.data;
            if (!messages || !Array.isArray(messages)) {
                await sleep(pollIntervalMs);
                continue;
            }
            const assistantMsg = [...messages]
                .reverse()
                .find((m) => m.info && typeof m.info === "object" && "role" in m.info && m.info.role === "assistant");
            if (!assistantMsg) {
                await sleep(pollIntervalMs);
                continue;
            }
            // Concatenate the message's text parts into one response string.
            const textParts = [];
            if (Array.isArray(assistantMsg.parts)) {
                for (const part of assistantMsg.parts) {
                    if (part && typeof part === "object" && part.type === "text" && "text" in part) {
                        textParts.push(String(part.text));
                    }
                }
            }
            const currentText = textParts.join("\n");
            // Done when opencode stamps a completion time and we have text.
            const info = assistantMsg.info;
            if (info.time && typeof info.time === "object") {
                const time = info.time;
                if (time.completed && currentText.length > 0) {
                    console.error(`[advisor] Response completed (${currentText.length} chars)`);
                    return currentText;
                }
            }
            // Fallback: treat the text as final once it stops growing for 3 polls.
            if (currentText.length > 0) {
                if (currentText.length === lastTextLength) {
                    stableCount++;
                    if (stableCount >= 3) {
                        console.error(`[advisor] Response stable (${currentText.length} chars)`);
                        return currentText;
                    }
                }
                else {
                    stableCount = 0;
                    lastTextLength = currentText.length;
                }
            }
            await sleep(pollIntervalMs);
        }
        throw new Error("Timed out waiting for advisor response");
    }
    finally {
        // Best-effort cleanup of the temporary session.
        try {
            await client.session.delete({ sessionID: sessionId });
        }
        catch {
            // ignore cleanup errors
        }
    }
}
242
// --- MCP Server setup ---
// NOTE(review): version aligned with package.json/README ("1.0.0");
// it previously reported "2.0.0".
const server = new McpServer({
    name: "advisor",
    version: "1.0.0",
});
247
// Tool 1: ask_advisor — external help for an agent that is genuinely stuck.
server.registerTool("ask_advisor", {
    description: "Get an external opinion from a separate AI when you are STUCK. " +
        "Call this ONLY when: (1) you have already tried at least 2 different approaches and they failed, " +
        "OR (2) you hit an error you genuinely don't understand after investigating it. " +
        "Do NOT call this for routine tasks or things you can figure out yourself. " +
        "You must describe what you already tried and why it didn't work.",
    inputSchema: {
        problem: z.string().describe("What you are trying to do and what is going wrong"),
        failed_attempts: z.string().describe("The specific approaches you already tried and WHY each one failed. " +
            "Be honest and detailed - the advisor needs this to avoid suggesting the same things."),
        error_context: z.string().optional().describe("Error messages, stack traces, or unexpected output you're seeing"),
        code_context: z.string().optional().describe("Relevant code that's involved in the problem"),
    },
}, async ({ problem, failed_attempts, error_context, code_context }) => {
    const systemPrompt = `You are a senior engineer acting as an advisor to an AI coding agent that is stuck. ` +
        `The agent has already tried multiple approaches and failed. Your job is to:\n` +
        `1. Identify what the agent is missing or getting wrong\n` +
        `2. Suggest a DIFFERENT approach the agent hasn't tried\n` +
        `3. If the agent's approach was close, pinpoint the exact mistake\n` +
        `Be direct and specific. No preamble. Start with the most likely fix.`;
    // Assemble the user prompt from required and optional sections.
    const sections = [
        `## What I'm trying to do\n${problem}`,
        `## What I already tried (and why it failed)\n${failed_attempts}`,
    ];
    if (error_context) {
        sections.push(`## Errors I'm seeing\n\`\`\`\n${error_context}\n\`\`\``);
    }
    if (code_context) {
        sections.push(`## Relevant code\n\`\`\`\n${code_context}\n\`\`\``);
    }
    const userPrompt = sections.join("\n\n");
    try {
        const advice = await askOpencode(userPrompt, systemPrompt);
        return { content: [{ type: "text", text: advice }] };
    }
    catch (error) {
        const msg = error instanceof Error ? error.message : String(error);
        return {
            content: [{
                    type: "text",
                    text: `Advisor unavailable: ${msg}\nMake sure opencode is installed and configured with a provider.`,
                }],
            isError: true,
        };
    }
});
300
// Tool 2: get_second_opinion — sanity-check a decision between approaches.
server.registerTool("get_second_opinion", {
    description: "Get a second opinion from a separate AI before committing to an approach you're unsure about. " +
        "Call this ONLY when: (1) you see multiple valid approaches and aren't confident which is best, " +
        "OR (2) you're about to make a significant architectural choice and want a sanity check. " +
        "Do NOT call this for straightforward decisions or trivial choices.",
    inputSchema: {
        situation: z.string().describe("What you're working on and what decision you need to make"),
        option_a: z.string().describe("The approach you're leaning toward and why"),
        option_b: z.string().optional().describe("An alternative approach you're also considering"),
        worry: z.string().optional().describe("What specifically makes you unsure — the risk or trade-off you're worried about"),
    },
}, async ({ situation, option_a, option_b, worry }) => {
    const systemPrompt = `You are a senior engineer giving a quick second opinion to an AI coding agent. ` +
        `The agent is at a decision point and wants a sanity check. Be brief:\n` +
        `- State which option you'd pick and why (1-2 sentences)\n` +
        `- Flag any gotcha the agent might be missing\n` +
        `- If both options are fine, just say so and pick one\n` +
        `No long analysis. The agent just needs a nudge in the right direction.`;
    // Assemble the user prompt from required and optional sections.
    const sections = [
        `## Situation\n${situation}`,
        `## Option A (leaning toward)\n${option_a}`,
    ];
    if (option_b) {
        sections.push(`## Option B\n${option_b}`);
    }
    if (worry) {
        sections.push(`## What I'm worried about\n${worry}`);
    }
    const userPrompt = sections.join("\n\n");
    try {
        const opinion = await askOpencode(userPrompt, systemPrompt);
        return { content: [{ type: "text", text: opinion }] };
    }
    catch (error) {
        const msg = error instanceof Error ? error.message : String(error);
        return {
            content: [{
                    type: "text",
                    text: `Advisor unavailable: ${msg}\nMake sure opencode is installed and configured.`,
                }],
            isError: true,
        };
    }
});
351
// --- Start the server ---
// Wire the MCP server to stdio, then install shutdown handlers that close the
// opencode server (only set when we spawned one ourselves) before exiting.
async function main() {
    const transport = new StdioServerTransport();
    await server.connect(transport);
    console.error("[advisor] MCP Advisor server running on stdio");
}
main().catch((error) => {
    console.error("[advisor] Fatal error:", error);
    process.exit(1);
});
const shutdown = () => {
    if (opcServer) {
        opcServer.close();
    }
    process.exit(0);
};
process.on("SIGINT", shutdown);
process.on("SIGTERM", shutdown);
package/package.json ADDED
@@ -0,0 +1,35 @@
1
+ {
2
+ "name": "@gopersonal/advisor",
3
+ "version": "1.0.0",
4
+ "type": "module",
5
+ "main": "./build/index.js",
6
+ "bin": {
7
+ "advisor-mcp": "./build/index.js"
8
+ },
9
+ "files": [
10
+ "build",
11
+ "README.md"
12
+ ],
13
+ "scripts": {
14
+ "build": "tsc",
15
+ "prepublishOnly": "npm run build",
16
+ "start": "node build/index.js"
17
+ },
18
+ "keywords": ["mcp", "advisor", "opencode", "ai", "model-context-protocol"],
19
+ "author": "gopersonal",
20
+ "license": "ISC",
21
+ "description": "MCP server that gives AI agents a second opinion via OpenCode SDK",
22
+ "repository": {
23
+ "type": "git",
24
+ "url": "https://github.com/gopersonal/calvincode-mcps"
25
+ },
26
+ "dependencies": {
27
+ "@modelcontextprotocol/sdk": "^1.26.0",
28
+ "@opencode-ai/sdk": "^1.2.5",
29
+ "zod": "^3.25.76"
30
+ },
31
+ "devDependencies": {
32
+ "@types/node": "^25.2.3",
33
+ "typescript": "^5.9.3"
34
+ }
35
+ }