consult-llm-mcp 1.0.3 → 1.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,12 +1,14 @@
1
1
  # Consult LLM MCP
2
2
 
3
- An MCP server that lets Claude Code consult stronger AI models (o3, Gemini 2.5 Pro, DeepSeek Reasoner) when you need deeper analysis on complex problems.
3
+ An MCP server that lets Claude Code consult stronger AI models (o3, Gemini 2.5
4
+ Pro, DeepSeek Reasoner) when you need deeper analysis on complex problems.
4
5
 
5
6
  ## Features
6
7
 
7
8
  - Query powerful AI models (o3, Gemini 2.5 Pro, DeepSeek Reasoner) with file
8
9
  context
9
- - Automatic prompt construction from markdown and code files
10
+ - Direct prompt support for simple questions or automatic prompt construction
11
+ from markdown and code files
10
12
  - Git diff to feed code changes
11
13
  - Usage tracking with cost estimation
12
14
  - Comprehensive logging
@@ -100,11 +102,16 @@ models complex questions.
100
102
 
101
103
  ### Parameters
102
104
 
103
- - **files** (required): Array of file paths to process
105
+ - **files** (optional): Array of file paths to process
104
106
 
105
107
  - Markdown files (.md) become the main prompt
106
108
  - Other files are added as context with file paths and code blocks
107
109
 
110
+ - **prompt** (optional): Direct prompt text for simple questions
111
+
112
+ - Alternative to using markdown files
113
+ - Either `files` or `prompt` must be provided
114
+
108
115
  - **model** (optional): LLM model to use
109
116
 
110
117
  - Options: `o3` (default), `gemini-2.5-pro`, `deepseek-reasoner`
@@ -117,6 +124,8 @@ models complex questions.
117
124
 
118
125
  ### Example Usage
119
126
 
127
+ **With files:**
128
+
120
129
  ```json
121
130
  {
122
131
  "files": ["src/auth.ts", "src/middleware.ts", "review.md"],
@@ -128,6 +137,16 @@ models complex questions.
128
137
  }
129
138
  ```
130
139
 
140
+ **With direct prompt:**
141
+
142
+ ```json
143
+ {
144
+ "prompt": "Analyze the performance implications of using async/await vs Promise.then() in Node.js",
145
+ "files": ["src/database.ts"],
146
+ "model": "gemini-2.5-pro"
147
+ }
148
+ ```
149
+
131
150
  ## Supported Models
132
151
 
133
152
  - **o3**: OpenAI's reasoning model ($2/$8 per million tokens)
@@ -188,38 +207,18 @@ Tokens: 3440 input, 5880 output | Cost: $0.014769 (input: $0.001892, output: $0.
188
207
 
189
208
  ## CLAUDE.md example
190
209
 
191
- To help Claude Code understand when and how to use this tool, you can add the
192
- following to your project's `CLAUDE.md` file:
210
+ While not strictly necessary, to help Claude Code understand when and how to use
211
+ this tool, you can optionally add something like the following to your project's
212
+ `CLAUDE.md` file:
193
213
 
194
- ````markdown
214
+ ```markdown
195
215
  ## consult-llm-mcp
196
216
 
197
217
  Use the `consult_llm` MCP tool to ask a more powerful AI for help with complex
198
- problems. Write your problem description in a markdown file with as much detail
199
- as possible and pass relevant code files as context. Include files to git_diff
200
- when asking feedback for changes.
218
+ problems. Include files to git_diff when asking feedback for changes.
201
219
 
202
220
  Use Gemini 2.5 Pro.
203
-
204
- ### Example
205
-
206
- ```bash
207
- echo "<very detailed plan or question to be reviewed by the smart LLM>" > task.md
208
221
  ```
209
222
 
210
- Tool call:
211
-
212
- ```json
213
- {
214
- "files": [
215
- "server/src/db.ts",
216
- "server/src/routes/conversations.ts",
217
- "task.md"
218
- ],
219
- "git_diff": {
220
- "files": ["server/src/db.ts", "server/src/routes/conversations.ts"]
221
- },
222
- "model": "gemini-2.5-pro"
223
- }
224
- ```
225
- ````
223
+ However, Claude Code seems to know pretty well when to use this MCP even without
224
+ this instruction.
package/dist/llm-query.js CHANGED
@@ -1,10 +1,30 @@
1
1
  import { getClientForModel } from './llm.js';
2
2
  import { calculateCost } from './llm-cost.js';
3
+ const SYSTEM_PROMPT = `You are an expert software engineering consultant being asked to analyze complex problems that require deep technical insight. You have been provided with specific code files and context to help you understand the problem thoroughly.
4
+
5
+ Your role is to:
6
+ - Provide detailed technical analysis of the problem
7
+ - Suggest specific, actionable solutions with code examples where helpful
8
+ - Consider architectural implications and best practices
9
+ - Identify potential edge cases or risks
10
+ - Explain your reasoning clearly
11
+
12
+ When reviewing code changes (git diffs), focus on:
13
+ - Correctness and potential bugs
14
+ - Performance implications
15
+ - Security considerations
16
+ - Maintainability and code quality
17
+ - Integration with existing codebase patterns
18
+
19
+ Provide concrete, implementable recommendations rather than general advice. Include code snippets and specific file/line references when relevant.`;
3
20
  export async function queryLlm(prompt, model) {
4
21
  const { client } = getClientForModel(model);
5
22
  const completion = await client.chat.completions.create({
6
23
  model,
7
- messages: [{ role: 'user', content: prompt }],
24
+ messages: [
25
+ { role: 'system', content: SYSTEM_PROMPT },
26
+ { role: 'user', content: prompt },
27
+ ],
8
28
  });
9
29
  const response = completion.choices[0]?.message?.content;
10
30
  if (!response) {
package/dist/main.js CHANGED
@@ -36,17 +36,17 @@ async function handleConsultLlm(args) {
36
36
  .join(', ');
37
37
  throw new Error(`Invalid request parameters: ${errors}`);
38
38
  }
39
- const { files, git_diff } = parseResult.data;
39
+ const { files, prompt: directPrompt, git_diff } = parseResult.data;
40
40
  const model = parseResult.data.model ?? config.defaultModel ?? 'o3';
41
41
  logToolCall('consult_llm', args);
42
- // Process files
43
- const { markdownFiles, otherFiles } = processFiles(files);
42
+ // Process files (if provided)
43
+ const { markdownFiles, otherFiles } = files ? processFiles(files) : { markdownFiles: [], otherFiles: [] };
44
44
  // Generate git diff
45
45
  const gitDiffOutput = git_diff
46
46
  ? generateGitDiff(git_diff.repo_path, git_diff.files, git_diff.base_ref)
47
47
  : undefined;
48
48
  // Build prompt
49
- const prompt = buildPrompt(markdownFiles, otherFiles, gitDiffOutput);
49
+ const prompt = buildPrompt(directPrompt, markdownFiles, otherFiles, gitDiffOutput);
50
50
  logPrompt(model, prompt);
51
51
  // Query LLM
52
52
  const { response, costInfo } = await queryLlm(prompt, model);
@@ -1,4 +1,4 @@
1
- export declare function buildPrompt(markdownFiles: string[], otherFiles: Array<{
1
+ export declare function buildPrompt(directPrompt: string | undefined, markdownFiles: string[], otherFiles: Array<{
2
2
  path: string;
3
3
  content: string;
4
4
  }>, gitDiffOutput?: string): string;
@@ -1,4 +1,4 @@
1
- export function buildPrompt(markdownFiles, otherFiles, gitDiffOutput) {
1
+ export function buildPrompt(directPrompt, markdownFiles, otherFiles, gitDiffOutput) {
2
2
  const promptParts = [];
3
3
  if (gitDiffOutput?.trim()) {
4
4
  promptParts.push('## Git Diff\n```diff', gitDiffOutput, '```\n');
@@ -12,6 +12,9 @@ export function buildPrompt(markdownFiles, otherFiles, gitDiffOutput) {
12
12
  promptParts.push('```\n');
13
13
  }
14
14
  }
15
+ if (directPrompt) {
16
+ promptParts.push(directPrompt);
17
+ }
15
18
  if (markdownFiles.length > 0) {
16
19
  promptParts.push(...markdownFiles);
17
20
  }
package/dist/schema.d.ts CHANGED
@@ -6,7 +6,8 @@ export declare const SupportedChatModel: z.ZodEnum<{
6
6
  }>;
7
7
  export type SupportedChatModel = z.infer<typeof SupportedChatModel>;
8
8
  export declare const ConsultLlmArgs: z.ZodObject<{
9
- files: z.ZodArray<z.ZodString>;
9
+ files: z.ZodOptional<z.ZodArray<z.ZodString>>;
10
+ prompt: z.ZodOptional<z.ZodString>;
10
11
  model: z.ZodOptional<z.ZodEnum<{
11
12
  o3: "o3";
12
13
  "gemini-2.5-pro": "gemini-2.5-pro";
@@ -31,6 +32,10 @@ export declare const toolSchema: {
31
32
  };
32
33
  readonly description: "Array of file paths to process. Markdown files (.md) become the main prompt, other files are added as context with file paths and code blocks. \n\nIn the markdown file(s), be clear about what you want the LLM to do: implement code, review code, explain concepts, analyze bugs, etc.";
33
34
  };
35
+ readonly prompt: {
36
+ readonly type: "string";
37
+ readonly description: "Direct prompt text for simple questions. Alternative to using markdown files.";
38
+ };
34
39
  readonly model: {
35
40
  readonly type: "string";
36
41
  readonly enum: readonly ["o3", "gemini-2.5-pro", "deepseek-reasoner"];
@@ -61,6 +66,6 @@ export declare const toolSchema: {
61
66
  readonly description: "Generate git diff output to include as context. Shows uncommitted changes by default.";
62
67
  };
63
68
  };
64
- readonly required: readonly ["files"];
69
+ readonly required: readonly [];
65
70
  };
66
71
  };
package/dist/schema.js CHANGED
@@ -4,8 +4,10 @@ export const SupportedChatModel = z.enum([
4
4
  'gemini-2.5-pro',
5
5
  'deepseek-reasoner',
6
6
  ]);
7
- export const ConsultLlmArgs = z.object({
8
- files: z.array(z.string()).min(1, 'At least one file is required'),
7
+ export const ConsultLlmArgs = z
8
+ .object({
9
+ files: z.array(z.string()).optional(),
10
+ prompt: z.string().optional(),
9
11
  model: SupportedChatModel.optional(),
10
12
  git_diff: z
11
13
  .object({
@@ -16,7 +18,8 @@ export const ConsultLlmArgs = z.object({
16
18
  base_ref: z.string().optional().default('HEAD'),
17
19
  })
18
20
  .optional(),
19
- });
21
+ })
22
+ .refine((data) => data.files || data.prompt, 'Either files or prompt must be provided');
20
23
  export const toolSchema = {
21
24
  name: 'consult_llm',
22
25
  description: `Ask a more powerful AI for help with complex problems. Write your problem description in a markdown file and pass relevant code files as context.
@@ -32,6 +35,10 @@ Be specific about what you want: code implementation, code review, bug analysis,
32
35
 
33
36
  In the markdown file(s), be clear about what you want the LLM to do: implement code, review code, explain concepts, analyze bugs, etc.`,
34
37
  },
38
+ prompt: {
39
+ type: 'string',
40
+ description: 'Direct prompt text for simple questions. Alternative to using markdown files.',
41
+ },
35
42
  model: {
36
43
  type: 'string',
37
44
  enum: ['o3', 'gemini-2.5-pro', 'deepseek-reasoner'],
@@ -60,6 +67,6 @@ In the markdown file(s), be clear about what you want the LLM to do: implement c
60
67
  description: 'Generate git diff output to include as context. Shows uncommitted changes by default.',
61
68
  },
62
69
  },
63
- required: ['files'],
70
+ required: [],
64
71
  },
65
72
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "consult-llm-mcp",
3
- "version": "1.0.3",
3
+ "version": "1.0.4",
4
4
  "description": "MCP server for consulting powerful AI models",
5
5
  "type": "module",
6
6
  "main": "dist/main.js",