consult-llm-mcp 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +146 -0
- package/dist/cli.d.ts +2 -0
- package/dist/cli.js +39 -0
- package/dist/config.d.ts +19 -0
- package/dist/config.js +22 -0
- package/dist/file.d.ts +7 -0
- package/dist/file.js +23 -0
- package/dist/git.d.ts +1 -0
- package/dist/git.js +17 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.js +25 -0
- package/dist/llm-cost.d.ts +7 -0
- package/dist/llm-cost.js +26 -0
- package/dist/llm-query.d.ts +5 -0
- package/dist/llm-query.js +20 -0
- package/dist/llm.d.ts +5 -0
- package/dist/llm.js +34 -0
- package/dist/logger.d.ts +4 -0
- package/dist/logger.js +30 -0
- package/dist/main.d.ts +2 -0
- package/dist/main.js +70 -0
- package/dist/mcp-server.d.ts +2 -0
- package/dist/mcp-server.js +220 -0
- package/dist/prompt-builder.d.ts +4 -0
- package/dist/prompt-builder.js +19 -0
- package/dist/schema.d.ts +66 -0
- package/dist/schema.js +61 -0
- package/package.json +57 -0
package/README.md
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
1
|
+
# Consult LLM MCP
|
|
2
|
+
|
|
3
|
+
An MCP (Model Context Protocol) server that allows you to consult more powerful
|
|
4
|
+
AI models with your code and questions.
|
|
5
|
+
|
|
6
|
+
## Features
|
|
7
|
+
|
|
8
|
+
- Query powerful AI models (o3, Gemini 2.5 Pro, DeepSeek Reasoner) with file
|
|
9
|
+
context
|
|
10
|
+
- Automatic prompt construction from markdown and code files
|
|
11
|
+
- Git diff support to include code changes as context
|
|
12
|
+
- Usage tracking with cost estimation
|
|
13
|
+
- Comprehensive logging
|
|
14
|
+
|
|
15
|
+
## Installation
|
|
16
|
+
|
|
17
|
+
```bash
|
|
18
|
+
npm install
|
|
19
|
+
npm run build
|
|
20
|
+
npm install -g .
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
## Configuration
|
|
24
|
+
|
|
25
|
+
Set the following environment variables:
|
|
26
|
+
|
|
27
|
+
- `OPENAI_API_KEY` - Your OpenAI API key (required for o3)
|
|
28
|
+
- `GEMINI_API_KEY` - Your Google AI API key (required for Gemini models)
|
|
29
|
+
- `DEEPSEEK_API_KEY` - Your DeepSeek API key (required for DeepSeek models)
|
|
30
|
+
- `CONSULT_LLM_DEFAULT_MODEL` - Override the default model (optional, defaults
|
|
31
|
+
to 'o3')
|
|
32
|
+
|
|
33
|
+
## Usage with Claude Code
|
|
34
|
+
|
|
35
|
+
Add the MCP server to Claude Code:
|
|
36
|
+
|
|
37
|
+
```bash
|
|
38
|
+
claude mcp add consult-llm -- consult-llm-mcp
|
|
39
|
+
```
|
|
40
|
+
|
|
41
|
+
Or for global availability:
|
|
42
|
+
|
|
43
|
+
```bash
|
|
44
|
+
claude mcp add --scope user consult-llm -- consult-llm-mcp
|
|
45
|
+
```
|
|
46
|
+
|
|
47
|
+
## MCP Tool: consult_llm
|
|
48
|
+
|
|
49
|
+
The server provides a single tool called `consult_llm` for asking powerful AI
|
|
50
|
+
models complex questions.
|
|
51
|
+
|
|
52
|
+
### Parameters
|
|
53
|
+
|
|
54
|
+
- **files** (required): Array of file paths to process
|
|
55
|
+
|
|
56
|
+
- Markdown files (.md) become the main prompt
|
|
57
|
+
- Other files are added as context with file paths and code blocks
|
|
58
|
+
|
|
59
|
+
- **model** (optional): LLM model to use
|
|
60
|
+
|
|
61
|
+
- Options: `o3` (default), `gemini-2.5-pro`, `deepseek-reasoner`
|
|
62
|
+
|
|
63
|
+
- **git_diff** (optional): Include git diff output as context
|
|
64
|
+
- **files** (required): Specific files to include in diff
|
|
65
|
+
- **repo_path** (optional): Path to git repository (defaults to current
|
|
66
|
+
directory)
|
|
67
|
+
- **base_ref** (optional): Git reference to compare against (defaults to HEAD)
|
|
68
|
+
|
|
69
|
+
### Example Usage
|
|
70
|
+
|
|
71
|
+
```json
|
|
72
|
+
{
|
|
73
|
+
"files": ["src/auth.ts", "src/middleware.ts", "review.md"],
|
|
74
|
+
"model": "o3",
|
|
75
|
+
"git_diff": {
|
|
76
|
+
"files": ["src/auth.ts", "src/middleware.ts"],
|
|
77
|
+
"base_ref": "main"
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
```
|
|
81
|
+
|
|
82
|
+
## Supported Models
|
|
83
|
+
|
|
84
|
+
- **o3**: OpenAI's reasoning model ($2/$8 per million tokens)
|
|
85
|
+
- **gemini-2.5-pro**: Google's Gemini 2.5 Pro ($1.25/$10 per million tokens)
|
|
86
|
+
- **deepseek-reasoner**: DeepSeek's reasoning model ($0.55/$2.19 per million
|
|
87
|
+
tokens)
|
|
88
|
+
|
|
89
|
+
## Logging
|
|
90
|
+
|
|
91
|
+
All prompts and responses are logged to `~/.consult-llm-mcp/logs/mcp.log` with:
|
|
92
|
+
|
|
93
|
+
- Tool call parameters
|
|
94
|
+
- Full prompts and responses
|
|
95
|
+
- Token usage and cost estimates
|
|
96
|
+
|
|
97
|
+
## CLAUDE.md example
|
|
98
|
+
|
|
99
|
+
To help Claude Code understand when and how to use this tool, you can add the
|
|
100
|
+
following to your project's `CLAUDE.md` file:
|
|
101
|
+
|
|
102
|
+
````markdown
|
|
103
|
+
## consult-llm-mcp
|
|
104
|
+
|
|
105
|
+
Use the `consult_llm` MCP tool to ask a more powerful AI for help with complex
|
|
106
|
+
problems. Write your problem description in a markdown file with as much detail
|
|
107
|
+
as possible and pass relevant code files as context. Include files to git_diff
|
|
108
|
+
when asking for feedback on changes.
|
|
109
|
+
|
|
110
|
+
Use Gemini 2.5 Pro.
|
|
111
|
+
|
|
112
|
+
### Example
|
|
113
|
+
|
|
114
|
+
```bash
|
|
115
|
+
echo "<very detailed plan or question to be reviewed by the smart LLM>" > task.md
|
|
116
|
+
```
|
|
117
|
+
|
|
118
|
+
Tool call:
|
|
119
|
+
|
|
120
|
+
```json
|
|
121
|
+
{
|
|
122
|
+
"files": [
|
|
123
|
+
"server/src/db.ts",
|
|
124
|
+
"server/src/routes/conversations.ts",
|
|
125
|
+
"task.md"
|
|
126
|
+
],
|
|
127
|
+
"git_diff": {
|
|
128
|
+
"files": ["server/src/db.ts", "server/src/routes/conversations.ts"]
|
|
129
|
+
},
|
|
130
|
+
"model": "gemini-2.5-pro"
|
|
131
|
+
}
|
|
132
|
+
```
|
|
133
|
+
````
|
|
134
|
+
|
|
135
|
+
## Development
|
|
136
|
+
|
|
137
|
+
```bash
|
|
138
|
+
# Run in development mode
|
|
139
|
+
npm run dev
|
|
140
|
+
|
|
141
|
+
# Build TypeScript
|
|
142
|
+
npm run build
|
|
143
|
+
|
|
144
|
+
# Format code
|
|
145
|
+
npm run format
|
|
146
|
+
```
|
package/dist/cli.d.ts
ADDED
package/dist/cli.js
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
#!/usr/bin/env node
import { getClientForModel } from './index.js';

/** Prints CLI usage to stderr and exits with a failure code. */
function printUsageAndExit() {
    console.error('Usage: llmtool <prompt>');
    console.error('Environment variables:');
    console.error(' OPENAI_API_KEY - Required for GPT models');
    console.error(' GEMINI_API_KEY - Required for Gemini models');
    console.error(' MODEL - Model to use (default: gpt-3.5-turbo)');
    process.exit(1);
}

/**
 * CLI entry point: joins all argv words into a single prompt, sends it to
 * the model selected by the MODEL env var, and prints the reply to stdout.
 */
async function main() {
    const cliArgs = process.argv.slice(2);
    if (cliArgs.length === 0) {
        printUsageAndExit();
    }
    const prompt = cliArgs.join(' ');
    const model = process.env.MODEL || 'gpt-3.5-turbo';
    try {
        const { client } = getClientForModel(model);
        const completion = await client.chat.completions.create({
            model,
            messages: [{ role: 'user', content: prompt }],
            temperature: 0.7,
        });
        const answer = completion.choices[0]?.message?.content;
        if (!answer) {
            console.error('No response from the model');
            process.exit(1);
        }
        console.log(answer);
    }
    catch (error) {
        console.error('Error:', error instanceof Error ? error.message : String(error));
        process.exit(1);
    }
}

main().catch((error) => {
    console.error('Fatal error:', error);
    process.exit(1);
});
|
package/dist/config.d.ts
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
import { z } from 'zod/v4';
// Runtime schema for configuration derived from environment variables
// (populated and validated in config.js).
declare const Config: z.ZodObject<{
    openaiApiKey: z.ZodOptional<z.ZodString>;
    geminiApiKey: z.ZodOptional<z.ZodString>;
    deepseekApiKey: z.ZodOptional<z.ZodString>;
    defaultModel: z.ZodOptional<z.ZodEnum<{
        o3: "o3";
        "gemini-2.5-pro": "gemini-2.5-pro";
        "deepseek-reasoner": "deepseek-reasoner";
    }>>;
}, z.core.$strip>;
export type Config = z.infer<typeof Config>;
// Parsed, validated configuration singleton; every key is optional here —
// provider-specific keys are only needed when that provider's model is used.
export declare const config: {
    openaiApiKey?: string | undefined;
    geminiApiKey?: string | undefined;
    deepseekApiKey?: string | undefined;
    defaultModel?: "o3" | "gemini-2.5-pro" | "deepseek-reasoner" | undefined;
};
export {};
|
package/dist/config.js
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
import { z } from 'zod/v4';
import { SupportedChatModel } from './schema.js';

// Shape of the configuration read from environment variables. All keys are
// optional; missing provider keys only matter when that provider is used.
const ConfigSchema = z.object({
    openaiApiKey: z.string().optional(),
    geminiApiKey: z.string().optional(),
    deepseekApiKey: z.string().optional(),
    defaultModel: SupportedChatModel.optional(),
});

const result = ConfigSchema.safeParse({
    openaiApiKey: process.env.OPENAI_API_KEY,
    geminiApiKey: process.env.GEMINI_API_KEY,
    deepseekApiKey: process.env.DEEPSEEK_API_KEY,
    defaultModel: process.env.CONSULT_LLM_DEFAULT_MODEL,
});

// Fail fast at import time: report every invalid variable, then exit.
if (!result.success) {
    console.error('❌ Invalid environment variables:');
    result.error.issues.forEach((issue) => {
        console.error(` ${issue.path.join('.')}: ${issue.message}`);
    });
    process.exit(1);
}

// Validated configuration singleton used throughout the package.
export const config = result.data;
|
package/dist/file.d.ts
ADDED
package/dist/file.js
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
import { readFileSync, existsSync } from 'fs';
import { resolve } from 'path';

/**
 * Reads every path in `files` and splits the contents into markdown
 * documents (used as the main prompt) and all other files (kept with
 * their original path, used as code context).
 *
 * @param {string[]} files - Paths as given by the caller; resolved against cwd.
 * @returns {{ markdownFiles: string[], otherFiles: { path: string, content: string }[] }}
 * @throws {Error} listing every path that does not exist on disk.
 */
export function processFiles(files) {
    const absolutePaths = files.map((file) => resolve(file));
    const missing = absolutePaths.filter((path) => !existsSync(path));
    if (missing.length > 0) {
        throw new Error(`Files not found: ${missing.join(', ')}`);
    }
    const markdownFiles = [];
    const otherFiles = [];
    files.forEach((originalPath, index) => {
        const content = readFileSync(absolutePaths[index], 'utf-8');
        // Classification uses the caller-supplied path, matching the original.
        const isMarkdown = originalPath.endsWith('.md') || originalPath.endsWith('.markdown');
        if (isMarkdown) {
            markdownFiles.push(content);
        }
        else {
            otherFiles.push({ path: originalPath, content });
        }
    });
    return { markdownFiles, otherFiles };
}
|
package/dist/git.d.ts
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
// Runs `git diff <baseRef> -- <files...>` in repoPath (defaults to the current
// working directory) and returns the diff text, or an error message string.
export declare function generateGitDiff(repoPath: string | undefined, files: string[], baseRef?: string): string;
|
package/dist/git.js
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
import { execFileSync } from 'child_process';

/**
 * Returns `git diff <baseRef> -- <files...>` for the given repository.
 *
 * @param {string|undefined} repoPath - Repository to run in; defaults to process.cwd().
 * @param {string[]} files - Files to restrict the diff to; must be non-empty.
 * @param {string} [baseRef='HEAD'] - Git reference to compare against.
 * @returns {string} The diff text, or an `Error generating git diff: ...`
 *   message — failures are reported in-band rather than thrown.
 */
export function generateGitDiff(repoPath, files, baseRef = 'HEAD') {
    try {
        const repo = repoPath || process.cwd();
        if (files.length === 0) {
            throw new Error('No files specified for git diff');
        }
        // Pass arguments as an array (no shell): the previous string-built
        // command broke on paths with spaces and allowed shell metacharacters
        // in file names or baseRef to inject arbitrary commands.
        return execFileSync('git', ['diff', baseRef, '--', ...files], {
            cwd: repo,
            encoding: 'utf-8',
            maxBuffer: 1024 * 1024, // 1MB cap, same as before
        });
    }
    catch (error) {
        return `Error generating git diff: ${error instanceof Error ? error.message : String(error)}`;
    }
}
|
package/dist/index.d.ts
ADDED
package/dist/index.js
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
import OpenAI from 'openai';
import { config } from './config.js';

// Lazily-created OpenAI-compatible clients, one per provider.
const clients = {};

/**
 * Returns an OpenAI-compatible client for the given model name.
 *
 * Consistency fix: this legacy copy only routed `gpt-*`, so the package's
 * own supported models (`o3`, `deepseek-reasoner`) raised "Unable to
 * determine LLM provider". Routing now mirrors llm.js: OpenAI handles
 * gpt-* and o3, Google handles gemini-*, DeepSeek handles deepseek-*.
 * Clients are cached so each provider is constructed at most once.
 *
 * @param {string} model - Model identifier.
 * @returns {{ client: OpenAI }}
 * @throws {Error} when no provider matches the model name.
 */
export function getClientForModel(model) {
    if (model.startsWith('gpt-') || model === 'o3') {
        if (!clients.openai) {
            clients.openai = new OpenAI({
                apiKey: config.openaiApiKey,
            });
        }
        return { client: clients.openai };
    }
    if (model.startsWith('gemini-')) {
        if (!clients.gemini) {
            clients.gemini = new OpenAI({
                apiKey: config.geminiApiKey,
                baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai/',
            });
        }
        return { client: clients.gemini };
    }
    if (model.startsWith('deepseek-')) {
        if (!clients.deepseek) {
            clients.deepseek = new OpenAI({
                apiKey: config.deepseekApiKey,
                baseURL: 'https://api.deepseek.com',
            });
        }
        return { client: clients.deepseek };
    }
    throw new Error(`Unable to determine LLM provider for model: ${model}`);
}
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
import { CompletionUsage } from 'openai/resources.js';
import { type SupportedChatModel } from './schema.js';
// Estimates the USD cost of a completion from token usage. Returns all zeros
// when usage is undefined or the model has no pricing entry (see llm-cost.js).
export declare function calculateCost(usage: CompletionUsage | undefined, model: SupportedChatModel): {
    inputCost: number;
    outputCost: number;
    totalCost: number;
};
|
package/dist/llm-cost.js
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
// USD pricing per one million tokens, keyed by model name.
const MODEL_PRICING = {
    o3: {
        inputCostPerMillion: 2.0,
        outputCostPerMillion: 8.0,
    },
    'gemini-2.5-pro': {
        inputCostPerMillion: 1.25,
        outputCostPerMillion: 10.0,
    },
    'deepseek-reasoner': {
        inputCostPerMillion: 0.55,
        outputCostPerMillion: 2.19,
    },
};

/**
 * Estimates the USD cost of a completion from its token usage.
 * Models without a pricing entry, or missing usage fields, cost zero.
 *
 * @param usage - OpenAI-style usage object (`prompt_tokens`/`completion_tokens`), may be undefined.
 * @param model - Model name used to look up pricing.
 * @returns {{ inputCost: number, outputCost: number, totalCost: number }}
 */
export function calculateCost(usage, model) {
    const pricing = MODEL_PRICING[model];
    if (!pricing) {
        return { inputCost: 0, outputCost: 0, totalCost: 0 };
    }
    const promptTokens = usage?.prompt_tokens || 0;
    const completionTokens = usage?.completion_tokens || 0;
    const inputCost = (promptTokens / 1_000_000) * pricing.inputCostPerMillion;
    const outputCost = (completionTokens / 1_000_000) * pricing.outputCostPerMillion;
    return { inputCost, outputCost, totalCost: inputCost + outputCost };
}
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
import { getClientForModel } from './llm.js';
import { calculateCost } from './llm-cost.js';

/** Formats token counts and estimated cost for a completion, for logging. */
function describeCost(usage, model) {
    if (!usage) {
        return 'Usage data not available';
    }
    const { inputCost, outputCost, totalCost } = calculateCost(usage, model);
    return `Tokens: ${usage.prompt_tokens} input, ${usage.completion_tokens} output | Cost: $${totalCost.toFixed(6)} (input: $${inputCost.toFixed(6)}, output: $${outputCost.toFixed(6)})`;
}

/**
 * Sends a single-message chat completion to the given model.
 *
 * @param {string} prompt - Full prompt text for the user message.
 * @param {string} model - Model name; also selects the provider client.
 * @returns {Promise<{ response: string, costInfo: string }>}
 * @throws {Error} if the model returns no content.
 */
export async function queryLlm(prompt, model) {
    const { client } = getClientForModel(model);
    const completion = await client.chat.completions.create({
        model,
        messages: [{ role: 'user', content: prompt }],
    });
    const answer = completion.choices[0]?.message?.content;
    if (!answer) {
        throw new Error('No response from the model');
    }
    return { response: answer, costInfo: describeCost(completion.usage, model) };
}
|
package/dist/llm.d.ts
ADDED
package/dist/llm.js
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
import OpenAI from 'openai';
import { config } from './config.js';

// One lazily-constructed client per provider, reused across calls.
const clients = {};

/** Cached OpenAI client (default base URL). */
function openaiClient() {
    if (!clients.openai) {
        clients.openai = new OpenAI({
            apiKey: config.openaiApiKey,
        });
    }
    return clients.openai;
}

/** Cached client for Google's OpenAI-compatible Gemini endpoint. */
function geminiClient() {
    if (!clients.gemini) {
        clients.gemini = new OpenAI({
            apiKey: config.geminiApiKey,
            baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai/',
        });
    }
    return clients.gemini;
}

/** Cached client for DeepSeek's OpenAI-compatible endpoint. */
function deepseekClient() {
    if (!clients.deepseek) {
        clients.deepseek = new OpenAI({
            apiKey: config.deepseekApiKey,
            baseURL: 'https://api.deepseek.com',
        });
    }
    return clients.deepseek;
}

/**
 * Picks the provider for a model name and returns its (cached) client.
 * OpenAI serves gpt-* and o3; Google serves gemini-*; DeepSeek serves deepseek-*.
 *
 * @param {string} model - Model identifier.
 * @returns {{ client: OpenAI }}
 * @throws {Error} when no provider matches the model name.
 */
export function getClientForModel(model) {
    if (model.startsWith('gpt-') || model === 'o3') {
        return { client: openaiClient() };
    }
    if (model.startsWith('gemini-')) {
        return { client: geminiClient() };
    }
    if (model.startsWith('deepseek-')) {
        return { client: deepseekClient() };
    }
    throw new Error(`Unable to determine LLM provider for model: ${model}`);
}
|
package/dist/logger.d.ts
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
1
|
+
// Appends a timestamped entry to the package log file (see logger.js).
export declare function logToFile(content: string): void;
// Records an incoming MCP tool call and its JSON-encoded arguments.
export declare function logToolCall(name: string, args: unknown): void;
// Records the full prompt sent to a model.
export declare function logPrompt(model: string, prompt: string): void;
// Records a model's response along with its token/cost summary.
export declare function logResponse(model: string, response: string, costInfo: string): void;
|
package/dist/logger.js
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
import { appendFileSync, mkdirSync } from 'fs';
import { join } from 'path';
import { homedir } from 'os';

// All entries go to a single log file under the user's home directory.
const logDir = join(homedir(), '.consult-llm-mcp', 'logs');
const logFile = join(logDir, 'mcp.log');
try {
    mkdirSync(logDir, { recursive: true });
}
catch (error) {
    // Directory might already exist
}

// Visual divider between log entries.
const SEPARATOR = '='.repeat(80);

/** Appends a timestamped entry to the log file; logging failures never throw. */
export function logToFile(content) {
    const entry = `[${new Date().toISOString()}] ${content}\n`;
    try {
        appendFileSync(logFile, entry);
    }
    catch (error) {
        console.error('Failed to write to log file:', error);
    }
}

/** Records an incoming MCP tool call and its JSON-encoded arguments. */
export function logToolCall(name, args) {
    logToFile(`TOOL CALL: ${name}\nArguments: ${JSON.stringify(args, null, 2)}\n${SEPARATOR}`);
}

/** Records the full prompt sent to a model. */
export function logPrompt(model, prompt) {
    logToFile(`PROMPT (model: ${model}):\n${prompt}\n${SEPARATOR}`);
}

/** Records a model's response along with its token/cost summary. */
export function logResponse(model, response, costInfo) {
    logToFile(`RESPONSE (model: ${model}):\n${response}\n${costInfo}\n${SEPARATOR}`);
}
|
package/dist/main.d.ts
ADDED
package/dist/main.js
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
#!/usr/bin/env node
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import { CallToolRequestSchema, ListToolsRequestSchema, } from '@modelcontextprotocol/sdk/types.js';
import { config } from './config.js';
import { ConsultLlmArgs, toolSchema } from './schema.js';
import { processFiles } from './file.js';
import { generateGitDiff } from './git.js';
import { buildPrompt } from './prompt-builder.js';
import { queryLlm } from './llm-query.js';
import { logToolCall, logPrompt, logResponse } from './logger.js';
// MCP server exposing the single `consult_llm` tool over stdio.
const server = new Server({
    name: 'consult_llm',
    version: '1.0.0',
}, {
    capabilities: {
        tools: {},
    },
});
// Advertise the tool definition declared in schema.js.
server.setRequestHandler(ListToolsRequestSchema, async () => {
    return {
        tools: [toolSchema],
    };
});
// Validates the tool arguments, assembles the prompt from files and an
// optional git diff, queries the chosen model, and returns its text
// response as MCP content.
async function handleConsultLlm(args) {
    const parseResult = ConsultLlmArgs.safeParse(args);
    if (!parseResult.success) {
        const errors = parseResult.error.issues
            .map((issue) => `${issue.path.join('.')}: ${issue.message}`)
            .join(', ');
        throw new Error(`Invalid request parameters: ${errors}`);
    }
    const { files, git_diff } = parseResult.data;
    // Model precedence: explicit argument, then CONSULT_LLM_DEFAULT_MODEL, then 'o3'.
    const model = parseResult.data.model ?? config.defaultModel ?? 'o3';
    logToolCall('consult_llm', args);
    // Process files
    const { markdownFiles, otherFiles } = processFiles(files);
    // Generate git diff
    const gitDiffOutput = git_diff
        ? generateGitDiff(git_diff.repo_path, git_diff.files, git_diff.base_ref)
        : undefined;
    // Build prompt
    const prompt = buildPrompt(markdownFiles, otherFiles, gitDiffOutput);
    logPrompt(model, prompt);
    // Query LLM
    const { response, costInfo } = await queryLlm(prompt, model);
    logResponse(model, response, costInfo);
    return {
        content: [{ type: 'text', text: response }],
    };
}
// Route tool calls; any failure is wrapped so the MCP client sees one message.
server.setRequestHandler(CallToolRequestSchema, async (request) => {
    if (request.params.name === 'consult_llm') {
        try {
            return await handleConsultLlm(request.params.arguments);
        }
        catch (error) {
            throw new Error(`LLM query failed: ${error instanceof Error ? error.message : String(error)}`);
        }
    }
    throw new Error(`Unknown tool: ${request.params.name}`);
});
// Connects the server to stdio so an MCP client (e.g. Claude Code) can drive it.
async function main() {
    const transport = new StdioServerTransport();
    await server.connect(transport);
}
main().catch((error) => {
    console.error('Fatal error:', error);
    process.exit(1);
});
|
|
@@ -0,0 +1,220 @@
|
|
|
1
|
+
#!/usr/bin/env node
// NOTE(review): this looks like a legacy, self-contained variant of the
// server (tool name `llm_query`, logs under ~/.llmtool); main.js appears to
// be the modular successor — confirm whether this file is still shipped.
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import { CallToolRequestSchema, ListToolsRequestSchema, } from '@modelcontextprotocol/sdk/types.js';
import { getClientForModel } from './llm.js';
import { readFileSync, existsSync, appendFileSync, mkdirSync } from 'fs';
import { resolve, join } from 'path';
import { homedir } from 'os';
import { execSync } from 'child_process';
// USD pricing per million tokens; models without an entry are costed at $0.
const MODEL_PRICING = {
    o3: {
        inputCostPerMillion: 2.0,
        outputCostPerMillion: 8.0,
    },
    'gemini-2.5-pro': {
        inputCostPerMillion: 1.25,
        outputCostPerMillion: 10.0,
    },
};
// Estimates input/output/total USD cost from token usage.
function calculateCost(usage, model) {
    const pricing = MODEL_PRICING[model];
    if (!pricing) {
        return { inputCost: 0, outputCost: 0, totalCost: 0 };
    }
    const inputTokens = usage?.prompt_tokens || 0;
    const outputTokens = usage?.completion_tokens || 0;
    const inputCost = (inputTokens / 1_000_000) * pricing.inputCostPerMillion;
    const outputCost = (outputTokens / 1_000_000) * pricing.outputCostPerMillion;
    const totalCost = inputCost + outputCost;
    return { inputCost, outputCost, totalCost };
}
// Setup logging directory
const logDir = join(homedir(), '.llmtool', 'logs');
const logFile = join(logDir, 'mcp.log');
try {
    mkdirSync(logDir, { recursive: true });
}
catch (error) {
    // Directory might already exist
}
// Appends a timestamped entry; logging failures are reported, never thrown.
function logToFile(content) {
    const timestamp = new Date().toISOString();
    const logEntry = `[${timestamp}] ${content}\n`;
    try {
        appendFileSync(logFile, logEntry);
    }
    catch (error) {
        console.error('Failed to write to log file:', error);
    }
}
const server = new Server({
    name: 'llmtool',
    version: '1.0.0',
}, {
    capabilities: {
        tools: {},
    },
});
// Advertise the single `llm_query` tool with an inline JSON schema.
server.setRequestHandler(ListToolsRequestSchema, async () => {
    return {
        tools: [
            {
                name: 'llm_query',
                description: 'Ask a more powerful AI for help with complex problems. Write your problem description in a markdown file and pass relevant code files as context.',
                inputSchema: {
                    type: 'object',
                    properties: {
                        files: {
                            type: 'array',
                            items: { type: 'string' },
                            description: 'Array of file paths to process. Markdown files (.md) become the main prompt, other files are added as context with file paths and code blocks.',
                        },
                        model: {
                            type: 'string',
                            enum: ['o3', 'gemini-2.5-pro'],
                            default: 'o3',
                            description: 'LLM model to use',
                        },
                        git_diff: {
                            type: 'object',
                            properties: {
                                repo_path: {
                                    type: 'string',
                                    description: 'Path to git repository (defaults to current working directory)',
                                },
                                files: {
                                    type: 'array',
                                    items: { type: 'string' },
                                    description: 'Specific files to include in diff',
                                },
                                base_ref: {
                                    type: 'string',
                                    default: 'HEAD',
                                    description: 'Git reference to compare against (e.g., "HEAD", "main", commit hash)',
                                },
                            },
                            required: ['files'],
                            description: 'Generate git diff output to include as context. Shows uncommitted changes by default.',
                        },
                    },
                    required: ['files'],
                },
            },
        ],
    };
});
// Handles `llm_query`: reads files, optionally diffs, builds the prompt,
// sends it to the chosen model, and logs prompt/response with cost.
server.setRequestHandler(CallToolRequestSchema, async (request) => {
    if (request.params.name === 'llm_query') {
        const { files, model = 'o3', git_diff, } = request.params.arguments;
        try {
            // Validate files exist
            const resolvedFiles = files.map((f) => resolve(f));
            const missingFiles = resolvedFiles.filter((f) => !existsSync(f));
            if (missingFiles.length > 0) {
                throw new Error(`Files not found: ${missingFiles.join(', ')}`);
            }
            // Process files using same logic as CLI
            const markdownFiles = [];
            const otherFiles = [];
            for (let i = 0; i < files.length; i++) {
                const filePath = resolvedFiles[i];
                const originalPath = files[i];
                const content = readFileSync(filePath, 'utf-8');
                if (originalPath.endsWith('.md') ||
                    originalPath.endsWith('.markdown')) {
                    markdownFiles.push(content);
                }
                else {
                    otherFiles.push({ path: originalPath, content });
                }
            }
            // Generate git diff if requested
            let gitDiffOutput = '';
            if (git_diff) {
                try {
                    const repoPath = git_diff.repo_path || process.cwd();
                    const diffFiles = git_diff.files;
                    const baseRef = git_diff.base_ref || 'HEAD';
                    // Build git diff command - always pass specific files to avoid unrelated changes
                    if (diffFiles.length === 0) {
                        throw new Error('No files specified for git diff');
                    }
                    // NOTE(review): string-built shell command — paths with spaces or
                    // shell metacharacters will break or inject; dist/git.js has the
                    // same issue. Consider execFileSync with an argv array.
                    const gitCommand = `git diff ${baseRef} -- ${diffFiles.join(' ')}`;
                    gitDiffOutput = execSync(gitCommand, {
                        cwd: repoPath,
                        encoding: 'utf-8',
                        maxBuffer: 1024 * 1024, // 1MB max
                    });
                }
                catch (error) {
                    gitDiffOutput = `Error generating git diff: ${error instanceof Error ? error.message : String(error)}`;
                }
            }
            // Build prompt using same logic as CLI
            let promptParts = [];
            // Add git diff as context if available
            if (gitDiffOutput.trim()) {
                promptParts.push('## Git Diff\n');
                promptParts.push('```diff');
                promptParts.push(gitDiffOutput);
                promptParts.push('```\n');
            }
            // Add non-markdown files as context
            if (otherFiles.length > 0) {
                promptParts.push('## Relevant Files\n');
                for (const file of otherFiles) {
                    promptParts.push(`### File: ${file.path}`);
                    promptParts.push('```');
                    promptParts.push(file.content);
                    promptParts.push('```\n');
                }
            }
            // Add markdown files as main prompt
            if (markdownFiles.length > 0) {
                promptParts.push(...markdownFiles);
            }
            const prompt = promptParts.join('\n');
            // Log the prompt
            logToFile(`PROMPT (model: ${model}):\n${prompt}\n${'='.repeat(80)}`);
            // Send to LLM
            const { client } = getClientForModel(model);
            const completion = await client.chat.completions.create({
                model,
                messages: [{ role: 'user', content: prompt }],
            });
            const response = completion.choices[0]?.message?.content;
            if (!response) {
                throw new Error('No response from the model');
            }
            // Calculate and log pricing
            const usage = completion.usage;
            const { inputCost, outputCost, totalCost } = calculateCost(usage, model);
            const costInfo = usage
                ? `Tokens: ${usage.prompt_tokens} input, ${usage.completion_tokens} output | Cost: $${totalCost.toFixed(6)} (input: $${inputCost.toFixed(6)}, output: $${outputCost.toFixed(6)})`
                : 'Usage data not available';
            // Log the response with pricing
            logToFile(`RESPONSE (model: ${model}):\n${response}\n${costInfo}\n${'='.repeat(80)}`);
            return {
                content: [
                    {
                        type: 'text',
                        text: response,
                    },
                ],
            };
        }
        catch (error) {
            throw new Error(`LLM query failed: ${error instanceof Error ? error.message : String(error)}`);
        }
    }
    throw new Error(`Unknown tool: ${request.params.name}`);
});
// Serve over stdio for MCP clients.
async function main() {
    const transport = new StdioServerTransport();
    await server.connect(transport);
}
main().catch((error) => {
    console.error('Fatal error:', error);
    process.exit(1);
});
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
/**
 * Assembles the final LLM prompt: optional git diff first, then each
 * non-markdown file as a fenced code block, then the markdown files
 * (the actual question) last. All pieces are joined with newlines.
 *
 * @param {string[]} markdownFiles - Contents of markdown files (the main prompt).
 * @param {{ path: string, content: string }[]} otherFiles - Context files.
 * @param {string|undefined} gitDiffOutput - Optional diff text; skipped when blank.
 * @returns {string} The assembled prompt.
 */
export function buildPrompt(markdownFiles, otherFiles, gitDiffOutput) {
    const sections = [];
    if (gitDiffOutput?.trim()) {
        sections.push('## Git Diff\n```diff', gitDiffOutput, '```\n');
    }
    if (otherFiles.length > 0) {
        sections.push('## Relevant Files\n');
        for (const { path, content } of otherFiles) {
            sections.push(`### File: ${path}`, '```', content, '```\n');
        }
    }
    sections.push(...markdownFiles);
    return sections.join('\n');
}
|
package/dist/schema.d.ts
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
import { z } from 'zod/v4';
// Models the server can route to (provider mapping lives in llm.js).
export declare const SupportedChatModel: z.ZodEnum<{
    o3: "o3";
    "gemini-2.5-pro": "gemini-2.5-pro";
    "deepseek-reasoner": "deepseek-reasoner";
}>;
export type SupportedChatModel = z.infer<typeof SupportedChatModel>;
// Runtime validator for the consult_llm tool's arguments (used in main.js).
export declare const ConsultLlmArgs: z.ZodObject<{
    files: z.ZodArray<z.ZodString>;
    model: z.ZodOptional<z.ZodEnum<{
        o3: "o3";
        "gemini-2.5-pro": "gemini-2.5-pro";
        "deepseek-reasoner": "deepseek-reasoner";
    }>>;
    git_diff: z.ZodOptional<z.ZodObject<{
        repo_path: z.ZodOptional<z.ZodString>;
        files: z.ZodArray<z.ZodString>;
        base_ref: z.ZodDefault<z.ZodOptional<z.ZodString>>;
    }, z.core.$strip>>;
}, z.core.$strip>;
// JSON-Schema tool definition advertised to MCP clients via ListTools.
export declare const toolSchema: {
    readonly name: "consult_llm";
    readonly description: "Ask a more powerful AI for help with complex problems. Write your problem description in a markdown file and pass relevant code files as context.";
    readonly inputSchema: {
        readonly type: "object";
        readonly properties: {
            readonly files: {
                readonly type: "array";
                readonly items: {
                    readonly type: "string";
                };
                readonly description: "Array of file paths to process. Markdown files (.md) become the main prompt, other files are added as context with file paths and code blocks.";
            };
            readonly model: {
                readonly type: "string";
                readonly enum: readonly ["o3", "gemini-2.5-pro", "deepseek-reasoner"];
                readonly default: "o3";
                readonly description: "LLM model to use";
            };
            readonly git_diff: {
                readonly type: "object";
                readonly properties: {
                    readonly repo_path: {
                        readonly type: "string";
                        readonly description: "Path to git repository (defaults to current working directory)";
                    };
                    readonly files: {
                        readonly type: "array";
                        readonly items: {
                            readonly type: "string";
                        };
                        readonly description: "Specific files to include in diff";
                    };
                    readonly base_ref: {
                        readonly type: "string";
                        readonly default: "HEAD";
                        readonly description: "Git reference to compare against (e.g., \"HEAD\", \"main\", commit hash)";
                    };
                };
                readonly required: readonly ["files"];
                readonly description: "Generate git diff output to include as context. Shows uncommitted changes by default.";
            };
        };
        readonly required: readonly ["files"];
    };
};
|
package/dist/schema.js
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
import { z } from 'zod/v4';
|
|
2
|
+
// Chat models this server knows how to route a consultation to.
export const SupportedChatModel = z.enum(['o3', 'gemini-2.5-pro', 'deepseek-reasoner']);
|
|
7
|
+
// Options controlling the optional `git diff` context section.
const GitDiffArgs = z.object({
  repo_path: z.string().optional(),
  files: z.array(z.string()).min(1, 'At least one file is required for git diff'),
  base_ref: z.string().optional().default('HEAD'),
});

// Validated arguments accepted by the consult_llm tool.
export const ConsultLlmArgs = z.object({
  files: z.array(z.string()).min(1, 'At least one file is required'),
  model: SupportedChatModel.optional(),
  git_diff: GitDiffArgs.optional(),
});
|
|
20
|
+
// JSON-schema fragment reused for the two string-array inputs below.
const stringArray = { type: 'array', items: { type: 'string' } };

/**
 * JSON-schema tool definition advertised to MCP clients.
 * NOTE(review): the `model.enum` list duplicates SupportedChatModel in this
 * file — keep the two in sync.
 */
export const toolSchema = {
  name: 'consult_llm',
  description: 'Ask a more powerful AI for help with complex problems. Write your problem description in a markdown file and pass relevant code files as context.',
  inputSchema: {
    type: 'object',
    properties: {
      files: {
        ...stringArray,
        description: 'Array of file paths to process. Markdown files (.md) become the main prompt, other files are added as context with file paths and code blocks.',
      },
      model: {
        type: 'string',
        enum: ['o3', 'gemini-2.5-pro', 'deepseek-reasoner'],
        default: 'o3',
        description: 'LLM model to use',
      },
      git_diff: {
        type: 'object',
        properties: {
          repo_path: {
            type: 'string',
            description: 'Path to git repository (defaults to current working directory)',
          },
          files: {
            ...stringArray,
            description: 'Specific files to include in diff',
          },
          base_ref: {
            type: 'string',
            default: 'HEAD',
            description: 'Git reference to compare against (e.g., "HEAD", "main", commit hash)',
          },
        },
        required: ['files'],
        description: 'Generate git diff output to include as context. Shows uncommitted changes by default.',
      },
    },
    required: ['files'],
  },
};
|
package/package.json
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "consult-llm-mcp",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "MCP server for consulting powerful AI models",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "dist/main.js",
|
|
7
|
+
"bin": {
|
|
8
|
+
"consult-llm-mcp": "./dist/main.js"
|
|
9
|
+
},
|
|
10
|
+
"scripts": {
|
|
11
|
+
"build": "tsc",
|
|
12
|
+
"dev": "tsx src/main.ts",
|
|
13
|
+
"start": "node dist/main.js",
|
|
14
|
+
"format": "prettier --write .",
|
|
15
|
+
"install-global": "npm run build && npm install -g .",
|
|
16
|
+
"prepublishOnly": "npm run build",
|
|
17
|
+
"publish:dry": "npm publish --dry-run",
|
|
18
|
+
"publish:patch": "npm version patch && npm publish",
|
|
19
|
+
"publish:minor": "npm version minor && npm publish",
|
|
20
|
+
"publish:major": "npm version major && npm publish"
|
|
21
|
+
},
|
|
22
|
+
"keywords": [
|
|
23
|
+
"mcp",
|
|
24
|
+
"openai",
|
|
25
|
+
"gemini",
|
|
26
|
+
"llm",
|
|
27
|
+
"ai"
|
|
28
|
+
],
|
|
29
|
+
"author": "",
|
|
30
|
+
"repository": {
|
|
31
|
+
"type": "git",
|
|
32
|
+
"url": "git+https://github.com/raine/consult-llm-mcp.git"
|
|
33
|
+
},
|
|
34
|
+
"homepage": "https://github.com/raine/consult-llm-mcp#readme",
|
|
35
|
+
"bugs": {
|
|
36
|
+
"url": "https://github.com/raine/consult-llm-mcp/issues"
|
|
37
|
+
},
|
|
38
|
+
"files": [
|
|
39
|
+
"dist",
|
|
40
|
+
"README.md"
|
|
41
|
+
],
|
|
42
|
+
"engines": {
|
|
43
|
+
"node": ">=18.0.0"
|
|
44
|
+
},
|
|
45
|
+
"license": "MIT",
|
|
46
|
+
"dependencies": {
|
|
47
|
+
"@modelcontextprotocol/sdk": "^1.13.0",
|
|
48
|
+
"openai": "^5.6.0",
|
|
49
|
+
"zod": "^3.25.67"
|
|
50
|
+
},
|
|
51
|
+
"devDependencies": {
|
|
52
|
+
"@types/node": "^24.0.3",
|
|
53
|
+
"prettier": "^3.5.3",
|
|
54
|
+
"tsx": "^4.20.3",
|
|
55
|
+
"typescript": "^5.8.3"
|
|
56
|
+
}
|
|
57
|
+
}
|