dev-mcp-server 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +68 -0
- package/README.md +333 -0
- package/cli.js +248 -0
- package/package.json +60 -0
- package/src/api/routes/ingest.js +69 -0
- package/src/api/routes/knowledge.js +65 -0
- package/src/api/routes/query.js +105 -0
- package/src/api/server.js +91 -0
- package/src/core/indexer.js +171 -0
- package/src/core/ingester.js +155 -0
- package/src/core/queryEngine.js +236 -0
- package/src/storage/store.js +125 -0
- package/src/utils/fileParser.js +183 -0
- package/src/utils/llmClient.js +206 -0
- package/src/utils/logger.js +28 -0
|
@@ -0,0 +1,206 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
const PROVIDER = (process.env.LLM_PROVIDER || 'anthropic').toLowerCase();

// Fallback model per provider when LLM_MODEL is not set. Azure has no
// usable default: its "model" is the deployment name, which is
// account-specific, so it must always come from the environment.
const DEFAULT_MODELS = {
  anthropic: 'claude-opus-4-5',
  ollama: 'llama3',
  azure: '',
};

/**
 * Resolve the model (or Azure deployment) name for the active provider.
 *
 * Precedence: LLM_MODEL, then AZURE_OPENAI_DEPLOYMENT (azure only), then
 * the provider's entry in DEFAULT_MODELS, then the anthropic default.
 *
 * @returns {string} model identifier sent with every request
 * @throws {Error} when PROVIDER is "azure" and neither LLM_MODEL nor
 *   AZURE_OPENAI_DEPLOYMENT is set
 */
function resolveModel() {
  if (PROVIDER === 'azure') {
    // Explicit guard instead of the original throw-from-an-IIFE inside a
    // `||` chain — same behavior, readable control flow.
    const deployment = process.env.LLM_MODEL || process.env.AZURE_OPENAI_DEPLOYMENT;
    if (!deployment) {
      throw new Error('Azure OpenAI requires AZURE_OPENAI_DEPLOYMENT (or LLM_MODEL) to be set.');
    }
    return deployment;
  }
  return process.env.LLM_MODEL || DEFAULT_MODELS[PROVIDER] || DEFAULT_MODELS.anthropic;
}
|
|
22
|
+
|
|
23
|
+
/**
 * Construct an Anthropic SDK client authenticated via ANTHROPIC_API_KEY.
 *
 * @returns {object} a configured Anthropic client instance
 * @throws {Error} when ANTHROPIC_API_KEY is missing from the environment
 */
function buildAnthropicClient() {
  const Anthropic = require('@anthropic-ai/sdk');
  const apiKey = process.env.ANTHROPIC_API_KEY;
  if (!apiKey) {
    throw new Error('ANTHROPIC_API_KEY is not set. Add it to your .env file.');
  }
  return new Anthropic({ apiKey });
}
|
|
30
|
+
|
|
31
|
+
/**
 * Send one chat request through the Anthropic Messages API.
 * Returns the SDK's response object (or stream) unchanged.
 *
 * @param {{model: string, maxTokens: number, system: string, messages: Array, stream: boolean}} opts
 */
async function anthropicCreate({ model, maxTokens, system, messages, stream }) {
  const payload = {
    model,
    max_tokens: maxTokens,
    system,
    messages,
    stream,
  };
  return buildAnthropicClient().messages.create(payload);
}
|
|
35
|
+
|
|
36
|
+
/**
 * Base URL of the Ollama server (OLLAMA_BASE_URL, defaulting to the
 * standard local port) with at most one trailing slash stripped.
 *
 * @returns {string} normalized base URL, no trailing slash
 */
function ollamaBaseUrl() {
  const configured = process.env.OLLAMA_BASE_URL || 'http://localhost:11434';
  return configured.endsWith('/') ? configured.slice(0, -1) : configured;
}
|
|
39
|
+
|
|
40
|
+
/**
 * Send a chat request to a local Ollama server via POST /api/chat.
 *
 * Non-streaming calls return an Anthropic-shaped result
 * ({ content: [{ text }], usage: { input_tokens, output_tokens } });
 * streaming calls return the async generator from ollamaStream().
 *
 * @param {{model: string, maxTokens: number, system: string, messages: Array, stream: boolean}} opts
 * @throws {Error} when the HTTP response is not 2xx
 */
async function ollamaCreate({ model, maxTokens, system, messages, stream }) {
  const wantStream = Boolean(stream);
  const payload = {
    model,
    // Ollama takes the system prompt as a leading chat message.
    messages: [{ role: 'system', content: system }, ...messages],
    stream: wantStream,
    options: { num_predict: maxTokens },
  };

  const res = await fetch(`${ollamaBaseUrl()}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  });

  if (!res.ok) {
    const detail = await res.text().catch(() => '(no body)');
    throw new Error(`Ollama request failed [${res.status}]: ${detail}`);
  }

  if (wantStream) {
    return ollamaStream(res);
  }

  const data = await res.json();
  return {
    content: [{ text: data.message?.content ?? '' }],
    usage: {
      input_tokens: data.prompt_eval_count ?? 0,
      output_tokens: data.eval_count ?? 0,
    },
  };
}
|
|
75
|
+
|
|
76
|
+
/**
 * Adapt Ollama's newline-delimited JSON (NDJSON) response body into
 * Anthropic-style streaming events:
 *   { type: 'content_block_delta', delta: { type: 'text_delta', text } }
 * for each content chunk, and exactly one { type: 'message_stop' }.
 *
 * @param {Response} res fetch Response whose body is an NDJSON stream
 */
async function* ollamaStream(res) {
  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  let stopped = false;

  // Parse one NDJSON line; returns undefined for blank/malformed lines.
  const parseLine = (line) => {
    const trimmed = line.trim();
    if (!trimmed) return undefined;
    try {
      return JSON.parse(trimmed);
    } catch (_) {
      return undefined;
    }
  };

  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;

      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split('\n');
      buffer = lines.pop(); // keep the trailing partial line for next read

      for (const line of lines) {
        const obj = parseLine(line);
        if (!obj) continue;
        if (obj.message?.content) {
          yield { type: 'content_block_delta', delta: { type: 'text_delta', text: obj.message.content } };
        }
        if (obj.done && !stopped) {
          stopped = true;
          yield { type: 'message_stop' };
        }
      }
    }

    // BUG FIX: the original discarded `buffer` here, losing a final JSON
    // object that arrives without a trailing newline. Flush the decoder's
    // final bytes and process any leftover complete line.
    buffer += decoder.decode();
    const tail = parseLine(buffer);
    if (tail) {
      if (tail.message?.content) {
        yield { type: 'content_block_delta', delta: { type: 'text_delta', text: tail.message.content } };
      }
      if (tail.done && !stopped) {
        stopped = true;
        yield { type: 'message_stop' };
      }
    }
  } finally {
    reader.releaseLock();
  }
  if (!stopped) yield { type: 'message_stop' };
}
|
|
111
|
+
|
|
112
|
+
/**
 * Construct an AzureOpenAI client from the optional "openai" package.
 *
 * @returns {object} configured AzureOpenAI client
 * @throws {Error} when the "openai" package is not installed, or when
 *   AZURE_OPENAI_API_KEY / AZURE_OPENAI_ENDPOINT is missing
 */
function buildAzureClient() {
  let AzureOpenAI;
  try {
    ({ AzureOpenAI } = require('openai'));
  } catch (_) {
    // "openai" is an optional dependency, only needed for this provider.
    throw new Error('The "openai" package is required for Azure OpenAI.\nRun: npm install openai');
  }

  const { AZURE_OPENAI_API_KEY: apiKey, AZURE_OPENAI_ENDPOINT: endpoint } = process.env;
  if (!apiKey) throw new Error('AZURE_OPENAI_API_KEY is not set.');
  if (!endpoint) throw new Error('AZURE_OPENAI_ENDPOINT is not set. Example: https://<resource>.openai.azure.com');

  return new AzureOpenAI({
    apiKey,
    endpoint,
    apiVersion: process.env.AZURE_OPENAI_API_VERSION || '2024-05-01-preview',
  });
}
|
|
135
|
+
|
|
136
|
+
/**
 * Send one chat-completions request to Azure OpenAI.
 *
 * Non-streaming calls are normalized into the Anthropic-shaped result
 * ({ content: [{ text }], usage: { input_tokens, output_tokens } });
 * streaming calls return the event generator from azureStreamToEvents().
 *
 * @param {{model: string, maxTokens: number, system: string, messages: Array, stream: boolean}} opts
 */
async function azureCreate({ model, maxTokens, system, messages, stream }) {
  const client = buildAzureClient();

  const params = {
    model,
    // Azure/OpenAI chat API takes the system prompt as a leading message.
    messages: [{ role: 'system', content: system }, ...messages],
    max_tokens: maxTokens,
    stream: Boolean(stream),
  };

  const response = await client.chat.completions.create(params);

  if (params.stream) {
    return azureStreamToEvents(response);
  }

  const choice = response.choices[0];
  return {
    content: [{ text: choice?.message?.content ?? '' }],
    usage: {
      input_tokens: response.usage?.prompt_tokens ?? 0,
      output_tokens: response.usage?.completion_tokens ?? 0,
    },
  };
}
|
|
162
|
+
|
|
163
|
+
/**
 * Convert an Azure/OpenAI chat-completions stream into Anthropic-style
 * events: text_delta events for content chunks and exactly one
 * message_stop (at finish_reason, or after the stream ends).
 *
 * @param {AsyncIterable<object>} azureStream stream of completion chunks
 */
async function* azureStreamToEvents(azureStream) {
  let sentStop = false;

  for await (const chunk of azureStream) {
    const choice = chunk.choices[0];
    const text = choice?.delta?.content;
    if (text) {
      yield { type: 'content_block_delta', delta: { type: 'text_delta', text } };
    }
    if (!sentStop && choice?.finish_reason) {
      sentStop = true;
      yield { type: 'message_stop' };
    }
  }
  if (!sentStop) yield { type: 'message_stop' };
}
|
|
178
|
+
|
|
179
|
+
/**
 * Provider-agnostic LLM client. The provider is fixed at load time from
 * LLM_PROVIDER; the model name is resolved once via resolveModel().
 */
const llmClient = {
  provider: PROVIDER,
  model: resolveModel(),

  /**
   * Send one chat request through the configured provider.
   *
   * @param {{maxTokens?: number, system: string, messages: Array, stream?: boolean}} opts
   * @returns {Promise<object|AsyncIterable>} normalized response, or an
   *   event generator when stream is true
   * @throws {Error} for an unrecognized LLM_PROVIDER value
   */
  async createMessage({ maxTokens = 2000, system, messages, stream = false }) {
    // Dispatch table instead of a switch — same three providers.
    const handler = {
      anthropic: anthropicCreate,
      ollama: ollamaCreate,
      azure: azureCreate,
    }[PROVIDER];

    if (!handler) {
      throw new Error(
        `Unknown LLM_PROVIDER: "${PROVIDER}". ` +
        'Supported values: "anthropic", "ollama", "azure".'
      );
    }
    return handler({ model: this.model, maxTokens, system, messages, stream });
  },

  /** Short human-readable identifier, e.g. "anthropic/claude-opus-4-5". */
  label() {
    return `${PROVIDER}/${this.model}`;
  },
};

module.exports = llmClient;
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
const winston = require('winston');

// Shared timestamp + error-stack formatting used by the file transports.
const fileFormat = winston.format.combine(
  winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }),
  winston.format.errors({ stack: true }),
  winston.format.printf(({ timestamp, level, message, stack }) =>
    stack
      ? `[${timestamp}] ${level.toUpperCase()}: ${message}\n${stack}`
      : `[${timestamp}] ${level.toUpperCase()}: ${message}`
  )
);

// Console output is colorized and omits stack traces.
const consoleTransport = new winston.transports.Console({
  format: winston.format.combine(
    winston.format.colorize(),
    winston.format.printf(({ timestamp, level, message }) => `[${timestamp}] ${level}: ${message}`)
  ),
});

// NOTE(review): winston's File transport does not appear to create a
// missing "logs/" directory — confirm it exists at startup, or these
// transports may fail to open their files.
const logger = winston.createLogger({
  level: process.env.LOG_LEVEL || 'info',
  format: fileFormat,
  transports: [
    consoleTransport,
    new winston.transports.File({ filename: 'logs/error.log', level: 'error' }),
    new winston.transports.File({ filename: 'logs/combined.log' }),
  ],
});

module.exports = logger;
|