dev-mcp-server 0.0.2 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/.env.example +23 -55
  2. package/README.md +609 -219
  3. package/cli.js +486 -160
  4. package/package.json +2 -2
  5. package/src/agents/BaseAgent.js +113 -0
  6. package/src/agents/dreamer.js +165 -0
  7. package/src/agents/improver.js +175 -0
  8. package/src/agents/specialists.js +202 -0
  9. package/src/agents/taskDecomposer.js +176 -0
  10. package/src/agents/teamCoordinator.js +153 -0
  11. package/src/api/routes/agents.js +172 -0
  12. package/src/api/routes/extras.js +115 -0
  13. package/src/api/routes/git.js +72 -0
  14. package/src/api/routes/ingest.js +60 -40
  15. package/src/api/routes/knowledge.js +59 -41
  16. package/src/api/routes/memory.js +41 -0
  17. package/src/api/routes/newRoutes.js +168 -0
  18. package/src/api/routes/pipelines.js +41 -0
  19. package/src/api/routes/planner.js +54 -0
  20. package/src/api/routes/query.js +24 -0
  21. package/src/api/routes/sessions.js +54 -0
  22. package/src/api/routes/tasks.js +67 -0
  23. package/src/api/routes/tools.js +85 -0
  24. package/src/api/routes/v5routes.js +196 -0
  25. package/src/api/server.js +133 -5
  26. package/src/context/compactor.js +151 -0
  27. package/src/context/contextEngineer.js +181 -0
  28. package/src/context/contextVisualizer.js +140 -0
  29. package/src/core/conversationEngine.js +231 -0
  30. package/src/core/indexer.js +169 -143
  31. package/src/core/ingester.js +141 -126
  32. package/src/core/queryEngine.js +286 -236
  33. package/src/cron/cronScheduler.js +260 -0
  34. package/src/dashboard/index.html +1181 -0
  35. package/src/lsp/symbolNavigator.js +220 -0
  36. package/src/memory/memoryManager.js +186 -0
  37. package/src/memory/teamMemory.js +111 -0
  38. package/src/messaging/messageBus.js +177 -0
  39. package/src/monitor/proactiveMonitor.js +337 -0
  40. package/src/pipelines/pipelineEngine.js +230 -0
  41. package/src/planner/plannerEngine.js +202 -0
  42. package/src/plugins/builtin/stats-plugin.js +29 -0
  43. package/src/plugins/pluginManager.js +144 -0
  44. package/src/prompts/promptEngineer.js +289 -0
  45. package/src/sessions/sessionManager.js +166 -0
  46. package/src/skills/skillsManager.js +263 -0
  47. package/src/storage/store.js +127 -105
  48. package/src/tasks/taskManager.js +151 -0
  49. package/src/tools/BashTool.js +154 -0
  50. package/src/tools/FileEditTool.js +280 -0
  51. package/src/tools/GitTool.js +212 -0
  52. package/src/tools/GrepTool.js +199 -0
  53. package/src/tools/registry.js +1380 -0
  54. package/src/utils/costTracker.js +69 -0
  55. package/src/utils/fileParser.js +176 -153
  56. package/src/utils/llmClient.js +355 -206
  57. package/src/watcher/fileWatcher.js +137 -0
  58. package/src/worktrees/worktreeManager.js +176 -0
@@ -1,206 +1,355 @@
1
- 'use strict';
2
-
3
-
4
- const PROVIDER = (process.env.LLM_PROVIDER || 'anthropic').toLowerCase();
5
-
6
- const DEFAULT_MODELS = {
7
- anthropic: 'claude-opus-4-5',
8
- ollama: 'llama3',
9
- azure: '',
10
- };
11
-
12
- function resolveModel() {
13
- if (PROVIDER === 'azure') {
14
- return (
15
- process.env.LLM_MODEL ||
16
- process.env.AZURE_OPENAI_DEPLOYMENT ||
17
- (() => { throw new Error('Azure OpenAI requires AZURE_OPENAI_DEPLOYMENT (or LLM_MODEL) to be set.'); })()
18
- );
19
- }
20
- return process.env.LLM_MODEL || DEFAULT_MODELS[PROVIDER] || DEFAULT_MODELS.anthropic;
21
- }
22
-
23
- function buildAnthropicClient() {
24
- const Anthropic = require('@anthropic-ai/sdk');
25
- if (!process.env.ANTHROPIC_API_KEY) {
26
- throw new Error('ANTHROPIC_API_KEY is not set. Add it to your .env file.');
27
- }
28
- return new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
29
- }
30
-
31
- async function anthropicCreate({ model, maxTokens, system, messages, stream }) {
32
- const client = buildAnthropicClient();
33
- return client.messages.create({ model, max_tokens: maxTokens, system, messages, stream });
34
- }
35
-
36
- function ollamaBaseUrl() {
37
- return (process.env.OLLAMA_BASE_URL || 'http://localhost:11434').replace(/\/$/, '');
38
- }
39
-
40
- async function ollamaCreate({ model, maxTokens, system, messages, stream }) {
41
- const url = `${ollamaBaseUrl()}/api/chat`;
42
- const ollamaMessages = [{ role: 'system', content: system }, ...messages];
43
-
44
- const body = JSON.stringify({
45
- model,
46
- messages: ollamaMessages,
47
- stream: Boolean(stream),
48
- options: { num_predict: maxTokens },
49
- });
50
-
51
- const res = await fetch(url, {
52
- method: 'POST',
53
- headers: { 'Content-Type': 'application/json' },
54
- body,
55
- });
56
-
57
- if (!res.ok) {
58
- const detail = await res.text().catch(() => '(no body)');
59
- throw new Error(`Ollama request failed [${res.status}]: ${detail}`);
60
- }
61
-
62
- if (!stream) {
63
- const data = await res.json();
64
- return {
65
- content: [{ text: data.message?.content ?? '' }],
66
- usage: {
67
- input_tokens: data.prompt_eval_count ?? 0,
68
- output_tokens: data.eval_count ?? 0,
69
- },
70
- };
71
- }
72
-
73
- return ollamaStream(res);
74
- }
75
-
76
- async function* ollamaStream(res) {
77
- const reader = res.body.getReader();
78
- const decoder = new TextDecoder();
79
- let buffer = '';
80
- let stopped = false;
81
-
82
- try {
83
- while (true) {
84
- const { done, value } = await reader.read();
85
- if (done) break;
86
-
87
- buffer += decoder.decode(value, { stream: true });
88
- const lines = buffer.split('\n');
89
- buffer = lines.pop();
90
-
91
- for (const line of lines) {
92
- const trimmed = line.trim();
93
- if (!trimmed) continue;
94
- let obj;
95
- try { obj = JSON.parse(trimmed); } catch (_) { continue; }
96
-
97
- if (obj.message?.content) {
98
- yield { type: 'content_block_delta', delta: { type: 'text_delta', text: obj.message.content } };
99
- }
100
- if (obj.done && !stopped) {
101
- stopped = true;
102
- yield { type: 'message_stop' };
103
- }
104
- }
105
- }
106
- } finally {
107
- reader.releaseLock();
108
- }
109
- if (!stopped) yield { type: 'message_stop' };
110
- }
111
-
112
- function buildAzureClient() {
113
- let AzureOpenAI;
114
- try {
115
- ({ AzureOpenAI } = require('openai'));
116
- } catch (_) {
117
- throw new Error(
118
- 'The "openai" package is required for Azure OpenAI.\n' +
119
- 'Run: npm install openai'
120
- );
121
- }
122
-
123
- const apiKey = process.env.AZURE_OPENAI_API_KEY;
124
- const endpoint = process.env.AZURE_OPENAI_ENDPOINT;
125
-
126
- if (!apiKey) throw new Error('AZURE_OPENAI_API_KEY is not set.');
127
- if (!endpoint) throw new Error('AZURE_OPENAI_ENDPOINT is not set. Example: https://<resource>.openai.azure.com');
128
-
129
- return new AzureOpenAI({
130
- apiKey,
131
- endpoint,
132
- apiVersion: process.env.AZURE_OPENAI_API_VERSION || '2024-05-01-preview',
133
- });
134
- }
135
-
136
- async function azureCreate({ model, maxTokens, system, messages, stream }) {
137
- const client = buildAzureClient();
138
-
139
- const azureMessages = [{ role: 'system', content: system }, ...messages];
140
-
141
- const params = {
142
- model,
143
- messages: azureMessages,
144
- max_tokens: maxTokens,
145
- stream: Boolean(stream),
146
- };
147
-
148
- if (!stream) {
149
- const response = await client.chat.completions.create(params);
150
- return {
151
- content: [{ text: response.choices[0]?.message?.content ?? '' }],
152
- usage: {
153
- input_tokens: response.usage?.prompt_tokens ?? 0,
154
- output_tokens: response.usage?.completion_tokens ?? 0,
155
- },
156
- };
157
- }
158
-
159
- const azureStream = await client.chat.completions.create(params);
160
- return azureStreamToEvents(azureStream);
161
- }
162
-
163
- async function* azureStreamToEvents(azureStream) {
164
- let stopped = false;
165
-
166
- for await (const chunk of azureStream) {
167
- const delta = chunk.choices[0]?.delta?.content;
168
- if (delta) {
169
- yield { type: 'content_block_delta', delta: { type: 'text_delta', text: delta } };
170
- }
171
- if (chunk.choices[0]?.finish_reason && !stopped) {
172
- stopped = true;
173
- yield { type: 'message_stop' };
174
- }
175
- }
176
- if (!stopped) yield { type: 'message_stop' };
177
- }
178
-
179
- const llmClient = {
180
- provider: PROVIDER,
181
- model: resolveModel(),
182
-
183
- async createMessage({ maxTokens = 2000, system, messages, stream = false }) {
184
- const model = this.model;
185
-
186
- switch (PROVIDER) {
187
- case 'anthropic':
188
- return anthropicCreate({ model, maxTokens, system, messages, stream });
189
- case 'ollama':
190
- return ollamaCreate({ model, maxTokens, system, messages, stream });
191
- case 'azure':
192
- return azureCreate({ model, maxTokens, system, messages, stream });
193
- default:
194
- throw new Error(
195
- `Unknown LLM_PROVIDER: "${PROVIDER}". ` +
196
- 'Supported values: "anthropic", "ollama", "azure".'
197
- );
198
- }
199
- },
200
-
201
- label() {
202
- return `${PROVIDER}/${this.model}`;
203
- },
204
- };
205
-
206
- module.exports = llmClient;
1
+ 'use strict';
2
+ /**
3
+ * Universal LLM client. Reads LLM_PROVIDER from env and returns
4
+ * a unified interface regardless of the underlying provider.
5
+ *
6
+ * Supported providers:
7
+ *   anthropic — Anthropic Claude (default)
8
+ * azure — Azure OpenAI Service
9
+ * ollama — Local Ollama server
10
+ *
11
+ * Unified API (mirrors Anthropic's messages.create shape):
12
+ * await llm.chat({ model, system, messages, max_tokens, tools, stream })
13
+ * → { content: [{ type:'text', text }], usage: { input_tokens, output_tokens }, stop_reason }
14
+ *
15
+ * Model resolution:
16
+ * llm.model('fast') → haiku / gpt-4o-mini / mistral
17
+ *   llm.model('smart')   → opus / gpt-4-turbo / llama3
18
+ * llm.model('default') → sonnet / gpt-4o / llama3
19
+ */
20
+
21
+ require('dotenv').config();
22
+
23
+ const PROVIDER = (process.env.LLM_PROVIDER || 'anthropic').toLowerCase();
24
+
25
+ // ── Model alias maps ───────────────────────────────────────────────────────────
26
+ const MODEL_ALIASES = {
27
+ anthropic: {
28
+ fast: 'claude-haiku-4-5-20251001',
29
+ default: 'claude-sonnet-4-5',
30
+ smart: 'claude-opus-4-5',
31
+ },
32
+ azure: {
33
+ fast: process.env.AZURE_OPENAI_DEPLOYMENT || 'gpt-4o-mini',
34
+ default: process.env.AZURE_OPENAI_DEPLOYMENT || 'gpt-4o',
35
+ smart: process.env.AZURE_OPENAI_DEPLOYMENT || 'gpt-4o',
36
+ },
37
+ ollama: {
38
+ fast: process.env.LLM_MODEL || 'llama3',
39
+ default: process.env.LLM_MODEL || 'llama3',
40
+ smart: process.env.LLM_MODEL || 'llama3',
41
+ },
42
+ };
43
+
44
+ // ── Pricing map (per million tokens, USD) ─────────────────────────────────────
45
+ const PRICING = {
46
+ 'claude-opus-4-5': { input: 15.0, output: 75.0 },
47
+ 'claude-sonnet-4-5': { input: 3.0, output: 15.0 },
48
+ 'claude-haiku-4-5-20251001': { input: 0.25, output: 1.25 },
49
+ 'claude-haiku-4-5': { input: 0.25, output: 1.25 },
50
+ 'gpt-4o': { input: 5.0, output: 15.0 },
51
+ 'gpt-4o-mini': { input: 0.15, output: 0.6 },
52
+ 'gpt-4-turbo': { input: 10.0, output: 30.0 },
53
+ // Ollama is free (local), track 0
54
+ default: { input: 0, output: 0 },
55
+ };
56
+
57
+ // ─────────────────────────────────────────────────────────────────────────────
58
+ // ANTHROPIC ADAPTER
59
+ // ─────────────────────────────────────────────────────────────────────────────
60
+ class AnthropicAdapter {
61
+ constructor() {
62
+ const Anthropic = require('@anthropic-ai/sdk');
63
+ this._client = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
64
+ this.providerName = 'anthropic';
65
+ }
66
+
67
+ async chat({ model, system, messages, max_tokens = 2000, tools, stream = false }) {
68
+ const params = { model, max_tokens, messages };
69
+ if (system) params.system = system;
70
+ if (tools?.length) params.tools = tools;
71
+ if (stream) params.stream = true;
72
+
73
+ const response = await this._client.messages.create(params);
74
+
75
+ if (stream) return response; // pass the raw stream through
76
+
77
+ return {
78
+ content: response.content,
79
+ stop_reason: response.stop_reason,
80
+ usage: {
81
+ input_tokens: response.usage.input_tokens,
82
+ output_tokens: response.usage.output_tokens,
83
+ },
84
+ _raw: response,
85
+ };
86
+ }
87
+
88
+ supportsTools() { return true; }
89
+ supportsStreaming() { return true; }
90
+ }
91
+
92
+ // ─────────────────────────────────────────────────────────────────────────────
93
+ // AZURE OPENAI ADAPTER
94
+ // ─────────────────────────────────────────────────────────────────────────────
95
+ class AzureAdapter {
96
+ constructor() {
97
+ const { AzureOpenAI } = require('openai');
98
+ this._client = new AzureOpenAI({
99
+ endpoint: process.env.AZURE_OPENAI_ENDPOINT,
100
+ apiKey: process.env.AZURE_OPENAI_API_KEY,
101
+ apiVersion: process.env.AZURE_OPENAI_API_VERSION || '2024-05-01-preview',
102
+ deployment: process.env.AZURE_OPENAI_DEPLOYMENT,
103
+ });
104
+ this.providerName = 'azure';
105
+ }
106
+
107
+ async chat({ model, system, messages, max_tokens = 2000, tools, stream = false }) {
108
+ // Convert Anthropic message format → OpenAI format
109
+ const oaiMessages = this._convertMessages(system, messages);
110
+
111
+ const params = {
112
+ model: process.env.AZURE_OPENAI_DEPLOYMENT || model,
113
+ messages: oaiMessages,
114
+ max_tokens,
115
+ };
116
+
117
+ if (tools?.length) {
118
+ params.tools = tools.map(t => ({
119
+ type: 'function',
120
+ function: {
121
+ name: t.name,
122
+ description: t.description,
123
+ parameters: t.input_schema || {},
124
+ },
125
+ }));
126
+ }
127
+
128
+ if (stream) {
129
+ const s = await this._client.chat.completions.create({ ...params, stream: true });
130
+ return this._wrapAzureStream(s);
131
+ }
132
+
133
+ const response = await this._client.chat.completions.create(params);
134
+ return this._convertResponse(response);
135
+ }
136
+
137
+ _convertMessages(system, messages) {
138
+ const result = [];
139
+ if (system) result.push({ role: 'system', content: system });
140
+
141
+ for (const msg of messages) {
142
+ if (typeof msg.content === 'string') {
143
+ result.push({ role: msg.role, content: msg.content });
144
+ } else if (Array.isArray(msg.content)) {
145
+ // Handle Anthropic content blocks (text, tool_use, tool_result)
146
+ const textBlocks = msg.content.filter(b => b.type === 'text').map(b => b.text).join('\n');
147
+ const toolUseBlocks = msg.content.filter(b => b.type === 'tool_use');
148
+ const toolResultBlocks = msg.content.filter(b => b.type === 'tool_result');
149
+
150
+ if (toolResultBlocks.length) {
151
+ for (const tr of toolResultBlocks) {
152
+ result.push({ role: 'tool', tool_call_id: tr.tool_use_id, content: typeof tr.content === 'string' ? tr.content : JSON.stringify(tr.content) });
153
+ }
154
+ } else if (toolUseBlocks.length) {
155
+ result.push({
156
+ role: 'assistant',
157
+ content: textBlocks || null,
158
+ tool_calls: toolUseBlocks.map(tu => ({
159
+ id: tu.id, type: 'function',
160
+ function: { name: tu.name, arguments: JSON.stringify(tu.input || {}) },
161
+ })),
162
+ });
163
+ } else {
164
+ result.push({ role: msg.role, content: textBlocks });
165
+ }
166
+ }
167
+ }
168
+ return result;
169
+ }
170
+
171
+ _convertResponse(response) {
172
+ const choice = response.choices[0];
173
+ const content = [];
174
+
175
+ if (choice.message.content) {
176
+ content.push({ type: 'text', text: choice.message.content });
177
+ }
178
+
179
+ if (choice.message.tool_calls?.length) {
180
+ for (const tc of choice.message.tool_calls) {
181
+ content.push({
182
+ type: 'tool_use',
183
+ id: tc.id,
184
+ name: tc.function.name,
185
+ input: (() => { try { return JSON.parse(tc.function.arguments); } catch { return {}; } })(),
186
+ });
187
+ }
188
+ }
189
+
190
+ const stopMap = { stop: 'end_turn', tool_calls: 'tool_use', length: 'max_tokens' };
191
+
192
+ return {
193
+ content,
194
+ stop_reason: stopMap[choice.finish_reason] || 'end_turn',
195
+ usage: {
196
+ input_tokens: response.usage?.prompt_tokens || 0,
197
+ output_tokens: response.usage?.completion_tokens || 0,
198
+ },
199
+ _raw: response,
200
+ };
201
+ }
202
+
203
+ async *_wrapAzureStream(stream) {
204
+ for await (const chunk of stream) {
205
+ const delta = chunk.choices[0]?.delta;
206
+ if (delta?.content) {
207
+ yield { type: 'content_block_delta', delta: { type: 'text_delta', text: delta.content } };
208
+ }
209
+ if (chunk.choices[0]?.finish_reason) {
210
+ yield { type: 'message_stop' };
211
+ }
212
+ }
213
+ }
214
+
215
+ supportsTools() { return true; }
216
+ supportsStreaming() { return true; }
217
+ }
218
+
219
+ // ─────────────────────────────────────────────────────────────────────────────
220
+ // OLLAMA ADAPTER
221
+ // ─────────────────────────────────────────────────────────────────────────────
222
+ class OllamaAdapter {
223
+ constructor() {
224
+ const { OpenAI } = require('openai');
225
+ this._client = new OpenAI({
226
+ baseURL: (process.env.OLLAMA_BASE_URL || 'http://localhost:11434').replace(/\/$/, '') + '/v1',
227
+ apiKey: 'ollama', // required by openai SDK but not used by Ollama
228
+ });
229
+ this.providerName = 'ollama';
230
+ }
231
+
232
+ async chat({ model, system, messages, max_tokens = 2000, tools, stream = false }) {
233
+ const oaiMessages = [];
234
+ if (system) oaiMessages.push({ role: 'system', content: system });
235
+ for (const m of messages) {
236
+ const content = typeof m.content === 'string'
237
+ ? m.content
238
+ : m.content?.filter?.(b => b.type === 'text').map(b => b.text).join('\n') || '';
239
+ oaiMessages.push({ role: m.role === 'assistant' ? 'assistant' : 'user', content });
240
+ }
241
+
242
+ const params = { model: model || process.env.LLM_MODEL || 'llama3', messages: oaiMessages, max_tokens };
243
+ // Note: Ollama's tool support is model-dependent — only add if supported
244
+ if (tools?.length) params.tools = tools.map(t => ({ type: 'function', function: { name: t.name, description: t.description, parameters: t.input_schema || {} } }));
245
+
246
+ if (stream) {
247
+ const s = await this._client.chat.completions.create({ ...params, stream: true });
248
+ return this._wrapStream(s);
249
+ }
250
+
251
+ const response = await this._client.chat.completions.create(params);
252
+ const choice = response.choices[0];
253
+ return {
254
+ content: [{ type: 'text', text: choice.message.content || '' }],
255
+ stop_reason: choice.finish_reason === 'stop' ? 'end_turn' : 'end_turn',
256
+ usage: {
257
+ input_tokens: response.usage?.prompt_tokens || 0,
258
+ output_tokens: response.usage?.completion_tokens || 0,
259
+ },
260
+ _raw: response,
261
+ };
262
+ }
263
+
264
+ async *_wrapStream(stream) {
265
+ for await (const chunk of stream) {
266
+ const text = chunk.choices[0]?.delta?.content;
267
+ if (text) yield { type: 'content_block_delta', delta: { type: 'text_delta', text } };
268
+ if (chunk.choices[0]?.finish_reason) yield { type: 'message_stop' };
269
+ }
270
+ }
271
+
272
+ supportsTools() { return !!(process.env.OLLAMA_TOOLS === 'true'); }
273
+ supportsStreaming() { return true; }
274
+ }
275
+
276
+ // ─────────────────────────────────────────────────────────────────────────────
277
+ // FACTORY + SINGLETON
278
+ // ─────────────────────────────────────────────────────────────────────────────
279
+
280
+ function createAdapter() {
281
+ switch (PROVIDER) {
282
+ case 'azure': return new AzureAdapter();
283
+ case 'ollama': return new OllamaAdapter();
284
+ case 'anthropic':
285
+ default: return new AnthropicAdapter();
286
+ }
287
+ }
288
+
289
+ class LLMClient {
290
+ constructor() {
291
+ this._adapter = createAdapter();
292
+ this._aliases = MODEL_ALIASES[PROVIDER] || MODEL_ALIASES.anthropic;
293
+ this.provider = PROVIDER;
294
+ }
295
+
296
+ /**
297
+ * Resolve a model alias ('fast' | 'smart' | 'default') or return the
298
+ * string as-is if it's already a full model name.
299
+ */
300
+ model(alias) {
301
+ return this._aliases[alias] || alias || this._aliases.default;
302
+ }
303
+
304
+ /**
305
+ * Main chat method — unified interface across all providers.
306
+ *
307
+ * @param {object} opts
308
+ * model - full model name or alias ('fast'|'smart'|'default')
309
+ * system - system prompt string
310
+ * messages - Anthropic-format messages array
311
+ * max_tokens - max output tokens
312
+ * tools - Anthropic-format tool definitions (translated per provider)
313
+ * stream - return streaming response
314
+ */
315
+ async chat(opts) {
316
+ const model = this.model(opts.model);
317
+ return this._adapter.chat({ ...opts, model });
318
+ }
319
+
320
+ /**
321
+ * Shorthand: send a single user message, get text back.
322
+ */
323
+ async ask(prompt, opts = {}) {
324
+ const result = await this.chat({
325
+ model: opts.model || 'default',
326
+ system: opts.system,
327
+ messages: [{ role: 'user', content: prompt }],
328
+ max_tokens: opts.max_tokens || 1000,
329
+ });
330
+ return result.content.filter(b => b.type === 'text').map(b => b.text).join('\n');
331
+ }
332
+
333
+ /**
334
+ * Cost calculation — returns USD for given token counts.
335
+ */
336
+ costUsd(modelName, inputTokens, outputTokens) {
337
+ const resolved = this.model(modelName);
338
+ const p = PRICING[resolved] || PRICING.default;
339
+ return (inputTokens / 1_000_000) * p.input + (outputTokens / 1_000_000) * p.output;
340
+ }
341
+
342
+ supportsTools() { return this._adapter.supportsTools(); }
343
+ supportsStreaming() { return this._adapter.supportsStreaming(); }
344
+
345
+ getInfo() {
346
+ return {
347
+ provider: this.provider,
348
+ models: this._aliases,
349
+ supportsTools: this.supportsTools(),
350
+ supportsStreaming: this.supportsStreaming(),
351
+ };
352
+ }
353
+ }
354
+
355
+ module.exports = new LLMClient();