@juspay/neurolink 7.6.1 → 7.7.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (138)
  1. package/CHANGELOG.md +15 -4
  2. package/README.md +78 -3
  3. package/dist/cli/commands/config.d.ts +275 -3
  4. package/dist/cli/commands/config.js +121 -0
  5. package/dist/cli/commands/mcp.js +77 -28
  6. package/dist/cli/factories/commandFactory.js +359 -6
  7. package/dist/core/analytics.js +7 -27
  8. package/dist/core/baseProvider.js +43 -4
  9. package/dist/core/constants.d.ts +46 -0
  10. package/dist/core/constants.js +47 -0
  11. package/dist/core/dynamicModels.d.ts +16 -4
  12. package/dist/core/dynamicModels.js +130 -26
  13. package/dist/core/evaluation.js +5 -1
  14. package/dist/core/evaluationProviders.d.ts +6 -2
  15. package/dist/core/evaluationProviders.js +41 -125
  16. package/dist/core/factory.d.ts +5 -0
  17. package/dist/core/factory.js +62 -50
  18. package/dist/core/modelConfiguration.d.ts +246 -0
  19. package/dist/core/modelConfiguration.js +775 -0
  20. package/dist/core/types.d.ts +22 -3
  21. package/dist/core/types.js +5 -1
  22. package/dist/factories/providerRegistry.js +3 -3
  23. package/dist/index.d.ts +1 -1
  24. package/dist/index.js +1 -1
  25. package/dist/lib/core/analytics.js +7 -27
  26. package/dist/lib/core/baseProvider.js +43 -4
  27. package/dist/lib/core/constants.d.ts +46 -0
  28. package/dist/lib/core/constants.js +47 -0
  29. package/dist/lib/core/dynamicModels.d.ts +16 -4
  30. package/dist/lib/core/dynamicModels.js +130 -26
  31. package/dist/lib/core/evaluation.js +5 -1
  32. package/dist/lib/core/evaluationProviders.d.ts +6 -2
  33. package/dist/lib/core/evaluationProviders.js +41 -125
  34. package/dist/lib/core/factory.d.ts +5 -0
  35. package/dist/lib/core/factory.js +63 -50
  36. package/dist/lib/core/modelConfiguration.d.ts +246 -0
  37. package/dist/lib/core/modelConfiguration.js +775 -0
  38. package/dist/lib/core/types.d.ts +22 -3
  39. package/dist/lib/core/types.js +5 -1
  40. package/dist/lib/factories/providerRegistry.js +3 -3
  41. package/dist/lib/index.d.ts +1 -1
  42. package/dist/lib/index.js +1 -1
  43. package/dist/lib/mcp/factory.d.ts +5 -5
  44. package/dist/lib/mcp/factory.js +2 -2
  45. package/dist/lib/mcp/servers/utilities/utilityServer.d.ts +1 -1
  46. package/dist/lib/mcp/servers/utilities/utilityServer.js +1 -1
  47. package/dist/lib/mcp/toolRegistry.js +2 -2
  48. package/dist/lib/neurolink.d.ts +168 -12
  49. package/dist/lib/neurolink.js +685 -123
  50. package/dist/lib/providers/anthropic.js +52 -2
  51. package/dist/lib/providers/googleAiStudio.js +4 -0
  52. package/dist/lib/providers/googleVertex.d.ts +75 -9
  53. package/dist/lib/providers/googleVertex.js +365 -46
  54. package/dist/lib/providers/huggingFace.d.ts +52 -11
  55. package/dist/lib/providers/huggingFace.js +180 -42
  56. package/dist/lib/providers/litellm.d.ts +9 -9
  57. package/dist/lib/providers/litellm.js +103 -16
  58. package/dist/lib/providers/ollama.d.ts +52 -17
  59. package/dist/lib/providers/ollama.js +276 -68
  60. package/dist/lib/sdk/toolRegistration.d.ts +42 -0
  61. package/dist/lib/sdk/toolRegistration.js +269 -27
  62. package/dist/lib/telemetry/telemetryService.d.ts +6 -0
  63. package/dist/lib/telemetry/telemetryService.js +38 -3
  64. package/dist/lib/types/contextTypes.d.ts +75 -11
  65. package/dist/lib/types/contextTypes.js +227 -1
  66. package/dist/lib/types/domainTypes.d.ts +62 -0
  67. package/dist/lib/types/domainTypes.js +5 -0
  68. package/dist/lib/types/generateTypes.d.ts +52 -0
  69. package/dist/lib/types/index.d.ts +1 -0
  70. package/dist/lib/types/mcpTypes.d.ts +1 -1
  71. package/dist/lib/types/mcpTypes.js +1 -1
  72. package/dist/lib/types/streamTypes.d.ts +14 -0
  73. package/dist/lib/types/universalProviderOptions.d.ts +1 -1
  74. package/dist/lib/utils/errorHandling.d.ts +142 -0
  75. package/dist/lib/utils/errorHandling.js +316 -0
  76. package/dist/lib/utils/factoryProcessing.d.ts +74 -0
  77. package/dist/lib/utils/factoryProcessing.js +588 -0
  78. package/dist/lib/utils/optionsConversion.d.ts +54 -0
  79. package/dist/lib/utils/optionsConversion.js +126 -0
  80. package/dist/lib/utils/optionsUtils.d.ts +246 -0
  81. package/dist/lib/utils/optionsUtils.js +960 -0
  82. package/dist/lib/utils/providerConfig.js +6 -2
  83. package/dist/lib/utils/providerHealth.d.ts +107 -0
  84. package/dist/lib/utils/providerHealth.js +543 -0
  85. package/dist/lib/utils/providerUtils.d.ts +17 -0
  86. package/dist/lib/utils/providerUtils.js +271 -16
  87. package/dist/lib/utils/timeout.js +1 -1
  88. package/dist/lib/utils/tokenLimits.d.ts +33 -0
  89. package/dist/lib/utils/tokenLimits.js +118 -0
  90. package/dist/mcp/factory.d.ts +5 -5
  91. package/dist/mcp/factory.js +2 -2
  92. package/dist/mcp/servers/utilities/utilityServer.d.ts +1 -1
  93. package/dist/mcp/servers/utilities/utilityServer.js +1 -1
  94. package/dist/mcp/toolRegistry.js +2 -2
  95. package/dist/neurolink.d.ts +168 -12
  96. package/dist/neurolink.js +685 -123
  97. package/dist/providers/anthropic.js +52 -2
  98. package/dist/providers/googleAiStudio.js +4 -0
  99. package/dist/providers/googleVertex.d.ts +75 -9
  100. package/dist/providers/googleVertex.js +365 -46
  101. package/dist/providers/huggingFace.d.ts +52 -11
  102. package/dist/providers/huggingFace.js +181 -43
  103. package/dist/providers/litellm.d.ts +9 -9
  104. package/dist/providers/litellm.js +103 -16
  105. package/dist/providers/ollama.d.ts +52 -17
  106. package/dist/providers/ollama.js +276 -68
  107. package/dist/sdk/toolRegistration.d.ts +42 -0
  108. package/dist/sdk/toolRegistration.js +269 -27
  109. package/dist/telemetry/telemetryService.d.ts +6 -0
  110. package/dist/telemetry/telemetryService.js +38 -3
  111. package/dist/types/contextTypes.d.ts +75 -11
  112. package/dist/types/contextTypes.js +227 -2
  113. package/dist/types/domainTypes.d.ts +62 -0
  114. package/dist/types/domainTypes.js +5 -0
  115. package/dist/types/generateTypes.d.ts +52 -0
  116. package/dist/types/index.d.ts +1 -0
  117. package/dist/types/mcpTypes.d.ts +1 -1
  118. package/dist/types/mcpTypes.js +1 -1
  119. package/dist/types/streamTypes.d.ts +14 -0
  120. package/dist/types/universalProviderOptions.d.ts +1 -1
  121. package/dist/types/universalProviderOptions.js +0 -1
  122. package/dist/utils/errorHandling.d.ts +142 -0
  123. package/dist/utils/errorHandling.js +316 -0
  124. package/dist/utils/factoryProcessing.d.ts +74 -0
  125. package/dist/utils/factoryProcessing.js +588 -0
  126. package/dist/utils/optionsConversion.d.ts +54 -0
  127. package/dist/utils/optionsConversion.js +126 -0
  128. package/dist/utils/optionsUtils.d.ts +246 -0
  129. package/dist/utils/optionsUtils.js +960 -0
  130. package/dist/utils/providerConfig.js +6 -2
  131. package/dist/utils/providerHealth.d.ts +107 -0
  132. package/dist/utils/providerHealth.js +543 -0
  133. package/dist/utils/providerUtils.d.ts +17 -0
  134. package/dist/utils/providerUtils.js +271 -16
  135. package/dist/utils/timeout.js +1 -1
  136. package/dist/utils/tokenLimits.d.ts +33 -0
  137. package/dist/utils/tokenLimits.js +118 -0
  138. package/package.json +2 -2
@@ -2,6 +2,7 @@ import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
  import { TimeoutError } from "../utils/timeout.js";
  import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+ import { modelConfig } from "../core/modelConfiguration.js";
  // Model version constants (configurable via environment)
  const DEFAULT_OLLAMA_MODEL = "llama3.1:8b";
  const FALLBACK_OLLAMA_MODEL = "llama3.2:latest"; // Used when primary model fails
@@ -246,49 +247,47 @@ export class OllamaProvider extends BaseProvider {
  return this.ollamaModel;
  }
  /**
- * Ollama tool/function calling support is currently disabled due to integration issues.
+ * Ollama Tool Calling Support (Enhanced 2025)
  *
- * **Current Issues:**
- * 1. The OllamaLanguageModel from @ai-sdk/provider-utils doesn't properly integrate
- * with BaseProvider's tool calling mechanism
- * 2. Ollama models require specific prompt formatting for function calls that differs
- * from the standardized AI SDK format
- * 3. Tool response parsing and execution flow needs custom implementation
+ * Uses configurable model list from ModelConfiguration instead of hardcoded values.
+ * Tool-capable models can be configured via OLLAMA_TOOL_CAPABLE_MODELS environment variable.
  *
- * **What's needed to enable tool support:**
- * - Create a custom OllamaLanguageModel wrapper that handles tool schema formatting
- * - Implement Ollama-specific tool calling prompt templates
- * - Add proper response parsing for Ollama's function call format
- * - Test with models that support function calling (llama3.1, mistral, etc.)
+ * **Configuration Options:**
+ * - Environment variable: OLLAMA_TOOL_CAPABLE_MODELS (comma-separated list)
+ * - Configuration file: providers.ollama.modelBehavior.toolCapableModels
+ * - Fallback: Default list of known tool-capable models
  *
- * **Tracking:**
- * - See BaseProvider tool integration patterns in other providers
- * - Monitor Ollama function calling documentation: https://ollama.com/blog/tool-support
- * - Track AI SDK updates for better Ollama integration
+ * **Implementation Features:**
+ * - Direct Ollama API integration (/v1/chat/completions)
+ * - Automatic tool schema conversion to Ollama format
+ * - Streaming tool calls with incremental response parsing
+ * - Model compatibility validation and fallback handling
  *
- * @returns false to disable tools by default
+ * @returns true for supported models, false for unsupported models
  */
  supportsTools() {
- // IMPLEMENTATION STATUS (2025): Ollama function calling actively evolving
- //
- // Current State:
- // - Function calling added in Ollama 2024, improving in 2025
- // - Requires compatible models (Llama 3.1+, Code Llama variants)
- // - AI SDK integration needs custom adapter for Ollama's tool format
- //
- // Technical Requirements:
- // 1. Replace AI SDK with direct Ollama API tool calls
- // 2. Implement Ollama-specific tool schema conversion
- // 3. Add function response parsing from Ollama's JSON format
- // 4. Handle streaming tool calls with incremental parsing
- // 5. Validate model compatibility before enabling tools
- //
- // Implementation Path:
- // - Use Ollama's chat API with 'tools' parameter
- // - Parse tool_calls from response.message.tool_calls
- // - Execute functions and return results to conversation
- //
- // Until Ollama-specific implementation, tools disabled for compatibility
+ const modelName = this.modelName.toLowerCase();
+ // Get tool-capable models from configuration
+ const ollamaConfig = modelConfig.getProviderConfig("ollama");
+ const toolCapableModels = ollamaConfig?.modelBehavior?.toolCapableModels || [];
+ // Check if current model matches any tool-capable model patterns
+ const isToolCapable = toolCapableModels.some((capableModel) => modelName.includes(capableModel));
+ if (isToolCapable) {
+ logger.debug("Ollama tool calling enabled", {
+ model: this.modelName,
+ reason: "Model supports function calling",
+ baseUrl: this.baseUrl,
+ configuredModels: toolCapableModels.length,
+ });
+ return true;
+ }
+ // Log why tools are disabled for transparency
+ logger.debug("Ollama tool calling disabled", {
+ model: this.modelName,
+ reason: "Model not in tool-capable list",
+ suggestion: "Consider using llama3.1:8b-instruct, mistral:7b-instruct, or hermes3:8b for tool calling",
+ availableToolModels: toolCapableModels.slice(0, 3), // Show first 3 for brevity
+ });
  return false;
  }
  // executeGenerate removed - BaseProvider handles all generation with tools
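
The configurable check above amounts to a case-insensitive substring match against a list of model-name patterns. A minimal sketch of that behaviour in TypeScript, assuming OLLAMA_TOOL_CAPABLE_MODELS is parsed as a comma-separated list (the parsing and default patterns here are illustrative, not the package's exact implementation):

// Illustrative only: mirrors the substring match used by supportsTools(),
// assuming OLLAMA_TOOL_CAPABLE_MODELS is a comma-separated list of patterns.
const toolCapablePatterns = (process.env.OLLAMA_TOOL_CAPABLE_MODELS ?? "llama3.1,mistral,hermes3")
  .split(",")
  .map((p) => p.trim().toLowerCase())
  .filter(Boolean);

function modelSupportsTools(modelName: string): boolean {
  const name = modelName.toLowerCase();
  // A model qualifies when its name contains any configured pattern,
  // so "llama3.1:8b-instruct" matches the "llama3.1" entry.
  return toolCapablePatterns.some((pattern) => name.includes(pattern));
}

modelSupportsTools("llama3.1:8b-instruct"); // true with the defaults above
modelSupportsTools("phi3:mini");            // false unless configured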
@@ -296,43 +295,216 @@ export class OllamaProvider extends BaseProvider {
  try {
  this.validateStreamOptions(options);
  await this.checkOllamaHealth();
- // Direct HTTP streaming implementation for better compatibility
- const response = await fetch(`${this.baseUrl}/api/generate`, {
- method: "POST",
- headers: { "Content-Type": "application/json" },
- body: JSON.stringify({
- model: this.modelName || FALLBACK_OLLAMA_MODEL,
- prompt: options.input.text,
- system: options.systemPrompt,
- stream: true,
- options: {
- temperature: options.temperature,
- num_predict: options.maxTokens || DEFAULT_MAX_TOKENS,
- },
- }),
- signal: createAbortSignalWithTimeout(this.timeout),
- });
- if (!response.ok) {
- throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
+ // Check if tools are supported and provided
+ const modelSupportsTools = this.supportsTools();
+ const hasTools = options.tools && Object.keys(options.tools).length > 0;
+ if (modelSupportsTools && hasTools) {
+ // Use chat API with tools for tool-capable models
+ return this.executeStreamWithTools(options, analysisSchema);
+ }
+ else {
+ // Use generate API for non-tool scenarios
+ return this.executeStreamWithoutTools(options, analysisSchema);
  }
- // Transform to async generator to match other providers
- const self = this;
- const transformedStream = async function* () {
- const generator = self.createOllamaStream(response);
- for await (const chunk of generator) {
- yield chunk;
- }
- };
- return {
- stream: transformedStream(),
- provider: this.providerName,
- model: this.modelName,
- };
  }
  catch (error) {
  throw this.handleProviderError(error);
  }
  }
+ /**
+ * Execute streaming with Ollama's function calling support
+ * Uses the /v1/chat/completions endpoint with tools parameter
+ */
+ async executeStreamWithTools(options, analysisSchema) {
+ // Convert tools to Ollama format
+ const ollamaTools = this.convertToolsToOllamaFormat(options.tools);
+ // Prepare messages in Ollama chat format
+ const messages = [
+ ...(options.systemPrompt
+ ? [{ role: "system", content: options.systemPrompt }]
+ : []),
+ { role: "user", content: options.input.text },
+ ];
+ const response = await fetch(`${this.baseUrl}/v1/chat/completions`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({
+ model: this.modelName || FALLBACK_OLLAMA_MODEL,
+ messages,
+ tools: ollamaTools,
+ tool_choice: "auto",
+ stream: true,
+ temperature: options.temperature,
+ max_tokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+ }),
+ signal: createAbortSignalWithTimeout(this.timeout),
+ });
+ if (!response.ok) {
+ // Fallback to non-tool mode if chat API fails
+ logger.warn("Ollama chat API failed, falling back to generate API", {
+ status: response.status,
+ statusText: response.statusText,
+ });
+ return this.executeStreamWithoutTools(options, analysisSchema);
+ }
+ // Transform to async generator with tool call handling
+ const self = this;
+ const transformedStream = async function* () {
+ const generator = self.createOllamaChatStream(response, options.tools);
+ for await (const chunk of generator) {
+ yield chunk;
+ }
+ };
+ return {
+ stream: transformedStream(),
+ provider: self.providerName,
+ model: self.modelName,
+ };
+ }
+ /**
+ * Execute streaming without tools using the generate API
+ * Fallback for non-tool scenarios or when chat API is unavailable
+ */
+ async executeStreamWithoutTools(options, analysisSchema) {
+ const response = await fetch(`${this.baseUrl}/api/generate`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({
+ model: this.modelName || FALLBACK_OLLAMA_MODEL,
+ prompt: options.input.text,
+ system: options.systemPrompt,
+ stream: true,
+ options: {
+ temperature: options.temperature,
+ num_predict: options.maxTokens || DEFAULT_MAX_TOKENS,
+ },
+ }),
+ signal: createAbortSignalWithTimeout(this.timeout),
+ });
+ if (!response.ok) {
+ throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
+ }
+ // Transform to async generator to match other providers
+ const self = this;
+ const transformedStream = async function* () {
+ const generator = self.createOllamaStream(response);
+ for await (const chunk of generator) {
+ yield chunk;
+ }
+ };
+ return {
+ stream: transformedStream(),
+ provider: this.providerName,
+ model: this.modelName,
+ };
+ }
+ /**
+ * Convert AI SDK tools format to Ollama's function calling format
+ */
+ convertToolsToOllamaFormat(tools) {
+ if (!tools || typeof tools !== "object") {
+ return [];
+ }
+ const toolsArray = Array.isArray(tools) ? tools : Object.values(tools);
+ return toolsArray.map((tool) => ({
+ type: "function",
+ function: {
+ name: tool.name || tool.function?.name,
+ description: tool.description || tool.function?.description,
+ parameters: tool.parameters ||
+ tool.function?.parameters || {
+ type: "object",
+ properties: {},
+ required: [],
+ },
+ },
+ }));
+ }
+ /**
+ * Create stream generator for Ollama chat API with tool call support
+ */
+ async *createOllamaChatStream(response, tools) {
+ const reader = response.body?.getReader();
+ if (!reader) {
+ throw new Error("No response body");
+ }
+ const decoder = new TextDecoder();
+ let buffer = "";
+ try {
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done) {
+ break;
+ }
+ buffer += decoder.decode(value, { stream: true });
+ const lines = buffer.split("\n");
+ buffer = lines.pop() || "";
+ for (const line of lines) {
+ if (line.trim() && line.startsWith("data: ")) {
+ const dataLine = line.slice(6); // Remove "data: " prefix
+ if (dataLine === "[DONE]") {
+ return;
+ }
+ try {
+ const data = JSON.parse(dataLine);
+ const delta = data.choices?.[0]?.delta;
+ if (delta?.content) {
+ yield { content: delta.content };
+ }
+ if (delta?.tool_calls) {
+ // Handle tool calls - for now, we'll include them as content
+ // Future enhancement: Execute tools and return results
+ const toolCallDescription = this.formatToolCallForDisplay(delta.tool_calls);
+ if (toolCallDescription) {
+ yield { content: toolCallDescription };
+ }
+ }
+ if (data.choices?.[0]?.finish_reason) {
+ return;
+ }
+ }
+ catch (error) {
+ // Ignore JSON parse errors for incomplete chunks
+ }
+ }
+ }
+ }
+ }
+ finally {
+ reader.releaseLock();
+ }
+ }
+ /**
+ * Format tool calls for display when tools aren't executed directly
+ */
+ formatToolCallForDisplay(toolCalls) {
+ if (!toolCalls || toolCalls.length === 0) {
+ return "";
+ }
+ const descriptions = toolCalls.map((call) => {
+ const functionName = call.function?.name || "unknown_function";
+ let args = {};
+ if (call.function?.arguments) {
+ try {
+ args = JSON.parse(call.function.arguments);
+ }
+ catch (error) {
+ // If arguments are malformed, preserve for debugging while marking as invalid
+ logger.warn?.("Malformed tool call arguments: " + call.function.arguments);
+ args = {
+ _malformed: true,
+ _originalArguments: call.function.arguments,
+ _error: error instanceof Error ? error.message : String(error),
+ };
+ }
+ }
+ return `\n[Tool Call: ${functionName}(${JSON.stringify(args)})]`;
+ });
+ return descriptions.join("");
+ }
+ /**
+ * Create stream generator for Ollama generate API (non-tool mode)
+ */
  async *createOllamaStream(response) {
  const reader = response.body?.getReader();
  if (!reader) {
@@ -448,5 +620,41 @@ export class OllamaProvider extends BaseProvider {
  const models = await this.getAvailableModels();
  return models.includes(modelName);
  }
+ /**
+ * Get recommendations for tool-calling capable Ollama models
+ * Provides guidance for users who want to use function calling locally
+ */
+ static getToolCallingRecommendations() {
+ return {
+ recommended: [
+ "llama3.1:8b-instruct",
+ "mistral:7b-instruct-v0.3",
+ "hermes3:8b-llama3.1",
+ "codellama:34b-instruct",
+ "firefunction-v2:70b",
+ ],
+ performance: {
+ "llama3.1:8b-instruct": { speed: 3, quality: 3, size: "4.6GB" },
+ "mistral:7b-instruct-v0.3": { speed: 3, quality: 2, size: "4.1GB" },
+ "hermes3:8b-llama3.1": { speed: 3, quality: 3, size: "4.6GB" },
+ "codellama:34b-instruct": { speed: 1, quality: 3, size: "19GB" },
+ "firefunction-v2:70b": { speed: 1, quality: 3, size: "40GB" },
+ },
+ notes: {
+ "llama3.1:8b-instruct": "Best balance of speed, quality, and tool calling capability",
+ "mistral:7b-instruct-v0.3": "Lightweight with reliable function calling",
+ "hermes3:8b-llama3.1": "Specialized for tool execution and reasoning",
+ "codellama:34b-instruct": "Excellent for code-related tool calling, requires more resources",
+ "firefunction-v2:70b": "Optimized specifically for function calling, requires high-end hardware",
+ },
+ installation: {
+ "llama3.1:8b-instruct": "ollama pull llama3.1:8b-instruct",
+ "mistral:7b-instruct-v0.3": "ollama pull mistral:7b-instruct-v0.3",
+ "hermes3:8b-llama3.1": "ollama pull hermes3:8b-llama3.1",
+ "codellama:34b-instruct": "ollama pull codellama:34b-instruct",
+ "firefunction-v2:70b": "ollama pull firefunction-v2:70b",
+ },
+ };
+ }
  }
  export default OllamaProvider;
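
For context, convertToolsToOllamaFormat emits the OpenAI-style function schema that Ollama's /v1/chat/completions endpoint accepts, and executeStreamWithTools posts it alongside the chat messages. A hedged sketch of the resulting shapes; the get_weather tool and its parameters are invented for illustration:

// Illustrative shapes only; the tool name and schema below are made up.
const ollamaTools = [
  {
    type: "function",
    function: {
      name: "get_weather",
      description: "Look up current weather for a city",
      parameters: {
        type: "object",
        properties: { city: { type: "string" } },
        required: ["city"],
      },
    },
  },
];

// Request body in the form executeStreamWithTools posts to /v1/chat/completions.
const requestBody = {
  model: "llama3.1:8b-instruct",
  messages: [{ role: "user", content: "What's the weather in Bengaluru?" }],
  tools: ollamaTools,
  tool_choice: "auto",
  stream: true,
};

Note that in this release, streamed deltas carrying tool_calls are rendered as inline text via formatToolCallForDisplay rather than executed.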
@@ -8,6 +8,18 @@ import { logger } from "../utils/logger.js";
  import type { InMemoryMCPServerConfig, InMemoryToolInfo } from "../types/mcpTypes.js";
  import type { ToolArgs, ToolContext as CoreToolContext, ToolResult, SimpleTool as CoreSimpleTool } from "../types/tools.js";
  import type { JsonValue } from "../types/common.js";
+ /**
+ * Enhanced validation configuration
+ */
+ declare const VALIDATION_CONFIG: {
+ readonly NAME_MIN_LENGTH: 2;
+ readonly NAME_MAX_LENGTH: 50;
+ readonly DESCRIPTION_MIN_LENGTH: 10;
+ readonly DESCRIPTION_MAX_LENGTH: number;
+ readonly RESERVED_NAMES: Set<string>;
+ readonly RECOMMENDED_PATTERNS: readonly ["get_data", "fetch_info", "calculate_value", "send_message", "create_item", "update_record", "delete_file", "validate_input"];
+ readonly COMPILED_PATTERN_REGEXES: RegExp[];
+ };
  /**
  * Context provided to tools during execution
  * Extends the core ToolContext with SDK-specific features
@@ -86,6 +98,13 @@ export declare function createMCPServerFromTools(serverId: string, tools: Record
  * Helper to create a tool with type safety
  */
  export declare function createTool<TParams = ToolArgs>(config: SimpleTool): SimpleTool;
+ /**
+ * Helper to create a validated tool with suggested improvements
+ */
+ export declare function createValidatedTool(name: string, config: SimpleTool, options?: {
+ strict?: boolean;
+ suggestions?: boolean;
+ }): SimpleTool;
  /**
  * Helper to create a tool with typed parameters
  */
@@ -97,3 +116,26 @@ export declare function createTypedTool<TParams extends z.ZodSchema>(config: Omi
  * Validate tool configuration with detailed error messages
  */
  export declare function validateTool(name: string, tool: SimpleTool): void;
+ /**
+ * Utility to validate multiple tools at once
+ */
+ export declare function validateTools(tools: Record<string, SimpleTool>): {
+ valid: string[];
+ invalid: Array<{
+ name: string;
+ error: string;
+ }>;
+ };
+ /**
+ * Get validation configuration for external inspection
+ */
+ export declare function getValidationConfig(): typeof VALIDATION_CONFIG;
+ /**
+ * Check if a tool name is available (not reserved)
+ */
+ export declare function isToolNameAvailable(name: string): boolean;
+ /**
+ * Suggest alternative tool names if the provided name is invalid
+ */
+ export declare function suggestToolNames(baseName: string): string[];
+ export {};
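
The new declarations suggest a validation workflow for SDK tool registration. A usage sketch under the assumption that the helpers behave as their names and signatures indicate; the import specifier and the get_weather tool are assumptions, not confirmed by this diff:

// Sketch only: the import path is assumed; these helpers are declared in
// dist/sdk/toolRegistration.d.ts but may not be re-exported from the package root.
import { validateTools, isToolNameAvailable, suggestToolNames } from "@juspay/neurolink";

// Hypothetical tool map, invented for illustration; cast because SimpleTool's
// full shape is not visible in this diff.
const tools = {
  get_weather: {
    description: "Look up current weather for a city",
    execute: async () => ({ city: "Bengaluru", tempC: 27 }),
  },
} as any;

const { valid, invalid } = validateTools(tools);
if (invalid.length > 0) {
  console.warn("Invalid tools:", invalid);
}

if (!isToolNameAvailable("exec")) {
  // Reserved or invalid names can be swapped for a suggested alternative.
  console.log(suggestToolNames("exec"));
}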