@juspay/neurolink 7.6.0 → 7.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (136)
  1. package/CHANGELOG.md +14 -2
  2. package/README.md +79 -4
  3. package/dist/cli/commands/config.d.ts +275 -3
  4. package/dist/cli/commands/config.js +121 -0
  5. package/dist/cli/commands/mcp.js +77 -28
  6. package/dist/cli/factories/commandFactory.js +359 -6
  7. package/dist/core/analytics.js +7 -27
  8. package/dist/core/baseProvider.js +43 -4
  9. package/dist/core/constants.d.ts +46 -0
  10. package/dist/core/constants.js +47 -0
  11. package/dist/core/dynamicModels.d.ts +16 -4
  12. package/dist/core/dynamicModels.js +130 -26
  13. package/dist/core/evaluation.js +5 -1
  14. package/dist/core/evaluationProviders.d.ts +6 -2
  15. package/dist/core/evaluationProviders.js +41 -125
  16. package/dist/core/factory.d.ts +5 -0
  17. package/dist/core/factory.js +62 -50
  18. package/dist/core/modelConfiguration.d.ts +246 -0
  19. package/dist/core/modelConfiguration.js +775 -0
  20. package/dist/core/types.d.ts +22 -3
  21. package/dist/core/types.js +5 -1
  22. package/dist/factories/providerRegistry.js +3 -3
  23. package/dist/index.d.ts +1 -1
  24. package/dist/index.js +1 -1
  25. package/dist/lib/core/analytics.js +7 -27
  26. package/dist/lib/core/baseProvider.js +43 -4
  27. package/dist/lib/core/constants.d.ts +46 -0
  28. package/dist/lib/core/constants.js +47 -0
  29. package/dist/lib/core/dynamicModels.d.ts +16 -4
  30. package/dist/lib/core/dynamicModels.js +130 -26
  31. package/dist/lib/core/evaluation.js +5 -1
  32. package/dist/lib/core/evaluationProviders.d.ts +6 -2
  33. package/dist/lib/core/evaluationProviders.js +41 -125
  34. package/dist/lib/core/factory.d.ts +5 -0
  35. package/dist/lib/core/factory.js +63 -50
  36. package/dist/lib/core/modelConfiguration.d.ts +246 -0
  37. package/dist/lib/core/modelConfiguration.js +775 -0
  38. package/dist/lib/core/types.d.ts +22 -3
  39. package/dist/lib/core/types.js +5 -1
  40. package/dist/lib/factories/providerRegistry.js +3 -3
  41. package/dist/lib/index.d.ts +1 -1
  42. package/dist/lib/index.js +1 -1
  43. package/dist/lib/mcp/factory.d.ts +5 -5
  44. package/dist/lib/mcp/factory.js +2 -2
  45. package/dist/lib/mcp/servers/utilities/utilityServer.d.ts +1 -1
  46. package/dist/lib/mcp/servers/utilities/utilityServer.js +1 -1
  47. package/dist/lib/mcp/toolRegistry.js +2 -2
  48. package/dist/lib/neurolink.d.ts +168 -12
  49. package/dist/lib/neurolink.js +685 -123
  50. package/dist/lib/providers/anthropic.js +52 -2
  51. package/dist/lib/providers/googleAiStudio.js +4 -0
  52. package/dist/lib/providers/googleVertex.d.ts +75 -9
  53. package/dist/lib/providers/googleVertex.js +365 -46
  54. package/dist/lib/providers/huggingFace.d.ts +52 -11
  55. package/dist/lib/providers/huggingFace.js +180 -42
  56. package/dist/lib/providers/litellm.d.ts +9 -9
  57. package/dist/lib/providers/litellm.js +103 -16
  58. package/dist/lib/providers/ollama.d.ts +52 -17
  59. package/dist/lib/providers/ollama.js +276 -68
  60. package/dist/lib/sdk/toolRegistration.d.ts +42 -0
  61. package/dist/lib/sdk/toolRegistration.js +269 -27
  62. package/dist/lib/telemetry/telemetryService.d.ts +6 -0
  63. package/dist/lib/telemetry/telemetryService.js +38 -3
  64. package/dist/lib/types/contextTypes.d.ts +75 -11
  65. package/dist/lib/types/contextTypes.js +227 -1
  66. package/dist/lib/types/domainTypes.d.ts +62 -0
  67. package/dist/lib/types/domainTypes.js +5 -0
  68. package/dist/lib/types/generateTypes.d.ts +52 -0
  69. package/dist/lib/types/index.d.ts +1 -0
  70. package/dist/lib/types/mcpTypes.d.ts +1 -1
  71. package/dist/lib/types/mcpTypes.js +1 -1
  72. package/dist/lib/types/streamTypes.d.ts +14 -0
  73. package/dist/lib/types/universalProviderOptions.d.ts +1 -1
  74. package/dist/lib/utils/errorHandling.d.ts +142 -0
  75. package/dist/lib/utils/errorHandling.js +316 -0
  76. package/dist/lib/utils/factoryProcessing.d.ts +74 -0
  77. package/dist/lib/utils/factoryProcessing.js +588 -0
  78. package/dist/lib/utils/optionsConversion.d.ts +54 -0
  79. package/dist/lib/utils/optionsConversion.js +126 -0
  80. package/dist/lib/utils/optionsUtils.d.ts +246 -0
  81. package/dist/lib/utils/optionsUtils.js +960 -0
  82. package/dist/lib/utils/providerHealth.d.ts +107 -0
  83. package/dist/lib/utils/providerHealth.js +507 -0
  84. package/dist/lib/utils/providerUtils.d.ts +17 -0
  85. package/dist/lib/utils/providerUtils.js +271 -16
  86. package/dist/lib/utils/timeout.js +1 -1
  87. package/dist/lib/utils/tokenLimits.d.ts +33 -0
  88. package/dist/lib/utils/tokenLimits.js +118 -0
  89. package/dist/mcp/factory.d.ts +5 -5
  90. package/dist/mcp/factory.js +2 -2
  91. package/dist/mcp/servers/utilities/utilityServer.d.ts +1 -1
  92. package/dist/mcp/servers/utilities/utilityServer.js +1 -1
  93. package/dist/mcp/toolRegistry.js +2 -2
  94. package/dist/neurolink.d.ts +168 -12
  95. package/dist/neurolink.js +685 -123
  96. package/dist/providers/anthropic.js +52 -2
  97. package/dist/providers/googleAiStudio.js +4 -0
  98. package/dist/providers/googleVertex.d.ts +75 -9
  99. package/dist/providers/googleVertex.js +365 -46
  100. package/dist/providers/huggingFace.d.ts +52 -11
  101. package/dist/providers/huggingFace.js +181 -43
  102. package/dist/providers/litellm.d.ts +9 -9
  103. package/dist/providers/litellm.js +103 -16
  104. package/dist/providers/ollama.d.ts +52 -17
  105. package/dist/providers/ollama.js +276 -68
  106. package/dist/sdk/toolRegistration.d.ts +42 -0
  107. package/dist/sdk/toolRegistration.js +269 -27
  108. package/dist/telemetry/telemetryService.d.ts +6 -0
  109. package/dist/telemetry/telemetryService.js +38 -3
  110. package/dist/types/contextTypes.d.ts +75 -11
  111. package/dist/types/contextTypes.js +227 -2
  112. package/dist/types/domainTypes.d.ts +62 -0
  113. package/dist/types/domainTypes.js +5 -0
  114. package/dist/types/generateTypes.d.ts +52 -0
  115. package/dist/types/index.d.ts +1 -0
  116. package/dist/types/mcpTypes.d.ts +1 -1
  117. package/dist/types/mcpTypes.js +1 -1
  118. package/dist/types/streamTypes.d.ts +14 -0
  119. package/dist/types/universalProviderOptions.d.ts +1 -1
  120. package/dist/types/universalProviderOptions.js +0 -1
  121. package/dist/utils/errorHandling.d.ts +142 -0
  122. package/dist/utils/errorHandling.js +316 -0
  123. package/dist/utils/factoryProcessing.d.ts +74 -0
  124. package/dist/utils/factoryProcessing.js +588 -0
  125. package/dist/utils/optionsConversion.d.ts +54 -0
  126. package/dist/utils/optionsConversion.js +126 -0
  127. package/dist/utils/optionsUtils.d.ts +246 -0
  128. package/dist/utils/optionsUtils.js +960 -0
  129. package/dist/utils/providerHealth.d.ts +107 -0
  130. package/dist/utils/providerHealth.js +507 -0
  131. package/dist/utils/providerUtils.d.ts +17 -0
  132. package/dist/utils/providerUtils.js +271 -16
  133. package/dist/utils/timeout.js +1 -1
  134. package/dist/utils/tokenLimits.d.ts +33 -0
  135. package/dist/utils/tokenLimits.js +118 -0
  136. package/package.json +2 -2
@@ -3,6 +3,7 @@ import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
  import { getDefaultTimeout, TimeoutError } from "../utils/timeout.js";
  import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+ import { modelConfig } from "../core/modelConfiguration.js";
  // Model version constants (configurable via environment)
  const DEFAULT_OLLAMA_MODEL = "llama3.1:8b";
  const FALLBACK_OLLAMA_MODEL = "llama3.2:latest"; // Used when primary model fails
@@ -247,49 +248,47 @@ export class OllamaProvider extends BaseProvider {
          return this.ollamaModel;
      }
      /**
-      * Ollama tool/function calling support is currently disabled due to integration issues.
+      * Ollama Tool Calling Support (Enhanced 2025)
       *
-      * **Current Issues:**
-      * 1. The OllamaLanguageModel from @ai-sdk/provider-utils doesn't properly integrate
-      *    with BaseProvider's tool calling mechanism
-      * 2. Ollama models require specific prompt formatting for function calls that differs
-      *    from the standardized AI SDK format
-      * 3. Tool response parsing and execution flow needs custom implementation
+      * Uses configurable model list from ModelConfiguration instead of hardcoded values.
+      * Tool-capable models can be configured via OLLAMA_TOOL_CAPABLE_MODELS environment variable.
       *
-      * **What's needed to enable tool support:**
-      * - Create a custom OllamaLanguageModel wrapper that handles tool schema formatting
-      * - Implement Ollama-specific tool calling prompt templates
-      * - Add proper response parsing for Ollama's function call format
-      * - Test with models that support function calling (llama3.1, mistral, etc.)
+      * **Configuration Options:**
+      * - Environment variable: OLLAMA_TOOL_CAPABLE_MODELS (comma-separated list)
+      * - Configuration file: providers.ollama.modelBehavior.toolCapableModels
+      * - Fallback: Default list of known tool-capable models
       *
-      * **Tracking:**
-      * - See BaseProvider tool integration patterns in other providers
-      * - Monitor Ollama function calling documentation: https://ollama.com/blog/tool-support
-      * - Track AI SDK updates for better Ollama integration
+      * **Implementation Features:**
+      * - Direct Ollama API integration (/v1/chat/completions)
+      * - Automatic tool schema conversion to Ollama format
+      * - Streaming tool calls with incremental response parsing
+      * - Model compatibility validation and fallback handling
       *
-      * @returns false to disable tools by default
+      * @returns true for supported models, false for unsupported models
       */
      supportsTools() {
-         // IMPLEMENTATION STATUS (2025): Ollama function calling actively evolving
-         //
-         // Current State:
-         // - Function calling added in Ollama 2024, improving in 2025
-         // - Requires compatible models (Llama 3.1+, Code Llama variants)
-         // - AI SDK integration needs custom adapter for Ollama's tool format
-         //
-         // Technical Requirements:
-         // 1. Replace AI SDK with direct Ollama API tool calls
-         // 2. Implement Ollama-specific tool schema conversion
-         // 3. Add function response parsing from Ollama's JSON format
-         // 4. Handle streaming tool calls with incremental parsing
-         // 5. Validate model compatibility before enabling tools
-         //
-         // Implementation Path:
-         // - Use Ollama's chat API with 'tools' parameter
-         // - Parse tool_calls from response.message.tool_calls
-         // - Execute functions and return results to conversation
-         //
-         // Until Ollama-specific implementation, tools disabled for compatibility
+         const modelName = this.modelName.toLowerCase();
+         // Get tool-capable models from configuration
+         const ollamaConfig = modelConfig.getProviderConfig("ollama");
+         const toolCapableModels = ollamaConfig?.modelBehavior?.toolCapableModels || [];
+         // Check if current model matches any tool-capable model patterns
+         const isToolCapable = toolCapableModels.some((capableModel) => modelName.includes(capableModel));
+         if (isToolCapable) {
+             logger.debug("Ollama tool calling enabled", {
+                 model: this.modelName,
+                 reason: "Model supports function calling",
+                 baseUrl: this.baseUrl,
+                 configuredModels: toolCapableModels.length,
+             });
+             return true;
+         }
+         // Log why tools are disabled for transparency
+         logger.debug("Ollama tool calling disabled", {
+             model: this.modelName,
+             reason: "Model not in tool-capable list",
+             suggestion: "Consider using llama3.1:8b-instruct, mistral:7b-instruct, or hermes3:8b for tool calling",
+             availableToolModels: toolCapableModels.slice(0, 3), // Show first 3 for brevity
+         });
          return false;
      }
      // executeGenerate removed - BaseProvider handles all generation with tools
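Note: the new supportsTools() gate above reduces to a case-insensitive substring match against a configured model list. Below is a minimal standalone sketch of that check, assuming OLLAMA_TOOL_CAPABLE_MODELS is a comma-separated list as the JSDoc states; the default patterns shown are illustrative, not the package's actual fallback list.

```ts
// Standalone sketch of the matching logic in supportsTools(); the real list
// comes from modelConfiguration.js (env var, config file, or built-in fallback).
const configured = (process.env.OLLAMA_TOOL_CAPABLE_MODELS ?? "llama3.1,mistral,hermes3")
  .split(",")
  .map((m) => m.trim().toLowerCase())
  .filter(Boolean);

function modelSupportsTools(modelName: string): boolean {
  const name = modelName.toLowerCase();
  // Substring match, so "llama3.1" also enables "llama3.1:8b-instruct"
  return configured.some((pattern) => name.includes(pattern));
}

console.log(modelSupportsTools("llama3.1:8b-instruct")); // true
console.log(modelSupportsTools("phi3:mini")); // false -> provider streams without tools
```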
@@ -297,43 +296,216 @@ export class OllamaProvider extends BaseProvider {
          try {
              this.validateStreamOptions(options);
              await this.checkOllamaHealth();
-             // Direct HTTP streaming implementation for better compatibility
-             const response = await fetch(`${this.baseUrl}/api/generate`, {
-                 method: "POST",
-                 headers: { "Content-Type": "application/json" },
-                 body: JSON.stringify({
-                     model: this.modelName || FALLBACK_OLLAMA_MODEL,
-                     prompt: options.input.text,
-                     system: options.systemPrompt,
-                     stream: true,
-                     options: {
-                         temperature: options.temperature,
-                         num_predict: options.maxTokens || DEFAULT_MAX_TOKENS,
-                     },
-                 }),
-                 signal: createAbortSignalWithTimeout(this.timeout),
-             });
-             if (!response.ok) {
-                 throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
+             // Check if tools are supported and provided
+             const modelSupportsTools = this.supportsTools();
+             const hasTools = options.tools && Object.keys(options.tools).length > 0;
+             if (modelSupportsTools && hasTools) {
+                 // Use chat API with tools for tool-capable models
+                 return this.executeStreamWithTools(options, analysisSchema);
+             }
+             else {
+                 // Use generate API for non-tool scenarios
+                 return this.executeStreamWithoutTools(options, analysisSchema);
              }
-             // Transform to async generator to match other providers
-             const self = this;
-             const transformedStream = async function* () {
-                 const generator = self.createOllamaStream(response);
-                 for await (const chunk of generator) {
-                     yield chunk;
-                 }
-             };
-             return {
-                 stream: transformedStream(),
-                 provider: this.providerName,
-                 model: this.modelName,
-             };
          }
          catch (error) {
              throw this.handleProviderError(error);
          }
      }
+     /**
+      * Execute streaming with Ollama's function calling support
+      * Uses the /v1/chat/completions endpoint with tools parameter
+      */
+     async executeStreamWithTools(options, analysisSchema) {
+         // Convert tools to Ollama format
+         const ollamaTools = this.convertToolsToOllamaFormat(options.tools);
+         // Prepare messages in Ollama chat format
+         const messages = [
+             ...(options.systemPrompt
+                 ? [{ role: "system", content: options.systemPrompt }]
+                 : []),
+             { role: "user", content: options.input.text },
+         ];
+         const response = await fetch(`${this.baseUrl}/v1/chat/completions`, {
+             method: "POST",
+             headers: { "Content-Type": "application/json" },
+             body: JSON.stringify({
+                 model: this.modelName || FALLBACK_OLLAMA_MODEL,
+                 messages,
+                 tools: ollamaTools,
+                 tool_choice: "auto",
+                 stream: true,
+                 temperature: options.temperature,
+                 max_tokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+             }),
+             signal: createAbortSignalWithTimeout(this.timeout),
+         });
+         if (!response.ok) {
+             // Fallback to non-tool mode if chat API fails
+             logger.warn("Ollama chat API failed, falling back to generate API", {
+                 status: response.status,
+                 statusText: response.statusText,
+             });
+             return this.executeStreamWithoutTools(options, analysisSchema);
+         }
+         // Transform to async generator with tool call handling
+         const self = this;
+         const transformedStream = async function* () {
+             const generator = self.createOllamaChatStream(response, options.tools);
+             for await (const chunk of generator) {
+                 yield chunk;
+             }
+         };
+         return {
+             stream: transformedStream(),
+             provider: self.providerName,
+             model: self.modelName,
+         };
+     }
+     /**
+      * Execute streaming without tools using the generate API
+      * Fallback for non-tool scenarios or when chat API is unavailable
+      */
+     async executeStreamWithoutTools(options, analysisSchema) {
+         const response = await fetch(`${this.baseUrl}/api/generate`, {
+             method: "POST",
+             headers: { "Content-Type": "application/json" },
+             body: JSON.stringify({
+                 model: this.modelName || FALLBACK_OLLAMA_MODEL,
+                 prompt: options.input.text,
+                 system: options.systemPrompt,
+                 stream: true,
+                 options: {
+                     temperature: options.temperature,
+                     num_predict: options.maxTokens || DEFAULT_MAX_TOKENS,
+                 },
+             }),
+             signal: createAbortSignalWithTimeout(this.timeout),
+         });
+         if (!response.ok) {
+             throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
+         }
+         // Transform to async generator to match other providers
+         const self = this;
+         const transformedStream = async function* () {
+             const generator = self.createOllamaStream(response);
+             for await (const chunk of generator) {
+                 yield chunk;
+             }
+         };
+         return {
+             stream: transformedStream(),
+             provider: this.providerName,
+             model: this.modelName,
+         };
+     }
+     /**
+      * Convert AI SDK tools format to Ollama's function calling format
+      */
+     convertToolsToOllamaFormat(tools) {
+         if (!tools || typeof tools !== "object") {
+             return [];
+         }
+         const toolsArray = Array.isArray(tools) ? tools : Object.values(tools);
+         return toolsArray.map((tool) => ({
+             type: "function",
+             function: {
+                 name: tool.name || tool.function?.name,
+                 description: tool.description || tool.function?.description,
+                 parameters: tool.parameters ||
+                     tool.function?.parameters || {
+                     type: "object",
+                     properties: {},
+                     required: [],
+                 },
+             },
+         }));
+     }
+     /**
+      * Create stream generator for Ollama chat API with tool call support
+      */
+     async *createOllamaChatStream(response, tools) {
+         const reader = response.body?.getReader();
+         if (!reader) {
+             throw new Error("No response body");
+         }
+         const decoder = new TextDecoder();
+         let buffer = "";
+         try {
+             while (true) {
+                 const { done, value } = await reader.read();
+                 if (done) {
+                     break;
+                 }
+                 buffer += decoder.decode(value, { stream: true });
+                 const lines = buffer.split("\n");
+                 buffer = lines.pop() || "";
+                 for (const line of lines) {
+                     if (line.trim() && line.startsWith("data: ")) {
+                         const dataLine = line.slice(6); // Remove "data: " prefix
+                         if (dataLine === "[DONE]") {
+                             return;
+                         }
+                         try {
+                             const data = JSON.parse(dataLine);
+                             const delta = data.choices?.[0]?.delta;
+                             if (delta?.content) {
+                                 yield { content: delta.content };
+                             }
+                             if (delta?.tool_calls) {
+                                 // Handle tool calls - for now, we'll include them as content
+                                 // Future enhancement: Execute tools and return results
+                                 const toolCallDescription = this.formatToolCallForDisplay(delta.tool_calls);
+                                 if (toolCallDescription) {
+                                     yield { content: toolCallDescription };
+                                 }
+                             }
+                             if (data.choices?.[0]?.finish_reason) {
+                                 return;
+                             }
+                         }
+                         catch (error) {
+                             // Ignore JSON parse errors for incomplete chunks
+                         }
+                     }
+                 }
+             }
+         }
+         finally {
+             reader.releaseLock();
+         }
+     }
+     /**
+      * Format tool calls for display when tools aren't executed directly
+      */
+     formatToolCallForDisplay(toolCalls) {
+         if (!toolCalls || toolCalls.length === 0) {
+             return "";
+         }
+         const descriptions = toolCalls.map((call) => {
+             const functionName = call.function?.name || "unknown_function";
+             let args = {};
+             if (call.function?.arguments) {
+                 try {
+                     args = JSON.parse(call.function.arguments);
+                 }
+                 catch (error) {
+                     // If arguments are malformed, preserve for debugging while marking as invalid
+                     logger.warn?.("Malformed tool call arguments: " + call.function.arguments);
+                     args = {
+                         _malformed: true,
+                         _originalArguments: call.function.arguments,
+                         _error: error instanceof Error ? error.message : String(error),
+                     };
+                 }
+             }
+             return `\n[Tool Call: ${functionName}(${JSON.stringify(args)})]`;
+         });
+         return descriptions.join("");
+     }
+     /**
+      * Create stream generator for Ollama generate API (non-tool mode)
+      */
      async *createOllamaStream(response) {
          const reader = response.body?.getReader();
          if (!reader) {
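For reference, executeStreamWithTools() targets Ollama's OpenAI-compatible /v1/chat/completions endpoint, and convertToolsToOllamaFormat() flattens an AI-SDK-style tool map into the `tools` array that endpoint expects. A small illustration with a hypothetical getWeather tool (the tool itself is not part of the package, and the exact field names on SDK tools may vary):

```ts
// Hypothetical AI-SDK-style tool map, keyed by tool name.
const sdkTools = {
  getWeather: {
    name: "getWeather",
    description: "Look up current weather for a city",
    parameters: {
      type: "object",
      properties: { city: { type: "string" } },
      required: ["city"],
    },
  },
};

// Shape produced by convertToolsToOllamaFormat() and sent as `tools`
// (with tool_choice: "auto") in the streaming chat request body.
const ollamaTools = Object.values(sdkTools).map((tool) => ({
  type: "function",
  function: {
    name: tool.name,
    description: tool.description,
    parameters: tool.parameters,
  },
}));

const body = {
  model: "llama3.1:8b-instruct",
  messages: [{ role: "user", content: "What's the weather in Pune?" }],
  tools: ollamaTools,
  tool_choice: "auto",
  stream: true,
};
// POST `${baseUrl}/v1/chat/completions` with this body; tool_call deltas come
// back as SSE "data: ..." chunks, which createOllamaChatStream() parses.
```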
@@ -449,5 +621,41 @@ export class OllamaProvider extends BaseProvider {
          const models = await this.getAvailableModels();
          return models.includes(modelName);
      }
+     /**
+      * Get recommendations for tool-calling capable Ollama models
+      * Provides guidance for users who want to use function calling locally
+      */
+     static getToolCallingRecommendations() {
+         return {
+             recommended: [
+                 "llama3.1:8b-instruct",
+                 "mistral:7b-instruct-v0.3",
+                 "hermes3:8b-llama3.1",
+                 "codellama:34b-instruct",
+                 "firefunction-v2:70b",
+             ],
+             performance: {
+                 "llama3.1:8b-instruct": { speed: 3, quality: 3, size: "4.6GB" },
+                 "mistral:7b-instruct-v0.3": { speed: 3, quality: 2, size: "4.1GB" },
+                 "hermes3:8b-llama3.1": { speed: 3, quality: 3, size: "4.6GB" },
+                 "codellama:34b-instruct": { speed: 1, quality: 3, size: "19GB" },
+                 "firefunction-v2:70b": { speed: 1, quality: 3, size: "40GB" },
+             },
+             notes: {
+                 "llama3.1:8b-instruct": "Best balance of speed, quality, and tool calling capability",
+                 "mistral:7b-instruct-v0.3": "Lightweight with reliable function calling",
+                 "hermes3:8b-llama3.1": "Specialized for tool execution and reasoning",
+                 "codellama:34b-instruct": "Excellent for code-related tool calling, requires more resources",
+                 "firefunction-v2:70b": "Optimized specifically for function calling, requires high-end hardware",
+             },
+             installation: {
+                 "llama3.1:8b-instruct": "ollama pull llama3.1:8b-instruct",
+                 "mistral:7b-instruct-v0.3": "ollama pull mistral:7b-instruct-v0.3",
+                 "hermes3:8b-llama3.1": "ollama pull hermes3:8b-llama3.1",
+                 "codellama:34b-instruct": "ollama pull codellama:34b-instruct",
+                 "firefunction-v2:70b": "ollama pull firefunction-v2:70b",
+             },
+         };
+     }
  }
  export default OllamaProvider;
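The static recommendations helper added at the end of the class can be used to surface local-model guidance, for example in CLI output. A usage sketch follows; the deep import path is illustrative only, since the class is the default export of the ollama provider module shown in this diff and your build may expose a different entry point:

```ts
// Illustrative import path; adjust to however your project resolves the
// compiled ollama provider module from this package.
import OllamaProvider from "@juspay/neurolink/dist/providers/ollama.js";

const recs = OllamaProvider.getToolCallingRecommendations();
console.log("Tool-calling models:", recs.recommended.join(", "));
console.log("Smallest option:", recs.installation["llama3.1:8b-instruct"]); // "ollama pull llama3.1:8b-instruct"
console.log("Why:", recs.notes["llama3.1:8b-instruct"]);
```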
@@ -8,6 +8,18 @@ import { logger } from "../utils/logger.js";
  import type { InMemoryMCPServerConfig, InMemoryToolInfo } from "../types/mcpTypes.js";
  import type { ToolArgs, ToolContext as CoreToolContext, ToolResult, SimpleTool as CoreSimpleTool } from "../types/tools.js";
  import type { JsonValue } from "../types/common.js";
+ /**
+  * Enhanced validation configuration
+  */
+ declare const VALIDATION_CONFIG: {
+     readonly NAME_MIN_LENGTH: 2;
+     readonly NAME_MAX_LENGTH: 50;
+     readonly DESCRIPTION_MIN_LENGTH: 10;
+     readonly DESCRIPTION_MAX_LENGTH: number;
+     readonly RESERVED_NAMES: Set<string>;
+     readonly RECOMMENDED_PATTERNS: readonly ["get_data", "fetch_info", "calculate_value", "send_message", "create_item", "update_record", "delete_file", "validate_input"];
+     readonly COMPILED_PATTERN_REGEXES: RegExp[];
+ };
  /**
   * Context provided to tools during execution
   * Extends the core ToolContext with SDK-specific features
@@ -86,6 +98,13 @@ export declare function createMCPServerFromTools(serverId: string, tools: Record
   * Helper to create a tool with type safety
   */
  export declare function createTool<TParams = ToolArgs>(config: SimpleTool): SimpleTool;
+ /**
+  * Helper to create a validated tool with suggested improvements
+  */
+ export declare function createValidatedTool(name: string, config: SimpleTool, options?: {
+     strict?: boolean;
+     suggestions?: boolean;
+ }): SimpleTool;
  /**
   * Helper to create a tool with typed parameters
   */
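A hedged usage sketch for the new createValidatedTool() declaration above. Only the signature comes from the .d.ts; the root import path, the SimpleTool field names, and the effect of strict/suggestions are assumptions made for illustration:

```ts
// Assumed re-export from the package root; the helper is declared in
// dist/sdk/toolRegistration.d.ts.
import { createValidatedTool } from "@juspay/neurolink";

// Hypothetical tool definition; field names follow the SimpleTool pattern
// used elsewhere in the SDK and may differ in detail.
const fetchInfo = createValidatedTool(
  "fetch_info",
  {
    description: "Fetch a short summary for a topic from an internal knowledge base",
    parameters: {
      type: "object",
      properties: { topic: { type: "string" } },
      required: ["topic"],
    },
    execute: async ({ topic }) => ({ summary: `No entry found for ${topic}` }),
  },
  { strict: false, suggestions: true },
);
```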
@@ -97,3 +116,26 @@ export declare function createTypedTool<TParams extends z.ZodSchema>(config: Omi
   * Validate tool configuration with detailed error messages
   */
  export declare function validateTool(name: string, tool: SimpleTool): void;
+ /**
+  * Utility to validate multiple tools at once
+  */
+ export declare function validateTools(tools: Record<string, SimpleTool>): {
+     valid: string[];
+     invalid: Array<{
+         name: string;
+         error: string;
+     }>;
+ };
+ /**
+  * Get validation configuration for external inspection
+  */
+ export declare function getValidationConfig(): typeof VALIDATION_CONFIG;
+ /**
+  * Check if a tool name is available (not reserved)
+  */
+ export declare function isToolNameAvailable(name: string): boolean;
+ /**
+  * Suggest alternative tool names if the provided name is invalid
+  */
+ export declare function suggestToolNames(baseName: string): string[];
+ export {};
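The remaining helpers are introspection utilities around VALIDATION_CONFIG. A short sketch of how they compose; the import path is assumed, and whether any particular name is actually in RESERVED_NAMES depends on the runtime configuration:

```ts
// Assumed re-export from the package root; declared in dist/sdk/toolRegistration.d.ts.
import {
  getValidationConfig,
  isToolNameAvailable,
  suggestToolNames,
} from "@juspay/neurolink";

const cfg = getValidationConfig();
console.log(cfg.NAME_MIN_LENGTH, cfg.NAME_MAX_LENGTH); // 2, 50 per the declaration above

const candidate = "eval"; // hypothetical name that may collide with RESERVED_NAMES
if (!isToolNameAvailable(candidate)) {
  console.warn(`"${candidate}" is reserved; alternatives:`, suggestToolNames(candidate));
}
```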