@juspay/neurolink 5.0.0 → 5.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (214) hide show
  1. package/CHANGELOG.md +20 -7
  2. package/README.md +160 -172
  3. package/dist/agent/direct-tools.d.ts +6 -6
  4. package/dist/chat/sse-handler.js +5 -4
  5. package/dist/chat/websocket-chat-handler.js +9 -9
  6. package/dist/cli/commands/config.d.ts +3 -3
  7. package/dist/cli/commands/mcp.js +9 -8
  8. package/dist/cli/commands/ollama.js +3 -3
  9. package/dist/cli/factories/command-factory.d.ts +18 -0
  10. package/dist/cli/factories/command-factory.js +183 -0
  11. package/dist/cli/index.js +105 -157
  12. package/dist/cli/utils/interactive-setup.js +2 -2
  13. package/dist/core/base-provider.d.ts +423 -0
  14. package/dist/core/base-provider.js +365 -0
  15. package/dist/core/constants.d.ts +1 -1
  16. package/dist/core/constants.js +1 -1
  17. package/dist/core/dynamic-models.d.ts +6 -6
  18. package/dist/core/evaluation.d.ts +19 -80
  19. package/dist/core/evaluation.js +185 -484
  20. package/dist/core/factory.d.ts +3 -3
  21. package/dist/core/factory.js +31 -91
  22. package/dist/core/service-registry.d.ts +47 -0
  23. package/dist/core/service-registry.js +112 -0
  24. package/dist/core/types.d.ts +49 -49
  25. package/dist/core/types.js +1 -0
  26. package/dist/factories/compatibility-factory.d.ts +20 -0
  27. package/dist/factories/compatibility-factory.js +69 -0
  28. package/dist/factories/provider-factory.d.ts +72 -0
  29. package/dist/factories/provider-factory.js +144 -0
  30. package/dist/factories/provider-generate-factory.d.ts +20 -0
  31. package/dist/factories/provider-generate-factory.js +87 -0
  32. package/dist/factories/provider-registry.d.ts +38 -0
  33. package/dist/factories/provider-registry.js +107 -0
  34. package/dist/index.d.ts +8 -5
  35. package/dist/index.js +5 -5
  36. package/dist/lib/agent/direct-tools.d.ts +6 -6
  37. package/dist/lib/chat/sse-handler.js +5 -4
  38. package/dist/lib/chat/websocket-chat-handler.js +9 -9
  39. package/dist/lib/core/base-provider.d.ts +423 -0
  40. package/dist/lib/core/base-provider.js +365 -0
  41. package/dist/lib/core/constants.d.ts +1 -1
  42. package/dist/lib/core/constants.js +1 -1
  43. package/dist/lib/core/dynamic-models.d.ts +6 -6
  44. package/dist/lib/core/evaluation.d.ts +19 -80
  45. package/dist/lib/core/evaluation.js +185 -484
  46. package/dist/lib/core/factory.d.ts +3 -3
  47. package/dist/lib/core/factory.js +30 -91
  48. package/dist/lib/core/service-registry.d.ts +47 -0
  49. package/dist/lib/core/service-registry.js +112 -0
  50. package/dist/lib/core/types.d.ts +49 -49
  51. package/dist/lib/core/types.js +1 -0
  52. package/dist/lib/factories/compatibility-factory.d.ts +20 -0
  53. package/dist/lib/factories/compatibility-factory.js +69 -0
  54. package/dist/lib/factories/provider-factory.d.ts +72 -0
  55. package/dist/lib/factories/provider-factory.js +144 -0
  56. package/dist/lib/factories/provider-generate-factory.d.ts +20 -0
  57. package/dist/lib/factories/provider-generate-factory.js +87 -0
  58. package/dist/lib/factories/provider-registry.d.ts +38 -0
  59. package/dist/lib/factories/provider-registry.js +107 -0
  60. package/dist/lib/index.d.ts +8 -5
  61. package/dist/lib/index.js +5 -5
  62. package/dist/lib/mcp/client.js +5 -5
  63. package/dist/lib/mcp/config.js +28 -3
  64. package/dist/lib/mcp/dynamic-orchestrator.js +8 -8
  65. package/dist/lib/mcp/external-client.js +2 -2
  66. package/dist/lib/mcp/factory.d.ts +1 -1
  67. package/dist/lib/mcp/factory.js +1 -1
  68. package/dist/lib/mcp/function-calling.js +1 -1
  69. package/dist/lib/mcp/initialize-tools.d.ts +1 -1
  70. package/dist/lib/mcp/initialize-tools.js +45 -1
  71. package/dist/lib/mcp/initialize.js +16 -6
  72. package/dist/lib/mcp/neurolink-mcp-client.js +10 -10
  73. package/dist/lib/mcp/orchestrator.js +4 -4
  74. package/dist/lib/mcp/servers/agent/direct-tools-server.d.ts +8 -0
  75. package/dist/lib/mcp/servers/agent/direct-tools-server.js +109 -0
  76. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  77. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +8 -6
  78. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  79. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  80. package/dist/lib/mcp/unified-registry.d.ts +4 -0
  81. package/dist/lib/mcp/unified-registry.js +42 -9
  82. package/dist/lib/neurolink.d.ts +161 -174
  83. package/dist/lib/neurolink.js +723 -397
  84. package/dist/lib/providers/amazon-bedrock.d.ts +32 -0
  85. package/dist/lib/providers/amazon-bedrock.js +143 -0
  86. package/dist/lib/providers/analytics-helper.js +7 -4
  87. package/dist/lib/providers/anthropic-baseprovider.d.ts +23 -0
  88. package/dist/lib/providers/anthropic-baseprovider.js +114 -0
  89. package/dist/lib/providers/anthropic.d.ts +19 -39
  90. package/dist/lib/providers/anthropic.js +84 -378
  91. package/dist/lib/providers/azure-openai.d.ts +20 -0
  92. package/dist/lib/providers/azure-openai.js +89 -0
  93. package/dist/lib/providers/function-calling-provider.d.ts +14 -12
  94. package/dist/lib/providers/function-calling-provider.js +114 -64
  95. package/dist/lib/providers/google-ai-studio.d.ts +23 -0
  96. package/dist/lib/providers/google-ai-studio.js +107 -0
  97. package/dist/lib/providers/google-vertex.d.ts +47 -0
  98. package/dist/lib/providers/google-vertex.js +205 -0
  99. package/dist/lib/providers/huggingFace.d.ts +33 -27
  100. package/dist/lib/providers/huggingFace.js +103 -400
  101. package/dist/lib/providers/index.d.ts +9 -9
  102. package/dist/lib/providers/index.js +9 -9
  103. package/dist/lib/providers/mcp-provider.d.ts +13 -8
  104. package/dist/lib/providers/mcp-provider.js +63 -18
  105. package/dist/lib/providers/mistral.d.ts +42 -0
  106. package/dist/lib/providers/mistral.js +160 -0
  107. package/dist/lib/providers/ollama.d.ts +52 -35
  108. package/dist/lib/providers/ollama.js +297 -477
  109. package/dist/lib/providers/openAI.d.ts +21 -21
  110. package/dist/lib/providers/openAI.js +81 -245
  111. package/dist/lib/sdk/tool-extension.d.ts +181 -0
  112. package/dist/lib/sdk/tool-extension.js +283 -0
  113. package/dist/lib/sdk/tool-registration.d.ts +95 -0
  114. package/dist/lib/sdk/tool-registration.js +167 -0
  115. package/dist/lib/types/generate-types.d.ts +80 -0
  116. package/dist/lib/types/generate-types.js +1 -0
  117. package/dist/lib/types/mcp-types.d.ts +116 -0
  118. package/dist/lib/types/mcp-types.js +5 -0
  119. package/dist/lib/types/stream-types.d.ts +95 -0
  120. package/dist/lib/types/stream-types.js +1 -0
  121. package/dist/lib/types/universal-provider-options.d.ts +87 -0
  122. package/dist/lib/types/universal-provider-options.js +53 -0
  123. package/dist/lib/utils/providerUtils-fixed.js +1 -1
  124. package/dist/lib/utils/streaming-utils.d.ts +14 -2
  125. package/dist/lib/utils/streaming-utils.js +0 -3
  126. package/dist/mcp/client.js +5 -5
  127. package/dist/mcp/config.js +28 -3
  128. package/dist/mcp/dynamic-orchestrator.js +8 -8
  129. package/dist/mcp/external-client.js +2 -2
  130. package/dist/mcp/factory.d.ts +1 -1
  131. package/dist/mcp/factory.js +1 -1
  132. package/dist/mcp/function-calling.js +1 -1
  133. package/dist/mcp/initialize-tools.d.ts +1 -1
  134. package/dist/mcp/initialize-tools.js +45 -1
  135. package/dist/mcp/initialize.js +16 -6
  136. package/dist/mcp/neurolink-mcp-client.js +10 -10
  137. package/dist/mcp/orchestrator.js +4 -4
  138. package/dist/mcp/servers/agent/direct-tools-server.d.ts +8 -0
  139. package/dist/mcp/servers/agent/direct-tools-server.js +109 -0
  140. package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  141. package/dist/mcp/servers/ai-providers/ai-core-server.js +8 -6
  142. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  143. package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  144. package/dist/mcp/unified-registry.d.ts +4 -0
  145. package/dist/mcp/unified-registry.js +42 -9
  146. package/dist/neurolink.d.ts +161 -174
  147. package/dist/neurolink.js +723 -397
  148. package/dist/providers/amazon-bedrock.d.ts +32 -0
  149. package/dist/providers/amazon-bedrock.js +143 -0
  150. package/dist/providers/analytics-helper.js +7 -4
  151. package/dist/providers/anthropic-baseprovider.d.ts +23 -0
  152. package/dist/providers/anthropic-baseprovider.js +114 -0
  153. package/dist/providers/anthropic.d.ts +19 -39
  154. package/dist/providers/anthropic.js +83 -377
  155. package/dist/providers/azure-openai.d.ts +20 -0
  156. package/dist/providers/azure-openai.js +89 -0
  157. package/dist/providers/function-calling-provider.d.ts +14 -12
  158. package/dist/providers/function-calling-provider.js +114 -64
  159. package/dist/providers/google-ai-studio.d.ts +23 -0
  160. package/dist/providers/google-ai-studio.js +108 -0
  161. package/dist/providers/google-vertex.d.ts +47 -0
  162. package/dist/providers/google-vertex.js +205 -0
  163. package/dist/providers/huggingFace.d.ts +33 -27
  164. package/dist/providers/huggingFace.js +102 -399
  165. package/dist/providers/index.d.ts +9 -9
  166. package/dist/providers/index.js +9 -9
  167. package/dist/providers/mcp-provider.d.ts +13 -8
  168. package/dist/providers/mcp-provider.js +63 -18
  169. package/dist/providers/mistral.d.ts +42 -0
  170. package/dist/providers/mistral.js +160 -0
  171. package/dist/providers/ollama.d.ts +52 -35
  172. package/dist/providers/ollama.js +297 -476
  173. package/dist/providers/openAI.d.ts +21 -21
  174. package/dist/providers/openAI.js +81 -246
  175. package/dist/sdk/tool-extension.d.ts +181 -0
  176. package/dist/sdk/tool-extension.js +283 -0
  177. package/dist/sdk/tool-registration.d.ts +95 -0
  178. package/dist/sdk/tool-registration.js +168 -0
  179. package/dist/types/generate-types.d.ts +80 -0
  180. package/dist/types/generate-types.js +1 -0
  181. package/dist/types/mcp-types.d.ts +116 -0
  182. package/dist/types/mcp-types.js +5 -0
  183. package/dist/types/stream-types.d.ts +95 -0
  184. package/dist/types/stream-types.js +1 -0
  185. package/dist/types/universal-provider-options.d.ts +87 -0
  186. package/dist/types/universal-provider-options.js +53 -0
  187. package/dist/utils/providerUtils-fixed.js +1 -1
  188. package/dist/utils/streaming-utils.d.ts +14 -2
  189. package/dist/utils/streaming-utils.js +0 -3
  190. package/package.json +15 -10
  191. package/dist/lib/providers/agent-enhanced-provider.d.ts +0 -89
  192. package/dist/lib/providers/agent-enhanced-provider.js +0 -614
  193. package/dist/lib/providers/amazonBedrock.d.ts +0 -19
  194. package/dist/lib/providers/amazonBedrock.js +0 -334
  195. package/dist/lib/providers/azureOpenAI.d.ts +0 -39
  196. package/dist/lib/providers/azureOpenAI.js +0 -436
  197. package/dist/lib/providers/googleAIStudio.d.ts +0 -49
  198. package/dist/lib/providers/googleAIStudio.js +0 -333
  199. package/dist/lib/providers/googleVertexAI.d.ts +0 -38
  200. package/dist/lib/providers/googleVertexAI.js +0 -519
  201. package/dist/lib/providers/mistralAI.d.ts +0 -34
  202. package/dist/lib/providers/mistralAI.js +0 -294
  203. package/dist/providers/agent-enhanced-provider.d.ts +0 -89
  204. package/dist/providers/agent-enhanced-provider.js +0 -614
  205. package/dist/providers/amazonBedrock.d.ts +0 -19
  206. package/dist/providers/amazonBedrock.js +0 -334
  207. package/dist/providers/azureOpenAI.d.ts +0 -39
  208. package/dist/providers/azureOpenAI.js +0 -437
  209. package/dist/providers/googleAIStudio.d.ts +0 -49
  210. package/dist/providers/googleAIStudio.js +0 -333
  211. package/dist/providers/googleVertexAI.d.ts +0 -38
  212. package/dist/providers/googleVertexAI.js +0 -519
  213. package/dist/providers/mistralAI.d.ts +0 -34
  214. package/dist/providers/mistralAI.js +0 -294
@@ -1,33 +1,39 @@
1
1
  import type { ZodType, ZodTypeDef } from "zod";
2
- import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from "ai";
3
- import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
4
- export declare class HuggingFace implements AIProvider {
5
- private modelName;
6
- private client;
2
+ import { type Schema, type LanguageModelV1 } from "ai";
3
+ import type { AIProviderName } from "../core/types.js";
4
+ import type { StreamOptions, StreamResult } from "../types/stream-types.js";
5
+ import { BaseProvider } from "../core/base-provider.js";
6
+ /**
7
+ * HuggingFace Provider - BaseProvider Implementation
8
+ * Using AI SDK with HuggingFace's OpenAI-compatible endpoint
9
+ */
10
+ export declare class HuggingFaceProvider extends BaseProvider {
11
+ private model;
12
+ constructor(modelName?: string);
7
13
  /**
8
- * Initializes a new instance of HuggingFace
9
- * @param modelName - Optional model name to override the default from config
14
+ * HuggingFace models currently don't properly support tool/function calling
15
+ *
16
+ * **Tested Models & Issues:**
17
+ * - microsoft/DialoGPT-medium: Describes tools instead of executing them
18
+ * - Most HF models via router endpoint: Function schema passed but not executed
19
+ * - Issue: Models treat tool definitions as conversation context rather than executable functions
20
+ *
21
+ * **Known Limitations:**
22
+ * - Tools are visible to model but treated as descriptive text
23
+ * - No proper function call response format handling
24
+ * - HuggingFace router endpoint doesn't enforce OpenAI-compatible tool execution
25
+ *
26
+ * @returns false to disable tools by default until proper implementation
10
27
  */
11
- constructor(modelName?: string | null);
28
+ supportsTools(): boolean;
29
+ protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
30
+ protected getProviderName(): AIProviderName;
31
+ protected getDefaultModel(): string;
12
32
  /**
13
- * Gets the appropriate model instance
14
- * @private
33
+ * Returns the Vercel AI SDK model instance for HuggingFace
15
34
  */
16
- private getModel;
17
- /**
18
- * Processes text using streaming approach with enhanced error handling callbacks
19
- * @param prompt - The input text prompt to analyze
20
- * @param analysisSchema - Optional Zod schema or Schema object for output validation
21
- * @returns Promise resolving to StreamTextResult or null if operation fails
22
- */
23
- streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
24
- /**
25
- * Processes text using non-streaming approach with optional schema validation
26
- * @param prompt - The input text prompt to analyze
27
- * @param analysisSchema - Optional Zod schema or Schema object for output validation
28
- * @returns Promise resolving to GenerateTextResult or null if operation fails
29
- */
30
- generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
31
- generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
32
- gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
35
+ protected getAISDKModel(): LanguageModelV1;
36
+ protected handleProviderError(error: any): Error;
37
+ private validateStreamOptions;
33
38
  }
39
+ export default HuggingFaceProvider;
@@ -1,433 +1,136 @@
1
- import { HfInference } from "@huggingface/inference";
2
- import { streamText, generateText, Output, } from "ai";
1
+ import { createOpenAI } from "@ai-sdk/openai";
2
+ import { streamText, Output } from "ai";
3
+ import { BaseProvider } from "../core/base-provider.js";
3
4
  import { logger } from "../utils/logger.js";
4
5
  import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
5
6
  import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
6
- import { evaluateResponse } from "../core/evaluation.js";
7
- // Default system context
8
- const DEFAULT_SYSTEM_CONTEXT = {
9
- systemPrompt: "You are a helpful AI assistant.",
10
- };
11
7
  // Configuration helpers
12
8
  const getHuggingFaceApiKey = () => {
13
9
  const apiKey = process.env.HUGGINGFACE_API_KEY || process.env.HF_TOKEN;
14
10
  if (!apiKey) {
15
- throw new Error("HUGGINGFACE_API_KEY environment variable is not set");
11
+ throw new Error(`❌ HuggingFace Provider Configuration Error\n\nMissing required environment variables: HUGGINGFACE_API_KEY\n\n🔧 Step 1: Get Credentials\n1. Visit: https://huggingface.co/settings/tokens\n2. Create new API token\n3. Copy the token\n\n🔧 Step 2: Set Environment Variable\nAdd to your .env file:\nHUGGINGFACE_API_KEY=your_token_here\n\n🔧 Step 3: Restart Application\nRestart your application to load the new environment variables.`);
16
12
  }
17
13
  return apiKey;
18
14
  };
19
- const getHuggingFaceModelId = () => {
15
+ const getDefaultHuggingFaceModel = () => {
20
16
  return process.env.HUGGINGFACE_MODEL || "microsoft/DialoGPT-medium";
21
17
  };
22
- const hasValidAuth = () => {
18
+ const hasHuggingFaceCredentials = () => {
23
19
  return !!(process.env.HUGGINGFACE_API_KEY || process.env.HF_TOKEN);
24
20
  };
25
- // Lazy initialization cache
26
- let _hfClient = null;
27
- function getHuggingFaceClient() {
28
- if (!_hfClient) {
21
+ /**
22
+ * HuggingFace Provider - BaseProvider Implementation
23
+ * Using AI SDK with HuggingFace's OpenAI-compatible endpoint
24
+ */
25
+ export class HuggingFaceProvider extends BaseProvider {
26
+ model;
27
+ constructor(modelName) {
28
+ super(modelName, "huggingface");
29
+ // Get API key and validate
29
30
  const apiKey = getHuggingFaceApiKey();
30
- _hfClient = new HfInference(apiKey);
31
- }
32
- return _hfClient;
33
- }
34
- // Retry configuration for model loading
35
- const RETRY_CONFIG = {
36
- maxRetries: 3,
37
- baseDelay: 2000, // 2 seconds
38
- maxDelay: 30000, // 30 seconds
39
- backoffMultiplier: 2,
40
- };
41
- // Helper function for exponential backoff retry
42
- async function retryWithBackoff(operation, retryConfig = RETRY_CONFIG) {
43
- let lastError;
44
- for (let attempt = 0; attempt <= retryConfig.maxRetries; attempt++) {
45
- try {
46
- return await operation();
47
- }
48
- catch (error) {
49
- lastError = error;
50
- // Check if it's a model loading error (503 status)
51
- if (error instanceof Error && error.message.includes("503")) {
52
- if (attempt < retryConfig.maxRetries) {
53
- const delay = Math.min(retryConfig.baseDelay *
54
- Math.pow(retryConfig.backoffMultiplier, attempt), retryConfig.maxDelay);
55
- logger.debug("HuggingFace model loading, retrying...", {
56
- attempt: attempt + 1,
57
- maxRetries: retryConfig.maxRetries,
58
- delayMs: delay,
59
- error: error.message,
60
- });
61
- await new Promise((resolve) => setTimeout(resolve, delay));
62
- continue;
63
- }
64
- }
65
- // For non-503 errors or final attempt, throw immediately
66
- throw error;
67
- }
68
- }
69
- throw lastError;
70
- }
71
- // Custom LanguageModelV1 implementation for Hugging Face
72
- class HuggingFaceLanguageModel {
73
- specificationVersion = "v1";
74
- provider = "huggingface";
75
- modelId;
76
- maxTokens;
77
- supportsStreaming = true;
78
- defaultObjectGenerationMode = "json";
79
- client;
80
- constructor(modelId, client) {
81
- this.modelId = modelId;
82
- this.client = client;
83
- }
84
- estimateTokens(text) {
85
- return Math.ceil(text.length / 4); // Rough estimation: 4 characters per token
86
- }
87
- convertMessagesToPrompt(messages) {
88
- return messages
89
- .map((msg) => {
90
- if (typeof msg.content === "string") {
91
- return `${msg.role}: ${msg.content}`;
92
- }
93
- else if (Array.isArray(msg.content)) {
94
- // Handle multi-part content (text, images, etc.)
95
- return `${msg.role}: ${msg.content
96
- .filter((part) => part.type === "text")
97
- .map((part) => part.text)
98
- .join(" ")}`;
99
- }
100
- return "";
101
- })
102
- .join("\n");
103
- }
104
- async doGenerate(options) {
105
- const prompt = this.convertMessagesToPrompt(options.prompt);
106
- const response = await retryWithBackoff(async () => {
107
- return await this.client.textGeneration({
108
- model: this.modelId,
109
- inputs: prompt,
110
- parameters: {
111
- temperature: options.temperature || 0.7,
112
- max_new_tokens: options.maxTokens ?? DEFAULT_MAX_TOKENS,
113
- return_full_text: false,
114
- do_sample: (options.temperature || 0.7) > 0,
115
- },
116
- });
31
+ // Create HuggingFace provider using unified router endpoint (2025)
32
+ const huggingface = createOpenAI({
33
+ apiKey: apiKey,
34
+ baseURL: "https://router.huggingface.co/v1",
117
35
  });
118
- const generatedText = response.generated_text || "";
119
- const promptTokens = this.estimateTokens(prompt);
120
- const completionTokens = this.estimateTokens(generatedText);
121
- return {
122
- text: generatedText,
123
- usage: {
124
- promptTokens,
125
- completionTokens,
126
- totalTokens: promptTokens + completionTokens,
127
- },
128
- finishReason: "stop",
129
- logprobs: undefined,
130
- rawCall: { rawPrompt: prompt, rawSettings: options },
131
- rawResponse: { headers: {} },
132
- };
133
- }
134
- async doStream(options) {
135
- const prompt = this.convertMessagesToPrompt(options.prompt);
136
- // HuggingFace Inference API doesn't support true streaming
137
- // We'll simulate streaming by generating the full text and chunking it
138
- const response = await this.doGenerate(options);
139
- // Create a ReadableStream that chunks the response
140
- const stream = new ReadableStream({
141
- start(controller) {
142
- const text = response.text || "";
143
- const chunkSize = Math.max(1, Math.floor(text.length / 10)); // 10 chunks
144
- let index = 0;
145
- const pushChunk = () => {
146
- if (index < text.length) {
147
- const chunk = text.slice(index, index + chunkSize);
148
- controller.enqueue({
149
- type: "text-delta",
150
- textDelta: chunk,
151
- });
152
- index += chunkSize;
153
- // Add delay to simulate streaming
154
- setTimeout(pushChunk, 50);
155
- }
156
- else {
157
- // Send finish event
158
- controller.enqueue({
159
- type: "finish",
160
- finishReason: response.finishReason,
161
- usage: response.usage,
162
- logprobs: response.logprobs,
163
- });
164
- controller.close();
165
- }
166
- };
167
- pushChunk();
168
- },
36
+ // Initialize model
37
+ this.model = huggingface(this.modelName);
38
+ logger.debug("HuggingFaceProvider initialized", {
39
+ model: this.modelName,
40
+ provider: this.providerName,
169
41
  });
170
- return {
171
- stream,
172
- rawCall: response.rawCall,
173
- rawResponse: response.rawResponse,
174
- };
175
42
  }
176
- }
177
- // Hugging Face class with enhanced error handling
178
- export class HuggingFace {
179
- modelName;
180
- client;
43
+ // ===================
44
+ // ABSTRACT METHOD IMPLEMENTATIONS
45
+ // ===================
181
46
  /**
182
- * Initializes a new instance of HuggingFace
183
- * @param modelName - Optional model name to override the default from config
47
+ * HuggingFace models currently don't properly support tool/function calling
48
+ *
49
+ * **Tested Models & Issues:**
50
+ * - microsoft/DialoGPT-medium: Describes tools instead of executing them
51
+ * - Most HF models via router endpoint: Function schema passed but not executed
52
+ * - Issue: Models treat tool definitions as conversation context rather than executable functions
53
+ *
54
+ * **Known Limitations:**
55
+ * - Tools are visible to model but treated as descriptive text
56
+ * - No proper function call response format handling
57
+ * - HuggingFace router endpoint doesn't enforce OpenAI-compatible tool execution
58
+ *
59
+ * @returns false to disable tools by default until proper implementation
184
60
  */
185
- constructor(modelName) {
186
- const functionTag = "HuggingFace.constructor";
187
- this.modelName = modelName || getHuggingFaceModelId();
188
- try {
189
- this.client = getHuggingFaceClient();
190
- logger.debug(`[${functionTag}] Initialization started`, {
191
- modelName: this.modelName,
192
- hasApiKey: hasValidAuth(),
193
- });
194
- logger.debug(`[${functionTag}] Initialization completed`, {
195
- modelName: this.modelName,
196
- success: true,
197
- });
198
- }
199
- catch (err) {
200
- logger.error(`[${functionTag}] Initialization failed`, {
201
- message: "Error in initializing Hugging Face",
202
- modelName: this.modelName,
203
- error: err instanceof Error ? err.message : String(err),
204
- stack: err instanceof Error ? err.stack : undefined,
205
- });
206
- throw err;
207
- }
208
- }
209
- /**
210
- * Gets the appropriate model instance
211
- * @private
212
- */
213
- getModel() {
214
- logger.debug("HuggingFace.getModel - Hugging Face model selected", {
215
- modelName: this.modelName,
216
- });
217
- return new HuggingFaceLanguageModel(this.modelName, this.client);
61
+ supportsTools() {
62
+ // TODO: Implement proper HuggingFace tool calling support
63
+ // Requires: Custom tool schema formatting, response parsing, execution flow
64
+ // Track models that support function calling: CodeLlama, Llama variants
65
+ return false;
218
66
  }
219
- /**
220
- * Processes text using streaming approach with enhanced error handling callbacks
221
- * @param prompt - The input text prompt to analyze
222
- * @param analysisSchema - Optional Zod schema or Schema object for output validation
223
- * @returns Promise resolving to StreamTextResult or null if operation fails
224
- */
225
- async streamText(optionsOrPrompt, analysisSchema) {
226
- const functionTag = "HuggingFace.streamText";
227
- const provider = "huggingface";
228
- let chunkCount = 0;
67
+ // executeGenerate removed - BaseProvider handles all generation with tools
68
+ async executeStream(options, analysisSchema) {
69
+ this.validateStreamOptions(options);
70
+ const timeout = this.getTimeout(options);
71
+ const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
229
72
  try {
230
- // Parse parameters - support both string and options object
231
- const options = typeof optionsOrPrompt === "string"
232
- ? { prompt: optionsOrPrompt }
233
- : optionsOrPrompt;
234
- const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
235
- // Use schema from options or fallback parameter
236
- const finalSchema = schema || analysisSchema;
237
- logger.debug(`[${functionTag}] Stream request started`, {
238
- provider,
239
- modelName: this.modelName,
240
- promptLength: prompt.length,
241
- temperature,
242
- maxTokens,
243
- hasSchema: !!finalSchema,
244
- timeout,
73
+ const result = await streamText({
74
+ model: this.model,
75
+ prompt: options.input.text,
76
+ system: options.systemPrompt,
77
+ temperature: options.temperature,
78
+ maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
79
+ tools: options.tools,
80
+ toolChoice: "auto",
81
+ abortSignal: timeoutController?.controller.signal,
245
82
  });
246
- const model = this.getModel();
247
- // Create timeout controller if timeout is specified
248
- const timeoutController = createTimeoutController(timeout, provider, "stream");
249
- const streamOptions = {
250
- model: model,
251
- prompt: prompt,
252
- system: systemPrompt,
253
- temperature,
254
- maxTokens,
255
- // Add abort signal if available
256
- ...(timeoutController && {
257
- abortSignal: timeoutController.controller.signal,
258
- }),
259
- onError: (event) => {
260
- const error = event.error;
261
- const errorMessage = error instanceof Error ? error.message : String(error);
262
- const errorStack = error instanceof Error ? error.stack : undefined;
263
- logger.error(`[${functionTag}] Stream text error`, {
264
- provider,
265
- modelName: this.modelName,
266
- error: errorMessage,
267
- stack: errorStack,
268
- promptLength: prompt.length,
269
- chunkCount,
270
- });
271
- },
272
- onFinish: (event) => {
273
- logger.debug(`[${functionTag}] Stream text finished`, {
274
- provider,
275
- modelName: this.modelName,
276
- finishReason: event.finishReason,
277
- usage: event.usage,
278
- totalChunks: chunkCount,
279
- promptLength: prompt.length,
280
- responseLength: event.text?.length || 0,
281
- });
282
- },
283
- onChunk: (event) => {
284
- chunkCount++;
285
- logger.debug(`[${functionTag}] Stream text chunk`, {
286
- provider,
287
- modelName: this.modelName,
288
- chunkNumber: chunkCount,
289
- chunkLength: event.chunk.text?.length || 0,
290
- chunkType: event.chunk.type,
291
- });
292
- },
83
+ timeoutController?.cleanup();
84
+ // Transform stream to match StreamResult interface
85
+ const transformedStream = async function* () {
86
+ for await (const chunk of result.textStream) {
87
+ yield { content: chunk };
88
+ }
89
+ };
90
+ return {
91
+ stream: transformedStream(),
92
+ provider: this.providerName,
93
+ model: this.modelName,
293
94
  };
294
- if (finalSchema) {
295
- streamOptions.experimental_output = Output.object({
296
- schema: finalSchema,
297
- });
298
- }
299
- const result = streamText(streamOptions);
300
- // For streaming, we can't clean up immediately, but the timeout will auto-clean
301
- // The user should handle the stream and any timeout errors
302
- return result;
303
95
  }
304
- catch (err) {
305
- // Log timeout errors specifically
306
- if (err instanceof TimeoutError) {
307
- logger.error(`[${functionTag}] Timeout error`, {
308
- provider,
309
- modelName: this.modelName,
310
- timeout: err.timeout,
311
- message: err.message,
312
- });
313
- }
314
- else {
315
- logger.error(`[${functionTag}] Exception`, {
316
- provider,
317
- modelName: this.modelName,
318
- message: "Error in streaming text",
319
- err: String(err),
320
- promptLength: typeof optionsOrPrompt === "string"
321
- ? optionsOrPrompt.length
322
- : optionsOrPrompt.prompt.length,
323
- });
324
- }
325
- throw err; // Re-throw error to trigger fallback
96
+ catch (error) {
97
+ timeoutController?.cleanup();
98
+ throw this.handleProviderError(error);
326
99
  }
327
100
  }
101
+ getProviderName() {
102
+ return "huggingface";
103
+ }
104
+ getDefaultModel() {
105
+ return getDefaultHuggingFaceModel();
106
+ }
328
107
  /**
329
- * Processes text using non-streaming approach with optional schema validation
330
- * @param prompt - The input text prompt to analyze
331
- * @param analysisSchema - Optional Zod schema or Schema object for output validation
332
- * @returns Promise resolving to GenerateTextResult or null if operation fails
108
+ * Returns the Vercel AI SDK model instance for HuggingFace
333
109
  */
334
- async generateText(optionsOrPrompt, analysisSchema) {
335
- const functionTag = "HuggingFace.generateText";
336
- const provider = "huggingface";
337
- const startTime = Date.now();
338
- try {
339
- // Parse parameters - support both string and options object
340
- const options = typeof optionsOrPrompt === "string"
341
- ? { prompt: optionsOrPrompt }
342
- : optionsOrPrompt;
343
- const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
344
- // Use schema from options or fallback parameter
345
- const finalSchema = schema || analysisSchema;
346
- logger.debug(`[${functionTag}] Generate request started`, {
347
- provider,
348
- modelName: this.modelName,
349
- promptLength: prompt.length,
350
- temperature,
351
- maxTokens,
352
- timeout,
353
- });
354
- const model = this.getModel();
355
- // Create timeout controller if timeout is specified
356
- const timeoutController = createTimeoutController(timeout, provider, "generate");
357
- const generateOptions = {
358
- model: model,
359
- prompt: prompt,
360
- system: systemPrompt,
361
- temperature,
362
- maxTokens,
363
- // Add abort signal if available
364
- ...(timeoutController && {
365
- abortSignal: timeoutController.controller.signal,
366
- }),
367
- };
368
- if (finalSchema) {
369
- generateOptions.experimental_output = Output.object({
370
- schema: finalSchema,
371
- });
372
- }
373
- try {
374
- const result = await generateText(generateOptions);
375
- // Clean up timeout if successful
376
- timeoutController?.cleanup();
377
- logger.debug(`[${functionTag}] Generate text completed`, {
378
- provider,
379
- modelName: this.modelName,
380
- usage: result.usage,
381
- finishReason: result.finishReason,
382
- responseLength: result.text?.length || 0,
383
- timeout,
384
- });
385
- // Add analytics if enabled
386
- if (options.enableAnalytics) {
387
- result.analytics = {
388
- provider,
389
- model: this.modelName,
390
- tokens: result.usage,
391
- responseTime: Date.now() - startTime,
392
- context: options.context,
393
- };
394
- }
395
- // Add evaluation if enabled
396
- if (options.enableEvaluation) {
397
- result.evaluation = await evaluateResponse(prompt, result.text, options.context);
398
- }
399
- return result;
400
- }
401
- finally {
402
- // Always cleanup timeout
403
- timeoutController?.cleanup();
404
- }
110
+ getAISDKModel() {
111
+ return this.model;
112
+ }
113
+ handleProviderError(error) {
114
+ if (error instanceof TimeoutError) {
115
+ return new Error(`HuggingFace request timed out: ${error.message}`);
405
116
  }
406
- catch (err) {
407
- // Log timeout errors specifically
408
- if (err instanceof TimeoutError) {
409
- logger.error(`[${functionTag}] Timeout error`, {
410
- provider,
411
- modelName: this.modelName,
412
- timeout: err.timeout,
413
- message: err.message,
414
- });
415
- }
416
- else {
417
- logger.error(`[${functionTag}] Exception`, {
418
- provider,
419
- modelName: this.modelName,
420
- message: "Error in generating text",
421
- err: String(err),
422
- });
423
- }
424
- throw err; // Re-throw error to trigger fallback
117
+ if (error?.message?.includes("API_TOKEN_INVALID") ||
118
+ error?.message?.includes("Invalid token")) {
119
+ return new Error("Invalid HuggingFace API token. Please check your HUGGINGFACE_API_KEY environment variable.");
425
120
  }
121
+ if (error?.message?.includes("rate limit")) {
122
+ return new Error("HuggingFace rate limit exceeded. Please try again later.");
123
+ }
124
+ return new Error(`HuggingFace error: ${error?.message || "Unknown error"}`);
426
125
  }
427
- async generate(optionsOrPrompt, analysisSchema) {
428
- return this.generateText(optionsOrPrompt, analysisSchema);
429
- }
430
- async gen(optionsOrPrompt, analysisSchema) {
431
- return this.generateText(optionsOrPrompt, analysisSchema);
126
+ // ===================
127
+ // PRIVATE VALIDATION METHODS
128
+ // ===================
129
+ validateStreamOptions(options) {
130
+ if (!options.input?.text || options.input.text.trim().length === 0) {
131
+ throw new Error("Input text is required and cannot be empty");
132
+ }
432
133
  }
433
134
  }
135
+ // Export for factory registration
136
+ export default HuggingFaceProvider;
@@ -2,15 +2,15 @@
2
2
  * Provider exports for Vercel AI SDK integration
3
3
  * This file centralizes all AI provider classes for easy import and usage
4
4
  */
5
- export { GoogleVertexAI } from "./googleVertexAI.js";
6
- export { AmazonBedrock } from "./amazonBedrock.js";
7
- export { OpenAI } from "./openAI.js";
8
- export { AnthropicProvider } from "./anthropic.js";
9
- export { AzureOpenAIProvider } from "./azureOpenAI.js";
10
- export { GoogleAIStudio } from "./googleAIStudio.js";
11
- export { HuggingFace } from "./huggingFace.js";
12
- export { Ollama } from "./ollama.js";
13
- export { MistralAI } from "./mistralAI.js";
5
+ export { GoogleVertexProvider as GoogleVertexAI } from "./google-vertex.js";
6
+ export { AmazonBedrockProvider as AmazonBedrock } from "./amazon-bedrock.js";
7
+ export { OpenAIProvider as OpenAI } from "./openAI.js";
8
+ export { AnthropicProvider as AnthropicProvider } from "./anthropic.js";
9
+ export { AzureOpenAIProvider } from "./azure-openai.js";
10
+ export { GoogleAIStudioProvider as GoogleAIStudio } from "./google-ai-studio.js";
11
+ export { HuggingFaceProvider as HuggingFace } from "./huggingFace.js";
12
+ export { OllamaProvider as Ollama } from "./ollama.js";
13
+ export { MistralProvider as MistralAI } from "./mistral.js";
14
14
  export type { AIProvider } from "../core/types.js";
15
15
  /**
16
16
  * Provider registry for dynamic provider instantiation
@@ -2,15 +2,15 @@
2
2
  * Provider exports for Vercel AI SDK integration
3
3
  * This file centralizes all AI provider classes for easy import and usage
4
4
  */
5
- export { GoogleVertexAI } from "./googleVertexAI.js";
6
- export { AmazonBedrock } from "./amazonBedrock.js";
7
- export { OpenAI } from "./openAI.js";
8
- export { AnthropicProvider } from "./anthropic.js";
9
- export { AzureOpenAIProvider } from "./azureOpenAI.js";
10
- export { GoogleAIStudio } from "./googleAIStudio.js";
11
- export { HuggingFace } from "./huggingFace.js";
12
- export { Ollama } from "./ollama.js";
13
- export { MistralAI } from "./mistralAI.js";
5
+ export { GoogleVertexProvider as GoogleVertexAI } from "./google-vertex.js";
6
+ export { AmazonBedrockProvider as AmazonBedrock } from "./amazon-bedrock.js";
7
+ export { OpenAIProvider as OpenAI } from "./openAI.js";
8
+ export { AnthropicProvider as AnthropicProvider } from "./anthropic.js";
9
+ export { AzureOpenAIProvider } from "./azure-openai.js";
10
+ export { GoogleAIStudioProvider as GoogleAIStudio } from "./google-ai-studio.js";
11
+ export { HuggingFaceProvider as HuggingFace } from "./huggingFace.js";
12
+ export { OllamaProvider as Ollama } from "./ollama.js";
13
+ export { MistralProvider as MistralAI } from "./mistral.js";
14
14
  /**
15
15
  * Provider registry for dynamic provider instantiation
16
16
  */