@juspay/neurolink 5.1.0 → 5.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +21 -9
- package/README.md +123 -126
- package/dist/agent/direct-tools.d.ts +6 -6
- package/dist/cli/commands/config.d.ts +3 -3
- package/dist/cli/commands/mcp.js +8 -7
- package/dist/cli/factories/command-factory.d.ts +4 -0
- package/dist/cli/factories/command-factory.js +63 -8
- package/dist/cli/index.js +87 -140
- package/dist/core/base-provider.d.ts +423 -0
- package/dist/core/base-provider.js +376 -0
- package/dist/core/constants.d.ts +2 -1
- package/dist/core/constants.js +2 -1
- package/dist/core/dynamic-models.d.ts +6 -6
- package/dist/core/evaluation.d.ts +19 -80
- package/dist/core/evaluation.js +185 -484
- package/dist/core/factory.d.ts +3 -3
- package/dist/core/factory.js +31 -91
- package/dist/core/service-registry.d.ts +47 -0
- package/dist/core/service-registry.js +112 -0
- package/dist/core/types.d.ts +8 -1
- package/dist/factories/compatibility-factory.js +1 -1
- package/dist/factories/provider-factory.d.ts +72 -0
- package/dist/factories/provider-factory.js +144 -0
- package/dist/factories/provider-registry.d.ts +38 -0
- package/dist/factories/provider-registry.js +107 -0
- package/dist/index.d.ts +4 -3
- package/dist/index.js +2 -4
- package/dist/lib/agent/direct-tools.d.ts +6 -6
- package/dist/lib/core/base-provider.d.ts +423 -0
- package/dist/lib/core/base-provider.js +376 -0
- package/dist/lib/core/constants.d.ts +2 -1
- package/dist/lib/core/constants.js +2 -1
- package/dist/lib/core/dynamic-models.d.ts +6 -6
- package/dist/lib/core/evaluation.d.ts +19 -80
- package/dist/lib/core/evaluation.js +185 -484
- package/dist/lib/core/factory.d.ts +3 -3
- package/dist/lib/core/factory.js +30 -91
- package/dist/lib/core/service-registry.d.ts +47 -0
- package/dist/lib/core/service-registry.js +112 -0
- package/dist/lib/core/types.d.ts +8 -1
- package/dist/lib/factories/compatibility-factory.js +1 -1
- package/dist/lib/factories/provider-factory.d.ts +72 -0
- package/dist/lib/factories/provider-factory.js +144 -0
- package/dist/lib/factories/provider-registry.d.ts +38 -0
- package/dist/lib/factories/provider-registry.js +107 -0
- package/dist/lib/index.d.ts +4 -3
- package/dist/lib/index.js +2 -4
- package/dist/lib/mcp/client.d.ts +1 -0
- package/dist/lib/mcp/client.js +1 -0
- package/dist/lib/mcp/config.js +28 -3
- package/dist/lib/mcp/context-manager.d.ts +1 -0
- package/dist/lib/mcp/context-manager.js +8 -4
- package/dist/lib/mcp/function-calling.d.ts +13 -0
- package/dist/lib/mcp/function-calling.js +134 -35
- package/dist/lib/mcp/initialize-tools.d.ts +1 -1
- package/dist/lib/mcp/initialize-tools.js +45 -1
- package/dist/lib/mcp/initialize.js +16 -6
- package/dist/lib/mcp/neurolink-mcp-client.d.ts +1 -0
- package/dist/lib/mcp/neurolink-mcp-client.js +21 -5
- package/dist/lib/mcp/servers/agent/direct-tools-server.d.ts +8 -0
- package/dist/lib/mcp/servers/agent/direct-tools-server.js +109 -0
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +3 -1
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/lib/mcp/unified-registry.d.ts +4 -0
- package/dist/lib/mcp/unified-registry.js +42 -9
- package/dist/lib/neurolink.d.ts +156 -117
- package/dist/lib/neurolink.js +619 -404
- package/dist/lib/providers/amazon-bedrock.d.ts +32 -0
- package/dist/lib/providers/amazon-bedrock.js +143 -0
- package/dist/lib/providers/analytics-helper.js +7 -4
- package/dist/lib/providers/anthropic-baseprovider.d.ts +23 -0
- package/dist/lib/providers/anthropic-baseprovider.js +114 -0
- package/dist/lib/providers/anthropic.d.ts +19 -43
- package/dist/lib/providers/anthropic.js +82 -306
- package/dist/lib/providers/azure-openai.d.ts +20 -0
- package/dist/lib/providers/azure-openai.js +89 -0
- package/dist/lib/providers/function-calling-provider.d.ts +64 -2
- package/dist/lib/providers/function-calling-provider.js +208 -9
- package/dist/lib/providers/google-ai-studio.d.ts +23 -0
- package/dist/lib/providers/google-ai-studio.js +107 -0
- package/dist/lib/providers/google-vertex.d.ts +47 -0
- package/dist/lib/providers/google-vertex.js +205 -0
- package/dist/lib/providers/huggingFace.d.ts +32 -25
- package/dist/lib/providers/huggingFace.js +97 -431
- package/dist/lib/providers/index.d.ts +9 -9
- package/dist/lib/providers/index.js +9 -9
- package/dist/lib/providers/mcp-provider.js +24 -5
- package/dist/lib/providers/mistral.d.ts +42 -0
- package/dist/lib/providers/mistral.js +160 -0
- package/dist/lib/providers/ollama.d.ts +52 -36
- package/dist/lib/providers/ollama.js +297 -520
- package/dist/lib/providers/openAI.d.ts +19 -18
- package/dist/lib/providers/openAI.js +76 -275
- package/dist/lib/sdk/tool-extension.d.ts +181 -0
- package/dist/lib/sdk/tool-extension.js +283 -0
- package/dist/lib/sdk/tool-registration.d.ts +95 -0
- package/dist/lib/sdk/tool-registration.js +167 -0
- package/dist/lib/services/streaming/streaming-manager.js +11 -10
- package/dist/lib/services/websocket/websocket-server.js +12 -11
- package/dist/lib/telemetry/telemetry-service.js +8 -7
- package/dist/lib/types/generate-types.d.ts +1 -0
- package/dist/lib/types/mcp-types.d.ts +116 -0
- package/dist/lib/types/mcp-types.js +5 -0
- package/dist/lib/types/stream-types.d.ts +30 -18
- package/dist/lib/types/universal-provider-options.d.ts +87 -0
- package/dist/lib/types/universal-provider-options.js +53 -0
- package/dist/mcp/client.d.ts +1 -0
- package/dist/mcp/client.js +1 -0
- package/dist/mcp/config.js +28 -3
- package/dist/mcp/context-manager.d.ts +1 -0
- package/dist/mcp/context-manager.js +8 -4
- package/dist/mcp/function-calling.d.ts +13 -0
- package/dist/mcp/function-calling.js +134 -35
- package/dist/mcp/initialize-tools.d.ts +1 -1
- package/dist/mcp/initialize-tools.js +45 -1
- package/dist/mcp/initialize.js +16 -6
- package/dist/mcp/neurolink-mcp-client.d.ts +1 -0
- package/dist/mcp/neurolink-mcp-client.js +21 -5
- package/dist/mcp/servers/agent/direct-tools-server.d.ts +8 -0
- package/dist/mcp/servers/agent/direct-tools-server.js +109 -0
- package/dist/mcp/servers/ai-providers/ai-core-server.js +3 -1
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/mcp/unified-registry.d.ts +4 -0
- package/dist/mcp/unified-registry.js +42 -9
- package/dist/neurolink.d.ts +156 -117
- package/dist/neurolink.js +619 -404
- package/dist/providers/amazon-bedrock.d.ts +32 -0
- package/dist/providers/amazon-bedrock.js +143 -0
- package/dist/providers/analytics-helper.js +7 -4
- package/dist/providers/anthropic-baseprovider.d.ts +23 -0
- package/dist/providers/anthropic-baseprovider.js +114 -0
- package/dist/providers/anthropic.d.ts +19 -43
- package/dist/providers/anthropic.js +81 -305
- package/dist/providers/azure-openai.d.ts +20 -0
- package/dist/providers/azure-openai.js +89 -0
- package/dist/providers/function-calling-provider.d.ts +64 -2
- package/dist/providers/function-calling-provider.js +208 -9
- package/dist/providers/google-ai-studio.d.ts +23 -0
- package/dist/providers/google-ai-studio.js +108 -0
- package/dist/providers/google-vertex.d.ts +47 -0
- package/dist/providers/google-vertex.js +205 -0
- package/dist/providers/huggingFace.d.ts +32 -25
- package/dist/providers/huggingFace.js +96 -430
- package/dist/providers/index.d.ts +9 -9
- package/dist/providers/index.js +9 -9
- package/dist/providers/mcp-provider.js +24 -5
- package/dist/providers/mistral.d.ts +42 -0
- package/dist/providers/mistral.js +160 -0
- package/dist/providers/ollama.d.ts +52 -36
- package/dist/providers/ollama.js +297 -519
- package/dist/providers/openAI.d.ts +19 -18
- package/dist/providers/openAI.js +76 -276
- package/dist/sdk/tool-extension.d.ts +181 -0
- package/dist/sdk/tool-extension.js +283 -0
- package/dist/sdk/tool-registration.d.ts +95 -0
- package/dist/sdk/tool-registration.js +168 -0
- package/dist/services/streaming/streaming-manager.js +11 -10
- package/dist/services/websocket/websocket-server.js +12 -11
- package/dist/telemetry/telemetry-service.js +8 -7
- package/dist/types/generate-types.d.ts +1 -0
- package/dist/types/mcp-types.d.ts +116 -0
- package/dist/types/mcp-types.js +5 -0
- package/dist/types/stream-types.d.ts +30 -18
- package/dist/types/universal-provider-options.d.ts +87 -0
- package/dist/types/universal-provider-options.js +53 -0
- package/package.json +12 -5
- package/dist/lib/providers/agent-enhanced-provider.d.ts +0 -93
- package/dist/lib/providers/agent-enhanced-provider.js +0 -605
- package/dist/lib/providers/amazonBedrock.d.ts +0 -28
- package/dist/lib/providers/amazonBedrock.js +0 -364
- package/dist/lib/providers/azureOpenAI.d.ts +0 -42
- package/dist/lib/providers/azureOpenAI.js +0 -347
- package/dist/lib/providers/googleAIStudio.d.ts +0 -42
- package/dist/lib/providers/googleAIStudio.js +0 -364
- package/dist/lib/providers/googleVertexAI.d.ts +0 -34
- package/dist/lib/providers/googleVertexAI.js +0 -547
- package/dist/lib/providers/mistralAI.d.ts +0 -37
- package/dist/lib/providers/mistralAI.js +0 -325
- package/dist/providers/agent-enhanced-provider.d.ts +0 -93
- package/dist/providers/agent-enhanced-provider.js +0 -606
- package/dist/providers/amazonBedrock.d.ts +0 -28
- package/dist/providers/amazonBedrock.js +0 -364
- package/dist/providers/azureOpenAI.d.ts +0 -42
- package/dist/providers/azureOpenAI.js +0 -348
- package/dist/providers/googleAIStudio.d.ts +0 -42
- package/dist/providers/googleAIStudio.js +0 -364
- package/dist/providers/googleVertexAI.d.ts +0 -34
- package/dist/providers/googleVertexAI.js +0 -547
- package/dist/providers/mistralAI.d.ts +0 -37
- package/dist/providers/mistralAI.js +0 -325
package/dist/providers/openAI.d.ts

@@ -1,27 +1,28 @@
 import type { ZodType, ZodTypeDef } from "zod";
 import { type Schema, type LanguageModelV1 } from "ai";
-import
+import { AIProviderName } from "../core/types.js";
 import type { StreamOptions, StreamResult } from "../types/stream-types.js";
-import
-
-
+import { BaseProvider } from "../core/base-provider.js";
+/**
+ * OpenAI Provider v2 - BaseProvider Implementation
+ * Migrated to use factory pattern with exact Google AI provider pattern
+ */
+export declare class OpenAIProvider extends BaseProvider {
     private model;
-    constructor(modelName?: string
+    constructor(modelName?: string);
+    protected getProviderName(): AIProviderName;
+    protected getDefaultModel(): string;
     /**
-     *
+     * Returns the Vercel AI SDK model instance for OpenAI
      */
-
-
+    protected getAISDKModel(): LanguageModelV1;
+    protected handleProviderError(error: any): Error;
     /**
-     *
-     *
+     * executeGenerate method removed - generation is now handled by BaseProvider.
+     * For details on the changes and migration steps, refer to the BaseProvider documentation
+     * and the migration guide in the project repository.
      */
-
-
-     * Short alias for generate() - CLI-SDK consistency
-     * @param optionsOrPrompt - TextGenerationOptions object or prompt string
-     * @param analysisSchema - Optional schema for output validation
-     * @returns Promise resolving to GenerateResult or null
-     */
-    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateResult | null>;
+    protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
+    private validateStreamOptions;
 }
+export default OpenAIProvider;
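The reshaped openAI.d.ts above captures the provider refactor in this release: the public gen() alias and per-provider generate plumbing are gone, and a provider now only supplies a few protected hooks (getProviderName, getDefaultModel, getAISDKModel, handleProviderError, executeStream) while BaseProvider owns generation. Below is a minimal, self-contained sketch of that template-method shape; MiniBaseProvider, EchoProvider, and callModel are illustrative stand-ins, not the package's actual BaseProvider API.

```ts
// Illustrative stand-ins for the hook-based pattern the new .d.ts describes.
// These are NOT NeuroLink classes; they only mirror the declared shape.
abstract class MiniBaseProvider {
  constructor(protected modelName?: string) {}

  // Hooks a concrete provider must supply (mirrors the new declaration).
  protected abstract getProviderName(): string;
  protected abstract getDefaultModel(): string;
  protected abstract callModel(model: string, prompt: string): Promise<string>;
  protected abstract handleProviderError(error: unknown): Error;

  // The base class owns generation; subclasses no longer implement generate().
  async generate(prompt: string): Promise<{ content: string; provider: string; model: string }> {
    const model = this.modelName ?? this.getDefaultModel();
    try {
      const content = await this.callModel(model, prompt);
      return { content, provider: this.getProviderName(), model };
    } catch (error) {
      throw this.handleProviderError(error);
    }
  }
}

class EchoProvider extends MiniBaseProvider {
  protected getProviderName(): string {
    return "echo";
  }
  protected getDefaultModel(): string {
    return "echo-1";
  }
  protected async callModel(model: string, prompt: string): Promise<string> {
    return `[${model}] ${prompt}`;
  }
  protected handleProviderError(error: unknown): Error {
    return error instanceof Error ? error : new Error(String(error));
  }
}

// Usage: callers only see generate(); provider-specific code lives in the hooks.
new EchoProvider().generate("Hello").then((r) => console.log(r.content));
```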
package/dist/providers/openAI.js

@@ -1,311 +1,112 @@
 import { openai } from "@ai-sdk/openai";
-import { streamText
+import { streamText } from "ai";
+import { AIProviderName } from "../core/types.js";
+import { BaseProvider } from "../core/base-provider.js";
 import { logger } from "../utils/logger.js";
-import { createTimeoutController,
+import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
 import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
-import { evaluateResponse } from "../core/evaluation.js";
-// Default system context
-const DEFAULT_SYSTEM_CONTEXT = {
-    systemPrompt: "You are a helpful AI assistant.",
-};
 // Configuration helpers
 const getOpenAIApiKey = () => {
     const apiKey = process.env.OPENAI_API_KEY;
     if (!apiKey) {
-
-        throw new Error(`❌ OPENAI Provider Configuration Error
-
-Missing required environment variables: OPENAI_API_KEY
-
-🔧 Step 1: Get Credentials
-Get your API key from https://platform.openai.com/api-keys
-
-💡 Step 2: Add to your .env file (or export in CLI):
-OPENAI_API_KEY="sk-proj-your-openai-api-key"
-# Optional:
-OPENAI_MODEL="gpt-4o"
-OPENAI_BASE_URL="https://api.openai.com"
-
-🚀 Step 3: Test the setup:
-npx neurolink generate "Hello" --provider openai
-
-📖 Full setup guide: https://docs.neurolink.ai/providers/openai`);
+        throw new Error(`❌ OPENAI Provider Configuration Error\n\nMissing required environment variables: OPENAI_API_KEY\n\n🔧 Step 1: Get Credentials\n1. Visit: https://platform.openai.com/api-keys\n2. Create new API key\n3. Copy the key\n\n🔧 Step 2: Set Environment Variable\nAdd to your .env file:\nOPENAI_API_KEY=your_api_key_here\n\n🔧 Step 3: Restart Application\nRestart your application to load the new environment variables.`);
     }
     return apiKey;
 };
 const getOpenAIModel = () => {
     return process.env.OPENAI_MODEL || "gpt-4o";
 };
-
-
-
+/**
+ * OpenAI Provider v2 - BaseProvider Implementation
+ * Migrated to use factory pattern with exact Google AI provider pattern
+ */
+export class OpenAIProvider extends BaseProvider {
     model;
     constructor(modelName) {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            err: String(err),
-        });
-        throw err;
-    }
+        super(modelName, AIProviderName.OPENAI);
+        // Set OpenAI API key as environment variable (required by @ai-sdk/openai)
+        process.env.OPENAI_API_KEY = getOpenAIApiKey();
+        // Initialize model
+        this.model = openai(this.modelName);
+        logger.debug("OpenAIProviderV2 initialized", {
+            model: this.modelName,
+            provider: this.providerName,
+        });
+    }
+    // ===================
+    // ABSTRACT METHOD IMPLEMENTATIONS
+    // ===================
+    getProviderName() {
+        return AIProviderName.OPENAI;
+    }
+    getDefaultModel() {
+        return getOpenAIModel();
     }
     /**
-     *
+     * Returns the Vercel AI SDK model instance for OpenAI
      */
-
+    getAISDKModel() {
         return this.model;
     }
-
-
-
-        const startTime = Date.now();
-        try {
-            // Parse parameters - support both string and options object
-            const options = typeof optionsOrPrompt === "string"
-                ? { prompt: optionsOrPrompt }
-                : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
-            // Use schema from options or fallback parameter
-            const finalSchema = schema || analysisSchema;
-            logger.debug(`[${functionTag}] Generate text started`, {
-                provider,
-                modelName: this.modelName,
-                promptLength: prompt?.length || 0,
-                temperature,
-                maxTokens,
-                timeout,
-            });
-            // Create timeout controller if timeout is specified
-            const timeoutController = createTimeoutController(timeout, provider, "generate");
-            const generateOptions = {
-                model: this.model,
-                prompt: prompt,
-                system: systemPrompt,
-                temperature,
-                maxTokens,
-                // Add abort signal if available
-                ...(timeoutController && {
-                    abortSignal: timeoutController.controller.signal,
-                }),
-            };
-            if (finalSchema) {
-                generateOptions.experimental_output = Output.object({
-                    schema: finalSchema,
-                });
-            }
-            try {
-                const result = await generateText(generateOptions);
-                // Clean up timeout if successful
-                timeoutController?.cleanup();
-                logger.debug(`[${functionTag}] Generate text completed`, {
-                    provider,
-                    modelName: this.modelName,
-                    usage: result.usage,
-                    finishReason: result.finishReason,
-                    responseLength: result.text?.length || 0,
-                    timeout,
-                });
-                // Add analytics if enabled
-                if (options.enableAnalytics) {
-                    const { createAnalytics } = await import("./analytics-helper.js");
-                    result.analytics = createAnalytics(provider, this.modelName, result, Date.now() - startTime, options.context);
-                }
-                // Add evaluation if enabled
-                if (options.enableEvaluation) {
-                    result.evaluation = await evaluateResponse(prompt, result.text, options.context, options.evaluationDomain, options.toolUsageContext, options.conversationHistory);
-                }
-                return {
-                    content: result.text,
-                    provider: "openai",
-                    model: this.modelName,
-                    usage: result.usage
-                        ? {
-                            inputTokens: result.usage.promptTokens,
-                            outputTokens: result.usage.completionTokens,
-                            totalTokens: result.usage.totalTokens,
-                        }
-                        : undefined,
-                    responseTime: Date.now() - startTime,
-                };
-            }
-            finally {
-                // Always cleanup timeout
-                timeoutController?.cleanup();
-            }
+    handleProviderError(error) {
+        if (error instanceof TimeoutError) {
+            return new Error(`OpenAI request timed out: ${error.message}`);
         }
-
-
-
-            logger.debug(`[${functionTag}] Timeout error`, {
-                provider,
-                modelName: this.modelName,
-                timeout: err.timeout,
-                message: err.message,
-            });
-        }
-        else {
-            logger.debug(`[${functionTag}] Exception`, {
-                provider,
-                modelName: this.modelName,
-                message: "Error in generating text",
-                err: String(err),
-            });
-        }
-        throw err; // Re-throw error to trigger fallback
+        if (error?.message?.includes("API_KEY_INVALID") ||
+            error?.message?.includes("Invalid API key")) {
+            return new Error("Invalid OpenAI API key. Please check your OPENAI_API_KEY environment variable.");
         }
+        if (error?.message?.includes("rate limit")) {
+            return new Error("OpenAI rate limit exceeded. Please try again later.");
+        }
+        return new Error(`OpenAI error: ${error?.message || "Unknown error"}`);
     }
     /**
-     *
-     *
+     * executeGenerate method removed - generation is now handled by BaseProvider.
+     * For details on the changes and migration steps, refer to the BaseProvider documentation
+     * and the migration guide in the project repository.
      */
-    async
-
-    const
-
-        const startTime = Date.now();
+    async executeStream(options, analysisSchema) {
+        this.validateStreamOptions(options);
+        const timeout = this.getTimeout(options);
+        const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
         try {
-
-            const options = typeof optionsOrPrompt === "string"
-                ? { input: { text: optionsOrPrompt } }
-                : optionsOrPrompt;
-            // Validate input
-            if (!options?.input?.text ||
-                typeof options.input.text !== "string" ||
-                options.input.text.trim() === "") {
-                throw new Error("Stream options must include input.text as a non-empty string");
-            }
-            // Convert to internal parameters
-            const { prompt = options.input.text, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
-            // Use schema from options or fallback parameter
-            const finalSchema = schema || analysisSchema;
-            logger.debug(`[${functionTag}] Stream request started`, {
-                provider,
-                modelName: this.modelName,
-                promptLength: prompt?.length || 0,
-                temperature,
-                maxTokens,
-                timeout,
-            });
-            // Create timeout controller if timeout is specified
-            const timeoutController = createTimeoutController(timeout, provider, "stream");
-            const streamOptions = {
+            const result = await streamText({
                 model: this.model,
-                prompt:
-                system: systemPrompt,
-                temperature,
-                maxTokens,
-
-
-
-                }),
-                onError: (event) => {
-                    const error = event.error;
-                    const errorMessage = error instanceof Error ? error.message : String(error);
-                    const errorStack = error instanceof Error ? error.stack : undefined;
-                    logger.debug(`[${functionTag}] Stream error`, {
-                        provider,
-                        modelName: this.modelName,
-                        error: errorMessage,
-                        stack: errorStack,
-                        promptLength: prompt.length,
-                        chunkCount,
-                    });
-                },
-                onFinish: (event) => {
-                    logger.debug(`[${functionTag}] Stream finished`, {
-                        provider,
-                        modelName: this.modelName,
-                        finishReason: event.finishReason,
-                        usage: event.usage,
-                        totalChunks: chunkCount,
-                        promptLength: prompt.length,
-                        responseLength: event.text?.length || 0,
-                    });
-                },
-                onChunk: (event) => {
-                    chunkCount++;
-                    logger.debug(`[${functionTag}] Stream chunk`, {
-                        provider,
-                        modelName: this.modelName,
-                        chunkNumber: chunkCount,
-                        chunkLength: event.chunk.text?.length || 0,
-                        chunkType: event.chunk.type,
-                    });
-                },
-            };
-            if (finalSchema) {
-                streamOptions.experimental_output = Output.object({
-                    schema: finalSchema,
-                });
-            }
-            const result = streamText(streamOptions);
-            logger.debug(`[${functionTag}] Stream request completed`, {
-                provider,
-                modelName: this.modelName,
+                prompt: options.input.text,
+                system: options.systemPrompt,
+                temperature: options.temperature,
+                maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+                tools: options.tools,
+                toolChoice: "auto",
+                abortSignal: timeoutController?.controller.signal,
             });
-
+            timeoutController?.cleanup();
+            // Transform stream to match StreamResult interface
+            const transformedStream = async function* () {
+                for await (const chunk of result.textStream) {
+                    yield { content: chunk };
+                }
+            };
             return {
-                stream:
-
-                    for await (const chunk of result.textStream) {
-                        yield { content: chunk };
-                    }
-                })()
-                    : (async function* () {
-                        yield { content: "" };
-                        throw new Error("No textStream available from AI SDK");
-                    })(),
-                provider: "openai",
+                stream: transformedStream(),
+                provider: this.providerName,
                 model: this.modelName,
-                metadata: {
-                    streamId: `openai-${Date.now()}`,
-                    startTime,
-                },
             };
         }
-        catch (
-
-
-            logger.debug(`[${functionTag}] Timeout error`, {
-                provider,
-                modelName: this.modelName,
-                timeout: err.timeout,
-                message: err.message,
-            });
-        }
-        else {
-            logger.debug(`[${functionTag}] Exception`, {
-                provider,
-                modelName: this.modelName,
-                message: "Error in streaming content",
-                err: String(err),
-            });
-        }
-        throw err; // Re-throw error to trigger fallback
+        catch (error) {
+            timeoutController?.cleanup();
+            throw this.handleProviderError(error);
         }
     }
-
-
-
-
-
-
-
-        return this.generate(optionsOrPrompt, analysisSchema);
+    // ===================
+    // PRIVATE VALIDATION METHODS
+    // ===================
+    validateStreamOptions(options) {
+        if (!options.input?.text || options.input.text.trim().length === 0) {
+            throw new Error("Input text is required and cannot be empty");
+        }
+    }
 }
+// Export for factory registration
+export default OpenAIProvider;
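The rewritten openAI.js drops the hand-rolled generate/stream plumbing (manual generateText options, onChunk/onFinish logging, stream metadata) in favour of a slim executeStream that calls the Vercel AI SDK, re-yields textStream chunks as { content } objects, and funnels failures through handleProviderError. Below is a hedged, standalone sketch of the same stream-adaptation idea using only the public ai and @ai-sdk/openai APIs; runStream is a made-up helper name, not a NeuroLink export.

```ts
import { openai } from "@ai-sdk/openai";
import { streamText } from "ai";

// Made-up helper mirroring the executeStream pattern above:
// call streamText with an abort signal, then adapt textStream to { content } chunks.
async function runStream(prompt: string, timeoutMs = 30_000) {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs);
  try {
    const result = await streamText({
      model: openai(process.env.OPENAI_MODEL || "gpt-4o"),
      prompt,
      abortSignal: controller.signal,
    });
    // Re-yield the SDK's text chunks in the { content } shape used above.
    async function* chunks() {
      for await (const text of result.textStream) {
        yield { content: text };
      }
    }
    return { stream: chunks(), provider: "openai" };
  } finally {
    // Mirrors timeoutController?.cleanup() in the diff: clear the timer once
    // the request has been accepted and streaming begins.
    clearTimeout(timer);
  }
}

// Usage sketch:
// for await (const chunk of (await runStream("Hello")).stream) {
//   process.stdout.write(chunk.content);
// }
```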
package/dist/sdk/tool-extension.d.ts

@@ -0,0 +1,181 @@
+/**
+ * NeuroLink SDK Tool Extension System
+ * Allows developers to register custom tools that integrate with AI providers
+ */
+import { z } from "zod";
+import type { Tool } from "ai";
+import { logger } from "../utils/logger.js";
+/**
+ * Custom tool interface for SDK users
+ */
+export interface CustomTool {
+    /**
+     * Tool description that helps AI understand when to use it
+     */
+    description: string;
+    /**
+     * Parameters schema using Zod or JSON Schema
+     */
+    parameters?: z.ZodSchema | Record<string, any>;
+    /**
+     * Tool execution function
+     */
+    execute: (args: any, context?: ToolContext) => Promise<any> | any;
+    /**
+     * Optional metadata
+     */
+    category?: string;
+    version?: string;
+    author?: string;
+    /**
+     * Optional configuration
+     */
+    config?: {
+        timeout?: number;
+        retries?: number;
+        rateLimit?: {
+            requests: number;
+            window: number;
+        };
+    };
+}
+/**
+ * Context provided to tools during execution
+ */
+export interface ToolContext {
+    /**
+     * Call another tool
+     */
+    callTool: (name: string, args: any) => Promise<any>;
+    /**
+     * Current session information
+     */
+    session: {
+        id: string;
+        userId?: string;
+        provider?: string;
+        model?: string;
+    };
+    /**
+     * Logger instance
+     */
+    logger: typeof logger;
+}
+/**
+ * Tool middleware function
+ */
+export type ToolMiddleware = (toolName: string, args: any, next: () => Promise<any>, context: ToolContext) => Promise<any>;
+/**
+ * Tool permission configuration
+ */
+export interface ToolPermissions {
+    allowlist?: string[];
+    denylist?: string[];
+    requireApproval?: string[];
+    customValidator?: (toolName: string, args: any) => boolean | Promise<boolean>;
+}
+/**
+ * Converts a custom tool to Vercel AI SDK format
+ */
+export declare function convertToAISDKTool(name: string, customTool: CustomTool): Tool;
+/**
+ * Tool registry for managing custom tools
+ */
+export declare class ToolRegistry {
+    private tools;
+    private middleware;
+    private permissions;
+    private rateLimits;
+    /**
+     * Simple rate limiting check with automatic cleanup
+     */
+    private checkRateLimit;
+    /**
+     * Register a custom tool
+     */
+    register(name: string, tool: CustomTool): void;
+    /**
+     * Register multiple tools at once
+     */
+    registerMany(tools: Record<string, CustomTool>): void;
+    /**
+     * Unregister a tool
+     */
+    unregister(name: string): boolean;
+    /**
+     * Get a tool by name
+     */
+    get(name: string): CustomTool | undefined;
+    /**
+     * Get all registered tools
+     */
+    getAll(): Map<string, CustomTool>;
+    /**
+     * Convert all tools to AI SDK format
+     */
+    toAISDKTools(): Record<string, Tool>;
+    /**
+     * Add middleware
+     */
+    use(middleware: ToolMiddleware): void;
+    /**
+     * Set permissions
+     */
+    setPermissions(permissions: ToolPermissions): void;
+    /**
+     * Check if a tool is allowed
+     */
+    private isToolAllowed;
+    /**
+     * Execute a tool with middleware
+     */
+    execute(name: string, args: any, context: ToolContext): Promise<any>;
+}
+/**
+ * Create a simple tool helper
+ */
+export declare function createTool(config: CustomTool): CustomTool;
+/**
+ * Create an async tool helper
+ */
+export declare function createAsyncTool(config: Omit<CustomTool, "execute"> & {
+    execute: (args: any, context?: ToolContext) => Promise<any>;
+}): CustomTool;
+/**
+ * Create a batch tool that processes multiple items
+ */
+export declare function createBatchTool<T, R>(config: Omit<CustomTool, "execute" | "parameters"> & {
+    parameters: z.ZodSchema<{
+        items: T[];
+    }>;
+    processItem: (item: T, context?: ToolContext) => Promise<R> | R;
+    batchSize?: number;
+}): CustomTool;
+/**
+ * Tool testing utilities
+ */
+export declare const TestUtils: {
+    /**
+     * Create a mock tool context
+     */
+    mockContext(overrides?: Partial<ToolContext>): ToolContext;
+    /**
+     * Test a tool with mock data
+     */
+    testTool(tool: CustomTool, testCases: Array<{
+        input: any;
+        expected?: any;
+    }>): Promise<({
+        input: any;
+        output: any;
+        success: boolean;
+        matches: boolean | undefined;
+        error?: undefined;
+    } | {
+        input: any;
+        error: string;
+        success: boolean;
+        output?: undefined;
+        matches?: undefined;
+    })[]>;
+};
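The new tool-extension.d.ts declares a public extension surface for custom tools: a CustomTool shape, a ToolRegistry with middleware and permission support, helper factories (createTool, createAsyncTool, createBatchTool), and TestUtils. The following is a sketch of how this API could be used, based only on the declarations above; the import path is an assumption (check the package's actual exports), and getWeather/weatherTool are made-up names.

```ts
import { z } from "zod";
// Assumed entry point; the declarations live under dist/sdk/tool-extension.d.ts.
import { createTool, ToolRegistry } from "@juspay/neurolink";

// Define a custom tool with a Zod parameter schema, as CustomTool allows.
const weatherTool = createTool({
  description: "Get the current weather for a city",
  parameters: z.object({ city: z.string() }),
  category: "demo",
  execute: async ({ city }, context) => {
    context?.logger.debug(`weather lookup for ${city}`);
    return { city, tempC: 21 }; // placeholder result, not a real lookup
  },
});

const registry = new ToolRegistry();
registry.register("getWeather", weatherTool);

// Optional middleware: wraps every tool execution.
registry.use(async (toolName, args, next) => {
  console.time(toolName);
  try {
    return await next();
  } finally {
    console.timeEnd(toolName);
  }
});

// Restrict which tools the model may call.
registry.setPermissions({ allowlist: ["getWeather"] });

// Hand the tools to a Vercel AI SDK call in the Record<string, Tool> shape.
const aiSdkTools = registry.toAISDKTools();
```

Per the declarations, registry.execute(name, args, context) runs a tool through the registered middleware and permission checks, while toAISDKTools() produces the tool map the Vercel AI SDK expects.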