@juspay/neurolink 1.6.0 → 1.10.0
This diff shows the published contents of these package versions as released to a supported public registry. It is provided for informational purposes only.
- package/CHANGELOG.md +200 -7
- package/README.md +101 -18
- package/dist/agent/direct-tools.d.ts +1203 -0
- package/dist/agent/direct-tools.js +387 -0
- package/dist/cli/commands/agent-generate.d.ts +2 -0
- package/dist/cli/commands/agent-generate.js +70 -0
- package/dist/cli/commands/config.d.ts +6 -6
- package/dist/cli/commands/config.js +326 -273
- package/dist/cli/commands/mcp.d.ts +2 -1
- package/dist/cli/commands/mcp.js +874 -146
- package/dist/cli/commands/ollama.d.ts +1 -1
- package/dist/cli/commands/ollama.js +153 -143
- package/dist/cli/index.js +687 -325
- package/dist/cli/utils/complete-setup.d.ts +19 -0
- package/dist/cli/utils/complete-setup.js +81 -0
- package/dist/cli/utils/env-manager.d.ts +44 -0
- package/dist/cli/utils/env-manager.js +226 -0
- package/dist/cli/utils/interactive-setup.d.ts +48 -0
- package/dist/cli/utils/interactive-setup.js +302 -0
- package/dist/core/dynamic-models.d.ts +208 -0
- package/dist/core/dynamic-models.js +250 -0
- package/dist/core/factory.d.ts +13 -6
- package/dist/core/factory.js +177 -62
- package/dist/core/types.d.ts +4 -2
- package/dist/core/types.js +4 -4
- package/dist/index.d.ts +16 -16
- package/dist/index.js +16 -16
- package/dist/lib/agent/direct-tools.d.ts +1203 -0
- package/dist/lib/agent/direct-tools.js +387 -0
- package/dist/lib/core/dynamic-models.d.ts +208 -0
- package/dist/lib/core/dynamic-models.js +250 -0
- package/dist/lib/core/factory.d.ts +13 -6
- package/dist/lib/core/factory.js +177 -62
- package/dist/lib/core/types.d.ts +4 -2
- package/dist/lib/core/types.js +4 -4
- package/dist/lib/index.d.ts +16 -16
- package/dist/lib/index.js +16 -16
- package/dist/lib/mcp/auto-discovery.d.ts +120 -0
- package/dist/lib/mcp/auto-discovery.js +793 -0
- package/dist/lib/mcp/client.d.ts +66 -0
- package/dist/lib/mcp/client.js +245 -0
- package/dist/lib/mcp/config.d.ts +31 -0
- package/dist/lib/mcp/config.js +74 -0
- package/dist/lib/mcp/context-manager.d.ts +4 -4
- package/dist/lib/mcp/context-manager.js +24 -18
- package/dist/lib/mcp/factory.d.ts +28 -11
- package/dist/lib/mcp/factory.js +36 -29
- package/dist/lib/mcp/function-calling.d.ts +51 -0
- package/dist/lib/mcp/function-calling.js +510 -0
- package/dist/lib/mcp/index.d.ts +190 -0
- package/dist/lib/mcp/index.js +156 -0
- package/dist/lib/mcp/initialize-tools.d.ts +28 -0
- package/dist/lib/mcp/initialize-tools.js +209 -0
- package/dist/lib/mcp/initialize.d.ts +17 -0
- package/dist/lib/mcp/initialize.js +51 -0
- package/dist/lib/mcp/logging.d.ts +71 -0
- package/dist/lib/mcp/logging.js +183 -0
- package/dist/lib/mcp/manager.d.ts +67 -0
- package/dist/lib/mcp/manager.js +176 -0
- package/dist/lib/mcp/neurolink-mcp-client.d.ts +96 -0
- package/dist/lib/mcp/neurolink-mcp-client.js +417 -0
- package/dist/lib/mcp/orchestrator.d.ts +3 -3
- package/dist/lib/mcp/orchestrator.js +46 -43
- package/dist/lib/mcp/registry.d.ts +12 -4
- package/dist/lib/mcp/registry.js +64 -37
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.d.ts +1 -1
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +204 -65
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +142 -102
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +6 -6
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +197 -142
- package/dist/lib/mcp/servers/utilities/utility-server.d.ts +8 -0
- package/dist/lib/mcp/servers/utilities/utility-server.js +326 -0
- package/dist/lib/mcp/tool-integration.d.ts +67 -0
- package/dist/lib/mcp/tool-integration.js +179 -0
- package/dist/lib/mcp/unified-registry.d.ts +269 -0
- package/dist/lib/mcp/unified-registry.js +1411 -0
- package/dist/lib/neurolink.d.ts +68 -6
- package/dist/lib/neurolink.js +304 -42
- package/dist/lib/providers/agent-enhanced-provider.d.ts +59 -0
- package/dist/lib/providers/agent-enhanced-provider.js +242 -0
- package/dist/lib/providers/amazonBedrock.d.ts +3 -3
- package/dist/lib/providers/amazonBedrock.js +54 -50
- package/dist/lib/providers/anthropic.d.ts +2 -2
- package/dist/lib/providers/anthropic.js +92 -84
- package/dist/lib/providers/azureOpenAI.d.ts +2 -2
- package/dist/lib/providers/azureOpenAI.js +97 -86
- package/dist/lib/providers/function-calling-provider.d.ts +70 -0
- package/dist/lib/providers/function-calling-provider.js +359 -0
- package/dist/lib/providers/googleAIStudio.d.ts +10 -5
- package/dist/lib/providers/googleAIStudio.js +60 -38
- package/dist/lib/providers/googleVertexAI.d.ts +3 -3
- package/dist/lib/providers/googleVertexAI.js +96 -86
- package/dist/lib/providers/huggingFace.d.ts +3 -3
- package/dist/lib/providers/huggingFace.js +70 -63
- package/dist/lib/providers/index.d.ts +11 -11
- package/dist/lib/providers/index.js +18 -18
- package/dist/lib/providers/mcp-provider.d.ts +62 -0
- package/dist/lib/providers/mcp-provider.js +183 -0
- package/dist/lib/providers/mistralAI.d.ts +3 -3
- package/dist/lib/providers/mistralAI.js +42 -36
- package/dist/lib/providers/ollama.d.ts +4 -4
- package/dist/lib/providers/ollama.js +128 -98
- package/dist/lib/providers/openAI.d.ts +7 -3
- package/dist/lib/providers/openAI.js +45 -33
- package/dist/lib/utils/logger.js +2 -2
- package/dist/lib/utils/providerUtils-fixed.d.ts +8 -0
- package/dist/lib/utils/providerUtils-fixed.js +75 -0
- package/dist/lib/utils/providerUtils.d.ts +8 -1
- package/dist/lib/utils/providerUtils.js +63 -32
- package/dist/mcp/auto-discovery.d.ts +120 -0
- package/dist/mcp/auto-discovery.js +794 -0
- package/dist/mcp/client.d.ts +66 -0
- package/dist/mcp/client.js +245 -0
- package/dist/mcp/config.d.ts +31 -0
- package/dist/mcp/config.js +74 -0
- package/dist/mcp/context-manager.d.ts +4 -4
- package/dist/mcp/context-manager.js +24 -18
- package/dist/mcp/factory.d.ts +28 -11
- package/dist/mcp/factory.js +36 -29
- package/dist/mcp/function-calling.d.ts +51 -0
- package/dist/mcp/function-calling.js +510 -0
- package/dist/mcp/index.d.ts +190 -0
- package/dist/mcp/index.js +156 -0
- package/dist/mcp/initialize-tools.d.ts +28 -0
- package/dist/mcp/initialize-tools.js +210 -0
- package/dist/mcp/initialize.d.ts +17 -0
- package/dist/mcp/initialize.js +51 -0
- package/dist/mcp/logging.d.ts +71 -0
- package/dist/mcp/logging.js +183 -0
- package/dist/mcp/manager.d.ts +67 -0
- package/dist/mcp/manager.js +176 -0
- package/dist/mcp/neurolink-mcp-client.d.ts +96 -0
- package/dist/mcp/neurolink-mcp-client.js +417 -0
- package/dist/mcp/orchestrator.d.ts +3 -3
- package/dist/mcp/orchestrator.js +46 -43
- package/dist/mcp/registry.d.ts +12 -4
- package/dist/mcp/registry.js +64 -37
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.d.ts +1 -1
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +204 -65
- package/dist/mcp/servers/ai-providers/ai-core-server.js +142 -102
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +6 -6
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +197 -142
- package/dist/mcp/servers/utilities/utility-server.d.ts +8 -0
- package/dist/mcp/servers/utilities/utility-server.js +326 -0
- package/dist/mcp/tool-integration.d.ts +67 -0
- package/dist/mcp/tool-integration.js +179 -0
- package/dist/mcp/unified-registry.d.ts +269 -0
- package/dist/mcp/unified-registry.js +1411 -0
- package/dist/neurolink.d.ts +68 -6
- package/dist/neurolink.js +304 -42
- package/dist/providers/agent-enhanced-provider.d.ts +59 -0
- package/dist/providers/agent-enhanced-provider.js +242 -0
- package/dist/providers/amazonBedrock.d.ts +3 -3
- package/dist/providers/amazonBedrock.js +54 -50
- package/dist/providers/anthropic.d.ts +2 -2
- package/dist/providers/anthropic.js +92 -84
- package/dist/providers/azureOpenAI.d.ts +2 -2
- package/dist/providers/azureOpenAI.js +97 -86
- package/dist/providers/function-calling-provider.d.ts +70 -0
- package/dist/providers/function-calling-provider.js +359 -0
- package/dist/providers/googleAIStudio.d.ts +10 -5
- package/dist/providers/googleAIStudio.js +60 -38
- package/dist/providers/googleVertexAI.d.ts +3 -3
- package/dist/providers/googleVertexAI.js +96 -86
- package/dist/providers/huggingFace.d.ts +3 -3
- package/dist/providers/huggingFace.js +70 -63
- package/dist/providers/index.d.ts +11 -11
- package/dist/providers/index.js +18 -18
- package/dist/providers/mcp-provider.d.ts +62 -0
- package/dist/providers/mcp-provider.js +183 -0
- package/dist/providers/mistralAI.d.ts +3 -3
- package/dist/providers/mistralAI.js +42 -36
- package/dist/providers/ollama.d.ts +4 -4
- package/dist/providers/ollama.js +128 -98
- package/dist/providers/openAI.d.ts +7 -3
- package/dist/providers/openAI.js +45 -33
- package/dist/utils/logger.js +2 -2
- package/dist/utils/providerUtils-fixed.d.ts +8 -0
- package/dist/utils/providerUtils-fixed.js +75 -0
- package/dist/utils/providerUtils.d.ts +8 -1
- package/dist/utils/providerUtils.js +63 -32
- package/package.json +182 -160
package/dist/providers/azureOpenAI.js — the same +97/−86 change also ships at package/dist/lib/providers/azureOpenAI.js; removed lines that the registry view truncates at string literals are marked with "…":

@@ -4,8 +4,8 @@
  * Enterprise-grade OpenAI integration through Microsoft Azure.
  * Supports all OpenAI models with enhanced security and compliance.
  */
-import { AIProviderName } from …
-import { logger } from …
+import { AIProviderName } from "../core/types.js";
+import { logger } from "../utils/logger.js";
 export class AzureOpenAIProvider {
     name = AIProviderName.AZURE;
     apiKey;
@@ -16,27 +16,28 @@ export class AzureOpenAIProvider {
         this.apiKey = this.getApiKey();
         this.endpoint = this.getEndpoint();
         this.deploymentId = this.getDeploymentId();
-        this.apiVersion = …
+        this.apiVersion =
+            process.env.AZURE_OPENAI_API_VERSION || "2024-02-15-preview";
         logger.debug(`[AzureOpenAIProvider] Initialized with endpoint: ${this.endpoint}, deployment: ${this.deploymentId}`);
     }
     getApiKey() {
         const apiKey = process.env.AZURE_OPENAI_API_KEY;
         if (!apiKey) {
-            throw new Error(…
+            throw new Error("AZURE_OPENAI_API_KEY environment variable is required");
         }
         return apiKey;
     }
     getEndpoint() {
         const endpoint = process.env.AZURE_OPENAI_ENDPOINT;
         if (!endpoint) {
-            throw new Error(…
+            throw new Error("AZURE_OPENAI_ENDPOINT environment variable is required");
         }
-        return endpoint.replace(/\/$/, …
+        return endpoint.replace(/\/$/, ""); // Remove trailing slash
     }
     getDeploymentId() {
         const deploymentId = process.env.AZURE_OPENAI_DEPLOYMENT_ID;
         if (!deploymentId) {
-            throw new Error(…
+            throw new Error("AZURE_OPENAI_DEPLOYMENT_ID environment variable is required");
         }
         return deploymentId;
     }
@@ -46,15 +47,15 @@ export class AzureOpenAIProvider {
     async makeRequest(body, stream = false) {
         const url = this.getApiUrl(stream);
         const headers = {
-            …
-            …
+            "Content-Type": "application/json",
+            "api-key": this.apiKey,
         };
-        logger.debug(`[AzureOpenAIProvider.makeRequest] ${stream ? …
-        logger.debug(`[AzureOpenAIProvider.makeRequest] Max tokens: ${body.max_tokens || …
+        logger.debug(`[AzureOpenAIProvider.makeRequest] ${stream ? "Streaming" : "Non-streaming"} request to deployment: ${this.deploymentId}`);
+        logger.debug(`[AzureOpenAIProvider.makeRequest] Max tokens: ${body.max_tokens || "default"}, Temperature: ${body.temperature || "default"}`);
         const response = await fetch(url, {
-            method: …
+            method: "POST",
             headers,
-            body: JSON.stringify(body)
+            body: JSON.stringify(body),
         });
         if (!response.ok) {
             const errorText = await response.text();
@@ -64,34 +65,34 @@ export class AzureOpenAIProvider {
         return response;
     }
     async generateText(optionsOrPrompt, schema) {
-        logger.debug(…
+        logger.debug("[AzureOpenAIProvider.generateText] Starting text generation");
         // Parse parameters with backward compatibility
-        const options = typeof optionsOrPrompt === …
+        const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = …
+        const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = "You are a helpful AI assistant.", } = options;
         logger.debug(`[AzureOpenAIProvider.generateText] Prompt: "${prompt.substring(0, 100)}...", Temperature: ${temperature}, Max tokens: ${maxTokens}`);
         const messages = [];
         if (systemPrompt) {
             messages.push({
-                role: …
-                content: systemPrompt
+                role: "system",
+                content: systemPrompt,
             });
         }
         messages.push({
-            role: …
-            content: prompt
+            role: "user",
+            content: prompt,
         });
         const requestBody = {
             messages,
             temperature,
-            max_tokens: maxTokens
+            max_tokens: maxTokens,
         };
         try {
             const response = await this.makeRequest(requestBody);
             const data = await response.json();
             logger.debug(`[AzureOpenAIProvider.generateText] Success. Generated ${data.usage.completion_tokens} tokens`);
-            const content = data.choices[0]?.message?.content || …
+            const content = data.choices[0]?.message?.content || "";
             return {
                 content,
                 provider: this.name,
@@ -99,78 +100,81 @@ export class AzureOpenAIProvider {
                 usage: {
                     promptTokens: data.usage.prompt_tokens,
                     completionTokens: data.usage.completion_tokens,
-                    totalTokens: data.usage.total_tokens
+                    totalTokens: data.usage.total_tokens,
                 },
-                finishReason: data.choices[0]?.finish_reason || …
+                finishReason: data.choices[0]?.finish_reason || "stop",
             };
         }
         catch (error) {
-            logger.error(…
+            logger.error("[AzureOpenAIProvider.generateText] Error:", error);
            throw error;
         }
     }
     async streamText(optionsOrPrompt, schema) {
-        logger.debug(…
+        logger.debug("[AzureOpenAIProvider.streamText] Starting text streaming");
         // Parse parameters with backward compatibility
-        const options = typeof optionsOrPrompt === …
+        const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = …
+        const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = "You are a helpful AI assistant.", } = options;
         logger.debug(`[AzureOpenAIProvider.streamText] Streaming prompt: "${prompt.substring(0, 100)}..."`);
         const messages = [];
         if (systemPrompt) {
             messages.push({
-                role: …
-                content: systemPrompt
+                role: "system",
+                content: systemPrompt,
             });
         }
         messages.push({
-            role: …
-            content: prompt
+            role: "user",
+            content: prompt,
         });
         const requestBody = {
             messages,
             temperature,
             max_tokens: maxTokens,
-            stream: true
+            stream: true,
         };
         try {
             const response = await this.makeRequest(requestBody, true);
             if (!response.body) {
-                throw new Error(…
+                throw new Error("No response body received");
             }
             // Return a StreamTextResult-like object
             return {
                 textStream: this.createAsyncIterable(response.body),
-                text: …
+                text: "",
                 usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
-                finishReason: …
+                finishReason: "stop",
             };
         }
         catch (error) {
-            logger.error(…
+            logger.error("[AzureOpenAIProvider.streamText] Error:", error);
             throw error;
         }
     }
     async *createAsyncIterable(body) {
         const reader = body.getReader();
         const decoder = new TextDecoder();
-        let buffer = …
+        let buffer = "";
         try {
             while (true) {
                 const { done, value } = await reader.read();
-                if (done)
+                if (done) {
                     break;
+                }
                 buffer += decoder.decode(value, { stream: true });
-                const lines = buffer.split(…
-                buffer = lines.pop() || …
+                const lines = buffer.split("\n");
+                buffer = lines.pop() || "";
                 for (const line of lines) {
-                    if (line.trim() === …
+                    if (line.trim() === "") {
                         continue;
-                    …
+                    }
+                    if (line.startsWith("data: ")) {
                         const data = line.slice(6);
-                        if (data.trim() === …
+                        if (data.trim() === "[DONE]") {
                             continue;
+                        }
                         try {
                             const chunk = JSON.parse(data);
                             // Extract text content from chunk
@@ -179,7 +183,7 @@ export class AzureOpenAIProvider {
                         }
                     }
                     catch (parseError) {
-                        logger.warn(…
+                        logger.warn("[AzureOpenAIProvider.createAsyncIterable] Failed to parse chunk:", parseError);
                         continue;
                     }
                 }
@@ -191,53 +195,56 @@ export class AzureOpenAIProvider {
         }
     }
     async *generateTextStream(optionsOrPrompt) {
-        logger.debug(…
+        logger.debug("[AzureOpenAIProvider.generateTextStream] Starting text streaming");
         // Parse parameters with backward compatibility
-        const options = typeof optionsOrPrompt === …
+        const options = typeof optionsOrPrompt === "string"
            ? { prompt: optionsOrPrompt }
            : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = …
+        const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = "You are a helpful AI assistant.", } = options;
         logger.debug(`[AzureOpenAIProvider.generateTextStream] Streaming prompt: "${prompt.substring(0, 100)}..."`);
         const messages = [];
         if (systemPrompt) {
             messages.push({
-                role: …
-                content: systemPrompt
+                role: "system",
+                content: systemPrompt,
             });
         }
         messages.push({
-            role: …
-            content: prompt
+            role: "user",
+            content: prompt,
         });
         const requestBody = {
             messages,
             temperature,
             max_tokens: maxTokens,
-            stream: true
+            stream: true,
         };
         try {
             const response = await this.makeRequest(requestBody, true);
             if (!response.body) {
-                throw new Error(…
+                throw new Error("No response body received");
             }
             const reader = response.body.getReader();
             const decoder = new TextDecoder();
-            let buffer = …
+            let buffer = "";
             try {
                 while (true) {
                     const { done, value } = await reader.read();
-                    if (done)
+                    if (done) {
                         break;
+                    }
                     buffer += decoder.decode(value, { stream: true });
-                    const lines = buffer.split(…
-                    buffer = lines.pop() || …
+                    const lines = buffer.split("\n");
+                    buffer = lines.pop() || "";
                     for (const line of lines) {
-                        if (line.trim() === …
+                        if (line.trim() === "") {
                             continue;
-                        …
+                        }
+                        if (line.startsWith("data: ")) {
                             const data = line.slice(6);
-                            if (data.trim() === …
+                            if (data.trim() === "[DONE]") {
                                 continue;
+                            }
                             try {
                                 const chunk = JSON.parse(data);
                                 // Extract text content from chunk
@@ -245,12 +252,12 @@ export class AzureOpenAIProvider {
                                 yield {
                                     content: chunk.choices[0].delta.content,
                                     provider: this.name,
-                                    model: chunk.model || this.deploymentId
+                                    model: chunk.model || this.deploymentId,
                                 };
                             }
                         }
                         catch (parseError) {
-                            logger.warn(…
+                            logger.warn("[AzureOpenAIProvider.generateTextStream] Failed to parse chunk:", parseError);
                             continue;
                         }
                     }
@@ -260,26 +267,26 @@ export class AzureOpenAIProvider {
             finally {
                 reader.releaseLock();
             }
-            logger.debug(…
+            logger.debug("[AzureOpenAIProvider.generateTextStream] Streaming completed");
         }
         catch (error) {
-            logger.error(…
+            logger.error("[AzureOpenAIProvider.generateTextStream] Error:", error);
             throw error;
         }
     }
     async testConnection() {
-        logger.debug(…
+        logger.debug("[AzureOpenAIProvider.testConnection] Testing connection to Azure OpenAI");
         const startTime = Date.now();
         try {
             await this.generateText({
-                prompt: …
-                maxTokens: 5
+                prompt: "Hello",
+                maxTokens: 5,
             });
             const responseTime = Date.now() - startTime;
             logger.debug(`[AzureOpenAIProvider.testConnection] Connection test successful (${responseTime}ms)`);
             return {
                 success: true,
-                responseTime
+                responseTime,
             };
         }
         catch (error) {
@@ -287,8 +294,8 @@ export class AzureOpenAIProvider {
             logger.error(`[AzureOpenAIProvider.testConnection] Connection test failed (${responseTime}ms):`, error);
             return {
                 success: false,
-                error: error instanceof Error ? error.message : …
-                responseTime
+                error: error instanceof Error ? error.message : "Unknown error",
+                responseTime,
             };
         }
     }
@@ -304,18 +311,22 @@ export class AzureOpenAIProvider {
         }
     }
     getRequiredConfig() {
-        return [ …
+        return [
+            "AZURE_OPENAI_API_KEY",
+            "AZURE_OPENAI_ENDPOINT",
+            "AZURE_OPENAI_DEPLOYMENT_ID",
+        ];
     }
     getOptionalConfig() {
-        return [ …
+        return ["AZURE_OPENAI_API_VERSION"];
     }
     getModels() {
         return [
-            …
-            …
-            …
-            …
-            …
+            "gpt-4",
+            "gpt-4-turbo",
+            "gpt-4-32k",
+            "gpt-35-turbo",
+            "gpt-35-turbo-16k",
         ];
     }
     supportsStreaming() {
@@ -326,14 +337,14 @@ export class AzureOpenAIProvider {
     }
     getCapabilities() {
         return [
-            …
-            …
-            …
-            …
-            …
-            …
-            …
-            …
+            "text-generation",
+            "streaming",
+            "conversation",
+            "system-prompts",
+            "json-mode",
+            "function-calling",
+            "enterprise-security",
+            "content-filtering",
         ];
     }
 }

package/dist/providers/function-calling-provider.d.ts — new file (+70 lines), also shipped at package/dist/lib/providers/function-calling-provider.d.ts:

@@ -0,0 +1,70 @@
+/**
+ * Enhanced AI Provider with Real Function Calling Support
+ * Integrates MCP tools directly with AI SDK's function calling capabilities
+ * This is the missing piece that enables true AI function calling!
+ */
+import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+import { type GenerateTextResult, type StreamTextResult, type ToolSet, type Schema } from "ai";
+import type { ZodType, ZodTypeDef } from "zod";
+/**
+ * Enhanced provider that enables real function calling with MCP tools
+ */
+export declare class FunctionCallingProvider implements AIProvider {
+    private baseProvider;
+    private enableFunctionCalling;
+    private sessionId;
+    private userId;
+    constructor(baseProvider: AIProvider, options?: {
+        enableFunctionCalling?: boolean;
+        sessionId?: string;
+        userId?: string;
+    });
+    /**
+     * Generate text with real function calling support
+     */
+    generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
+    /**
+     * Generate text using AI SDK's native function calling
+     */
+    private generateTextWithTools;
+    /**
+     * Get the model from the base provider
+     * This is a temporary solution - ideally we'd have a getModel() method on AIProvider
+     */
+    private getModelFromProvider;
+    /**
+     * Sanitize tool name to comply with AI provider requirements
+     */
+    private sanitizeToolName;
+    /**
+     * Convert our tools to AI SDK format with proper execution
+     */
+    private convertToAISDKTools;
+    /**
+     * Create function-aware system prompt
+     */
+    private createFunctionAwareSystemPrompt;
+    /**
+     * Stream text with function calling support
+     */
+    streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
+}
+/**
+ * Create a function-calling enhanced version of any AI provider
+ */
+export declare function createFunctionCallingProvider(baseProvider: AIProvider, options?: {
+    enableFunctionCalling?: boolean;
+    sessionId?: string;
+    userId?: string;
+}): AIProvider;
+/**
+ * Enhanced MCP Provider Factory that creates function-calling enabled providers
+ */
+export declare function createMCPAwareProviderV3(baseProvider: AIProvider, options?: {
+    providerName?: string;
+    modelName?: string;
+    enableMCP?: boolean;
+    enableFunctionCalling?: boolean;
+    sessionId?: string;
+    userId?: string;
+}): AIProvider;