@juspay/neurolink 4.2.0 → 5.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +47 -2
- package/README.md +51 -60
- package/dist/chat/sse-handler.js +5 -4
- package/dist/chat/websocket-chat-handler.js +9 -9
- package/dist/cli/commands/mcp.js +1 -1
- package/dist/cli/commands/ollama.js +3 -3
- package/dist/cli/factories/command-factory.d.ts +14 -0
- package/dist/cli/factories/command-factory.js +129 -0
- package/dist/cli/index.js +27 -29
- package/dist/cli/utils/interactive-setup.js +2 -2
- package/dist/core/evaluation.d.ts +9 -9
- package/dist/core/evaluation.js +14 -14
- package/dist/core/types.d.ts +41 -48
- package/dist/core/types.js +1 -0
- package/dist/factories/compatibility-factory.d.ts +20 -0
- package/dist/factories/compatibility-factory.js +69 -0
- package/dist/factories/provider-generate-factory.d.ts +20 -0
- package/dist/factories/provider-generate-factory.js +87 -0
- package/dist/index.d.ts +4 -2
- package/dist/index.js +3 -1
- package/dist/lib/chat/sse-handler.js +5 -4
- package/dist/lib/chat/websocket-chat-handler.js +9 -9
- package/dist/lib/core/evaluation.d.ts +9 -9
- package/dist/lib/core/evaluation.js +14 -14
- package/dist/lib/core/types.d.ts +41 -48
- package/dist/lib/core/types.js +1 -0
- package/dist/lib/factories/compatibility-factory.d.ts +20 -0
- package/dist/lib/factories/compatibility-factory.js +69 -0
- package/dist/lib/factories/provider-generate-factory.d.ts +20 -0
- package/dist/lib/factories/provider-generate-factory.js +87 -0
- package/dist/lib/index.d.ts +4 -2
- package/dist/lib/index.js +3 -1
- package/dist/lib/mcp/client.js +5 -5
- package/dist/lib/mcp/dynamic-orchestrator.js +8 -8
- package/dist/lib/mcp/external-client.js +2 -2
- package/dist/lib/mcp/factory.d.ts +1 -1
- package/dist/lib/mcp/factory.js +1 -1
- package/dist/lib/mcp/neurolink-mcp-client.js +10 -10
- package/dist/lib/mcp/orchestrator.js +4 -4
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +5 -5
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
- package/dist/lib/neurolink.d.ts +21 -73
- package/dist/lib/neurolink.js +230 -119
- package/dist/lib/providers/agent-enhanced-provider.d.ts +12 -8
- package/dist/lib/providers/agent-enhanced-provider.js +87 -96
- package/dist/lib/providers/amazonBedrock.d.ts +17 -8
- package/dist/lib/providers/amazonBedrock.js +60 -30
- package/dist/lib/providers/anthropic.d.ts +14 -10
- package/dist/lib/providers/anthropic.js +84 -154
- package/dist/lib/providers/azureOpenAI.d.ts +9 -6
- package/dist/lib/providers/azureOpenAI.js +70 -159
- package/dist/lib/providers/function-calling-provider.d.ts +14 -12
- package/dist/lib/providers/function-calling-provider.js +114 -64
- package/dist/lib/providers/googleAIStudio.d.ts +12 -19
- package/dist/lib/providers/googleAIStudio.js +65 -34
- package/dist/lib/providers/googleVertexAI.d.ts +11 -15
- package/dist/lib/providers/googleVertexAI.js +146 -118
- package/dist/lib/providers/huggingFace.d.ts +10 -11
- package/dist/lib/providers/huggingFace.js +61 -24
- package/dist/lib/providers/mcp-provider.d.ts +13 -8
- package/dist/lib/providers/mcp-provider.js +59 -18
- package/dist/lib/providers/mistralAI.d.ts +14 -11
- package/dist/lib/providers/mistralAI.js +60 -29
- package/dist/lib/providers/ollama.d.ts +9 -8
- package/dist/lib/providers/ollama.js +134 -91
- package/dist/lib/providers/openAI.d.ts +11 -12
- package/dist/lib/providers/openAI.js +132 -97
- package/dist/lib/types/generate-types.d.ts +79 -0
- package/dist/lib/types/generate-types.js +1 -0
- package/dist/lib/types/stream-types.d.ts +83 -0
- package/dist/lib/types/stream-types.js +1 -0
- package/dist/lib/utils/providerUtils-fixed.js +1 -1
- package/dist/lib/utils/streaming-utils.d.ts +14 -2
- package/dist/lib/utils/streaming-utils.js +0 -3
- package/dist/mcp/client.js +5 -5
- package/dist/mcp/dynamic-orchestrator.js +8 -8
- package/dist/mcp/external-client.js +2 -2
- package/dist/mcp/factory.d.ts +1 -1
- package/dist/mcp/factory.js +1 -1
- package/dist/mcp/neurolink-mcp-client.js +10 -10
- package/dist/mcp/orchestrator.js +4 -4
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
- package/dist/mcp/servers/ai-providers/ai-core-server.js +5 -5
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
- package/dist/neurolink.d.ts +21 -73
- package/dist/neurolink.js +230 -119
- package/dist/providers/agent-enhanced-provider.d.ts +12 -8
- package/dist/providers/agent-enhanced-provider.js +87 -95
- package/dist/providers/amazonBedrock.d.ts +17 -8
- package/dist/providers/amazonBedrock.js +60 -30
- package/dist/providers/anthropic.d.ts +14 -10
- package/dist/providers/anthropic.js +84 -154
- package/dist/providers/azureOpenAI.d.ts +9 -6
- package/dist/providers/azureOpenAI.js +70 -159
- package/dist/providers/function-calling-provider.d.ts +14 -12
- package/dist/providers/function-calling-provider.js +114 -64
- package/dist/providers/googleAIStudio.d.ts +12 -19
- package/dist/providers/googleAIStudio.js +65 -34
- package/dist/providers/googleVertexAI.d.ts +11 -15
- package/dist/providers/googleVertexAI.js +146 -118
- package/dist/providers/huggingFace.d.ts +10 -11
- package/dist/providers/huggingFace.js +61 -24
- package/dist/providers/mcp-provider.d.ts +13 -8
- package/dist/providers/mcp-provider.js +59 -18
- package/dist/providers/mistralAI.d.ts +14 -11
- package/dist/providers/mistralAI.js +60 -29
- package/dist/providers/ollama.d.ts +9 -8
- package/dist/providers/ollama.js +133 -90
- package/dist/providers/openAI.d.ts +11 -12
- package/dist/providers/openAI.js +132 -97
- package/dist/types/generate-types.d.ts +79 -0
- package/dist/types/generate-types.js +1 -0
- package/dist/types/stream-types.d.ts +83 -0
- package/dist/types/stream-types.js +1 -0
- package/dist/utils/providerUtils-fixed.js +1 -1
- package/dist/utils/streaming-utils.d.ts +14 -2
- package/dist/utils/streaming-utils.js +0 -3
- package/package.json +2 -3
- package/dist/cli/commands/agent-generate.d.ts +0 -1
- package/dist/cli/commands/agent-generate.js +0 -67
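Across the provider files listed above, the recurring change in this major release is an API rename: `generateText()` becomes `generate()` returning a normalized `GenerateResult`, and a first-class `stream()` method returning a `StreamResult` is added, with `gen()` kept as a short alias. A minimal migration sketch, assuming the provider classes are importable and constructible as shown (the import path and constructor are assumptions; only the method names and result fields come from the diffs below):

```ts
// Hypothetical 4.x -> 5.x call-site migration; import and constructor are assumed.
import { MistralAI } from "@juspay/neurolink";

const provider = new MistralAI();

// 4.x: provider.generateText(...) returned a raw ai-SDK result or null.
// 5.x: provider.generate(...) returns the normalized GenerateResult.
const result = await provider.generate("Summarize the 5.1.0 changes");
console.log(result.content, result.provider, result.responseTime);
```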
package/dist/providers/mistralAI.d.ts CHANGED

```diff
@@ -1,6 +1,8 @@
 import type { ZodType, ZodTypeDef } from "zod";
-import { type
-import type {
+import { type Schema } from "ai";
+import type { GenerateResult } from "../types/generate-types.js";
+import type { StreamOptions, StreamResult } from "../types/stream-types.js";
+import type { AIProvider, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
 export declare class MistralAI implements AIProvider {
     private modelName;
     private client;
@@ -15,20 +17,21 @@ export declare class MistralAI implements AIProvider {
      */
     private getModel;
     /**
-     *
-     * @
-
-
+     * LEGACY METHOD: Use stream() instead for new code
+     * @deprecated Use stream() method instead
+     */
+    /**
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
      */
-
+    stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
     /**
      * Processes text using non-streaming approach with optional schema validation
      * @param prompt - The input text prompt to analyze
      * @param analysisSchema - Optional Zod schema or Schema object for output validation
-     * @returns Promise resolving to
+     * @returns Promise resolving to GenerateResult or null if operation fails
      */
-
-
-    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
+    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateResult>;
+    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateResult | null>;
 }
 export default MistralAI;
```
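The `StreamOptions | string` union in the signature above means `stream()` takes either a bare prompt string or an options object. A sketch of both call shapes, reusing the illustrative `provider` from the earlier example; the field names (`input.text`, `temperature`, `maxTokens`, `systemPrompt`) come from the implementation diff that follows:

```ts
// Two equivalent entry points into the new stream() signature.
const byString = await provider.stream("Hello Mistral");
const byOptions = await provider.stream({
  input: { text: "Hello Mistral" }, // required; validated as a non-empty string
  temperature: 0.2,
  maxTokens: 256,
  systemPrompt: "You are concise.",
});
```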
package/dist/providers/mistralAI.js CHANGED

```diff
@@ -1,5 +1,5 @@
 import { createMistral } from "@ai-sdk/mistral";
-import { streamText, generateText, Output
+import { streamText, generateText, Output } from "ai";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
 import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
@@ -77,21 +77,31 @@ export class MistralAI {
         return this.client(this.modelName);
     }
     /**
-     *
-     * @
-     * @param analysisSchema - Optional Zod schema or Schema object for output validation
-     * @returns Promise resolving to StreamTextResult or null if operation fails
+     * LEGACY METHOD: Use stream() instead for new code
+     * @deprecated Use stream() method instead
      */
-
-
+    /**
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
+     */
+    async stream(optionsOrPrompt, analysisSchema) {
+        const functionTag = "MistralAI.stream";
         const provider = "mistral";
         let chunkCount = 0;
+        const startTime = Date.now();
         try {
             // Parse parameters - support both string and options object
             const options = typeof optionsOrPrompt === "string"
-                ? {
+                ? { input: { text: optionsOrPrompt } }
                 : optionsOrPrompt;
-
+            // Validate input
+            if (!options?.input?.text ||
+                typeof options.input.text !== "string" ||
+                options.input.text.trim() === "") {
+                throw new Error("Stream options must include input.text as a non-empty string");
+            }
+            // Extract parameters
+            const { prompt = options.input.text, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
             logger.debug(`[${functionTag}] Stream request started`, {
@@ -120,7 +130,7 @@ export class MistralAI {
                     const error = event.error;
                     const errorMessage = error instanceof Error ? error.message : String(error);
                     const errorStack = error instanceof Error ? error.stack : undefined;
-                    logger.
+                    logger.debug(`[${functionTag}] Stream error`, {
                         provider,
                         modelName: this.modelName,
                         error: errorMessage,
@@ -130,7 +140,7 @@ export class MistralAI {
                     });
                 },
                 onFinish: (event) => {
-                    logger.debug(`[${functionTag}] Stream
+                    logger.debug(`[${functionTag}] Stream finished`, {
                         provider,
                         modelName: this.modelName,
                         finishReason: event.finishReason,
@@ -142,7 +152,7 @@ export class MistralAI {
                 },
                 onChunk: (event) => {
                     chunkCount++;
-                    logger.debug(`[${functionTag}] Stream
+                    logger.debug(`[${functionTag}] Stream chunk`, {
                         provider,
                         modelName: this.modelName,
                         chunkNumber: chunkCount,
@@ -157,14 +167,29 @@ export class MistralAI {
                 });
             }
             const result = streamText(streamOptions);
-
-
-
+            logger.debug(`[${functionTag}] Stream request completed`, {
+                provider,
+                modelName: this.modelName,
+            });
+            // Convert to StreamResult format
+            return {
+                stream: (async function* () {
+                    for await (const chunk of result.textStream) {
+                        yield { content: chunk };
+                    }
+                })(),
+                provider: "mistral",
+                model: this.modelName,
+                metadata: {
+                    streamId: `mistral-${Date.now()}`,
+                    startTime,
+                },
+            };
         }
         catch (err) {
             // Log timeout errors specifically
             if (err instanceof TimeoutError) {
-                logger.
+                logger.debug(`[${functionTag}] Timeout error`, {
                     provider,
                     modelName: this.modelName,
                     timeout: err.timeout,
@@ -172,14 +197,11 @@ export class MistralAI {
                 });
             }
             else {
-                logger.
+                logger.debug(`[${functionTag}] Exception`, {
                     provider,
                     modelName: this.modelName,
-                    message: "Error in streaming
+                    message: "Error in streaming content",
                     err: String(err),
-                    promptLength: typeof optionsOrPrompt === "string"
-                        ? optionsOrPrompt.length
-                        : optionsOrPrompt.prompt.length,
                 });
             }
             throw err; // Re-throw error to trigger fallback
@@ -189,10 +211,10 @@ export class MistralAI {
      * Processes text using non-streaming approach with optional schema validation
      * @param prompt - The input text prompt to analyze
      * @param analysisSchema - Optional Zod schema or Schema object for output validation
-     * @returns Promise resolving to
+     * @returns Promise resolving to GenerateResult or null if operation fails
      */
-    async
-        const functionTag = "MistralAI.
+    async generate(optionsOrPrompt, analysisSchema) {
+        const functionTag = "MistralAI.generate";
         const provider = "mistral";
         const startTime = Date.now();
         try {
@@ -256,7 +278,19 @@ export class MistralAI {
             if (options.enableEvaluation) {
                 result.evaluation = await evaluateResponse(prompt, result.text, options.context);
             }
-            return
+            return {
+                content: result.text,
+                provider: "mistral",
+                model: this.modelName,
+                usage: result.usage
+                    ? {
+                        inputTokens: result.usage.promptTokens,
+                        outputTokens: result.usage.completionTokens,
+                        totalTokens: result.usage.totalTokens,
+                    }
+                    : undefined,
+                responseTime: Date.now() - startTime,
+            };
         }
         finally {
             // Always cleanup timeout
@@ -284,11 +318,8 @@ export class MistralAI {
         throw err; // Re-throw error to trigger fallback
     }
 }
-    async generate(optionsOrPrompt, analysisSchema) {
-        return this.generateText(optionsOrPrompt, analysisSchema);
-    }
     async gen(optionsOrPrompt, analysisSchema) {
-        return this.
+        return this.generate(optionsOrPrompt, analysisSchema);
     }
 }
 export default MistralAI;
```
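The `StreamResult` built in the hunk above wraps the ai SDK `textStream` in an async generator yielding `{ content }` chunks. Consuming it could look like this (same illustrative `provider`; `streamId` and `startTime` are the metadata fields the implementation populates):

```ts
// Drain the async generator returned by the new stream() method.
const { stream, metadata } = await provider.stream("Tell me a short story");
for await (const chunk of stream) {
  process.stdout.write(chunk.content);
}
console.log(`\nstreamId: ${metadata.streamId}, startedAt: ${metadata.startTime}`);
```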
package/dist/providers/ollama.d.ts CHANGED

```diff
@@ -10,8 +10,9 @@
  * - Health checking and service validation
  * - Streaming and non-streaming text generation
  */
-import type { AIProvider, TextGenerationOptions,
-import type {
+import type { AIProvider, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
+import type { GenerateResult } from "../types/generate-types.js";
+import type { StreamOptions, StreamResult } from "../types/stream-types.js";
 import type { ZodType, ZodTypeDef } from "zod";
 import type { Schema } from "ai";
 export declare class Ollama implements AIProvider {
@@ -41,13 +42,13 @@ export declare class Ollama implements AIProvider {
      */
     pullModel(modelName: string): Promise<void>;
     /**
-     *
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
      */
-
+    stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
     /**
-     * Generate
+     * Generate text using Ollama local models
      */
-
-
-    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
+    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateResult>;
+    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateResult | null>;
 }
```
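As with MistralAI, the Ollama `stream()` implementation below validates its options before touching the model: a missing or empty `input.text` throws synchronously rather than producing an empty stream. A small sketch of that failure mode; the error message is quoted verbatim from both implementations:

```ts
// stream() rejects up front on empty input; no model call is made.
try {
  await provider.stream({ input: { text: "   " } });
} catch (err) {
  // Error: Stream options must include input.text as a non-empty string
  console.error(err);
}
```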
package/dist/providers/ollama.js CHANGED
```diff
@@ -388,91 +388,139 @@ export class Ollama {
         }
     }
     /**
-     *
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
      */
-    async
-        const functionTag = "Ollama.
+    async stream(optionsOrPrompt, analysisSchema) {
+        const functionTag = "Ollama.stream";
         const provider = "ollama";
+        let chunkCount = 0;
         const startTime = Date.now();
         try {
             // Parse parameters - support both string and options object
             const options = typeof optionsOrPrompt === "string"
-                ? {
+                ? { input: { text: optionsOrPrompt } }
                 : optionsOrPrompt;
-
+            // Validate input
+            if (!options?.input?.text ||
+                typeof options.input.text !== "string" ||
+                options.input.text.trim() === "") {
+                throw new Error("Stream options must include input.text as a non-empty string");
+            }
+            // Extract parameters
+            const { prompt = options.input.text, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
             // Convert timeout to milliseconds if provided as string
             const timeoutMs = timeout
                 ? typeof timeout === "string"
-                    ? parseInt(getDefaultTimeout("ollama", "
+                    ? parseInt(getDefaultTimeout("ollama", "stream").replace(/[^\d]/g, ""))
                     : timeout
                 : this.defaultTimeout;
-            logger.debug(`[${functionTag}]
+            logger.debug(`[${functionTag}] Stream request started`, {
                 provider,
                 modelName: this.modelName,
                 promptLength: prompt.length,
                 temperature,
                 maxTokens,
+                hasSchema: !!finalSchema,
                 timeout: timeoutMs,
             });
             const model = this.getModel(timeoutMs);
-            const
+            const streamOptions = {
                 model: model,
                 prompt: prompt,
                 system: systemPrompt,
                 temperature,
                 maxTokens,
+                onError: (event) => {
+                    const error = event.error;
+                    const errorMessage = error instanceof Error ? error.message : String(error);
+                    const errorStack = error instanceof Error ? error.stack : undefined;
+                    logger.debug(`[${functionTag}] Stream error`, {
+                        provider,
+                        modelName: this.modelName,
+                        error: errorMessage,
+                        stack: errorStack,
+                        promptLength: prompt.length,
+                        chunkCount,
+                    });
+                },
+                onFinish: (event) => {
+                    logger.debug(`[${functionTag}] Stream finished`, {
+                        provider,
+                        modelName: this.modelName,
+                        finishReason: event.finishReason,
+                        usage: event.usage,
+                        totalChunks: chunkCount,
+                        promptLength: prompt.length,
+                        responseLength: event.text?.length || 0,
+                    });
+                },
+                onChunk: (event) => {
+                    chunkCount++;
+                    logger.debug(`[${functionTag}] Stream chunk`, {
+                        provider,
+                        modelName: this.modelName,
+                        chunkNumber: chunkCount,
+                        chunkLength: event.chunk.text?.length || 0,
+                        chunkType: event.chunk.type,
+                    });
+                },
             };
             if (finalSchema) {
-
+                streamOptions.experimental_output = Output.object({
                     schema: finalSchema,
                 });
             }
-            const result =
-
-                throw new Error(`Model '${this.modelName}' not found. Please run 'ollama pull ${this.modelName}'`);
-            }
-            logger.debug(`[${functionTag}] Generate text completed`, {
+            const result = streamText(streamOptions);
+            logger.debug(`[${functionTag}] Stream request completed`, {
                 provider,
                 modelName: this.modelName,
-                usage: result.usage,
-                finishReason: result.finishReason,
-                responseLength: result.text?.length || 0,
             });
-            //
-
-
+            // Convert to StreamResult format
+            return {
+                stream: (async function* () {
+                    for await (const chunk of result.textStream) {
+                        yield { content: chunk };
+                    }
+                })(),
+                provider: "ollama",
+                model: this.modelName,
+                metadata: {
+                    streamId: `ollama-${Date.now()}`,
+                    startTime,
+                },
+            };
+        }
+        catch (err) {
+            // Log timeout errors specifically
+            if (err instanceof TimeoutError) {
+                logger.debug(`[${functionTag}] Timeout error`, {
                     provider,
-
-
-
-
-                };
+                    modelName: this.modelName,
+                    timeout: err.timeout,
+                    message: err.message,
+                });
             }
-
-
-
+            else {
+                logger.debug(`[${functionTag}] Exception`, {
+                    provider,
+                    modelName: this.modelName,
+                    message: "Error in streaming content",
+                    err: String(err),
+                });
             }
-            return result;
-        }
-        catch (err) {
-            logger.debug(`[${functionTag}] Exception`, {
-                provider,
-                modelName: this.modelName,
-                message: "Error in generating text",
-                err: String(err),
-            });
             throw err; // Re-throw error to trigger fallback
         }
     }
     /**
-     * Generate
+     * Generate text using Ollama local models
      */
-    async
-        const functionTag = "Ollama.
+    async generate(optionsOrPrompt, analysisSchema) {
+        const functionTag = "Ollama.generate";
         const provider = "ollama";
-
+        const startTime = Date.now();
         try {
             // Parse parameters - support both string and options object
             const options = typeof optionsOrPrompt === "string"
@@ -484,85 +532,80 @@ export class Ollama {
             // Convert timeout to milliseconds if provided as string
             const timeoutMs = timeout
                 ? typeof timeout === "string"
-                    ? parseInt(getDefaultTimeout("ollama", "
+                    ? parseInt(getDefaultTimeout("ollama", "generate").replace(/[^\d]/g, ""))
                     : timeout
                 : this.defaultTimeout;
-            logger.debug(`[${functionTag}]
+            logger.debug(`[${functionTag}] Generate request started`, {
                 provider,
                 modelName: this.modelName,
                 promptLength: prompt.length,
                 temperature,
                 maxTokens,
-                hasSchema: !!finalSchema,
                 timeout: timeoutMs,
             });
             const model = this.getModel(timeoutMs);
-            const
+            const generateOptions = {
                 model: model,
                 prompt: prompt,
                 system: systemPrompt,
                 temperature,
                 maxTokens,
-                onError: (event) => {
-                    const error = event.error;
-                    const errorMessage = error instanceof Error ? error.message : String(error);
-                    const errorStack = error instanceof Error ? error.stack : undefined;
-                    logger.debug(`[${functionTag}] Stream text error`, {
-                        provider,
-                        modelName: this.modelName,
-                        error: errorMessage,
-                        stack: errorStack,
-                        promptLength: prompt.length,
-                        chunkCount,
-                    });
-                },
-                onFinish: (event) => {
-                    logger.debug(`[${functionTag}] Stream text finished`, {
-                        provider,
-                        modelName: this.modelName,
-                        finishReason: event.finishReason,
-                        usage: event.usage,
-                        totalChunks: chunkCount,
-                        promptLength: prompt.length,
-                        responseLength: event.text?.length || 0,
-                    });
-                },
-                onChunk: (event) => {
-                    chunkCount++;
-                    logger.debug(`[${functionTag}] Stream text chunk`, {
-                        provider,
-                        modelName: this.modelName,
-                        chunkNumber: chunkCount,
-                        chunkLength: event.chunk.text?.length || 0,
-                        chunkType: event.chunk.type,
-                    });
-                },
             };
             if (finalSchema) {
-
+                generateOptions.experimental_output = Output.object({
                     schema: finalSchema,
                 });
             }
-            const result =
-
+            const result = await generateText(generateOptions);
+            if (result.text.includes("model not found")) {
+                throw new Error(`Model '${this.modelName}' not found. Please run 'ollama pull ${this.modelName}'`);
+            }
+            logger.debug(`[${functionTag}] Generate text completed`, {
+                provider,
+                modelName: this.modelName,
+                usage: result.usage,
+                finishReason: result.finishReason,
+                responseLength: result.text?.length || 0,
+            });
+            // Add analytics if enabled
+            if (options.enableAnalytics) {
+                result.analytics = {
+                    provider,
+                    model: this.modelName,
+                    tokens: result.usage,
+                    responseTime: Date.now() - startTime,
+                    context: options.context,
+                };
+            }
+            // Add evaluation if enabled
+            if (options.enableEvaluation) {
+                result.evaluation = await evaluateResponse(prompt, result.text, options.context);
+            }
+            return {
+                content: result.text,
+                provider: "ollama",
+                model: this.modelName,
+                usage: result.usage
+                    ? {
+                        inputTokens: result.usage.promptTokens,
+                        outputTokens: result.usage.completionTokens,
+                        totalTokens: result.usage.totalTokens,
+                    }
+                    : undefined,
+                responseTime: Date.now() - startTime,
+            };
         }
         catch (err) {
             logger.debug(`[${functionTag}] Exception`, {
                 provider,
                 modelName: this.modelName,
-                message: "Error in
+                message: "Error in generating text",
                 err: String(err),
-                promptLength: typeof optionsOrPrompt === "string"
-                    ? optionsOrPrompt.length
-                    : optionsOrPrompt.prompt.length,
             });
             throw err; // Re-throw error to trigger fallback
         }
     }
-    async generate(optionsOrPrompt, analysisSchema) {
-        return this.generateText(optionsOrPrompt, analysisSchema);
-    }
     async gen(optionsOrPrompt, analysisSchema) {
-        return this.
+        return this.generate(optionsOrPrompt, analysisSchema);
     }
 }
```
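Both `generate()` implementations normalize the ai SDK usage fields (`promptTokens`, `completionTokens`) into the `inputTokens`/`outputTokens` naming of `GenerateResult`, so downstream code sees one shape regardless of provider. A usage sketch based on the return blocks above (`usage` may be `undefined` when the underlying SDK reports none):

```ts
// Provider-agnostic usage accounting per the GenerateResult return blocks.
const res = await provider.generate("How tall is Mount Everest?");
if (res.usage) {
  console.log(`in=${res.usage.inputTokens} out=${res.usage.outputTokens} total=${res.usage.totalTokens}`);
}
console.log(`provider=${res.provider} model=${res.model} ms=${res.responseTime}`);
```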
package/dist/providers/openAI.d.ts CHANGED

```diff
@@ -1,6 +1,8 @@
 import type { ZodType, ZodTypeDef } from "zod";
-import { type
-import type {
+import { type Schema, type LanguageModelV1 } from "ai";
+import type { GenerateResult } from "../types/generate-types.js";
+import type { StreamOptions, StreamResult } from "../types/stream-types.js";
+import type { AIProvider, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
 export declare class OpenAI implements AIProvider {
     private modelName;
     private model;
@@ -9,20 +11,17 @@ export declare class OpenAI implements AIProvider {
      * Get the underlying model for function calling
      */
     getModel(): LanguageModelV1;
-
-    generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
+    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateResult>;
     /**
-     *
-     *
-     * @param analysisSchema - Optional schema for output validation
-     * @returns Promise resolving to GenerateTextResult or null
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
      */
-
+    stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
     /**
-     * Short alias for
+     * Short alias for generate() - CLI-SDK consistency
      * @param optionsOrPrompt - TextGenerationOptions object or prompt string
      * @param analysisSchema - Optional schema for output validation
-     * @returns Promise resolving to
+     * @returns Promise resolving to GenerateResult or null
      */
-    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<
+    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateResult | null>;
 }
```
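Finally, the optional `analysisSchema` parameter kept on `generate()`, `stream()`, and `gen()` is forwarded into the ai SDK's `Output.object({ schema })` by the implementations, so structured output survives the rename. A hedged sketch with a Zod schema (the schema and prompt are illustrative):

```ts
// Structured output via the optional second argument; routed through
// experimental_output = Output.object({ schema }) inside the providers.
import { z } from "zod";

const sentiment = z.object({ label: z.string(), score: z.number() });
const analyzed = await provider.generate("I love the 5.x API", sentiment);
console.log(analyzed.content);
```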