@juspay/neurolink 3.0.1 → 4.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +57 -6
- package/README.md +235 -2
- package/dist/agent/direct-tools.d.ts +6 -6
- package/dist/chat/client-utils.d.ts +92 -0
- package/dist/chat/client-utils.js +298 -0
- package/dist/chat/index.d.ts +27 -0
- package/dist/chat/index.js +41 -0
- package/dist/chat/session-storage.d.ts +77 -0
- package/dist/chat/session-storage.js +233 -0
- package/dist/chat/session.d.ts +95 -0
- package/dist/chat/session.js +257 -0
- package/dist/chat/sse-handler.d.ts +49 -0
- package/dist/chat/sse-handler.js +266 -0
- package/dist/chat/types.d.ts +73 -0
- package/dist/chat/types.js +5 -0
- package/dist/chat/websocket-chat-handler.d.ts +36 -0
- package/dist/chat/websocket-chat-handler.js +262 -0
- package/dist/cli/commands/config.js +12 -12
- package/dist/cli/commands/mcp.js +3 -4
- package/dist/cli/index.d.ts +0 -7
- package/dist/cli/index.js +247 -28
- package/dist/config/configManager.d.ts +60 -0
- package/dist/config/configManager.js +300 -0
- package/dist/config/types.d.ts +136 -0
- package/dist/config/types.js +43 -0
- package/dist/core/analytics.d.ts +23 -0
- package/dist/core/analytics.js +131 -0
- package/dist/core/constants.d.ts +41 -0
- package/dist/core/constants.js +50 -0
- package/dist/core/defaults.d.ts +18 -0
- package/dist/core/defaults.js +29 -0
- package/dist/core/evaluation-config.d.ts +29 -0
- package/dist/core/evaluation-config.js +144 -0
- package/dist/core/evaluation-providers.d.ts +30 -0
- package/dist/core/evaluation-providers.js +187 -0
- package/dist/core/evaluation.d.ts +117 -0
- package/dist/core/evaluation.js +528 -0
- package/dist/core/factory.js +33 -25
- package/dist/core/types.d.ts +165 -6
- package/dist/core/types.js +3 -4
- package/dist/index.d.ts +9 -4
- package/dist/index.js +25 -4
- package/dist/lib/agent/direct-tools.d.ts +6 -6
- package/dist/lib/chat/client-utils.d.ts +92 -0
- package/dist/lib/chat/client-utils.js +298 -0
- package/dist/lib/chat/index.d.ts +27 -0
- package/dist/lib/chat/index.js +41 -0
- package/dist/lib/chat/session-storage.d.ts +77 -0
- package/dist/lib/chat/session-storage.js +233 -0
- package/dist/lib/chat/session.d.ts +95 -0
- package/dist/lib/chat/session.js +257 -0
- package/dist/lib/chat/sse-handler.d.ts +49 -0
- package/dist/lib/chat/sse-handler.js +266 -0
- package/dist/lib/chat/types.d.ts +73 -0
- package/dist/lib/chat/types.js +5 -0
- package/dist/lib/chat/websocket-chat-handler.d.ts +36 -0
- package/dist/lib/chat/websocket-chat-handler.js +262 -0
- package/dist/lib/config/configManager.d.ts +60 -0
- package/dist/lib/config/configManager.js +300 -0
- package/dist/lib/config/types.d.ts +136 -0
- package/dist/lib/config/types.js +43 -0
- package/dist/lib/core/analytics.d.ts +23 -0
- package/dist/lib/core/analytics.js +131 -0
- package/dist/lib/core/constants.d.ts +41 -0
- package/dist/lib/core/constants.js +50 -0
- package/dist/lib/core/defaults.d.ts +18 -0
- package/dist/lib/core/defaults.js +29 -0
- package/dist/lib/core/evaluation-config.d.ts +29 -0
- package/dist/lib/core/evaluation-config.js +144 -0
- package/dist/lib/core/evaluation-providers.d.ts +30 -0
- package/dist/lib/core/evaluation-providers.js +187 -0
- package/dist/lib/core/evaluation.d.ts +117 -0
- package/dist/lib/core/evaluation.js +528 -0
- package/dist/lib/core/factory.js +33 -26
- package/dist/lib/core/types.d.ts +165 -6
- package/dist/lib/core/types.js +3 -4
- package/dist/lib/index.d.ts +9 -4
- package/dist/lib/index.js +25 -4
- package/dist/lib/mcp/contracts/mcpContract.d.ts +118 -0
- package/dist/lib/mcp/contracts/mcpContract.js +5 -0
- package/dist/lib/mcp/function-calling.js +11 -3
- package/dist/lib/mcp/logging.js +5 -0
- package/dist/lib/mcp/neurolink-mcp-client.js +2 -1
- package/dist/lib/mcp/orchestrator.js +18 -9
- package/dist/lib/mcp/registry.d.ts +49 -16
- package/dist/lib/mcp/registry.js +80 -6
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +5 -4
- package/dist/lib/mcp/tool-integration.js +1 -1
- package/dist/lib/mcp/tool-registry.d.ts +55 -34
- package/dist/lib/mcp/tool-registry.js +111 -97
- package/dist/lib/mcp/unified-mcp.js +6 -1
- package/dist/lib/mcp/unified-registry.d.ts +12 -4
- package/dist/lib/mcp/unified-registry.js +17 -4
- package/dist/lib/neurolink.d.ts +26 -0
- package/dist/lib/neurolink.js +43 -1
- package/dist/lib/providers/agent-enhanced-provider.d.ts +11 -2
- package/dist/lib/providers/agent-enhanced-provider.js +86 -15
- package/dist/lib/providers/amazonBedrock.d.ts +9 -1
- package/dist/lib/providers/amazonBedrock.js +26 -2
- package/dist/lib/providers/analytics-helper.d.ts +53 -0
- package/dist/lib/providers/analytics-helper.js +151 -0
- package/dist/lib/providers/anthropic.d.ts +11 -1
- package/dist/lib/providers/anthropic.js +29 -4
- package/dist/lib/providers/azureOpenAI.d.ts +3 -1
- package/dist/lib/providers/azureOpenAI.js +28 -4
- package/dist/lib/providers/function-calling-provider.d.ts +9 -1
- package/dist/lib/providers/function-calling-provider.js +14 -1
- package/dist/lib/providers/googleAIStudio.d.ts +15 -1
- package/dist/lib/providers/googleAIStudio.js +32 -2
- package/dist/lib/providers/googleVertexAI.d.ts +9 -1
- package/dist/lib/providers/googleVertexAI.js +31 -2
- package/dist/lib/providers/huggingFace.d.ts +3 -1
- package/dist/lib/providers/huggingFace.js +26 -3
- package/dist/lib/providers/mcp-provider.d.ts +9 -1
- package/dist/lib/providers/mcp-provider.js +12 -0
- package/dist/lib/providers/mistralAI.d.ts +3 -1
- package/dist/lib/providers/mistralAI.js +25 -2
- package/dist/lib/providers/ollama.d.ts +3 -1
- package/dist/lib/providers/ollama.js +27 -4
- package/dist/lib/providers/openAI.d.ts +15 -1
- package/dist/lib/providers/openAI.js +32 -2
- package/dist/lib/proxy/proxy-fetch.js +8 -7
- package/dist/lib/services/streaming/streaming-manager.d.ts +29 -0
- package/dist/lib/services/streaming/streaming-manager.js +244 -0
- package/dist/lib/services/types.d.ts +155 -0
- package/dist/lib/services/types.js +2 -0
- package/dist/lib/services/websocket/websocket-server.d.ts +34 -0
- package/dist/lib/services/websocket/websocket-server.js +304 -0
- package/dist/lib/telemetry/index.d.ts +15 -0
- package/dist/lib/telemetry/index.js +22 -0
- package/dist/lib/telemetry/telemetry-service.d.ts +47 -0
- package/dist/lib/telemetry/telemetry-service.js +259 -0
- package/dist/lib/utils/streaming-utils.d.ts +67 -0
- package/dist/lib/utils/streaming-utils.js +201 -0
- package/dist/mcp/contracts/mcpContract.d.ts +118 -0
- package/dist/mcp/contracts/mcpContract.js +5 -0
- package/dist/mcp/function-calling.js +11 -3
- package/dist/mcp/logging.js +5 -0
- package/dist/mcp/neurolink-mcp-client.js +2 -1
- package/dist/mcp/orchestrator.js +18 -9
- package/dist/mcp/registry.d.ts +49 -16
- package/dist/mcp/registry.js +80 -6
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +5 -4
- package/dist/mcp/tool-integration.js +1 -1
- package/dist/mcp/tool-registry.d.ts +55 -34
- package/dist/mcp/tool-registry.js +111 -97
- package/dist/mcp/unified-mcp.js +6 -1
- package/dist/mcp/unified-registry.d.ts +12 -4
- package/dist/mcp/unified-registry.js +17 -4
- package/dist/neurolink.d.ts +26 -0
- package/dist/neurolink.js +43 -1
- package/dist/providers/agent-enhanced-provider.d.ts +11 -2
- package/dist/providers/agent-enhanced-provider.js +86 -15
- package/dist/providers/amazonBedrock.d.ts +9 -1
- package/dist/providers/amazonBedrock.js +26 -2
- package/dist/providers/analytics-helper.d.ts +53 -0
- package/dist/providers/analytics-helper.js +151 -0
- package/dist/providers/anthropic.d.ts +11 -1
- package/dist/providers/anthropic.js +29 -4
- package/dist/providers/azureOpenAI.d.ts +3 -1
- package/dist/providers/azureOpenAI.js +29 -4
- package/dist/providers/function-calling-provider.d.ts +9 -1
- package/dist/providers/function-calling-provider.js +14 -1
- package/dist/providers/googleAIStudio.d.ts +15 -1
- package/dist/providers/googleAIStudio.js +32 -2
- package/dist/providers/googleVertexAI.d.ts +9 -1
- package/dist/providers/googleVertexAI.js +31 -2
- package/dist/providers/huggingFace.d.ts +3 -1
- package/dist/providers/huggingFace.js +26 -3
- package/dist/providers/mcp-provider.d.ts +9 -1
- package/dist/providers/mcp-provider.js +12 -0
- package/dist/providers/mistralAI.d.ts +3 -1
- package/dist/providers/mistralAI.js +25 -2
- package/dist/providers/ollama.d.ts +3 -1
- package/dist/providers/ollama.js +27 -4
- package/dist/providers/openAI.d.ts +15 -1
- package/dist/providers/openAI.js +33 -2
- package/dist/proxy/proxy-fetch.js +8 -7
- package/dist/services/streaming/streaming-manager.d.ts +29 -0
- package/dist/services/streaming/streaming-manager.js +244 -0
- package/dist/services/types.d.ts +155 -0
- package/dist/services/types.js +2 -0
- package/dist/services/websocket/websocket-server.d.ts +34 -0
- package/dist/services/websocket/websocket-server.js +304 -0
- package/dist/telemetry/index.d.ts +15 -0
- package/dist/telemetry/index.js +22 -0
- package/dist/telemetry/telemetry-service.d.ts +47 -0
- package/dist/telemetry/telemetry-service.js +261 -0
- package/dist/utils/streaming-utils.d.ts +67 -0
- package/dist/utils/streaming-utils.js +201 -0
- package/package.json +18 -2
Diff hunks, grouped by file:

providers/ollama.d.ts:

```diff
@@ -10,7 +10,7 @@
  * - Health checking and service validation
  * - Streaming and non-streaming text generation
  */
-import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
 import type { GenerateTextResult, StreamTextResult, ToolSet } from "ai";
 import type { ZodType, ZodTypeDef } from "zod";
 import type { Schema } from "ai";
@@ -48,4 +48,6 @@ export declare class Ollama implements AIProvider {
      * Generate streaming text using Ollama local models
      */
     streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
+    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
+    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
 }
```
providers/ollama.js:

```diff
@@ -13,6 +13,8 @@
 import { streamText, generateText, Output } from "ai";
 import { logger } from "../utils/logger.js";
 import { getDefaultTimeout } from "../utils/timeout.js";
+import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+import { evaluateResponse } from "../core/evaluation.js";
 // Default system context
 const DEFAULT_SYSTEM_CONTEXT = {
     systemPrompt: "You are a helpful AI assistant.",
@@ -106,7 +108,7 @@ class OllamaLanguageModel {
             stream: false,
             options: {
                 temperature: options.temperature || 0.7,
-                num_predict: options.maxTokens
+                num_predict: options.maxTokens ?? DEFAULT_MAX_TOKENS,
             },
         };
         const controller = new AbortController();
@@ -176,7 +178,7 @@ class OllamaLanguageModel {
             stream: true,
             options: {
                 temperature: options.temperature || 0.7,
-                num_predict: options.maxTokens
+                num_predict: options.maxTokens ?? DEFAULT_MAX_TOKENS,
             },
         };
         const controller = new AbortController();
@@ -391,12 +393,13 @@ export class Ollama {
     async generateText(optionsOrPrompt, analysisSchema) {
         const functionTag = "Ollama.generateText";
         const provider = "ollama";
+        const startTime = Date.now();
         try {
             // Parse parameters - support both string and options object
             const options = typeof optionsOrPrompt === "string"
                 ? { prompt: optionsOrPrompt }
                 : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens =
+            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
             // Convert timeout to milliseconds if provided as string
@@ -437,6 +440,20 @@ export class Ollama {
                 finishReason: result.finishReason,
                 responseLength: result.text?.length || 0,
             });
+            // Add analytics if enabled
+            if (options.enableAnalytics) {
+                result.analytics = {
+                    provider,
+                    model: this.modelName,
+                    tokens: result.usage,
+                    responseTime: Date.now() - startTime,
+                    context: options.context,
+                };
+            }
+            // Add evaluation if enabled
+            if (options.enableEvaluation) {
+                result.evaluation = await evaluateResponse(prompt, result.text, options.context);
+            }
             return result;
         }
         catch (err) {
@@ -461,7 +478,7 @@ export class Ollama {
             const options = typeof optionsOrPrompt === "string"
                 ? { prompt: optionsOrPrompt }
                 : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens =
+            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout, } = options;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
             // Convert timeout to milliseconds if provided as string
@@ -542,4 +559,10 @@ export class Ollama {
             throw err; // Re-throw error to trigger fallback
         }
     }
+    async generate(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
+    async gen(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
 }
```
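The net effect of these provider hunks: 4.0.0 exposes `generate()` and `gen()` as aliases for `generateText()`, falls back to a shared `DEFAULT_MAX_TOKENS` when `maxTokens` is omitted, and attaches opt-in `analytics` and `evaluation` blocks to the result. A minimal sketch of calling code against the new surface; the import path and zero-argument constructor are assumptions, since the diff only shows the class bodies:

```ts
// Sketch only: exercising the 4.0.0 provider surface visible in these hunks.
import { Ollama } from "@juspay/neurolink"; // export path assumed

const ollama = new Ollama(); // constructor arguments not shown in this diff
const result = await ollama.gen({
  prompt: "Summarize the 4.0.0 release in one paragraph",
  // maxTokens omitted -> provider falls back to DEFAULT_MAX_TOKENS
  enableAnalytics: true,  // populates result.analytics (provider, model, tokens, responseTime, context)
  enableEvaluation: true, // populates result.evaluation via evaluateResponse()
  context: { requestId: "demo-1" }, // hypothetical context payload
});
console.log(result?.analytics?.responseTime, result?.evaluation);
```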
providers/openAI.d.ts:

```diff
@@ -1,6 +1,6 @@
 import type { ZodType, ZodTypeDef } from "zod";
 import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult, type LanguageModelV1 } from "ai";
-import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
 export declare class OpenAI implements AIProvider {
     private modelName;
     private model;
@@ -11,4 +11,18 @@ export declare class OpenAI implements AIProvider {
     getModel(): LanguageModelV1;
     streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
     generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
+    /**
+     * Alias for generateText() - CLI-SDK consistency
+     * @param optionsOrPrompt - TextGenerationOptions object or prompt string
+     * @param analysisSchema - Optional schema for output validation
+     * @returns Promise resolving to GenerateTextResult or null
+     */
+    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
+    /**
+     * Short alias for generateText() - CLI-SDK consistency
+     * @param optionsOrPrompt - TextGenerationOptions object or prompt string
+     * @param analysisSchema - Optional schema for output validation
+     * @returns Promise resolving to GenerateTextResult or null
+     */
+    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
 }
```
providers/openAI.js:

```diff
@@ -2,6 +2,8 @@ import { openai } from "@ai-sdk/openai";
 import { streamText, generateText, Output, } from "ai";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, getDefaultTimeout, TimeoutError, } from "../utils/timeout.js";
+import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+import { evaluateResponse } from "../core/evaluation.js";
 // Default system context
 const DEFAULT_SYSTEM_CONTEXT = {
     systemPrompt: "You are a helpful AI assistant.",
@@ -60,7 +62,7 @@ export class OpenAI {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens =
+        const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
         // Use schema from options or fallback parameter
         const finalSchema = schema || analysisSchema;
         logger.debug(`[${functionTag}] Stream text started`, {
@@ -152,12 +154,13 @@ export class OpenAI {
     async generateText(optionsOrPrompt, analysisSchema) {
         const functionTag = "OpenAI.generateText";
         const provider = "openai";
+        const startTime = Date.now();
         try {
             // Parse parameters - support both string and options object
             const options = typeof optionsOrPrompt === "string"
                 ? { prompt: optionsOrPrompt }
                 : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens =
+            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
             logger.debug(`[${functionTag}] Generate text started`, {
@@ -198,6 +201,15 @@ export class OpenAI {
                 responseLength: result.text?.length || 0,
                 timeout,
             });
+            // Add analytics if enabled
+            if (options.enableAnalytics) {
+                const { createAnalytics } = await import("./analytics-helper.js");
+                result.analytics = createAnalytics(provider, this.modelName, result, Date.now() - startTime, options.context);
+            }
+            // Add evaluation if enabled
+            if (options.enableEvaluation) {
+                result.evaluation = await evaluateResponse(prompt, result.text, options.context, options.evaluationDomain, options.toolUsageContext, options.conversationHistory);
+            }
             return result;
         }
         finally {
@@ -226,4 +238,22 @@ export class OpenAI {
             throw err; // Re-throw error to trigger fallback
         }
     }
+    /**
+     * Alias for generateText() - CLI-SDK consistency
+     * @param optionsOrPrompt - TextGenerationOptions object or prompt string
+     * @param analysisSchema - Optional schema for output validation
+     * @returns Promise resolving to GenerateTextResult or null
+     */
+    async generate(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
+    /**
+     * Short alias for generateText() - CLI-SDK consistency
+     * @param optionsOrPrompt - TextGenerationOptions object or prompt string
+     * @param analysisSchema - Optional schema for output validation
+     * @returns Promise resolving to GenerateTextResult or null
+     */
+    async gen(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
 }
```
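Worth noting: unlike the Ollama provider, which builds its `analytics` object inline, the OpenAI provider defers to a shared `createAnalytics()` helper loaded via a dynamic `import("./analytics-helper.js")`, and its `evaluateResponse()` call threads through the richer `evaluationDomain`, `toolUsageContext`, and `conversationHistory` options. The timeout default also differs by call path: `getDefaultTimeout(provider, "stream")` for streaming versus `"generate"` for one-shot generation.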
proxy/proxy-fetch.js:

```diff
@@ -2,6 +2,7 @@
  * Proxy-aware fetch implementation for AI SDK providers
  * Implements the proven Vercel AI SDK proxy pattern using undici
  */
+import { logger } from "../utils/logger.js";
 /**
  * Create a proxy-aware fetch function
  * This implements the community-validated approach for Vercel AI SDK
@@ -11,12 +12,12 @@ export function createProxyFetch() {
     const httpProxy = process.env.HTTP_PROXY || process.env.http_proxy;
     // If no proxy configured, return standard fetch
     if (!httpsProxy && !httpProxy) {
-
+        logger.debug("[Proxy Fetch] No proxy environment variables found - using standard fetch");
         return fetch;
     }
-
-
-
+    logger.debug(`[Proxy Fetch] Configuring proxy with undici ProxyAgent`);
+    logger.debug(`[Proxy Fetch] HTTP_PROXY: ${httpProxy || "not set"}`);
+    logger.debug(`[Proxy Fetch] HTTPS_PROXY: ${httpsProxy || "not set"}`);
     // Return proxy-aware fetch function
     return async (input, init) => {
         try {
@@ -30,7 +31,7 @@ export function createProxyFetch() {
                 : new URL(input.url);
             const proxyUrl = url.protocol === "https:" ? httpsProxy : httpProxy;
             if (proxyUrl) {
-
+                logger.debug(`[Proxy Fetch] Creating ProxyAgent for ${url.hostname} via ${proxyUrl}`);
                 // Create ProxyAgent
                 const dispatcher = new ProxyAgent(proxyUrl);
                 // Use undici fetch with dispatcher
@@ -38,12 +39,12 @@ export function createProxyFetch() {
                     ...init,
                     dispatcher: dispatcher,
                 });
-
+                logger.debug(`[Proxy Fetch] ✅ Request proxied successfully to ${url.hostname}`);
                 return response; // Type assertion to avoid complex type issues
             }
         }
         catch (error) {
-
+            logger.warn(`[Proxy Fetch] Proxy failed (${error.message}), falling back to direct connection`);
         }
         // Fallback to standard fetch
         return fetch(input, init);
```
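Since `createProxyFetch()` decides purely from environment variables at call time, enabling the debug trail added here only requires a proxy URL to be set before the fetch wrapper is constructed. A sketch, assuming the function is importable from the built `proxy/proxy-fetch.js` module and using a hypothetical proxy URL:

```ts
// Sketch: route provider traffic through a corporate proxy.
import { createProxyFetch } from "./dist/proxy/proxy-fetch.js"; // path assumed

process.env.HTTPS_PROXY = "http://proxy.internal.example:8080"; // hypothetical proxy
const proxyFetch = createProxyFetch();

// HTTPS requests now go through undici's ProxyAgent; on proxy failure the
// wrapper logs a warning and falls back to a direct fetch (per the hunk above).
const res = await proxyFetch("https://api.openai.com/v1/models");
console.log(res.status);
```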
services/streaming/streaming-manager.d.ts (new file):

```diff
@@ -0,0 +1,29 @@
+import { EventEmitter } from "events";
+import type { StreamingSession, StreamingConfig, StreamingPoolConfig, StreamingMetrics, StreamingHealthStatus, BufferConfig } from "../types.js";
+export declare class StreamingManager extends EventEmitter {
+    private activeSessions;
+    private streamingPools;
+    private metrics;
+    private healthCheckInterval?;
+    private startTime;
+    constructor();
+    createStreamingSession(config: StreamingConfig): Promise<StreamingSession>;
+    terminateStreamingSession(sessionId: string): Promise<void>;
+    pauseStreamingSession(sessionId: string): Promise<void>;
+    resumeStreamingSession(sessionId: string): Promise<void>;
+    optimizeStreamingLatency(sessionId: string): Promise<void>;
+    enableStreamingCompression(sessionId: string): Promise<void>;
+    configureStreamingBuffering(sessionId: string, bufferConfig: BufferConfig): Promise<void>;
+    createStreamingPool(poolId: string, config: StreamingPoolConfig): Promise<void>;
+    balanceStreamingLoad(poolId: string): Promise<void>;
+    scaleStreamingCapacity(poolId: string, scale: number): Promise<void>;
+    getStreamingMetrics(sessionId?: string): StreamingMetrics;
+    getStreamingHealthStatus(): StreamingHealthStatus;
+    private updateGlobalMetrics;
+    private startHealthMonitoring;
+    private roundRobinBalance;
+    private leastConnectionsBalance;
+    private weightedBalance;
+    private adaptiveBalance;
+    destroy(): void;
+}
```
services/streaming/streaming-manager.js (new file):

```diff
@@ -0,0 +1,244 @@
+import { EventEmitter } from "events";
+import { randomUUID } from "crypto";
+export class StreamingManager extends EventEmitter {
+    activeSessions = new Map();
+    streamingPools = new Map();
+    metrics;
+    healthCheckInterval;
+    startTime;
+    constructor() {
+        super();
+        this.startTime = Date.now();
+        this.metrics = {
+            activeSessions: 0,
+            totalBytesTransferred: 0,
+            averageLatency: 0,
+            throughputBps: 0,
+            errorRate: 0,
+            connectionCount: 0,
+            uptime: 0,
+        };
+        this.startHealthMonitoring();
+    }
+    // Stream Lifecycle Management
+    async createStreamingSession(config) {
+        const sessionId = randomUUID();
+        const session = {
+            id: sessionId,
+            connectionId: config.provider, // Temporary, should be actual connection ID
+            provider: config.provider,
+            status: "active",
+            startTime: Date.now(),
+            lastActivity: Date.now(),
+            config,
+            metrics: {
+                bytesTransferred: 0,
+                messagesCount: 0,
+                averageLatency: 0,
+                errorCount: 0,
+            },
+        };
+        this.activeSessions.set(sessionId, session);
+        this.updateGlobalMetrics();
+        console.log(`[Streaming Manager] Created session ${sessionId} for provider ${config.provider}`);
+        this.emit("session-created", session);
+        return session;
+    }
+    async terminateStreamingSession(sessionId) {
+        const session = this.activeSessions.get(sessionId);
+        if (!session) {
+            throw new Error(`Session ${sessionId} not found`);
+        }
+        session.status = "terminated";
+        this.activeSessions.delete(sessionId);
+        this.updateGlobalMetrics();
+        console.log(`[Streaming Manager] Terminated session ${sessionId}`);
+        this.emit("session-terminated", session);
+    }
+    async pauseStreamingSession(sessionId) {
+        const session = this.activeSessions.get(sessionId);
+        if (!session) {
+            throw new Error(`Session ${sessionId} not found`);
+        }
+        if (session.status === "active") {
+            session.status = "paused";
+            console.log(`[Streaming Manager] Paused session ${sessionId}`);
+            this.emit("session-paused", session);
+        }
+    }
+    async resumeStreamingSession(sessionId) {
+        const session = this.activeSessions.get(sessionId);
+        if (!session) {
+            throw new Error(`Session ${sessionId} not found`);
+        }
+        if (session.status === "paused") {
+            session.status = "active";
+            session.lastActivity = Date.now();
+            console.log(`[Streaming Manager] Resumed session ${sessionId}`);
+            this.emit("session-resumed", session);
+        }
+    }
+    // Stream Optimization
+    async optimizeStreamingLatency(sessionId) {
+        const session = this.activeSessions.get(sessionId);
+        if (!session) {
+            return;
+        }
+        // Adaptive optimization based on current metrics
+        const currentLatency = session.metrics.averageLatency;
+        const targetLatency = session.config.latencyTarget;
+        if (currentLatency > targetLatency * 1.2) {
+            // Increase buffer size for better throughput
+            session.config.bufferSize = Math.min(session.config.bufferSize * 1.5, 16384);
+            session.config.streamingMode = "buffered";
+        }
+        else if (currentLatency < targetLatency * 0.8) {
+            // Decrease buffer size for better latency
+            session.config.bufferSize = Math.max(session.config.bufferSize * 0.8, 1024);
+            session.config.streamingMode = "real-time";
+        }
+        console.log(`[Streaming Manager] Optimized session ${sessionId}: latency=${currentLatency}ms, mode=${session.config.streamingMode}`);
+    }
+    async enableStreamingCompression(sessionId) {
+        const session = this.activeSessions.get(sessionId);
+        if (!session) {
+            return;
+        }
+        session.config.compressionEnabled = true;
+        console.log(`[Streaming Manager] Enabled compression for session ${sessionId}`);
+    }
+    async configureStreamingBuffering(sessionId, bufferConfig) {
+        const session = this.activeSessions.get(sessionId);
+        if (!session) {
+            return;
+        }
+        session.config.bufferSize = bufferConfig.maxSize;
+        session.config.maxChunkSize = Math.min(session.config.maxChunkSize, bufferConfig.flushThreshold);
+        console.log(`[Streaming Manager] Updated buffer config for session ${sessionId}:`, bufferConfig);
+    }
+    // Multi-Stream Coordination
+    async createStreamingPool(poolId, config) {
+        const pool = {
+            id: poolId,
+            maxSessions: config.maxConcurrentSessions,
+            activeSessions: new Set(),
+            config,
+            loadBalancer: config.loadBalancing,
+        };
+        this.streamingPools.set(poolId, pool);
+        console.log(`[Streaming Manager] Created pool ${poolId} with max ${config.maxConcurrentSessions} sessions`);
+    }
+    async balanceStreamingLoad(poolId) {
+        const pool = this.streamingPools.get(poolId);
+        if (!pool) {
+            return;
+        }
+        const activeSessions = Array.from(pool.activeSessions)
+            .map((sessionId) => this.activeSessions.get(sessionId))
+            .filter((session) => session && session.status === "active");
+        // Implement load balancing strategies
+        switch (pool.loadBalancer) {
+            case "round-robin":
+                this.roundRobinBalance(activeSessions);
+                break;
+            case "least-connections":
+                this.leastConnectionsBalance(activeSessions);
+                break;
+            case "weighted":
+                this.weightedBalance(activeSessions);
+                break;
+            case "adaptive":
+                this.adaptiveBalance(activeSessions);
+                break;
+        }
+    }
+    async scaleStreamingCapacity(poolId, scale) {
+        const pool = this.streamingPools.get(poolId);
+        if (!pool) {
+            return;
+        }
+        const newMaxSessions = Math.max(1, Math.floor(pool.maxSessions * scale));
+        pool.maxSessions = newMaxSessions;
+        pool.config.maxConcurrentSessions = newMaxSessions;
+        console.log(`[Streaming Manager] Scaled pool ${poolId} to ${newMaxSessions} max sessions (${scale}x)`);
+    }
+    // Performance Monitoring
+    getStreamingMetrics(sessionId) {
+        if (sessionId) {
+            const session = this.activeSessions.get(sessionId);
+            if (session) {
+                return {
+                    sessionId,
+                    activeSessions: 1,
+                    totalBytesTransferred: session.metrics.bytesTransferred,
+                    averageLatency: session.metrics.averageLatency,
+                    throughputBps: session.metrics.bytesTransferred /
+                        ((Date.now() - session.startTime) / 1000),
+                    errorRate: session.metrics.errorCount /
+                        Math.max(session.metrics.messagesCount, 1),
+                    connectionCount: 1,
+                    uptime: Date.now() - session.startTime,
+                };
+            }
+        }
+        return { ...this.metrics, uptime: Date.now() - this.startTime };
+    }
+    getStreamingHealthStatus() {
+        const metrics = this.getStreamingMetrics();
+        const issues = [];
+        let status = "healthy";
+        if (metrics.errorRate > 0.1) {
+            issues.push(`High error rate: ${(metrics.errorRate * 100).toFixed(1)}%`);
+            status = "degraded";
+        }
+        if (metrics.averageLatency > 1000) {
+            issues.push(`High latency: ${metrics.averageLatency}ms`);
+            status = status === "healthy" ? "degraded" : "unhealthy";
+        }
+        if (metrics.activeSessions === 0 && this.activeSessions.size > 0) {
+            issues.push("Session count mismatch");
+            status = "unhealthy";
+        }
+        return {
+            status,
+            activeSessions: metrics.activeSessions,
+            errorRate: metrics.errorRate,
+            averageLatency: metrics.averageLatency,
+            lastHealthCheck: Date.now(),
+            issues,
+        };
+    }
+    // Private helper methods
+    updateGlobalMetrics() {
+        this.metrics.activeSessions = this.activeSessions.size;
+        this.metrics.connectionCount = this.activeSessions.size;
+    }
+    startHealthMonitoring() {
+        this.healthCheckInterval = setInterval(() => {
+            const health = this.getStreamingHealthStatus();
+            if (health.status !== "healthy") {
+                console.warn("[Streaming Manager] Health check:", health);
+                this.emit("health-warning", health);
+            }
+        }, 30000); // Check every 30 seconds
+    }
+    roundRobinBalance(sessions) {
+        // Round-robin implementation
+    }
+    leastConnectionsBalance(sessions) {
+        // Least connections implementation
+    }
+    weightedBalance(sessions) {
+        // Weighted load balancing implementation
+    }
+    adaptiveBalance(sessions) {
+        // Adaptive load balancing implementation
+    }
+    destroy() {
+        if (this.healthCheckInterval) {
+            clearInterval(this.healthCheckInterval);
+        }
+        this.activeSessions.clear();
+        this.streamingPools.clear();
+    }
+}
```
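A short driver for the new `StreamingManager`, using only methods and config fields that appear in this file (the `services/types.d.ts` hunk below defines `StreamingConfig`); the import path and the concrete model name are assumptions:

```ts
import { StreamingManager } from "./dist/services/streaming/streaming-manager.js"; // path assumed

const manager = new StreamingManager();
manager.on("health-warning", (health) => console.warn("streaming degraded:", health.issues));

const session = await manager.createStreamingSession({
  provider: "openai",
  model: "gpt-4o", // illustrative model name, not from this diff
  streamingMode: "adaptive",
  compressionEnabled: false,
  maxChunkSize: 4096,
  bufferSize: 8192,
  latencyTarget: 250, // ms; optimizeStreamingLatency() nudges bufferSize toward this
});

await manager.optimizeStreamingLatency(session.id);
console.log(manager.getStreamingMetrics(session.id));

await manager.terminateStreamingSession(session.id);
manager.destroy(); // clears the 30-second health-check interval
```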
services/types.d.ts (new file):

```diff
@@ -0,0 +1,155 @@
+export interface StreamingSession {
+    id: string;
+    connectionId: string;
+    provider: string;
+    status: "active" | "paused" | "terminated";
+    startTime: number;
+    lastActivity: number;
+    config: StreamingConfig;
+    metrics: {
+        bytesTransferred: number;
+        messagesCount: number;
+        averageLatency: number;
+        errorCount: number;
+    };
+}
+export interface WebSocketOptions {
+    port?: number;
+    maxConnections?: number;
+    heartbeatInterval?: number;
+    enableCompression?: boolean;
+    enableBackpressure?: boolean;
+    bufferSize?: number;
+    timeoutMs?: number;
+}
+export interface StreamingConfig {
+    provider: string;
+    model: string;
+    streamingMode: "real-time" | "buffered" | "adaptive";
+    compressionEnabled: boolean;
+    maxChunkSize: number;
+    bufferSize: number;
+    latencyTarget: number;
+}
+export interface StreamingPool {
+    id: string;
+    maxSessions: number;
+    activeSessions: Set<string>;
+    config: StreamingPoolConfig;
+    loadBalancer: LoadBalancingStrategy;
+}
+export interface StreamingPoolConfig {
+    maxConcurrentSessions: number;
+    sessionTimeout: number;
+    loadBalancing: LoadBalancingStrategy;
+    autoScaling: {
+        enabled: boolean;
+        minSessions: number;
+        maxSessions: number;
+        scaleUpThreshold: number;
+        scaleDownThreshold: number;
+    };
+}
+export type LoadBalancingStrategy = "round-robin" | "least-connections" | "weighted" | "adaptive";
+export interface StreamingChannel {
+    id: string;
+    connectionId: string;
+    type: "ai-response" | "mcp-tool" | "chat" | "notification";
+    status: "open" | "closed" | "error";
+    buffer: StreamingBuffer;
+    onData: (data: any) => void;
+    onError: (error: Error) => void;
+    onClose: () => void;
+}
+export interface StreamingBuffer {
+    data: any[];
+    maxSize: number;
+    currentSize: number;
+    flushThreshold: number;
+    lastFlush: number;
+}
+export interface StreamingMetrics {
+    sessionId?: string;
+    activeSessions: number;
+    totalBytesTransferred: number;
+    averageLatency: number;
+    throughputBps: number;
+    errorRate: number;
+    connectionCount: number;
+    uptime: number;
+}
+export interface StreamingHealthStatus {
+    status: "healthy" | "degraded" | "unhealthy";
+    activeSessions: number;
+    errorRate: number;
+    averageLatency: number;
+    lastHealthCheck: number;
+    issues: string[];
+}
+export interface WebSocketMessage {
+    id: string;
+    type: "chat" | "ai-response" | "tool-result" | "system" | "heartbeat" | "error";
+    connectionId: string;
+    roomId?: string;
+    timestamp: number;
+    data: any;
+    metadata?: {
+        provider?: string;
+        model?: string;
+        tokens?: number;
+        latency?: number;
+    };
+}
+export interface ChatRequest {
+    prompt: string;
+    sessionId?: string;
+    options?: {
+        temperature?: number;
+        maxTokens?: number;
+        streaming?: boolean;
+        enableTools?: boolean;
+    };
+}
+export interface GroupChatRequest extends ChatRequest {
+    roomId: string;
+    userId?: string;
+    broadcastToRoom?: boolean;
+}
+export interface StreamingChatRequest extends ChatRequest {
+    streamingOptions?: {
+        chunkSize?: number;
+        flushInterval?: number;
+        enableCompression?: boolean;
+    };
+}
+export interface MultiModalContent {
+    type: "text" | "image" | "audio" | "video" | "file";
+    content: string | Buffer;
+    metadata?: {
+        mimeType?: string;
+        size?: number;
+        duration?: number;
+        dimensions?: {
+            width: number;
+            height: number;
+        };
+    };
+}
+export interface BufferConfig {
+    maxSize: number;
+    flushThreshold: number;
+    flushInterval: number;
+    compressionEnabled: boolean;
+    persistToDisk: boolean;
+}
+export interface ConnectionInfo {
+    id: string;
+    userId?: string;
+    userAgent?: string;
+    ipAddress?: string;
+    connectedAt: number;
+    lastActivity: number;
+    rooms: Set<string>;
+    subscriptions: Set<string>;
+    metadata: Record<string, any>;
+}
```
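These interfaces are what `createStreamingPool()` and the load balancer consume. A hedged example of a pool configuration; the threshold values are illustrative and the import path is assumed:

```ts
import type { StreamingPoolConfig } from "./dist/services/types.js"; // path assumed

const poolConfig: StreamingPoolConfig = {
  maxConcurrentSessions: 50,
  sessionTimeout: 60_000, // unit assumed to be ms; the diff does not state it
  loadBalancing: "least-connections",
  autoScaling: {
    enabled: true,
    minSessions: 5,
    maxSessions: 200,
    scaleUpThreshold: 0.8,  // illustrative, not from this diff
    scaleDownThreshold: 0.2,
  },
};
```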