@backtest-kit/ollama 0.0.1 → 0.0.3
This diff shows the published contents of these package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that registry.
- package/README.md +206 -205
- package/build/index.cjs +2588 -36
- package/build/index.mjs +2588 -37
- package/package.json +13 -6
- package/types.d.ts +787 -1
package/build/index.cjs
CHANGED
@@ -20,20 +20,69 @@ var ollama$1 = require('ollama');
 var zod$1 = require('openai/helpers/zod');
 var zod = require('zod');
 
+/**
+ * Enumeration of completion strategy types.
+ *
+ * Defines unique identifiers for different completion execution modes.
+ * Used internally for routing completion requests to appropriate handlers.
+ *
+ * @example
+ * ```typescript
+ * import { CompletionName } from '@backtest-kit/ollama';
+ *
+ * const completionType = CompletionName.RunnerCompletion;
+ * ```
+ */
 var CompletionName;
 (function (CompletionName) {
+    /** Standard completion mode (full response at once) */
     CompletionName["RunnerCompletion"] = "runner_completion";
+    /** Streaming completion mode (progressive response chunks) */
     CompletionName["RunnerStreamCompletion"] = "runner_stream_completion";
+    /** Outline completion mode (structured JSON with schema validation) */
     CompletionName["RunnerOutlineCompletion"] = "runner_outline_completion";
 })(CompletionName || (CompletionName = {}));
 var CompletionName$1 = CompletionName;
 
+/**
+ * Scoped context service for isolated execution contexts.
+ *
+ * Provides context isolation using async local storage through the di-scoped library.
+ * Each operation runs with its own context containing provider, model, and API key configuration.
+ * This enables multi-tenant scenarios where different requests use different AI providers or keys.
+ *
+ * Key features:
+ * - Scoped context isolation per execution
+ * - Support for single or multiple API keys (token rotation)
+ * - Thread-safe context propagation
+ * - Automatic cleanup after execution
+ *
+ * @example
+ * ```typescript
+ * import ContextService from "./services/base/ContextService";
+ *
+ * // Execute operation within scoped context
+ * const result = await ContextService.runInContext(async () => {
+ *   // Code here has access to the context
+ *   const model = contextService.context.model;
+ *   return await someAiOperation();
+ * }, {
+ *   inference: InferenceName.GPT5Inference,
+ *   model: "gpt-5o-mini",
+ *   apiKey: "sk-..."
+ * });
+ * ```
+ */
 const ContextService = diScoped.scoped(class {
     constructor(context) {
         this.context = context;
     }
 });
 
+/**
+ * No-operation logger that silently discards all log messages.
+ * Used as default logger before a real logger is configured.
+ */
 const NOOP_LOGGER = {
     log() {
     },
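The `ContextService` documented in this hunk is the backbone of the package's multi-tenancy story: every completion runs inside an async-local context carrying the provider, model, and key(s). A minimal sketch of what that isolation buys, assuming the `runInContext` signature from the JSDoc above (`answerFor`, `someAiOperation`, and the tenant values are illustrative):

```typescript
// Sketch only: ContextService.runInContext as documented above;
// tenant keys and someAiOperation are illustrative placeholders.
async function answerFor(tenantApiKey: string, question: string) {
  return await ContextService.runInContext(async () => {
    // Anything called from here reads tenantApiKey from the scoped
    // context, even across awaits and concurrent requests.
    return await someAiOperation(question);
  }, {
    inference: InferenceName.OllamaInference,
    model: "llama2",
    apiKey: tenantApiKey,
  });
}
```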
@@ -44,43 +93,181 @@ const NOOP_LOGGER = {
     warn() {
     },
 };
+/**
+ * Centralized logging service for the Ollama package.
+ *
+ * Provides a unified interface for logging operations across the application.
+ * Uses a delegate pattern to forward log calls to a configured logger implementation.
+ * Defaults to a no-op logger if no logger is set.
+ *
+ * Key features:
+ * - Supports multiple log levels: log, debug, info, warn
+ * - Configurable logger backend via setLogger
+ * - Async logging support
+ * - Safe default (no-op) when unconfigured
+ *
+ * @example
+ * ```typescript
+ * import { LoggerService } from "./services/common/LoggerService";
+ * import { setLogger } from "./function/setup.function";
+ *
+ * // Configure custom logger
+ * setLogger({
+ *   log: async (topic, ...args) => console.log(topic, ...args),
+ *   debug: async (topic, ...args) => console.debug(topic, ...args),
+ *   info: async (topic, ...args) => console.info(topic, ...args),
+ *   warn: async (topic, ...args) => console.warn(topic, ...args),
+ * });
+ *
+ * const loggerService = inject<LoggerService>(TYPES.loggerService);
+ * await loggerService.info("Operation completed", { status: "success" });
+ * ```
+ */
 class LoggerService {
     constructor() {
+        /** Internal logger instance, defaults to NOOP_LOGGER */
         this._commonLogger = NOOP_LOGGER;
+        /**
+         * Logs a general message with optional arguments.
+         *
+         * @param topic - Message topic or category
+         * @param args - Additional arguments to log
+         */
         this.log = async (topic, ...args) => {
             await this._commonLogger.log(topic, ...args);
         };
+        /**
+         * Logs a debug message with optional arguments.
+         * Used for detailed diagnostic information.
+         *
+         * @param topic - Message topic or category
+         * @param args - Additional arguments to log
+         */
         this.debug = async (topic, ...args) => {
             await this._commonLogger.debug(topic, ...args);
         };
+        /**
+         * Logs an informational message with optional arguments.
+         * Used for general operational information.
+         *
+         * @param topic - Message topic or category
+         * @param args - Additional arguments to log
+         */
         this.info = async (topic, ...args) => {
             await this._commonLogger.info(topic, ...args);
         };
+        /**
+         * Logs a warning message with optional arguments.
+         * Used for potentially problematic situations.
+         *
+         * @param topic - Message topic or category
+         * @param args - Additional arguments to log
+         */
         this.warn = async (topic, ...args) => {
             await this._commonLogger.warn(topic, ...args);
         };
+        /**
+         * Sets the logger implementation to use for all logging operations.
+         *
+         * @param logger - Logger implementation conforming to ILogger interface
+         *
+         * @example
+         * ```typescript
+         * const logger = new LoggerService();
+         * logger.setLogger({
+         *   log: async (topic, ...args) => console.log(topic, ...args),
+         *   debug: async (topic, ...args) => console.debug(topic, ...args),
+         *   info: async (topic, ...args) => console.info(topic, ...args),
+         *   warn: async (topic, ...args) => console.warn(topic, ...args),
+         * });
+         * ```
+         */
         this.setLogger = (logger) => {
             this._commonLogger = logger;
         };
     }
 }
 
+/**
+ * Dependency injection activator for the Ollama package.
+ *
+ * Creates a scoped DI container using di-kit with the namespace "ollama".
+ * Provides functions for service registration, injection, initialization, and overriding.
+ *
+ * Exported functions:
+ * - provide: Register a service implementation in the container
+ * - inject: Retrieve a service instance from the container
+ * - init: Initialize the DI container (must be called before using services)
+ * - override: Replace an existing service registration with a new implementation
+ *
+ * @example
+ * ```typescript
+ * import { provide, inject, init } from "./core/di";
+ * import { TYPES } from "./core/types";
+ *
+ * // Register service
+ * provide(TYPES.loggerService, () => new LoggerService());
+ *
+ * // Initialize container
+ * init();
+ *
+ * // Inject service
+ * const logger = inject<LoggerService>(TYPES.loggerService);
+ * ```
+ */
 const { provide, inject, init, override } = diKit.createActivator("ollama");
 
+/**
+ * Common service type identifiers.
+ * Services used across the entire application.
+ */
 const commonServices$1 = {
+    /** Logger service for application-wide logging */
     loggerService: Symbol("loggerService"),
 };
+/**
+ * Base service type identifiers.
+ * Core foundational services.
+ */
 const baseServices$1 = {
+    /** Context service for scoped execution contexts */
     contextService: Symbol('contextService'),
 };
+/**
+ * Private service type identifiers.
+ * Internal services not exposed in public API.
+ */
 const privateServices$1 = {
+    /** Runner private service for AI provider operations */
     runnerPrivateService: Symbol('runnerPrivateService'),
+    /** Outline private service for structured completions */
     outlinePrivateService: Symbol('outlinePrivateService'),
 };
+/**
+ * Public service type identifiers.
+ * Services exposed in the public API.
+ */
 const publicServices$1 = {
+    /** Runner public service for context-managed AI operations */
     runnerPublicService: Symbol('runnerPublicService'),
+    /** Outline public service for simplified structured completions */
     outlinePublicService: Symbol('outlinePublicService'),
 };
+/**
+ * Service type identifier registry for dependency injection.
+ *
+ * Centralizes all Symbol-based type identifiers used for DI container registration.
+ * Organized by service layer: common, base, private, and public services.
+ *
+ * @example
+ * ```typescript
+ * import { inject } from "./di";
+ * import { TYPES } from "./types";
+ * import LoggerService from "../services/common/LoggerService";
+ *
+ * const logger = inject<LoggerService>(TYPES.loggerService);
+ * ```
+ */
 const TYPES = {
     ...commonServices$1,
     ...baseServices$1,
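`LoggerService` stays a silent no-op until a backend is installed, so library consumers opt in to logging rather than filtering it out. A sketch of wiring a console backend with the `setLogger` shape shown in the JSDoc above (the `[ollama]` prefix is illustrative):

```typescript
// Sketch: console-backed logger matching the documented setLogger shape.
setLogger({
  log: async (topic, ...args) => console.log("[ollama]", topic, ...args),
  debug: async (topic, ...args) => console.debug("[ollama]", topic, ...args),
  info: async (topic, ...args) => console.info("[ollama]", topic, ...args),
  warn: async (topic, ...args) => console.warn("[ollama]", topic, ...args),
});
```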
@@ -88,12 +275,44 @@ const TYPES = {
     ...publicServices$1,
 };
 
+/**
+ * Enumeration of supported JSON schema outlines.
+ *
+ * Defines unique identifiers for structured output schemas used with
+ * LLM providers. Outlines enforce JSON schema validation for critical
+ * data structures like trading signals.
+ *
+ * @example
+ * ```typescript
+ * import { OutlineName } from '@backtest-kit/ollama';
+ *
+ * const outlineName = OutlineName.SignalOutline;
+ * ```
+ */
 var OutlineName;
 (function (OutlineName) {
+    /** Trading signal JSON schema for position, TP/SL, and risk parameters */
     OutlineName["SignalOutline"] = "signal_outline";
 })(OutlineName || (OutlineName = {}));
 var OutlineName$1 = OutlineName;
 
+/**
+ * Lints and auto-fixes markdown content using markdownlint rules.
+ *
+ * Validates markdown syntax and applies automatic fixes for common issues
+ * like inconsistent list markers, trailing spaces, and heading styles.
+ * Returns the original content if no errors found or fixes cannot be applied.
+ *
+ * @param content - Raw markdown content to lint
+ * @returns Promise resolving to linted markdown content
+ *
+ * @example
+ * ```typescript
+ * const markdown = "# Title\n\n\n## Subtitle"; // Multiple blank lines
+ * const linted = await toLintMarkdown(markdown);
+ * // Returns: "# Title\n\n## Subtitle" (extra blank line removed)
+ * ```
+ */
 const toLintMarkdown = async (content) => {
     if (!content) {
         return "";
@@ -105,8 +324,28 @@ const toLintMarkdown = async (content) => {
     const value = markdownlint.applyFixes(content, errors);
     return value ? value : content;
 };
-globalThis.toLintMarkdown = toLintMarkdown;
 
+/**
+ * Converts markdown content to plain text with Telegram-compatible HTML formatting.
+ *
+ * Processes markdown through three stages:
+ * 1. Lints and fixes markdown using markdownlint
+ * 2. Renders markdown to HTML using markdown-it
+ * 3. Sanitizes HTML to Telegram-compatible subset
+ *
+ * Supported tags: b, i, a, code, pre, s, u, tg-spoiler, blockquote, br
+ * Transforms: headings removed, lists to bullets, multiple newlines collapsed
+ *
+ * @param content - Raw markdown content
+ * @returns Promise resolving to sanitized plain text with HTML formatting
+ *
+ * @example
+ * ```typescript
+ * const markdown = "# Title\n**Bold** and *italic*\n- Item 1\n- Item 2";
+ * const plain = await toPlainString(markdown);
+ * // Returns: "Bold and italic\n• Item 1\n• Item 2"
+ * ```
+ */
 const toPlainString = async (content) => {
     if (!content) {
         return "";
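Taken together, `toLintMarkdown` and `toPlainString` form a lint → render → sanitize pipeline that turns model-generated markdown into the small HTML subset Telegram accepts. A sketch of the documented behavior (the output comment follows the JSDoc above, not a verified run):

```typescript
// Sketch of the documented pipeline; outputs per the JSDoc above.
const raw = "# Signal\n**Long** BTC/USDT\n- TP: 52000\n- SL: 48000";

const linted = await toLintMarkdown(raw);  // markdownlint auto-fixes applied
const telegram = await toPlainString(raw); // heading dropped, list -> bullets
// telegram ~ "<b>Long</b> BTC/USDT\n• TP: 52000\n• SL: 48000"
```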
@@ -157,9 +396,58 @@ const toPlainString = async (content) => {
     return telegramHtml.replaceAll(/\n[\s\n]*\n/g, "\n").trim();
 };
 
+/**
+ * Private service for processing structured outline completions.
+ *
+ * Handles the core logic for executing outline-based AI completions with schema validation.
+ * Processes AI responses through the agent-swarm-kit json function to extract and validate
+ * structured trading signal data.
+ *
+ * Key features:
+ * - JSON schema validation using agent-swarm-kit
+ * - Trading signal extraction and transformation
+ * - Type conversion for numeric fields
+ * - Markdown formatting cleanup for notes
+ * - Error handling for validation failures
+ *
+ * @example
+ * ```typescript
+ * const outlinePrivate = inject<OutlinePrivateService>(TYPES.outlinePrivateService);
+ * const signal = await outlinePrivate.getCompletion([
+ *   { role: "user", content: "Analyze market" }
+ * ]);
+ * ```
+ */
 class OutlinePrivateService {
     constructor() {
+        /** Logger service for operation tracking */
         this.loggerService = inject(TYPES.loggerService);
+        /**
+         * Processes outline completion messages and extracts structured signal data.
+         *
+         * Sends messages to the AI provider, validates the response against the signal schema,
+         * and transforms the data into a structured format. Returns null if the AI decides
+         * to wait (no position).
+         *
+         * @param messages - Array of conversation messages for the AI
+         * @returns Promise resolving to structured signal data or null if position is "wait"
+         * @throws Error if validation fails or AI returns an error
+         *
+         * @example
+         * ```typescript
+         * const signal = await outlinePrivateService.getCompletion([
+         *   { role: "system", content: "Trading analyst role" },
+         *   { role: "user", content: "Market analysis data..." }
+         * ]);
+         *
+         * if (signal) {
+         *   console.log(`Position: ${signal.position}`);
+         *   console.log(`Entry: ${signal.priceOpen}`);
+         *   console.log(`SL: ${signal.priceStopLoss}`);
+         *   console.log(`TP: ${signal.priceTakeProfit}`);
+         * }
+         * ```
+         */
         this.getCompletion = async (messages) => {
            this.loggerService.log("outlinePrivateService getCompletion", {
                messages,
@@ -184,40 +472,194 @@ class OutlinePrivateService {
     }
 }
 
+/**
+ * Private service managing AI inference provider registry and execution.
+ *
+ * Coordinates AI operations across multiple inference providers (OpenAI, Claude, Ollama, etc.).
+ * Maintains a registry of provider implementations and instantiates them on-demand.
+ * Uses memoization to cache provider instances for better performance.
+ *
+ * Key features:
+ * - Dynamic provider registration for multiple AI services
+ * - Lazy instantiation with memoization for performance
+ * - Context-aware provider selection based on inference type
+ * - Support for standard, streaming, and structured completions
+ * - Type-safe provider interface
+ *
+ * @example
+ * ```typescript
+ * // Provider registration (typically done at startup)
+ * const runnerPrivate = inject<RunnerPrivateService>(TYPES.runnerPrivateService);
+ * runnerPrivate.registerRunner(InferenceName.ClaudeInference, ClaudeProvider);
+ * runnerPrivate.registerRunner(InferenceName.GPT5Inference, GPT5Provider);
+ *
+ * // Provider usage (automatically selected based on context)
+ * const result = await runnerPrivate.getCompletion({
+ *   messages: [{ role: "user", content: "Analyze trade" }]
+ * });
+ * ```
+ */
 class RunnerPrivateService {
     constructor() {
+        /** Context service providing execution context (model, API key, provider) */
         this.contextService = inject(TYPES.contextService);
+        /** Logger service for operation tracking */
         this.loggerService = inject(TYPES.loggerService);
+        /** Registry storing provider class constructors by inference name */
         this._registry = new functoolsKit.ToolRegistry("runner_registry");
+        /**
+         * Memoized provider instance getter.
+         * Creates and caches provider instances per inference type.
+         */
         this.getRunner = functoolsKit.memoize(([inference]) => `${inference}`, (inference) => {
             const Runner = this._registry.get(inference);
             return new Runner(this.contextService, this.loggerService);
         });
+        /**
+         * Executes a standard AI completion using the provider specified in context.
+         *
+         * @param params - Completion parameters including messages and options
+         * @returns Promise resolving to AI response message
+         *
+         * @example
+         * ```typescript
+         * const result = await runnerPrivateService.getCompletion({
+         *   messages: [
+         *     { role: "system", content: "You are a trading assistant" },
+         *     { role: "user", content: "Analyze BTC market" }
+         *   ]
+         * });
+         * ```
+         */
         this.getCompletion = async (params) => {
             this.loggerService.log("runnerPrivateService getCompletion");
             const runner = this.getRunner(this.contextService.context.inference);
             return await runner.getCompletion(params);
         };
+        /**
+         * Executes a streaming AI completion using the provider specified in context.
+         *
+         * @param params - Completion parameters including messages and options
+         * @returns Promise resolving to accumulated AI response message
+         *
+         * @example
+         * ```typescript
+         * const result = await runnerPrivateService.getStreamCompletion({
+         *   messages: [{ role: "user", content: "Generate signal" }]
+         * });
+         * ```
+         */
         this.getStreamCompletion = async (params) => {
             this.loggerService.log("runnerPrivateService getStreamCompletion");
             const runner = this.getRunner(this.contextService.context.inference);
             return await runner.getStreamCompletion(params);
         };
+        /**
+         * Executes a structured outline completion using the provider specified in context.
+         *
+         * @param params - Outline completion parameters including messages and schema
+         * @returns Promise resolving to structured AI response
+         *
+         * @example
+         * ```typescript
+         * const signal = await runnerPrivateService.getOutlineCompletion({
+         *   messages: [{ role: "user", content: "Trading decision for ETH" }]
+         * });
+         * ```
+         */
         this.getOutlineCompletion = async (params) => {
             this.loggerService.log("runnerPrivateService getOutlineCompletion");
             const runner = this.getRunner(this.contextService.context.inference);
             return await runner.getOutlineCompletion(params);
         };
+        /**
+         * Registers a new AI provider implementation in the registry.
+         *
+         * @param name - Inference provider identifier
+         * @param runner - Provider class constructor
+         *
+         * @example
+         * ```typescript
+         * runnerPrivateService.registerRunner(
+         *   InferenceName.ClaudeInference,
+         *   ClaudeProvider
+         * );
+         * ```
+         */
         this.registerRunner = (name, runner) => {
             this._registry = this._registry.register(name, runner);
         };
     }
 }
 
+/**
+ * Public-facing service for structured AI outline completions.
+ *
+ * Provides a simplified interface for executing structured AI completions with schema validation.
+ * Handles context creation and isolation for outline-based operations.
+ * Used for extracting structured data from AI responses (e.g., trading signals).
+ *
+ * Key features:
+ * - Simplified API with automatic context management
+ * - JSON schema validation for structured outputs
+ * - Support for multiple AI providers
+ * - Optional API key parameter with fallback
+ * - Logging integration
+ *
+ * @example
+ * ```typescript
+ * import { engine } from "./lib";
+ * import { InferenceName } from "./enum/InferenceName";
+ *
+ * const signal = await engine.outlinePublicService.getCompletion(
+ *   [{ role: "user", content: "Analyze BTC/USDT and decide position" }],
+ *   InferenceName.ClaudeInference,
+ *   "claude-3-5-sonnet-20240620",
+ *   "sk-ant-..."
+ * );
+ *
+ * // Returns structured signal:
+ * // {
+ * //   position: "long",
+ * //   priceOpen: 50000,
+ * //   priceStopLoss: 48000,
+ * //   priceTakeProfit: 52000,
+ * //   minuteEstimatedTime: 120,
+ * //   note: "Strong bullish momentum..."
+ * // }
+ * ```
+ */
 class OutlinePublicService {
     constructor() {
+        /** Logger service for operation tracking */
         this.loggerService = inject(TYPES.loggerService);
+        /** Private service handling outline completion logic */
         this.outlinePrivateService = inject(TYPES.outlinePrivateService);
+        /**
+         * Executes a structured outline completion with schema validation.
+         *
+         * Creates an isolated execution context and processes messages through the AI provider
+         * to generate a structured response conforming to a predefined schema.
+         *
+         * @param messages - Array of conversation messages for the AI
+         * @param inference - AI provider identifier
+         * @param model - Model name/identifier
+         * @param apiKey - Optional API key(s), required for most providers
+         * @returns Promise resolving to structured signal data or null if position is "wait"
+         *
+         * @example
+         * ```typescript
+         * const result = await outlinePublicService.getCompletion(
+         *   [
+         *     { role: "system", content: "You are a trading analyst" },
+         *     { role: "user", content: "Analyze current BTC market" }
+         *   ],
+         *   InferenceName.DeepseekInference,
+         *   "deepseek-chat",
+         *   "sk-..."
+         * );
+         * ```
+         */
         this.getCompletion = async (messages, inference, model, apiKey) => {
             this.loggerService.log("outlinePublicService getCompletion", {
                 messages,
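`RunnerPrivateService` treats providers as constructor-injectable classes: `registerRunner` stores the constructor, and the memoized `getRunner` instantiates it once per inference name with `(contextService, loggerService)`. A sketch of the contract a custom provider would satisfy, inferred from the registry code above (`EchoProvider` is hypothetical):

```typescript
// Hypothetical provider satisfying the contract used by getRunner above:
// new Runner(contextService, loggerService) plus the three completion methods.
class EchoProvider {
  constructor(private contextService: unknown, private logger: unknown) {}
  async getCompletion(params: { messages: { role: string; content: string }[] }) {
    const last = params.messages.at(-1);
    return { role: "assistant", content: last?.content ?? "" };
  }
  async getStreamCompletion(params: { messages: { role: string; content: string }[] }) {
    return await this.getCompletion(params); // sketch: no real streaming
  }
  async getOutlineCompletion(_params: { messages: unknown[] }) {
    return { role: "assistant", content: "{}" };
  }
}
```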
@@ -236,22 +678,134 @@ class OutlinePublicService {
     }
 }
 
+/**
+ * Public-facing service for AI inference operations with context management.
+ *
+ * Provides context-scoped access to AI completion operations.
+ * Acts as a facade that wraps RunnerPrivateService methods with context isolation.
+ * Each operation runs within a dedicated execution context to ensure proper API key
+ * and model configuration isolation.
+ *
+ * Key features:
+ * - Context-isolated execution for multi-tenant scenarios
+ * - Support for standard, streaming, and structured (outline) completions
+ * - Automatic context propagation to private service layer
+ * - Logging integration for operation tracking
+ *
+ * @example
+ * ```typescript
+ * import { engine } from "./lib";
+ * import { InferenceName } from "./enum/InferenceName";
+ *
+ * const context = {
+ *   inference: InferenceName.ClaudeInference,
+ *   model: "claude-3-5-sonnet-20240620",
+ *   apiKey: "sk-ant-..."
+ * };
+ *
+ * // Standard completion
+ * const result = await engine.runnerPublicService.getCompletion({
+ *   messages: [{ role: "user", content: "Analyze this trade..." }]
+ * }, context);
+ *
+ * // Streaming completion
+ * const stream = await engine.runnerPublicService.getStreamCompletion({
+ *   messages: [{ role: "user", content: "Generate signal..." }]
+ * }, context);
+ *
+ * // Structured outline completion
+ * const outline = await engine.runnerPublicService.getOutlineCompletion({
+ *   messages: [{ role: "user", content: "Trading decision..." }]
+ * }, context);
+ * ```
+ */
 class RunnerPublicService {
     constructor() {
+        /** Private service handling AI provider operations */
         this.runnerPrivateService = inject(TYPES.runnerPrivateService);
+        /** Logger service for operation tracking */
         this.loggerService = inject(TYPES.loggerService);
+        /**
+         * Executes a standard AI completion within the specified context.
+         *
+         * @param params - Completion parameters including messages and options
+         * @param context - Execution context with inference provider, model, and API key
+         * @returns Promise resolving to AI response message
+         *
+         * @example
+         * ```typescript
+         * const result = await runnerPublicService.getCompletion({
+         *   messages: [
+         *     { role: "system", content: "You are a trading analyst" },
+         *     { role: "user", content: "Analyze BTC/USDT" }
+         *   ]
+         * }, {
+         *   inference: InferenceName.ClaudeInference,
+         *   model: "claude-3-5-sonnet-20240620",
+         *   apiKey: "sk-ant-..."
+         * });
+         * ```
+         */
         this.getCompletion = async (params, context) => {
             this.loggerService.log("runnerPublicService getCompletion");
             return await ContextService.runInContext(async () => {
                 return await this.runnerPrivateService.getCompletion(params);
             }, context);
         };
+        /**
+         * Executes a streaming AI completion within the specified context.
+         *
+         * Similar to getCompletion but enables streaming mode where supported by the provider.
+         * The response is accumulated and returned as a complete message once streaming finishes.
+         *
+         * @param params - Completion parameters including messages and options
+         * @param context - Execution context with inference provider, model, and API key
+         * @returns Promise resolving to accumulated AI response message
+         *
+         * @example
+         * ```typescript
+         * const result = await runnerPublicService.getStreamCompletion({
+         *   messages: [
+         *     { role: "user", content: "Generate trading signal for ETH/USDT" }
+         *   ]
+         * }, {
+         *   inference: InferenceName.GPT5Inference,
+         *   model: "gpt-5o-mini",
+         *   apiKey: "sk-..."
+         * });
+         * ```
+         */
         this.getStreamCompletion = async (params, context) => {
             this.loggerService.log("runnerPublicService getStreamCompletion");
             return await ContextService.runInContext(async () => {
                 return await this.runnerPrivateService.getStreamCompletion(params);
             }, context);
         };
+        /**
+         * Executes a structured outline completion within the specified context.
+         *
+         * Uses structured output (JSON schema validation) to ensure the AI response
+         * conforms to a predefined format. Ideal for extracting structured data
+         * from AI responses (e.g., trading signals with specific fields).
+         *
+         * @param params - Outline completion parameters including messages and schema
+         * @param context - Execution context with inference provider, model, and API key
+         * @returns Promise resolving to structured AI response
+         *
+         * @example
+         * ```typescript
+         * const signal = await runnerPublicService.getOutlineCompletion({
+         *   messages: [
+         *     { role: "user", content: "Decide position for BTC/USDT" }
+         *   ]
+         * }, {
+         *   inference: InferenceName.DeepseekInference,
+         *   model: "deepseek-chat",
+         *   apiKey: "sk-..."
+         * });
+         * // Returns: { position: "long", price_open: 50000, ... }
+         * ```
+         */
         this.getOutlineCompletion = async (params, context) => {
             this.loggerService.log("runnerPublicService getOutlineCompletion");
             return await ContextService.runInContext(async () => {
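Because `RunnerPublicService` only wraps the private calls in `ContextService.runInContext`, one context object can be reused across all three completion styles. A sketch following the JSDoc above (the model name and key source are placeholders):

```typescript
// Sketch: one context reused across the facade methods documented above.
const ctx = {
  inference: InferenceName.OllamaInference,
  model: "llama2",
  apiKey: process.env.OLLAMA_API_KEY, // placeholder
};

const full = await engine.runnerPublicService.getCompletion(
  { messages: [{ role: "user", content: "Summarize BTC/USDT" }] },
  ctx,
);
const streamed = await engine.runnerPublicService.getStreamCompletion(
  { messages: [{ role: "user", content: "Same question, streamed" }] },
  ctx,
);
```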
@@ -261,36 +815,116 @@ class RunnerPublicService {
     }
 }
 
+/**
+ * Service registration module for dependency injection.
+ *
+ * Registers all service implementations in the DI container during application startup.
+ * Services are organized by layer: common, base, private, and public services.
+ * Each service is registered with a factory function that creates new instances.
+ *
+ * Registration order:
+ * 1. Common services (LoggerService)
+ * 2. Base services (ContextService)
+ * 3. Private services (RunnerPrivateService, OutlinePrivateService)
+ * 4. Public services (RunnerPublicService, OutlinePublicService)
+ *
+ * This file is imported by lib/index.ts to ensure services are registered
+ * before the DI container is initialized.
+ */
+/**
+ * Register common services.
+ */
 {
     provide(TYPES.loggerService, () => new LoggerService());
 }
+/**
+ * Register base services.
+ */
 {
     provide(TYPES.contextService, () => new ContextService());
 }
+/**
+ * Register private services.
+ */
 {
     provide(TYPES.runnerPrivateService, () => new RunnerPrivateService());
     provide(TYPES.outlinePrivateService, () => new OutlinePrivateService());
 }
+/**
+ * Register public services.
+ */
 {
     provide(TYPES.runnerPublicService, () => new RunnerPublicService());
     provide(TYPES.outlinePublicService, () => new OutlinePublicService());
 }
 
+/**
+ * Enumeration of supported LLM inference providers.
+ *
+ * Defines unique identifiers for each LLM provider supported by the library.
+ * Used internally for dependency injection and provider resolution.
+ *
+ * @example
+ * ```typescript
+ * import { InferenceName } from '@backtest-kit/ollama';
+ *
+ * const providerName = InferenceName.GPT5Inference;
+ * ```
+ */
 var InferenceName;
 (function (InferenceName) {
+    /** Ollama provider for local/cloud LLM inference */
     InferenceName["OllamaInference"] = "ollama_inference";
+    /** Grok provider by X.AI (api.x.ai) */
     InferenceName["GrokInference"] = "grok_inference";
+    /** Hugging Face Inference API provider */
     InferenceName["HfInference"] = "hf_inference";
+    /** Claude provider by Anthropic (api.anthropic.com) */
     InferenceName["ClaudeInference"] = "claude_inference";
+    /** OpenAI GPT provider (api.openai.com) */
     InferenceName["GPT5Inference"] = "gpt5_inference";
+    /** Z.ai GPT Provider (api.z.ai/api/paas/v4) */
+    InferenceName["GLM4Inference"] = "glm4_inference";
+    /** DeepSeek provider (api.deepseek.com) */
     InferenceName["DeepseekInference"] = "deepseek_inference";
+    /** Mistral AI provider (api.mistral.ai) */
     InferenceName["MistralInference"] = "mistral_inference";
+    /** Perplexity AI provider (api.perplexity.ai) */
     InferenceName["PerplexityInference"] = "perplexity_inference";
+    /** Cohere provider (api.cohere.ai) */
     InferenceName["CohereInference"] = "cohere_inference";
+    /** Alibaba Cloud provider (dashscope-intl.aliyuncs.com) */
     InferenceName["AlibabaInference"] = "alibaba_inference";
 })(InferenceName || (InferenceName = {}));
 var InferenceName$1 = InferenceName;
 
+/**
+ * Creates and caches an OpenAI-compatible client for Grok (xAI) API.
+ *
+ * Uses OpenAI SDK with Grok's API endpoint.
+ * The client instance is cached using singleshot memoization for performance.
+ * Token rotation is not supported - throws error if array of keys is provided.
+ *
+ * Key features:
+ * - OpenAI SDK compatibility layer
+ * - Single API key support only
+ * - Instance caching with singleshot
+ * - Automatic cache clearing on error
+ *
+ * @returns OpenAI client configured for Grok API
+ * @throws Error if API key array is provided (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { getGrok } from "./config/grok";
+ *
+ * const client = getGrok();
+ * const completion = await client.chat.completions.create({
+ *   model: "grok-beta",
+ *   messages: [{ role: "user", content: "Hello" }]
+ * });
+ * ```
+ */
 const getGrok = functoolsKit.singleshot(() => {
     const apiKey = lib.contextService.context.apiKey;
     if (Array.isArray(apiKey)) {
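Two caveats surface in this hunk: the outline path returns `null` when the model decides to wait, and the Grok client refuses key arrays. A sketch combining both, per the JSDoc above (the model name and env variable are illustrative):

```typescript
// Sketch: Grok rejects apiKey arrays (no rotation), and a null signal
// means the model chose to wait. Names follow the JSDoc above.
const keys = process.env.GROK_KEYS?.split(",") ?? [];
const apiKey = keys[0]; // rotation unsupported for Grok, pick one key

const signal = await engine.outlinePublicService.getCompletion(
  [{ role: "user", content: "Decide position for BTC/USDT" }],
  InferenceName.GrokInference,
  "grok-beta",
  apiKey,
);
if (signal === null) {
  // "wait": no position was taken, nothing to execute
}
```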
@@ -303,8 +937,54 @@ const getGrok = functoolsKit.singleshot(() => {
     });
 });
 
-
+/**
+ * Global configuration parameters for the Ollama package.
+ *
+ * Provides runtime configuration via environment variables with sensible defaults.
+ * All configuration values are immutable once initialized.
+ *
+ * Available configurations:
+ * - CC_ENABLE_DEBUG: Enable detailed debug logging
+ * - CC_ENABLE_THINKING: Enable AI extended reasoning mode
+ *
+ * @example
+ * ```typescript
+ * import { GLOBAL_CONFIG } from "./config/params";
+ *
+ * if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
+ *   console.log("Debug mode enabled");
+ * }
+ *
+ * if (GLOBAL_CONFIG.CC_ENABLE_THINKING) {
+ *   // AI will provide reasoning before responses
+ * }
+ * ```
+ */
+/**
+ * Mutable global configuration object.
+ * Values are read from environment variables at initialization.
+ */
+const GLOBAL_CONFIG = {
+    /**
+     * Enable debug mode for detailed logging.
+     * When enabled, additional debug information will be logged.
+     * Can be set via CC_ENABLE_DEBUG environment variable.
+     * Default: false
+     */
+    CC_ENABLE_DEBUG: "CC_ENABLE_DEBUG" in process.env ? !!parseInt(process.env.CC_ENABLE_DEBUG) : false,
+    /**
+     * Enable thinking mode for AI responses.
+     * When enabled, the AI will provide extended reasoning before answering.
+     * Can be set via CC_ENABLE_THINKING environment variable.
+     * Default: false
+     */
+    CC_ENABLE_THINKING: "CC_ENABLE_THINKING" in process.env ? !!parseInt(process.env.CC_ENABLE_THINKING) : false,
+};
 
+/**
+ * Custom ChatXAI implementation with simplified token counting.
+ * Estimates tokens as content.length / 4 for compatibility.
+ */
 class CustomChat extends xai.ChatXAI {
     async getNumTokens(content) {
         if (typeof content !== "string") {
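Note how the flags above parse: `!!parseInt(value)` treats only numeric strings as truthy, so `CC_ENABLE_DEBUG=true` does not enable the flag. A quick check of the expression's behavior:

```typescript
// Behavior of !!parseInt(...) as used by GLOBAL_CONFIG above.
console.assert(!!parseInt("1") === true);     // CC_ENABLE_DEBUG=1  -> enabled
console.assert(!!parseInt("0") === false);    // CC_ENABLE_DEBUG=0  -> disabled
console.assert(!!parseInt("true") === false); // parseInt("true") is NaN -> disabled
```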
@@ -313,16 +993,54 @@ class CustomChat extends xai.ChatXAI {
         return Math.ceil(content.length / 4);
     }
 }
+/**
+ * Creates configured ChatXAI instance for Grok streaming.
+ */
 const getChat$1 = (model, apiKey) => new CustomChat({
     apiKey,
     model,
     streaming: true,
 });
-
+/**
+ * Provider for xAI Grok models via LangChain ChatXAI.
+ *
+ * Uses LangChain's ChatXAI integration for xAI Grok models.
+ * Provides true token-by-token streaming via LangChain callbacks and OpenAI SDK for standard requests.
+ *
+ * Key features:
+ * - LangChain ChatXAI for true streaming
+ * - OpenAI SDK via getGrok() for standard completion
+ * - Direct xAI API access for outline completion
+ * - Tool calling via bindTools (streaming) or tools parameter (standard)
+ * - Real-time token emission via stream callbacks
+ * - No token rotation support (single API key only)
+ *
+ * @example
+ * ```typescript
+ * const provider = new GrokProvider(contextService, logger);
+ * const response = await provider.getStreamCompletion({
+ *   agentName: "grok",
+ *   messages: [{ role: "user", content: "Latest AI news?" }],
+ *   mode: "direct",
+ *   tools: [searchTool],
+ *   clientId: "client-888"
+ * });
+ * ```
+ */
+class GrokProvider {
+    /**
+     * Creates a new GrokProvider instance.
+     */
     constructor(contextService, logger) {
         this.contextService = contextService;
         this.logger = logger;
     }
+    /**
+     * Performs standard completion request via OpenAI SDK.
+     *
+     * @param params - Completion parameters
+     * @returns Promise resolving to assistant's response
+     */
     async getCompletion(params) {
         const grok = getGrok();
         const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -366,11 +1084,19 @@ let GrokProvider$1 = class GrokProvider {
             })),
         };
         // Debug logging
-        if (CC_ENABLE_DEBUG) {
+        if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
             await fs.appendFile("./debug_grok_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
         }
         return result;
     }
+    /**
+     * Performs true streaming completion via LangChain ChatXAI.
+     * Emits tokens in real-time as they are generated.
+     *
+     * @param params - Completion parameters
+     * @returns Promise resolving to complete response after streaming
+     * @throws Error if token rotation attempted
+     */
     async getStreamCompletion(params) {
         if (Array.isArray(this.contextService.context.apiKey)) {
             throw new Error("Grok provider does not support token rotation");
@@ -465,11 +1191,19 @@ let GrokProvider$1 = class GrokProvider {
             tool_calls: formattedToolCalls,
         };
         // Debug logging
-        if (CC_ENABLE_DEBUG) {
+        if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
             await fs.appendFile("./debug_grok_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
         }
         return result;
     }
+    /**
+     * Performs structured output completion via direct xAI API.
+     * Uses response_format parameter for schema enforcement.
+     *
+     * @param params - Outline completion parameters
+     * @returns Promise resolving to validated JSON string
+     * @throws Error if model returns refusal or token rotation attempted
+     */
     async getOutlineCompletion(params) {
         const { messages: rawMessages, format } = params;
         this.logger.log("grokProvider getOutlineCompletion", {
@@ -512,14 +1246,21 @@ let GrokProvider$1 = class GrokProvider {
             content: json,
         };
         // Debug logging
-        if (CC_ENABLE_DEBUG) {
+        if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
             await fs.appendFile("./debug_grok_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
         }
         return result;
     }
-}
+}
 
+/**
+ * Maximum number of retry attempts for outline completion.
+ */
 const MAX_ATTEMPTS$5 = 5;
+/**
+ * Custom ChatOpenAI implementation for HuggingFace with simplified token counting.
+ * Routes requests to HuggingFace Router endpoint.
+ */
 class HuggingFaceChat extends openai.ChatOpenAI {
     async getNumTokens(content) {
         if (typeof content !== "string") {
@@ -528,6 +1269,9 @@ class HuggingFaceChat extends openai.ChatOpenAI {
         return Math.ceil(content.length / 4);
     }
 }
+/**
+ * Creates configured HuggingFaceChat instance for streaming.
+ */
 const getChat = (model, apiKey) => new HuggingFaceChat({
     configuration: {
         baseURL: "https://router.huggingface.co/v1",
@@ -536,12 +1280,52 @@ const getChat = (model, apiKey) => new HuggingFaceChat({
     model,
     streaming: true,
 });
+/**
+ * Creates HuggingFace InferenceClient for standard completion.
+ */
 const getInference = (apiKey) => new inference.InferenceClient(apiKey);
+/**
+ * Provider for HuggingFace models via HuggingFace Router API.
+ *
+ * Implements HuggingFace API access using both InferenceClient (standard completion)
+ * and LangChain ChatOpenAI (streaming). Supports thinking mode via reasoning_content.
+ * Does NOT support token rotation (single API key only).
+ *
+ * Key features:
+ * - HuggingFace InferenceClient for standard completion
+ * - LangChain ChatOpenAI for true streaming
+ * - Tool calling support with proper message conversion
+ * - Reasoning/thinking content capture (_thinking field)
+ * - Direct API access for outline completion
+ * - No token rotation support
+ *
+ * @example
+ * ```typescript
+ * const provider = new HfProvider(contextService, logger);
+ * const response = await provider.getStreamCompletion({
+ *   agentName: "hf-assistant",
+ *   messages: [{ role: "user", content: "Explain attention mechanism" }],
+ *   mode: "direct",
+ *   tools: [codeTool],
+ *   clientId: "client-777"
+ * });
+ * ```
+ */
 class HfProvider {
+    /**
+     * Creates a new HfProvider instance.
+     */
     constructor(contextService, logger) {
         this.contextService = contextService;
         this.logger = logger;
     }
+    /**
+     * Performs standard completion using HuggingFace InferenceClient.
+     *
+     * @param params - Completion parameters
+     * @returns Promise resolving to assistant's response
+     * @throws Error if token rotation attempted
+     */
     async getCompletion(params) {
         if (Array.isArray(this.contextService.context.apiKey)) {
             throw new Error("Hf provider does not support token rotation");
@@ -616,7 +1400,7 @@ class HfProvider {
             })),
         };
         // Debug logging
-        if (CC_ENABLE_DEBUG) {
+        if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
             await fs.appendFile("./debug_hf_provider.txt", JSON.stringify({
                 params,
                 answer: result,
@@ -624,6 +1408,14 @@ class HfProvider {
         }
         return result;
     }
+    /**
+     * Performs true streaming completion using LangChain ChatOpenAI.
+     * Emits tokens in real-time via callbacks.
+     *
+     * @param params - Completion parameters
+     * @returns Promise resolving to complete response after streaming
+     * @throws Error if token rotation attempted
+     */
     async getStreamCompletion(params) {
         if (Array.isArray(this.contextService.context.apiKey)) {
             throw new Error("Hf provider does not support token rotation");
@@ -711,7 +1503,7 @@ class HfProvider {
             })),
         };
         // Debug logging
-        if (CC_ENABLE_DEBUG) {
+        if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
             await fs.appendFile("./debug_hf_provider_stream.txt", JSON.stringify({
                 params,
                 answer: result,
@@ -719,6 +1511,14 @@ class HfProvider {
         }
         return result;
     }
+    /**
+     * Performs structured output completion using tool calling with extended retry logic.
+     * Captures reasoning_content as _thinking field in response.
+     *
+     * @param params - Outline completion parameters
+     * @returns Promise resolving to validated JSON string with thinking
+     * @throws Error if model fails after 5 attempts or token rotation attempted
+     */
     async getOutlineCompletion(params) {
         const { messages: rawMessages, format } = params;
         this.logger.log("hfProvider getOutlineCompletion", {
@@ -824,7 +1624,7 @@ class HfProvider {
             content: JSON.stringify(validation.data),
         };
         // Debug logging
-        if (CC_ENABLE_DEBUG) {
+        if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
             await fs.appendFile("./debug_hf_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
         }
         return result;
@@ -837,9 +1637,25 @@ class HfProvider {
     }
 }
 
+/**
+ * Wrapper class for Ollama client with token rotation support.
+ *
+ * Implements round-robin API key rotation for high-volume Ollama usage.
+ * Each request automatically rotates through the provided API keys to
+ * distribute load and avoid rate limiting.
+ *
+ * Key features:
+ * - Round-robin token rotation using RoundRobin from agent-swarm-kit
+ * - Streaming and non-streaming support
+ * - Type-safe method overloads
+ * - Automatic Ollama client creation per token
+ *
+ * @throws Error if no API keys are provided in context
+ */
 class OllamaWrapper {
     constructor(_config) {
         this._config = _config;
+        /** Round-robin chat function factory */
         this._chatFn = agentSwarmKit.RoundRobin.create(lib.contextService.context.apiKey, (token) => {
             const ollama = new ollama$1.Ollama({
                 ...this._config,
@@ -860,14 +1676,86 @@ class OllamaWrapper {
             throw new Error("OllamaRotate required apiKey[] to process token rotation");
         }
     }
+    /**
+     * Executes a chat request with automatic token rotation.
+     *
+     * @param request - Chat request configuration
+     * @returns Chat response or async iterable (for streaming)
+     */
     async chat(request) {
         return await this._chatFn(request);
     }
 }
+/**
+ * Creates and caches an Ollama wrapper with token rotation enabled.
+ *
+ * Requires an array of API keys in the execution context.
+ * The wrapper automatically rotates through keys using round-robin strategy.
+ *
+ * @returns OllamaWrapper instance with token rotation
+ *
+ * @example
+ * ```typescript
+ * import { getOllamaRotate } from "./config/ollama.rotate";
+ *
+ * // Context must have array of API keys
+ * const client = getOllamaRotate();
+ * const response = await client.chat({
+ *   model: "llama2",
+ *   messages: [{ role: "user", content: "Hello" }]
+ * });
+ * // Next request will use a different API key
+ * ```
+ */
 const getOllamaRotate = functoolsKit.singleshot(() => new OllamaWrapper({
     host: "https://ollama.com",
 }));
 
+/**
+ * Creates and caches an Ollama client with flexible configuration.
+ *
+ * Supports three modes of operation:
+ * 1. Token rotation mode: Array of API keys enables automatic rotation
+ * 2. Cloud mode: Single API key connects to ollama.com
+ * 3. Local mode: No API key connects to local Ollama instance
+ *
+ * The client instance is cached using singleshot memoization for performance.
+ * Automatically selects the appropriate client based on API key configuration.
+ *
+ * Key features:
+ * - Token rotation support for high-volume usage
+ * - Cloud and local Ollama support
+ * - Instance caching with singleshot
+ * - Automatic mode detection
+ *
+ * @returns Ollama client or OllamaWrapper (for token rotation)
+ *
+ * @example
+ * ```typescript
+ * import { getOllama } from "./config/ollama";
+ *
+ * // Local mode (no API key)
+ * const localClient = getOllama();
+ * const response = await localClient.chat({
+ *   model: "llama2",
+ *   messages: [{ role: "user", content: "Hello" }]
+ * });
+ *
+ * // Cloud mode (single API key)
+ * const cloudClient = getOllama();
+ * const response = await cloudClient.chat({
+ *   model: "llama2",
+ *   messages: [{ role: "user", content: "Hello" }]
+ * });
+ *
+ * // Token rotation mode (array of API keys)
+ * const rotateClient = getOllama();
+ * const response = await rotateClient.chat({
+ *   model: "llama2",
+ *   messages: [{ role: "user", content: "Hello" }]
+ * });
+ * ```
+ */
 const getOllama = functoolsKit.singleshot(() => {
     const apiKey = lib.contextService.context.apiKey;
     if (Array.isArray(apiKey)) {
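The rotation mechanics in `OllamaWrapper` come from agent-swarm-kit's `RoundRobin.create`, which builds one client per key and cycles through them per call. Roughly the idea, as a standalone sketch (not the library's actual implementation):

```typescript
// Standalone sketch of round-robin dispatch over per-token clients;
// agent-swarm-kit's RoundRobin is the real implementation used above.
function roundRobin<T, A extends unknown[], R>(
  tokens: T[],
  factory: (token: T) => (...args: A) => R,
): (...args: A) => R {
  const fns = tokens.map(factory); // one client function per API key
  let i = -1;
  return (...args: A): R => {
    i = (i + 1) % fns.length; // advance cursor on every call
    return fns[i](...args);
  };
}
```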
@@ -884,12 +1772,98 @@ const getOllama = functoolsKit.singleshot(() => {
     });
 });
 
+/**
+ * Maximum number of retry attempts for outline completion when model fails to use tools correctly.
+ */
 const MAX_ATTEMPTS$4 = 3;
+/**
+ * Provider for Ollama LLM completions.
+ *
+ * Supports local and remote Ollama models with full tool calling capabilities.
+ * Provides both standard and streaming completion modes, as well as structured
+ * output through the outline completion method.
+ *
+ * Key features:
+ * - Native Ollama protocol support
+ * - Real-time streaming with token-by-token delivery
+ * - Tool calling with automatic retry logic
+ * - JSON schema validation for structured outputs
+ * - Optional thinking mode support (via CC_ENABLE_THINKING)
+ * - Debug logging when CC_ENABLE_DEBUG is enabled
+ *
+ * @example
+ * ```typescript
+ * const provider = new OllamaProvider(contextService, logger);
+ *
+ * // Standard completion
+ * const response = await provider.getCompletion({
+ *   agentName: "assistant",
+ *   messages: [{ role: "user", content: "Hello!" }],
+ *   mode: "direct",
+ *   tools: [],
+ *   clientId: "client-123"
+ * });
+ *
+ * // Streaming completion
+ * const streamResponse = await provider.getStreamCompletion({
+ *   agentName: "assistant",
+ *   messages: [{ role: "user", content: "Explain AI" }],
+ *   mode: "direct",
+ *   tools: [],
+ *   clientId: "client-123"
+ * });
+ *
+ * // Structured output with schema enforcement
+ * const outlineResponse = await provider.getOutlineCompletion({
+ *   messages: [{ role: "user", content: "Analyze sentiment" }],
+ *   format: {
+ *     type: "object",
+ *     properties: {
+ *       sentiment: { type: "string" },
+ *       confidence: { type: "number" }
+ *     }
+ *   }
+ * });
+ * ```
+ */
 class OllamaProvider {
+    /**
+     * Creates a new OllamaProvider instance.
+     *
+     * @param contextService - Context service managing model configuration and API settings
+     * @param logger - Logger instance for tracking provider operations
+     */
     constructor(contextService, logger) {
         this.contextService = contextService;
         this.logger = logger;
     }
+    /**
+     * Performs a standard (non-streaming) completion request to Ollama.
+     *
+     * Sends messages and tools to the Ollama model and returns the complete response.
+     * Supports tool calling with automatic ID generation for tool calls.
+     *
+     * @param params - Completion parameters including messages, tools, and agent configuration
+     * @param params.agentName - Name of the agent making the request
+     * @param params.messages - Conversation history with roles and content
+     * @param params.mode - Completion mode (e.g., "direct", "delegated")
+     * @param params.tools - Available tools for the model to call
+     * @param params.clientId - Client identifier for tracking requests
+     * @returns Promise resolving to the assistant's response message with optional tool calls
+     *
+     * @example
+     * ```typescript
+     * const response = await provider.getCompletion({
+     *   agentName: "assistant",
+     *   messages: [
+     *     { role: "user", content: "What's the weather in Tokyo?" }
+     *   ],
+     *   mode: "direct",
+     *   tools: [weatherTool],
+     *   clientId: "client-123"
+     * });
+     * ```
+     */
     async getCompletion(params) {
         const { agentName, messages: rawMessages, mode, tools, clientId } = params;
         const ollama = getOllama();
@@ -910,6 +1884,7 @@ class OllamaProvider {
             })),
         })),
         tools,
+        think: GLOBAL_CONFIG.CC_ENABLE_THINKING,
     });
     const message = response.message;
     const result = {
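For reference, the `think` flag added in this hunk maps directly onto the client call. A small sketch, assuming an ollama-js version that accepts `think` on `chat()` and an env-driven stand-in for `GLOBAL_CONFIG.CC_ENABLE_THINKING`:

```typescript
import { Ollama } from "ollama";

async function main() {
  const ollama = new Ollama();
  // Assumed env-driven stand-in for GLOBAL_CONFIG.CC_ENABLE_THINKING.
  const enableThinking = process.env.CC_ENABLE_THINKING === "1";

  const response = await ollama.chat({
    model: "llama2",
    messages: [{ role: "user", content: "Hello" }],
    // `think` asks reasoning-capable models to emit a separate thinking trace;
    // older ollama-js versions may not support this field.
    think: enableThinking,
  });
  console.log(response.message.content);
}

main().catch(console.error);
```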
@@ -924,11 +1899,40 @@ class OllamaProvider {
         role: response.message.role,
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_ollama_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
 }
+    /**
+     * Performs a streaming completion request to Ollama.
+     *
+     * Sends messages and tools to the Ollama model and streams the response token by token.
+     * Emits "llm-new-token" events for each token and "llm-completion" when finished.
+     * Accumulates tool calls and content chunks from the stream.
+     *
+     * @param params - Completion parameters including messages, tools, and agent configuration
+     * @param params.agentName - Name of the agent making the request
+     * @param params.messages - Conversation history with roles and content
+     * @param params.mode - Completion mode (e.g., "direct", "delegated")
+     * @param params.tools - Available tools for the model to call
+     * @param params.clientId - Client identifier for event emission
+     * @returns Promise resolving to the complete assistant's response after streaming finishes
+     *
+     * @example
+     * ```typescript
+     * const response = await provider.getStreamCompletion({
+     *   agentName: "assistant",
+     *   messages: [
+     *     { role: "user", content: "Explain quantum computing" }
+     *   ],
+     *   mode: "direct",
+     *   tools: [],
+     *   clientId: "client-123"
+     * });
+     * // Client receives "llm-new-token" events during generation
+     * ```
+     */
     async getStreamCompletion(params) {
         const { agentName, messages: rawMessages, mode, tools, clientId } = params;
         const ollama = getOllama();
@@ -953,6 +1957,7 @@ class OllamaProvider {
         messages,
         tools,
         stream: true,
+        think: GLOBAL_CONFIG.CC_ENABLE_THINKING,
     });
     for await (const chunk of stream) {
         if (chunk.message.tool_calls) {
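The streaming hunk above iterates the chunk stream and accumulates it. A sketch of the accumulate-and-emit pattern the docs describe; the `llm-new-token` and `llm-completion` event names come from the JSDoc, while the emitter wiring itself is hypothetical:

```typescript
import { EventEmitter } from "node:events";
import { Ollama } from "ollama";

async function streamChat(emitter: EventEmitter, clientId: string): Promise<string> {
  const ollama = new Ollama();
  const stream = await ollama.chat({
    model: "llama2",
    messages: [{ role: "user", content: "Explain quantum computing" }],
    stream: true,
  });

  let content = "";
  for await (const chunk of stream) {
    const token = chunk.message.content;
    if (token) {
      content += token; // accumulate the full answer
      emitter.emit("llm-new-token", { clientId, token }); // per-token event
    }
  }
  emitter.emit("llm-completion", { clientId, content }); // final event
  return content;
}
```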
@@ -984,11 +1989,45 @@ class OllamaProvider {
         })),
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_ollama_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
 }
+    /**
+     * Performs structured output completion using JSON schema enforcement via tool calling.
+     *
+     * Forces the model to use a specific tool ("provide_answer") to ensure the response
+     * conforms to the provided JSON schema. Implements retry logic with up to MAX_ATTEMPTS
+     * attempts if the model fails to use the tool correctly or returns invalid JSON.
+     *
+     * Uses jsonrepair to fix malformed JSON and validates the output against the schema.
+     * Adds context information to the returned data structure.
+     *
+     * @param params - Outline completion parameters
+     * @param params.messages - Conversation history for context
+     * @param params.format - JSON schema or response format definition
+     * @returns Promise resolving to validated JSON string conforming to the schema
+     * @throws Error if model fails to use tool after MAX_ATTEMPTS attempts
+     *
+     * @example
+     * ```typescript
+     * const response = await provider.getOutlineCompletion({
+     *   messages: [
+     *     { role: "user", content: "Analyze: 'Great product!'" }
+     *   ],
+     *   format: {
+     *     type: "object",
+     *     properties: {
+     *       sentiment: { type: "string", enum: ["positive", "negative", "neutral"] },
+     *       confidence: { type: "number", minimum: 0, maximum: 1 }
+     *     },
+     *     required: ["sentiment", "confidence"]
+     *   }
+     * });
+     * // response.content = '{"sentiment":"positive","confidence":0.95,"_context":{...}}'
+     * ```
+     */
     async getOutlineCompletion(params) {
         const { messages: rawMessages, format } = params;
         const ollama = getOllama();
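The retry-and-validate loop described above is the core of outline mode. Here is a condensed sketch of that loop, assuming a zod schema stands in for the JSON-schema `format`; `jsonrepair` and zod are the libraries the docs name, while `callModel` is a placeholder for the provider's tool-forced chat call:

```typescript
import { jsonrepair } from "jsonrepair";
import { z } from "zod";

const MAX_ATTEMPTS = 3;

const answerSchema = z.object({
  sentiment: z.enum(["positive", "negative", "neutral"]),
  confidence: z.number().min(0).max(1),
});

// callModel is a placeholder for the chat call that forces the
// "provide_answer" tool; it resolves to the raw tool-arguments string.
async function outlineWithRetry(
  callModel: () => Promise<string | undefined>,
): Promise<string> {
  for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
    const rawArguments = await callModel();
    if (!rawArguments) continue; // model ignored the tool; retry
    try {
      const repaired = jsonrepair(rawArguments); // fix malformed JSON
      const validation = answerSchema.safeParse(JSON.parse(repaired));
      if (validation.success) return JSON.stringify(validation.data);
    } catch {
      // unrepairable JSON; fall through and retry
    }
  }
  throw new Error(`Model failed to produce valid output after ${MAX_ATTEMPTS} attempts`);
}
```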
@@ -1031,6 +2070,7 @@ class OllamaProvider {
         model: this.contextService.context.model,
         messages,
         tools: [toolDefinition],
+        think: GLOBAL_CONFIG.CC_ENABLE_THINKING,
     });
     const { tool_calls } = response.message;
     if (!tool_calls?.length) {
@@ -1070,7 +2110,7 @@ class OllamaProvider {
         content: JSON.stringify(validation.data),
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_ollama_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
@@ -1083,6 +2123,33 @@ class OllamaProvider {
     }
 }
 
+/**
+ * Creates and caches an OpenAI-compatible client for the Claude (Anthropic) API.
+ *
+ * Uses the OpenAI SDK with Claude's API endpoint for compatibility.
+ * The client instance is cached using singleshot memoization for performance.
+ * Token rotation is not supported - throws an error if an array of keys is provided.
+ *
+ * Key features:
+ * - OpenAI SDK compatibility layer
+ * - Single API key support only
+ * - Instance caching with singleshot
+ * - Automatic cache clearing on error
+ *
+ * @returns OpenAI client configured for the Claude API
+ * @throws Error if an API key array is provided (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { getClaude } from "./config/claude";
+ *
+ * const client = getClaude();
+ * const completion = await client.chat.completions.create({
+ *   model: "claude-3-5-sonnet-20240620",
+ *   messages: [{ role: "user", content: "Hello" }]
+ * });
+ * ```
+ */
 const getClaude = functoolsKit.singleshot(() => {
     const apiKey = lib.contextService.context.apiKey;
     if (Array.isArray(apiKey)) {
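The "automatic cache clearing on error" behavior keeps a bad client from staying memoized forever. A sketch of the pattern, assuming functools-kit's `singleshot` exposes `clear()` on the memoized function (the `getZAi` source later in this diff shows exactly this shape); the context lookup and the Claude-compatible base URL are assumptions:

```typescript
import { singleshot } from "functools-kit";
import OpenAI from "openai";

// Hypothetical stand-in for lib.contextService.context.apiKey.
declare function currentApiKey(): string | string[];

const getClient = singleshot(() => {
  const apiKey = currentApiKey();
  if (Array.isArray(apiKey)) {
    // Drop the cached (failed) invocation so the next call can retry cleanly.
    getClient.clear();
    throw new Error("This provider does not support token rotation");
  }
  // Base URL is illustrative; substitute the endpoint your provider uses.
  return new OpenAI({ apiKey, baseURL: "https://api.anthropic.com/v1/" });
});
```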
@@ -1095,12 +2162,72 @@ const getClaude = functoolsKit.singleshot(() => {
     });
 });
 
+/**
+ * Maximum number of retry attempts for outline completion when model fails to use tools correctly.
+ */
 const MAX_ATTEMPTS$3 = 5;
-
+/**
+ * Provider for Anthropic Claude models via OpenAI-compatible API.
+ *
+ * Note: some diff hunk headers and debug file names in this section still
+ * reference other providers (GrokProvider, gpt5); these look like leftovers
+ * from shared provider boilerplate that was not fully renamed.
+ *
+ * Implements Claude API access through an OpenAI-compatible endpoint with full tool calling support.
+ * Supports both standard and simulated streaming modes, as well as structured output
+ * through tool-based schema enforcement.
+ *
+ * Key features:
+ * - Claude API via OpenAI-compatible endpoint
+ * - Tool calling with retry logic and validation
+ * - Simulated streaming (returns complete response)
+ * - JSON schema enforcement via tool calling
+ * - Conditional tool parameter (only added if tools are present)
+ * - Debug logging when CC_ENABLE_DEBUG is enabled
+ *
+ * @example
+ * ```typescript
+ * const provider = new ClaudeProvider(contextService, logger);
+ *
+ * // Standard completion
+ * const response = await provider.getCompletion({
+ *   agentName: "claude-assistant",
+ *   messages: [{ role: "user", content: "Explain neural networks" }],
+ *   mode: "direct",
+ *   tools: [searchTool],
+ *   clientId: "client-789"
+ * });
+ *
+ * // Structured output with schema validation
+ * const structured = await provider.getOutlineCompletion({
+ *   messages: [{ role: "user", content: "Classify: 'Best purchase ever!'" }],
+ *   format: {
+ *     type: "object",
+ *     properties: {
+ *       category: { type: "string" },
+ *       confidence: { type: "number" }
+ *     }
+ *   }
+ * });
+ * ```
+ */
+class ClaudeProvider {
+    /**
+     * Creates a new ClaudeProvider instance.
+     *
+     * @param contextService - Context service managing model configuration and API key
+     * @param logger - Logger instance for tracking provider operations
+     */
     constructor(contextService, logger) {
         this.contextService = contextService;
         this.logger = logger;
     }
+    /**
+     * Performs a standard completion request to Claude via the OpenAI-compatible API.
+     * Only adds the tools parameter if the tools array is non-empty.
+     *
+     * @param params - Completion parameters
+     * @returns Promise resolving to the assistant's response message
+     */
     async getCompletion(params) {
         const claude = getClaude();
         const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -1149,11 +2276,17 @@ class GrokProvider {
         })),
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_claude_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
 }
+    /**
+     * Performs a simulated streaming completion (returns the complete response and emits a completion event).
+     *
+     * @param params - Completion parameters
+     * @returns Promise resolving to the complete assistant's response
+     */
     async getStreamCompletion(params) {
         const openai = getClaude();
         const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -1210,11 +2343,19 @@ class GrokProvider {
         })),
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_gpt5_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
 }
+    /**
+     * Performs structured output completion using tool calling with retry logic.
+     * Uses tool_choice to force the model to use the provide_answer tool.
+     *
+     * @param params - Outline completion parameters
+     * @returns Promise resolving to validated JSON string
+     * @throws Error if model fails after MAX_ATTEMPTS attempts
+     */
     async getOutlineCompletion(params) {
         const { messages: rawMessages, format } = params;
         const claude = getClaude();
@@ -1312,7 +2453,7 @@ class GrokProvider {
         content: JSON.stringify(validation.data),
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_claude_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
@@ -1325,6 +2466,33 @@ class GrokProvider {
     }
 }
 
+/**
+ * Creates and caches an OpenAI client for the OpenAI API.
+ *
+ * Uses the official OpenAI SDK with default settings.
+ * The client instance is cached using singleshot memoization for performance.
+ * Token rotation is not supported - throws an error if an array of keys is provided.
+ *
+ * Key features:
+ * - Official OpenAI SDK
+ * - Single API key support only
+ * - Instance caching with singleshot
+ * - Automatic cache clearing on error
+ *
+ * @returns OpenAI client configured for the OpenAI API
+ * @throws Error if an API key array is provided (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { getOpenAi } from "./config/openai";
+ *
+ * const client = getOpenAi();
+ * const completion = await client.chat.completions.create({
+ *   model: "gpt-5o-mini",
+ *   messages: [{ role: "user", content: "Hello" }]
+ * });
+ * ```
+ */
 const getOpenAi = functoolsKit.singleshot(() => {
     const apiKey = lib.contextService.context.apiKey;
     if (Array.isArray(apiKey)) {
@@ -1336,11 +2504,93 @@ const getOpenAi = functoolsKit.singleshot(() => {
     });
 });
 
+/**
+ * Provider for OpenAI GPT models (GPT-4, GPT-4 Turbo, GPT-3.5, etc.).
+ *
+ * Implements the OpenAI Chat Completions API with full tool calling support.
+ * Uses the official OpenAI SDK for reliable communication with OpenAI's API.
+ * Supports both standard and simulated streaming modes.
+ *
+ * Key features:
+ * - OpenAI Chat Completions API via official SDK
+ * - Tool calling with automatic argument serialization
+ * - Simulated streaming (returns complete response, emits completion event)
+ * - JSON schema enforcement for structured outputs
+ * - Debug logging when CC_ENABLE_DEBUG is enabled
+ *
+ * Note: This provider does not implement true token-by-token streaming.
+ * The getStreamCompletion method returns the complete response and emits
+ * a single completion event to maintain interface compatibility.
+ *
+ * @example
+ * ```typescript
+ * const provider = new GPT5Provider(contextService, logger);
+ *
+ * // Standard completion with GPT-4
+ * const response = await provider.getCompletion({
+ *   agentName: "assistant",
+ *   messages: [{ role: "user", content: "Explain relativity" }],
+ *   mode: "direct",
+ *   tools: [],
+ *   clientId: "client-123"
+ * });
+ *
+ * // Structured output with JSON schema
+ * const analysis = await provider.getOutlineCompletion({
+ *   messages: [{ role: "user", content: "Analyze sentiment" }],
+ *   format: {
+ *     type: "json_schema",
+ *     json_schema: {
+ *       schema: {
+ *         type: "object",
+ *         properties: {
+ *           sentiment: { type: "string" },
+ *           score: { type: "number" }
+ *         }
+ *       }
+ *     }
+ *   }
+ * });
+ * ```
+ */
 class GPT5Provider {
+    /**
+     * Creates a new GPT5Provider instance.
+     *
+     * @param contextService - Context service managing model configuration and API key
+     * @param logger - Logger instance for tracking provider operations
+     */
     constructor(contextService, logger) {
         this.contextService = contextService;
         this.logger = logger;
     }
+    /**
+     * Performs a standard completion request to OpenAI.
+     *
+     * Sends messages and tools to the OpenAI API and returns the complete response.
+     * Automatically serializes tool call arguments to JSON strings for API compatibility.
+     *
+     * @param params - Completion parameters including messages, tools, and agent configuration
+     * @param params.agentName - Name of the agent making the request
+     * @param params.messages - Conversation history with roles and content
+     * @param params.mode - Completion mode (e.g., "direct", "delegated")
+     * @param params.tools - Available tools for the model to call
+     * @param params.clientId - Client identifier for tracking requests
+     * @returns Promise resolving to the assistant's response message with optional tool calls
+     *
+     * @example
+     * ```typescript
+     * const response = await provider.getCompletion({
+     *   agentName: "gpt-assistant",
+     *   messages: [
+     *     { role: "user", content: "Calculate 15% tip on $85" }
+     *   ],
+     *   mode: "direct",
+     *   tools: [calculatorTool],
+     *   clientId: "client-456"
+     * });
+     * ```
+     */
     async getCompletion(params) {
         const openai = getOpenAi();
         const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -1382,11 +2632,43 @@ class GPT5Provider {
         })),
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_gpt5_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
 }
+    /**
+     * Performs a simulated streaming completion request to OpenAI.
+     *
+     * Note: This method does NOT implement true token-by-token streaming.
+     * It performs a standard completion and emits a single "llm-completion"
+     * event with the full response to maintain interface compatibility.
+     *
+     * For true streaming, the OpenAI SDK streaming API would need to be used
+     * with the "stream: true" parameter.
+     *
+     * @param params - Completion parameters including messages, tools, and agent configuration
+     * @param params.agentName - Name of the agent making the request
+     * @param params.messages - Conversation history with roles and content
+     * @param params.mode - Completion mode (e.g., "direct", "delegated")
+     * @param params.tools - Available tools for the model to call
+     * @param params.clientId - Client identifier for event emission
+     * @returns Promise resolving to the complete assistant's response
+     *
+     * @example
+     * ```typescript
+     * const response = await provider.getStreamCompletion({
+     *   agentName: "gpt-assistant",
+     *   messages: [
+     *     { role: "user", content: "Write a haiku about coding" }
+     *   ],
+     *   mode: "direct",
+     *   tools: [],
+     *   clientId: "client-456"
+     * });
+     * // Client receives single "llm-completion" event with full response
+     * ```
+     */
     async getStreamCompletion(params) {
         const openai = getOpenAi();
         const { clientId, agentName, messages: rawMessages, mode, tools } = params;
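As the note above says, true streaming would go through the SDK's `stream: true` path. A minimal sketch of what that would look like with the official OpenAI SDK (this is not what the provider currently does; the model name is illustrative):

```typescript
import OpenAI from "openai";

async function trueStreaming(openai: OpenAI): Promise<string> {
  const stream = await openai.chat.completions.create({
    model: "gpt-4o-mini", // illustrative model name
    messages: [{ role: "user", content: "Write a haiku about coding" }],
    stream: true,
  });

  let full = "";
  for await (const chunk of stream) {
    const token = chunk.choices[0]?.delta?.content ?? "";
    full += token;
    process.stdout.write(token); // emit per token instead of one final event
  }
  return full;
}
```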
@@ -1443,11 +2725,53 @@ class GPT5Provider {
         })),
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_gpt5_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
 }
+    /**
+     * Performs structured output completion using OpenAI's response_format parameter.
+     *
+     * Uses OpenAI's native JSON schema mode to enforce structured output.
+     * The model is instructed to respond in a specific JSON format matching
+     * the provided schema. Uses jsonrepair to handle any JSON formatting issues.
+     *
+     * @param params - Outline completion parameters
+     * @param params.messages - Conversation history for context
+     * @param params.format - JSON schema or response format definition (supports both formats)
+     * @returns Promise resolving to validated JSON string conforming to the schema
+     * @throws Error if model returns a refusal message
+     *
+     * @example
+     * ```typescript
+     * const response = await provider.getOutlineCompletion({
+     *   messages: [
+     *     { role: "user", content: "Extract entities from: 'Apple released iPhone in Cupertino'" }
+     *   ],
+     *   format: {
+     *     type: "json_schema",
+     *     json_schema: {
+     *       schema: {
+     *         type: "object",
+     *         properties: {
+     *           entities: {
+     *             type: "array",
+     *             items: {
+     *               type: "object",
+     *               properties: {
+     *                 text: { type: "string" },
+     *                 type: { type: "string" }
+     *               }
+     *             }
+     *           }
+     *         }
+     *       }
+     *     }
+     *   }
+     * });
+     * ```
+     */
     async getOutlineCompletion(params) {
         const { messages: rawMessages, format } = params;
         const openai = getOpenAi();
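OpenAI's native JSON-schema mode described above is driven entirely by `response_format`. A sketch of that request shape with the official SDK, including the refusal check and the jsonrepair pass the doc mentions; the schema content and model name are illustrative:

```typescript
import OpenAI from "openai";
import { jsonrepair } from "jsonrepair";

async function outlineViaResponseFormat(openai: OpenAI): Promise<string> {
  const completion = await openai.chat.completions.create({
    model: "gpt-4o-mini", // illustrative model name
    messages: [{ role: "user", content: "Analyze sentiment of: 'Great product!'" }],
    response_format: {
      type: "json_schema",
      json_schema: {
        name: "sentiment_analysis",
        schema: {
          type: "object",
          properties: {
            sentiment: { type: "string" },
            score: { type: "number" },
          },
          required: ["sentiment", "score"],
          additionalProperties: false,
        },
      },
    },
  });

  const message = completion.choices[0].message;
  if (message.refusal) throw new Error(message.refusal); // model declined
  // jsonrepair guards against minor formatting issues, as described above.
  return jsonrepair(message.content ?? "");
}
```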
@@ -1486,13 +2810,40 @@ class GPT5Provider {
         content: json,
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_gpt5_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
     }
 }
 
+/**
+ * Creates and caches an OpenAI-compatible client for the Deepseek API.
+ *
+ * Uses the OpenAI SDK with Deepseek's API endpoint.
+ * The client instance is cached using singleshot memoization for performance.
+ * Token rotation is not supported - throws an error if an array of keys is provided.
+ *
+ * Key features:
+ * - OpenAI SDK compatibility layer
+ * - Single API key support only
+ * - Instance caching with singleshot
+ * - Automatic cache clearing on error
+ *
+ * @returns OpenAI client configured for the Deepseek API
+ * @throws Error if an API key array is provided (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { getDeepseek } from "./config/deepseek";
+ *
+ * const client = getDeepseek();
+ * const completion = await client.chat.completions.create({
+ *   model: "deepseek-chat",
+ *   messages: [{ role: "user", content: "Hello" }]
+ * });
+ * ```
+ */
 const getDeepseek = functoolsKit.singleshot(() => {
     const apiKey = lib.contextService.context.apiKey;
     if (Array.isArray(apiKey)) {
@@ -1505,12 +2856,52 @@ const getDeepseek = functoolsKit.singleshot(() => {
     });
 });
 
+/**
+ * Maximum number of retry attempts for outline completion.
+ */
 const MAX_ATTEMPTS$2 = 3;
+/**
+ * Provider for Deepseek AI models via OpenAI-compatible API.
+ *
+ * Supports Deepseek models through an OpenAI-compatible endpoint with tool calling.
+ * Features simulated streaming and structured output via tool-based schema enforcement.
+ *
+ * Key features:
+ * - OpenAI-compatible API endpoint
+ * - Tool calling with conditional inclusion (only if tools are present)
+ * - Simulated streaming (returns complete response)
+ * - Schema enforcement via tool calling with retry logic
+ * - Debug logging support
+ *
+ * @example
+ * ```typescript
+ * const provider = new DeepseekProvider(contextService, logger);
+ * const response = await provider.getCompletion({
+ *   agentName: "deepseek-assistant",
+ *   messages: [{ role: "user", content: "Explain transformers" }],
+ *   mode: "direct",
+ *   tools: [],
+ *   clientId: "client-001"
+ * });
+ * ```
+ */
 class DeepseekProvider {
+    /**
+     * Creates a new DeepseekProvider instance.
+     *
+     * @param contextService - Context service with model configuration
+     * @param logger - Logger for operation tracking
+     */
     constructor(contextService, logger) {
         this.contextService = contextService;
         this.logger = logger;
     }
+    /**
+     * Performs a standard completion request to Deepseek.
+     *
+     * @param params - Completion parameters
+     * @returns Promise resolving to the assistant's response
+     */
     async getCompletion(params) {
         const deepseek = getDeepseek();
         const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -1559,11 +2950,17 @@ class DeepseekProvider {
         })),
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_deepseek_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
 }
+    /**
+     * Performs a simulated streaming completion.
+     *
+     * @param params - Completion parameters
+     * @returns Promise resolving to the complete response
+     */
     async getStreamCompletion(params) {
         const deepseek = getDeepseek();
         const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -1620,11 +3017,18 @@ class DeepseekProvider {
         })),
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_deepseek_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
 }
+    /**
+     * Performs structured output completion with schema validation.
+     *
+     * @param params - Outline completion parameters
+     * @returns Promise resolving to validated JSON string
+     * @throws Error if model fails after MAX_ATTEMPTS
+     */
     async getOutlineCompletion(params) {
         const { messages: rawMessages, format } = params;
         const deepseek = getDeepseek();
@@ -1722,7 +3126,7 @@ class DeepseekProvider {
         content: JSON.stringify(validation.data),
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_deepseek_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
@@ -1735,6 +3139,33 @@ class DeepseekProvider {
     }
 }
 
+/**
+ * Creates and caches an OpenAI-compatible client for the Mistral API.
+ *
+ * Uses the OpenAI SDK with Mistral's API endpoint.
+ * The client instance is cached using singleshot memoization for performance.
+ * Token rotation is not supported - throws an error if an array of keys is provided.
+ *
+ * Key features:
+ * - OpenAI SDK compatibility layer
+ * - Single API key support only
+ * - Instance caching with singleshot
+ * - Automatic cache clearing on error
+ *
+ * @returns OpenAI client configured for the Mistral API
+ * @throws Error if an API key array is provided (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { getMistral } from "./config/mistral";
+ *
+ * const client = getMistral();
+ * const completion = await client.chat.completions.create({
+ *   model: "mistral-large-latest",
+ *   messages: [{ role: "user", content: "Hello" }]
+ * });
+ * ```
+ */
 const getMistral = functoolsKit.singleshot(() => {
     const apiKey = lib.contextService.context.apiKey;
     if (Array.isArray(apiKey)) {
@@ -1747,12 +3178,52 @@ const getMistral = functoolsKit.singleshot(() => {
     });
 });
 
+/**
+ * Maximum number of retry attempts for outline completion.
+ */
 const MAX_ATTEMPTS$1 = 3;
+/**
+ * Provider for Mistral AI models via OpenAI-compatible API.
+ *
+ * Implements Mistral API access through an OpenAI-compatible endpoint.
+ * Supports tool calling, simulated streaming, and structured output.
+ *
+ * Key features:
+ * - Mistral AI API via OpenAI-compatible endpoint
+ * - Tool calling with conditional inclusion
+ * - Simulated streaming (complete response)
+ * - Schema enforcement via tool calling with retry
+ * - Debug logging support
+ *
+ * @example
+ * ```typescript
+ * const provider = new MistralProvider(contextService, logger);
+ * const response = await provider.getCompletion({
+ *   agentName: "mistral-assistant",
+ *   messages: [{ role: "user", content: "Summarize quantum physics" }],
+ *   mode: "direct",
+ *   tools: [],
+ *   clientId: "client-555"
+ * });
+ * ```
+ */
 class MistralProvider {
+    /**
+     * Creates a new MistralProvider instance.
+     *
+     * @param contextService - Context service with model configuration
+     * @param logger - Logger for operation tracking
+     */
     constructor(contextService, logger) {
         this.contextService = contextService;
         this.logger = logger;
     }
+    /**
+     * Performs a standard completion request to Mistral.
+     *
+     * @param params - Completion parameters
+     * @returns Promise resolving to the assistant's response
+     */
     async getCompletion(params) {
         const mistral = getMistral();
         const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -1801,11 +3272,17 @@ class MistralProvider {
         })),
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_mistral_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
 }
+    /**
+     * Performs a simulated streaming completion.
+     *
+     * @param params - Completion parameters
+     * @returns Promise resolving to the complete response
+     */
     async getStreamCompletion(params) {
         const mistral = getMistral();
         const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -1862,11 +3339,18 @@ class MistralProvider {
         })),
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_mistral_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
 }
+    /**
+     * Performs structured output completion with schema validation.
+     *
+     * @param params - Outline completion parameters
+     * @returns Promise resolving to validated JSON string
+     * @throws Error if model fails after MAX_ATTEMPTS
+     */
     async getOutlineCompletion(params) {
         const { messages: rawMessages, format } = params;
         const mistral = getMistral();
@@ -1964,7 +3448,7 @@ class MistralProvider {
         content: JSON.stringify(validation.data),
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_mistral_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
@@ -1977,6 +3461,33 @@ class MistralProvider {
     }
 }
 
+/**
+ * Creates and caches an OpenAI-compatible client for the Perplexity API.
+ *
+ * Uses the OpenAI SDK with Perplexity's API endpoint.
+ * The client instance is cached using singleshot memoization for performance.
+ * Token rotation is not supported - throws an error if an array of keys is provided.
+ *
+ * Key features:
+ * - OpenAI SDK compatibility layer
+ * - Single API key support only
+ * - Instance caching with singleshot
+ * - Automatic cache clearing on error
+ *
+ * @returns OpenAI client configured for the Perplexity API
+ * @throws Error if an API key array is provided (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { getPerplexity } from "./config/perplexity";
+ *
+ * const client = getPerplexity();
+ * const completion = await client.chat.completions.create({
+ *   model: "llama-3.1-sonar-large-128k-online",
+ *   messages: [{ role: "user", content: "Hello" }]
+ * });
+ * ```
+ */
 const getPerplexity = functoolsKit.singleshot(() => {
     const apiKey = lib.contextService.context.apiKey;
     if (Array.isArray(apiKey)) {
@@ -1989,11 +3500,49 @@ const getPerplexity = functoolsKit.singleshot(() => {
     });
 });
 
+/**
+ * Provider for Perplexity AI models via OpenAI-compatible API.
+ *
+ * Implements Perplexity API access with specialized message handling.
+ * Filters and merges consecutive messages to comply with API requirements.
+ * Note: getStreamCompletion returns an error message, as streaming is not supported.
+ *
+ * Key features:
+ * - OpenAI-compatible API endpoint
+ * - Message filtering (user/assistant/tool only)
+ * - System message aggregation
+ * - Consecutive message merging (prevents API errors)
+ * - Tool calling support (requires description field)
+ * - Outline completion via response_format
+ * - Streaming not supported (returns error message)
+ *
+ * @example
+ * ```typescript
+ * const provider = new PerplexityProvider(contextService, logger);
+ * const response = await provider.getCompletion({
+ *   agentName: "perplexity-assistant",
+ *   messages: [{ role: "user", content: "Latest AI research?" }],
+ *   mode: "direct",
+ *   tools: [searchTool],
+ *   clientId: "client-333"
+ * });
+ * ```
+ */
 class PerplexityProvider {
+    /**
+     * Creates a new PerplexityProvider instance.
+     */
     constructor(contextService, logger) {
         this.contextService = contextService;
         this.logger = logger;
     }
+    /**
+     * Performs a standard completion with message filtering and merging.
+     * Filters messages to user/assistant/tool only and merges consecutive messages.
+     *
+     * @param params - Completion parameters
+     * @returns Promise resolving to the assistant's response
+     */
     async getCompletion(params) {
         const perplexity = getPerplexity();
         const { clientId, agentName, messages: rawMessages, mode, tools } = params;
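The filter-and-merge step described above is plain list processing. A sketch of what "merge consecutive messages" means here, with a simplified message type; the provider's real transform may differ in details such as system-message aggregation:

```typescript
type Role = "system" | "user" | "assistant" | "tool";
interface Msg {
  role: Role;
  content: string;
}

// Keep only user/assistant/tool turns, then collapse same-role neighbors,
// since APIs like Perplexity reject consecutive messages of one role.
function filterAndMerge(messages: Msg[]): Msg[] {
  const kept = messages.filter((m) => m.role !== "system");
  const merged: Msg[] = [];
  for (const message of kept) {
    const last = merged[merged.length - 1];
    if (last && last.role === message.role) {
      last.content += "\n" + message.content; // merge into the previous turn
    } else {
      merged.push({ ...message });
    }
  }
  return merged;
}

// Example: two user turns in a row become one.
console.log(
  filterAndMerge([
    { role: "user", content: "Hi" },
    { role: "user", content: "Latest AI research?" },
    { role: "assistant", content: "Here is a summary..." },
  ]),
);
```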
@@ -2078,11 +3627,18 @@ class PerplexityProvider {
         })),
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_perplexity_provider.txt", JSON.stringify({ params, answer: finalResult }, null, 2) + "\n\n");
     }
     return finalResult;
 }
+    /**
+     * Returns an error message indicating that streaming is not supported.
+     * The Perplexity provider does not implement token-by-token streaming.
+     *
+     * @param params - Completion parameters
+     * @returns Promise resolving to the error message
+     */
     async getStreamCompletion(params) {
         const { clientId, agentName, mode } = params;
         this.logger.log("perplexityProvider getStreamCompletion", {
@@ -2099,6 +3655,14 @@ class PerplexityProvider {
     };
     return result;
 }
+    /**
+     * Performs structured output completion using response_format.
+     * Filters and merges messages before sending.
+     *
+     * @param params - Outline completion parameters
+     * @returns Promise resolving to validated JSON string
+     * @throws Error if the model returns a refusal
+     */
     async getOutlineCompletion(params) {
         const { messages: rawMessages, format } = params;
         const perplexity = getPerplexity();
@@ -2170,13 +3734,40 @@ class PerplexityProvider {
         content: json,
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_perplexity_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
     }
 }
 
+/**
+ * Creates and caches an OpenAI-compatible client for the Cohere API.
+ *
+ * Uses the OpenAI SDK with Cohere's compatibility endpoint.
+ * The client instance is cached using singleshot memoization for performance.
+ * Token rotation is not supported - throws an error if an array of keys is provided.
+ *
+ * Key features:
+ * - OpenAI SDK compatibility layer
+ * - Single API key support only
+ * - Instance caching with singleshot
+ * - Automatic cache clearing on error
+ *
+ * @returns OpenAI client configured for the Cohere API
+ * @throws Error if an API key array is provided (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { getCohere } from "./config/cohere";
+ *
+ * const client = getCohere();
+ * const completion = await client.chat.completions.create({
+ *   model: "command-r-plus",
+ *   messages: [{ role: "user", content: "Hello" }]
+ * });
+ * ```
+ */
 const getCohere = functoolsKit.singleshot(() => {
     const apiKey = lib.contextService.context.apiKey;
     if (Array.isArray(apiKey)) {
@@ -2189,11 +3780,56 @@ const getCohere = functoolsKit.singleshot(() => {
     });
 });
 
+/**
+ * Provider for Cohere AI models via OpenAI-compatible API.
+ *
+ * Implements Cohere API access with specialized message handling for tool calling.
+ * Unlike other providers, it includes tool messages in the conversation and does NOT merge
+ * consecutive assistant messages (required for a proper tool calling flow).
+ *
+ * Key features:
+ * - OpenAI-compatible API endpoint
+ * - Message filtering (user/assistant/tool - includes tool messages)
+ * - System message aggregation
+ * - NO consecutive assistant message merging (merging would break tool calling)
+ * - Tool calling support (requires description field)
+ * - Outline completion via response_format
+ * - Simulated streaming
+ *
+ * Important: Cohere requires a strict tool_calls -> tool_responses sequence.
+ * Merging assistant messages breaks this flow.
+ *
+ * @example
+ * ```typescript
+ * const provider = new CohereProvider(contextService, logger);
+ * const response = await provider.getCompletion({
+ *   agentName: "cohere-assistant",
+ *   messages: [
+ *     { role: "user", content: "Search for AI papers" },
+ *     { role: "assistant", content: "", tool_calls: [searchCall] },
+ *     { role: "tool", content: "Results...", tool_call_id: "123" }
+ *   ],
+ *   mode: "direct",
+ *   tools: [searchTool],
+ *   clientId: "client-222"
+ * });
+ * ```
+ */
 class CohereProvider {
+    /**
+     * Creates a new CohereProvider instance.
+     */
     constructor(contextService, logger) {
         this.contextService = contextService;
        this.logger = logger;
    }
+    /**
+     * Performs a standard completion with Cohere-specific message handling.
+     * Includes tool messages and preserves the assistant message sequence.
+     *
+     * @param params - Completion parameters
+     * @returns Promise resolving to the assistant's response
+     */
    async getCompletion(params) {
        const cohere = getCohere();
        const { clientId, agentName, messages: rawMessages, mode, tools } = params;
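Cohere's constraint inverts the Perplexity rule: assistant turns that carry tool_calls must stay adjacent to the tool responses that answer them. A sketch of a merge that collapses only user turns and leaves the assistant/tool sequence untouched (simplified types, as in the earlier sketch):

```typescript
type Role = "user" | "assistant" | "tool";
interface Msg {
  role: Role;
  content: string;
  tool_calls?: unknown[];
  tool_call_id?: string;
}

// Merging assistant messages would detach tool_calls from the tool
// responses that answer them, so only user turns are collapsed here.
function mergeUsersOnly(messages: Msg[]): Msg[] {
  const result: Msg[] = [];
  for (const message of messages) {
    const last = result[result.length - 1];
    if (last && last.role === "user" && message.role === "user") {
      last.content += "\n" + message.content;
    } else {
      result.push({ ...message });
    }
  }
  return result;
}
```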
@@ -2265,11 +3901,17 @@ class CohereProvider {
         })),
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_cohere_provider.txt", JSON.stringify({ params, answer: finalResult }, null, 2) + "\n\n");
     }
     return finalResult;
 }
+    /**
+     * Performs a simulated streaming completion with Cohere-specific message handling.
+     *
+     * @param params - Completion parameters
+     * @returns Promise resolving to the complete response
+     */
     async getStreamCompletion(params) {
         const cohere = getCohere();
         const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -2349,11 +3991,19 @@ class CohereProvider {
         })),
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_cohere_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
 }
+    /**
+     * Performs structured output completion using response_format.
+     * Filters and merges user messages only (preserves the assistant sequence).
+     *
+     * @param params - Outline completion parameters
+     * @returns Promise resolving to validated JSON string
+     * @throws Error if model returns a refusal
+     */
     async getOutlineCompletion(params) {
         const { messages: rawMessages, format } = params;
         const cohere = getCohere();
@@ -2412,20 +4062,63 @@ class CohereProvider {
         content: json,
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_cohere_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
     }
 }
 
+/**
+ * Maximum number of retry attempts for outline completion.
+ */
 const MAX_ATTEMPTS = 3;
+/**
+ * Alibaba Cloud DashScope API base URL.
+ */
 const BASE_URL = "https://dashscope-intl.aliyuncs.com/compatible-mode/v1";
+/**
+ * Provider for Alibaba Cloud Qwen models via the DashScope API.
+ *
+ * Implements Alibaba Cloud DashScope API access using fetchApi for HTTP requests.
+ * Supports thinking mode control via the enable_thinking parameter.
+ * Does NOT support token rotation (single API key only).
+ *
+ * Key features:
+ * - DashScope OpenAI-compatible endpoint
+ * - Direct fetchApi HTTP requests (no SDK)
+ * - Thinking mode control (enable_thinking parameter)
+ * - Tool calling with conditional inclusion
+ * - Simulated streaming
+ * - No token rotation support
+ *
+ * @example
+ * ```typescript
+ * const provider = new AlibabaProvider(contextService, logger);
+ * const response = await provider.getCompletion({
+ *   agentName: "qwen-assistant",
+ *   messages: [{ role: "user", content: "Explain blockchain" }],
+ *   mode: "direct",
+ *   tools: [],
+ *   clientId: "client-111"
+ * });
+ * ```
+ */
 class AlibabaProvider {
+    /**
+     * Creates a new AlibabaProvider instance.
+     */
     constructor(contextService, logger) {
         this.contextService = contextService;
         this.logger = logger;
     }
+    /**
+     * Performs a standard completion request to Alibaba DashScope.
+     *
+     * @param params - Completion parameters
+     * @returns Promise resolving to the assistant's response
+     * @throws Error if token rotation is attempted
+     */
     async getCompletion(params) {
         const { clientId, agentName, messages: rawMessages, mode, tools } = params;
         this.logger.log("alibabaProvider getCompletion", {
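Since AlibabaProvider talks to DashScope over raw fetch rather than an SDK, the request is just an OpenAI-style POST against the BASE_URL above. A sketch of that call; the `/chat/completions` path and the `enable_thinking` body field follow the doc's description and should be treated as assumptions:

```typescript
const BASE_URL = "https://dashscope-intl.aliyuncs.com/compatible-mode/v1";

async function dashScopeChat(apiKey: string, model: string) {
  const response = await fetch(`${BASE_URL}/chat/completions`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${apiKey}`,
    },
    body: JSON.stringify({
      model,
      messages: [{ role: "user", content: "Explain blockchain" }],
      // DashScope-specific switch for Qwen thinking mode (per the doc above).
      enable_thinking: false,
    }),
  });
  if (!response.ok) {
    throw new Error(`DashScope request failed: ${response.status}`);
  }
  const data = await response.json();
  return data.choices[0].message; // OpenAI-compatible response shape
}
```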
@@ -2488,11 +4181,18 @@ class AlibabaProvider {
         })),
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_alibaba_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
 }
+    /**
+     * Performs a simulated streaming completion.
+     *
+     * @param params - Completion parameters
+     * @returns Promise resolving to the complete response
+     * @throws Error if token rotation is attempted
+     */
     async getStreamCompletion(params) {
         const { clientId, agentName, messages: rawMessages, mode, tools } = params;
         this.logger.log("alibabaProvider getStreamCompletion", {
@@ -2562,11 +4262,18 @@ class AlibabaProvider {
         })),
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_alibaba_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
 }
+    /**
+     * Performs structured output completion using tool calling with retry logic.
+     *
+     * @param params - Outline completion parameters
+     * @returns Promise resolving to validated JSON string
+     * @throws Error if model fails after MAX_ATTEMPTS or token rotation is attempted
+     */
     async getOutlineCompletion(params) {
         const { messages: rawMessages, format } = params;
         this.logger.log("alibabaProvider getOutlineCompletion", {
@@ -2676,7 +4383,7 @@ class AlibabaProvider {
         content: JSON.stringify(validation.data),
     };
     // Debug logging
-    if (CC_ENABLE_DEBUG) {
+    if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
         await fs.appendFile("./debug_alibaba_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
     }
     return result;
@@ -2689,42 +4396,527 @@ class AlibabaProvider {
    }
}

+/**
+ * Creates and caches an OpenAI-compatible client for Z.ai GLM-4 API.
+ *
+ * Uses OpenAI SDK with Z.ai's API endpoint for accessing Zhipu AI's GLM-4 models.
+ * The client instance is cached using singleshot memoization for performance.
+ * Token rotation is not supported - throws error if array of keys is provided.
+ *
+ * Key features:
+ * - OpenAI SDK compatibility layer
+ * - Single API key support only
+ * - Instance caching with singleshot
+ * - Automatic cache clearing on error
+ * - Context-based API key retrieval
+ *
+ * @returns OpenAI client configured for Z.ai API
+ * @throws Error if API key array is provided (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { getZAi } from "./config/zai";
+ *
+ * const client = getZAi();
+ * const completion = await client.chat.completions.create({
+ *   model: "glm-4-plus",
+ *   messages: [{ role: "user", content: "Hello" }]
+ * });
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // With structured output
+ * const client = getZAi();
+ * const completion = await client.chat.completions.create({
+ *   model: "glm-4-plus",
+ *   messages: [{ role: "user", content: "Generate trading signal" }],
+ *   response_format: {
+ *     type: "json_schema",
+ *     json_schema: { schema: { type: "object", properties: {...} } }
+ *   }
+ * });
+ * ```
+ */
+const getZAi = functoolsKit.singleshot(() => {
+    const apiKey = lib.contextService.context.apiKey;
+    if (Array.isArray(apiKey)) {
+        getZAi.clear();
+        throw new Error("Z.ai provider does not support token rotation");
+    }
+    return new OpenAI({
+        apiKey,
+        baseURL: "https://api.z.ai/api/paas/v4/"
+    });
+});
+
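The `singleshot` wrapper is what makes `getZAi` safe to call on every request: the first call builds the client, later calls reuse it, and `clear()` evicts a rejected configuration before the error propagates. A minimal sketch of that pattern, using a hypothetical `memoizeOnce` helper rather than functools-kit's actual implementation:

```typescript
// Sketch of the memoize-once pattern behind getZAi.
// `memoizeOnce` is a hypothetical stand-in for functoolsKit.singleshot.
function memoizeOnce<T>(factory: () => T): (() => T) & { clear: () => void } {
    let cached: { value: T } | null = null;
    const call = () => {
        if (cached === null) {
            cached = { value: factory() }; // first call runs the factory
        }
        return cached.value; // later calls reuse the same instance
    };
    return Object.assign(call, {
        clear: () => {
            cached = null; // next call re-runs the factory
        },
    });
}

// Usage mirrors getZAi: clear() before throwing keeps a rejected
// configuration out of the cache even if an implementation stores
// in-flight state.
const getClient = memoizeOnce(() => {
    const apiKey = process.env.ZAI_API_KEY; // illustrative env var
    if (!apiKey) {
        getClient.clear();
        throw new Error("missing API key");
    }
    return { apiKey }; // stands in for `new OpenAI({ apiKey, baseURL })`
});
```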
4453
|
+
/**
|
|
4454
|
+
* GLM-4 provider implementation for Z.ai API integration.
|
|
4455
|
+
*
|
|
4456
|
+
* Provides access to Zhipu AI's GLM-4 models through OpenAI-compatible API.
|
|
4457
|
+
* Supports standard completions, streaming, and structured (outline) outputs.
|
|
4458
|
+
* Uses the Z.ai API endpoint for model inference.
|
|
4459
|
+
*
|
|
4460
|
+
* Key features:
|
|
4461
|
+
* - OpenAI SDK compatibility layer
|
|
4462
|
+
* - Tool calling support (function calls)
|
|
4463
|
+
* - Streaming completion with event emission
|
|
4464
|
+
* - Structured JSON output with schema validation
|
|
4465
|
+
* - Debug logging to file when enabled
|
|
4466
|
+
* - Message format transformation between agent-swarm-kit and OpenAI formats
|
|
4467
|
+
*
|
|
4468
|
+
* @example
|
|
4469
|
+
* ```typescript
|
|
4470
|
+
* import { GLM4Provider } from "./client/GLM4Provider.client";
|
|
4471
|
+
* import { ContextService } from "./services/base/ContextService";
|
|
4472
|
+
*
|
|
4473
|
+
* const provider = new GLM4Provider(contextService, logger);
|
|
4474
|
+
*
|
|
4475
|
+
* // Standard completion
|
|
4476
|
+
* const result = await provider.getCompletion({
|
|
4477
|
+
* messages: [{ role: "user", content: "Hello" }],
|
|
4478
|
+
* agentName: "test-agent",
|
|
4479
|
+
* clientId: "client-123",
|
|
4480
|
+
* mode: "default"
|
|
4481
|
+
* });
|
|
4482
|
+
*
|
|
4483
|
+
* // Streaming completion
|
|
4484
|
+
* const stream = await provider.getStreamCompletion({
|
|
4485
|
+
* messages: [{ role: "user", content: "Analyze market" }],
|
|
4486
|
+
* agentName: "trader-agent",
|
|
4487
|
+
* clientId: "client-456",
|
|
4488
|
+
* mode: "stream"
|
|
4489
|
+
* });
|
|
4490
|
+
*
|
|
4491
|
+
* // Structured output
|
|
4492
|
+
* const outline = await provider.getOutlineCompletion({
|
|
4493
|
+
* messages: [{ role: "user", content: "Trading decision" }],
|
|
4494
|
+
* format: { type: "object", properties: {...} }
|
|
4495
|
+
* });
|
|
4496
|
+
* ```
|
|
4497
|
+
*/
|
|
4498
|
+
class GLM4Provider {
|
|
4499
|
+
/**
|
|
4500
|
+
* Creates a new GLM4Provider instance.
|
|
4501
|
+
*
|
|
4502
|
+
* @param contextService - Context service providing execution context (model, API key)
|
|
4503
|
+
* @param logger - Logger service for operation tracking
|
|
4504
|
+
*/
|
|
4505
|
+
constructor(contextService, logger) {
|
|
4506
|
+
this.contextService = contextService;
|
|
4507
|
+
this.logger = logger;
|
|
4508
|
+
}
|
+    /**
+     * Executes a standard GLM-4 completion request.
+     *
+     * Sends messages to the GLM-4 model and returns the completion response.
+     * Supports tool calling (function calls) and automatically transforms message formats
+     * between agent-swarm-kit and OpenAI formats.
+     *
+     * Key operations:
+     * - Maps agent-swarm-kit messages to OpenAI format
+     * - Handles tool calls with JSON serialization/deserialization
+     * - Logs operation details for debugging
+     * - Optionally writes debug output to file
+     *
+     * @param params - Completion parameters including messages, tools, and context
+     * @param params.messages - Array of conversation messages
+     * @param params.tools - Optional array of function tools available to the model
+     * @param params.agentName - Name of the requesting agent
+     * @param params.clientId - Client session identifier
+     * @param params.mode - Completion mode (e.g., "default", "stream")
+     * @returns Promise resolving to completion message with optional tool calls
+     *
+     * @example
+     * ```typescript
+     * const result = await provider.getCompletion({
+     *   messages: [
+     *     { role: "system", content: "You are a trading assistant" },
+     *     { role: "user", content: "Analyze BTC market" }
+     *   ],
+     *   tools: [
+     *     {
+     *       type: "function",
+     *       function: {
+     *         name: "get_market_data",
+     *         parameters: { type: "object", properties: {...} }
+     *       }
+     *     }
+     *   ],
+     *   agentName: "trader",
+     *   clientId: "session-123",
+     *   mode: "default"
+     * });
+     *
+     * console.log(result.content); // Model's text response
+     * console.log(result.tool_calls); // Any function calls requested
+     * ```
+     */
+    async getCompletion(params) {
+        const openai = getZAi();
+        const { clientId, agentName, messages: rawMessages, mode, tools } = params;
+        this.logger.log("glm4Provider getCompletion", {
+            agentName,
+            mode,
+            clientId,
+            context: this.contextService.context,
+        });
+        // Map raw messages to OpenAI format
+        const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
+            role,
+            tool_call_id,
+            content,
+            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
+                ...rest,
+                function: {
+                    name: f.name,
+                    arguments: JSON.stringify(f.arguments),
+                },
+            })),
+        }));
+        const { choices: [{ message: { content, role, tool_calls } }] } = await openai.chat.completions.create({
+            model: this.contextService.context.model,
+            messages: messages,
+            tools: tools,
+        });
+        const result = {
+            content: content,
+            mode,
+            agentName,
+            role,
+            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
+                ...rest,
+                function: {
+                    name: f.name,
+                    arguments: JSON.parse(f.arguments),
+                },
+            })),
+        };
+        // Debug logging
+        if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
+            await fs.appendFile("./debug_glm4_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
+        }
+        return result;
+    }
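Note the asymmetry in `getCompletion`: tool-call `arguments` are `JSON.stringify`-ed on the way to the API and `JSON.parse`-d on the way back, because the OpenAI wire format carries them as strings while agent-swarm-kit keeps them as objects. A sketch of that round-trip with illustrative types (the real types in both libraries differ in detail):

```typescript
// Illustrative types; agent-swarm-kit's and OpenAI's actual types differ.
interface AgentToolCall {
    id: string;
    type: "function";
    function: { name: string; arguments: Record<string, unknown> }; // object form
}
interface WireToolCall {
    id: string;
    type: "function";
    function: { name: string; arguments: string }; // JSON string form
}

// Outbound: agent-swarm-kit object arguments -> OpenAI JSON strings.
const toWire = (calls: AgentToolCall[]): WireToolCall[] =>
    calls.map(({ function: f, ...rest }) => ({
        ...rest,
        function: { name: f.name, arguments: JSON.stringify(f.arguments) },
    }));

// Inbound: OpenAI JSON strings -> object arguments.
const fromWire = (calls: WireToolCall[]): AgentToolCall[] =>
    calls.map(({ function: f, ...rest }) => ({
        ...rest,
        function: { name: f.name, arguments: JSON.parse(f.arguments) },
    }));
```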
+    /**
+     * Executes a streaming GLM-4 completion request with event emission.
+     *
+     * Similar to getCompletion but emits "llm-completion" events during processing
+     * to enable real-time updates. The full response is accumulated and returned
+     * once streaming completes.
+     *
+     * Key operations:
+     * - Maps agent-swarm-kit messages to OpenAI format
+     * - Formats tools for OpenAI API
+     * - Emits events to client for real-time updates
+     * - Handles tool calls with JSON parsing
+     * - Logs operation details for debugging
+     * - Optionally writes debug output to file
+     *
+     * @param params - Completion parameters including messages, tools, and context
+     * @param params.messages - Array of conversation messages
+     * @param params.tools - Optional array of function tools available to the model
+     * @param params.agentName - Name of the requesting agent
+     * @param params.clientId - Client session identifier for event emission
+     * @param params.mode - Completion mode (typically "stream")
+     * @returns Promise resolving to accumulated completion message
+     *
+     * @example
+     * ```typescript
+     * // Listen for streaming events
+     * listen("llm-completion", (event) => {
+     *   console.log("Received chunk:", event.content);
+     * });
+     *
+     * const result = await provider.getStreamCompletion({
+     *   messages: [
+     *     { role: "user", content: "Generate trading signal for ETH" }
+     *   ],
+     *   tools: [...],
+     *   agentName: "signal-agent",
+     *   clientId: "client-789",
+     *   mode: "stream"
+     * });
+     *
+     * console.log("Final result:", result.content);
+     * ```
+     */
+    async getStreamCompletion(params) {
+        const openai = getZAi();
+        const { clientId, agentName, messages: rawMessages, mode, tools } = params;
+        this.logger.log("glm4Provider getStreamCompletion", {
+            agentName,
+            mode,
+            clientId,
+            context: this.contextService.context,
+        });
+        // Map raw messages to OpenAI format
+        const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
+            role,
+            tool_call_id,
+            content,
+            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
+                ...rest,
+                function: {
+                    name: f.name,
+                    arguments: JSON.stringify(f.arguments),
+                },
+            })),
+        }));
+        // Map tools to OpenAI format
+        const formattedTools = tools?.map(({ type, function: f }) => ({
+            type: type,
+            function: {
+                name: f.name,
+                parameters: f.parameters,
+            },
+        }));
+        const { choices: [{ message: { content, role, tool_calls } }] } = await openai.chat.completions.create({
+            model: this.contextService.context.model,
+            messages: messages,
+            tools: formattedTools,
+        });
+        // Emit events to mimic streaming behavior
+        if (content) {
+            await agentSwarmKit.event(clientId, "llm-completion", {
+                content: content.trim(),
+                agentName,
+            });
+        }
+        const result = {
+            content: content || "",
+            mode,
+            agentName,
+            role,
+            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
+                ...rest,
+                function: {
+                    name: f.name,
+                    arguments: JSON.parse(f.arguments),
+                },
+            })),
+        };
+        // Debug logging
+        if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
+            await fs.appendFile("./debug_glm4_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
+        }
+        return result;
+    }
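Despite its name, `getStreamCompletion` performs one non-streaming request and then emits a single `llm-completion` event carrying the whole reply; the streaming is simulated, not token-by-token. A consumer written as an accumulator works for both cases. A sketch, assuming the `listen` helper from the JSDoc example and a hypothetical `render` hook:

```typescript
// Hypothetical ambient declarations for the sketch; the exact listen()
// signature in agent-swarm-kit may differ from the JSDoc example.
declare function listen(
    topic: string,
    handler: (event: { content: string; agentName: string }) => void
): void;
declare function render(text: string): void;

let buffer = "";
listen("llm-completion", (event) => {
    buffer += event.content; // one whole-reply event from GLM4Provider,
    render(buffer);          // or many chunks from a true streaming provider
});
```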
+    /**
+     * Executes a structured outline completion with JSON schema validation.
+     *
+     * Generates a structured JSON response from GLM-4 that conforms to a provided schema.
+     * Uses OpenAI's response_format parameter to enforce JSON structure.
+     * The response is automatically repaired using jsonrepair if needed.
+     *
+     * Key operations:
+     * - Maps agent-swarm-kit messages to OpenAI format
+     * - Configures JSON schema response format
+     * - Sends request to GLM-4 model
+     * - Validates and repairs JSON response
+     * - Handles refusal messages
+     * - Logs operation details for debugging
+     * - Optionally writes debug output to file
+     *
+     * @param params - Outline completion parameters
+     * @param params.messages - Array of conversation messages
+     * @param params.format - JSON schema format definition or response_format object
+     * @returns Promise resolving to structured JSON message
+     * @throws Error if model refuses to generate response
+     *
+     * @example
+     * ```typescript
+     * const signal = await provider.getOutlineCompletion({
+     *   messages: [
+     *     { role: "system", content: "Generate trading signals" },
+     *     { role: "user", content: "Analyze BTC/USDT" }
+     *   ],
+     *   format: {
+     *     type: "object",
+     *     properties: {
+     *       position: { type: "string", enum: ["long", "short", "wait"] },
+     *       price_open: { type: "number" },
+     *       price_stop_loss: { type: "number" },
+     *       price_take_profit: { type: "number" }
+     *     },
+     *     required: ["position", "price_open", "price_stop_loss", "price_take_profit"]
+     *   }
+     * });
+     *
+     * const data = JSON.parse(signal.content);
+     * console.log(`Position: ${data.position}`);
+     * console.log(`Entry: ${data.price_open}`);
+     * ```
+     */
+    async getOutlineCompletion(params) {
+        const { messages: rawMessages, format } = params;
+        const openai = getZAi();
+        this.logger.log("glm4Provider getOutlineCompletion", {
+            context: this.contextService.context,
+        });
+        // Map raw messages to OpenAI format
+        const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
+            role,
+            tool_call_id,
+            content,
+            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
+                ...rest,
+                function: {
+                    name: f.name,
+                    arguments: JSON.stringify(f.arguments),
+                },
+            })),
+        }));
+        // Extract response format
+        const response_format = "json_schema" in format
+            ? format
+            : { type: "json_schema", json_schema: { schema: format } };
+        const completion = await openai.chat.completions.create({
+            messages: messages,
+            model: this.contextService.context.model,
+            response_format: response_format,
+        });
+        const choice = completion.choices[0];
+        if (choice.message.refusal) {
+            throw new Error(choice.message.refusal);
+        }
+        const json = jsonrepair.jsonrepair(choice.message.content || "");
+        const result = {
+            role: "assistant",
+            content: json,
+        };
+        // Debug logging
+        if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
+            await fs.appendFile("./debug_glm4_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
+        }
+        return result;
+    }
+}
+
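`getOutlineCompletion` runs the model's raw text through `jsonrepair` before returning it, so common almost-JSON defects (trailing commas, single quotes, unquoted keys) do not break downstream `JSON.parse`. A minimal sketch of that repair-then-parse step:

```typescript
import { jsonrepair } from "jsonrepair";

// Repair-then-parse step as used by getOutlineCompletion: jsonrepair
// rewrites almost-JSON into strict JSON, then JSON.parse consumes it.
function parseModelJson(raw: string): unknown {
    const repaired = jsonrepair(raw); // throws if the text is beyond repair
    return JSON.parse(repaired);
}

// e.g. parseModelJson("{position: 'long',}") -> { position: "long" }
```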
+/**
+ * Main library entry point for the Ollama package.
+ *
+ * Initializes the dependency injection container, registers all AI providers,
+ * and exports the engine object containing all services.
+ *
+ * The engine provides access to:
+ * - Common services (logger)
+ * - Base services (context)
+ * - Private services (runner and outline private services)
+ * - Public services (runner and outline public services)
+ *
+ * Registered AI providers:
+ * - Ollama (local and cloud)
+ * - OpenAI (GPT-5)
+ * - Claude (Anthropic)
+ * - Deepseek
+ * - Mistral
+ * - Perplexity
+ * - Cohere
+ * - Grok (xAI)
+ * - Alibaba
+ * - Hugging Face
+ * - GLM-4 (Zhipu AI via Z.ai)
+ *
+ * @example
+ * ```typescript
+ * import { engine } from "./lib";
+ *
+ * // Access logger
+ * engine.loggerService.info("Application started");
+ *
+ * // Use public service for AI completion
+ * const result = await engine.runnerPublicService.getCompletion(
+ *   { messages: [...] },
+ *   { inference: "claude", model: "claude-3-5-sonnet", apiKey: "..." }
+ * );
+ * ```
+ */
+/**
+ * Common service instances.
+ */
const commonServices = {
    loggerService: inject(TYPES.loggerService),
};
+/**
+ * Base service instances.
+ */
const baseServices = {
    contextService: inject(TYPES.contextService),
};
+/**
+ * Private service instances.
+ */
const privateServices = {
    runnerPrivateService: inject(TYPES.runnerPrivateService),
    outlinePrivateService: inject(TYPES.outlinePrivateService),
};
+/**
+ * Public service instances.
+ */
const publicServices = {
    runnerPublicService: inject(TYPES.runnerPublicService),
    outlinePublicService: inject(TYPES.outlinePublicService),
};
+/**
+ * Main engine object containing all services.
+ * Provides unified access to the entire service layer.
+ */
const engine = {
    ...commonServices,
    ...baseServices,
    ...privateServices,
    ...publicServices,
};
+// Initialize DI container
init();
+/**
+ * Register all AI provider implementations.
+ */
{
    engine.runnerPrivateService.registerRunner(InferenceName.OllamaInference, OllamaProvider);
-    engine.runnerPrivateService.registerRunner(InferenceName.GrokInference, GrokProvider);
+    engine.runnerPrivateService.registerRunner(InferenceName.GrokInference, GrokProvider);
    engine.runnerPrivateService.registerRunner(InferenceName.HfInference, HfProvider);
-    engine.runnerPrivateService.registerRunner(InferenceName.ClaudeInference, ClaudeProvider);
+    engine.runnerPrivateService.registerRunner(InferenceName.ClaudeInference, ClaudeProvider);
    engine.runnerPrivateService.registerRunner(InferenceName.GPT5Inference, GPT5Provider);
    engine.runnerPrivateService.registerRunner(InferenceName.DeepseekInference, DeepseekProvider);
    engine.runnerPrivateService.registerRunner(InferenceName.MistralInference, MistralProvider);
    engine.runnerPrivateService.registerRunner(InferenceName.PerplexityInference, PerplexityProvider);
    engine.runnerPrivateService.registerRunner(InferenceName.CohereInference, CohereProvider);
    engine.runnerPrivateService.registerRunner(InferenceName.AlibabaInference, AlibabaProvider);
+    engine.runnerPrivateService.registerRunner(InferenceName.GLM4Inference, GLM4Provider);
}
+// Make engine globally accessible for debugging
Object.assign(globalThis, { engine });
var lib = engine;

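The registration block above is also the extension point: each provider is a class exposing the same three completion methods. A hypothetical provider registered the same way (whether `registerRunner` accepts arbitrary classes like this is an assumption drawn from the registrations shown in this diff):

```typescript
// Hypothetical provider; the method set mirrors GLM4Provider above.
class EchoProvider {
    constructor(private contextService: any, private logger: any) {}

    async getCompletion(params: any) {
        const { messages, mode, agentName } = params;
        const last = messages[messages.length - 1];
        // Echo the last message back as the "model" reply.
        return { role: "assistant", content: last?.content ?? "", mode, agentName };
    }

    async getStreamCompletion(params: any) {
        return this.getCompletion(params); // no real streaming, like GLM4Provider
    }

    async getOutlineCompletion(_params: any) {
        return { role: "assistant", content: "{}" }; // empty-but-valid JSON
    }
}

// "EchoInference" is a made-up name; real code would extend InferenceName.
engine.runnerPrivateService.registerRunner("EchoInference" as any, EchoProvider);
```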
+/**
+ * Outline runner completion handler registration.
+ *
+ * Registers a structured outline completion handler with agent-swarm-kit.
+ * This completion type enforces JSON schema validation on AI responses,
+ * ensuring they conform to a predefined structure. Essential for extracting
+ * structured data from AI responses (e.g., trading signals with specific fields).
+ *
+ * Key features:
+ * - JSON schema validation enabled (json: true)
+ * - Structured output enforcement
+ * - Type-safe response parsing
+ * - Automatic validation with retry on failure
+ * - Delegates to RunnerPrivateService
+ *
+ * @example
+ * ```typescript
+ * import { completion } from "agent-swarm-kit";
+ * import { CompletionName } from "./enum/CompletionName";
+ *
+ * const result = await completion(CompletionName.RunnerOutlineCompletion, {
+ *   messages: [
+ *     { role: "user", content: "Decide trading position" }
+ *   ]
+ * });
+ * // Returns structured data validated against schema
+ * ```
+ */
agentSwarmKit.addCompletion({
    completionName: CompletionName.RunnerOutlineCompletion,
    getCompletion: async (params) => {
@@ -2733,6 +4925,33 @@ agentSwarmKit.addCompletion({
            json: true,
        });

+/**
+ * Streaming runner completion handler registration.
+ *
+ * Registers a streaming AI completion handler with agent-swarm-kit.
+ * This completion type enables real-time token streaming from AI providers
+ * that support it (OpenAI, Claude, etc.), with automatic accumulation into
+ * a complete response.
+ *
+ * Key features:
+ * - Streaming completion mode for real-time responses
+ * - Automatic response accumulation
+ * - Delegates to RunnerPrivateService
+ * - Supports streaming-capable AI providers
+ *
+ * @example
+ * ```typescript
+ * import { completion } from "agent-swarm-kit";
+ * import { CompletionName } from "./enum/CompletionName";
+ *
+ * const result = await completion(CompletionName.RunnerStreamCompletion, {
+ *   messages: [
+ *     { role: "user", content: "Generate trading analysis" }
+ *   ]
+ * });
+ * // Response is accumulated from stream
+ * ```
+ */
agentSwarmKit.addCompletion({
    completionName: CompletionName.RunnerStreamCompletion,
    getCompletion: async (params) => {
@@ -2740,6 +4959,32 @@ agentSwarmKit.addCompletion({
    },
});

+/**
+ * Standard runner completion handler registration.
+ *
+ * Registers a non-streaming AI completion handler with agent-swarm-kit.
+ * This completion type is used for standard request-response AI interactions
+ * where the full response is returned at once.
+ *
+ * Key features:
+ * - Standard (non-streaming) completion mode
+ * - Delegates to RunnerPrivateService
+ * - Supports all registered AI providers
+ * - Context-aware provider selection
+ *
+ * @example
+ * ```typescript
+ * import { completion } from "agent-swarm-kit";
+ * import { CompletionName } from "./enum/CompletionName";
+ *
+ * const result = await completion(CompletionName.RunnerCompletion, {
+ *   messages: [
+ *     { role: "system", content: "You are a trading assistant" },
+ *     { role: "user", content: "Analyze BTC/USDT" }
+ *   ]
+ * });
+ * ```
+ */
agentSwarmKit.addCompletion({
    completionName: CompletionName.RunnerCompletion,
    getCompletion: async (params) => {
@@ -2747,6 +4992,35 @@ agentSwarmKit.addCompletion({
    },
});

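The three registrations above differ only in which `RunnerPrivateService` path they delegate to, so a caller can pick a mode by completion name. A small dispatcher sketch over the three registered names, reusing the `completion` entry point shown in the JSDoc examples (its exact signature in agent-swarm-kit is assumed from those examples):

```typescript
import { completion } from "agent-swarm-kit";
import { CompletionName } from "@backtest-kit/ollama";

// Dispatcher sketch over the three registered completion names.
type Mode = "standard" | "stream" | "outline";

const NAME_BY_MODE: Record<Mode, CompletionName> = {
    standard: CompletionName.RunnerCompletion,
    stream: CompletionName.RunnerStreamCompletion,
    outline: CompletionName.RunnerOutlineCompletion,
};

async function run(mode: Mode, messages: Array<{ role: string; content: string }>) {
    return await completion(NAME_BY_MODE[mode], { messages });
}
```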
+/**
+ * Zod schema for trading signal structured output.
+ *
+ * Defines the JSON schema used for LLM-generated trading signals with
+ * comprehensive field descriptions and validation rules. Used with outline
+ * completion to enforce structured output from language models.
+ *
+ * Fields:
+ * - position: Trading direction (long/short/wait)
+ * - price_open: Entry price in USD
+ * - price_stop_loss: Stop-loss price in USD
+ * - price_take_profit: Take-profit price in USD
+ * - minute_estimated_time: Estimated hold duration in minutes
+ * - risk_note: Detailed risk assessment with specific metrics
+ *
+ * @example
+ * ```typescript
+ * import { SignalSchema } from './schema/Signal.schema';
+ *
+ * const signal = SignalSchema.parse({
+ *   position: 'long',
+ *   price_open: 50000,
+ *   price_stop_loss: 49000,
+ *   price_take_profit: 52000,
+ *   minute_estimated_time: 120,
+ *   risk_note: 'RSI oversold at 32%, volume spike +45%'
+ * });
+ * ```
+ */
const SignalSchema = zod.z.object({
    position: zod.z
        .enum(["long", "short", "wait"])
@@ -2768,6 +5042,46 @@ const SignalSchema = zod.z.object({
        .describe(functoolsKit.str.newline("Description of current market situation risks:", "", "Analyze and specify applicable risks:", "1. Whale manipulations (volume spikes, long shadows, pin bars, candle engulfing, false breakouts)", "2. Order book (order book walls, spoofing, bid/ask imbalance, low liquidity)", "3. P&L history (recurring mistakes on similar patterns)", "4. Time factors (trading session, low liquidity, upcoming events)", "5. Correlations (overall market trend, conflicting trends across timeframes)", "6. Technical risks (indicator divergences, weak volumes, critical levels)", "7. Gaps and anomalies (price gaps, unfilled gaps, movements without volume)", "", "Provide SPECIFIC numbers, percentages and probabilities.")),
});

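Because model output can still violate the schema at runtime, callers may prefer zod's `safeParse` over `parse` so a bad signal becomes a branch rather than an exception. A sketch against a reduced restatement of the schema (the shipped version additionally attaches long `.describe()` prompt strings to every field):

```typescript
import { z } from "zod";

// Reduced restatement of SignalSchema for illustration only.
const Signal = z.object({
    position: z.enum(["long", "short", "wait"]),
    price_open: z.number(),
    price_stop_loss: z.number(),
    price_take_profit: z.number(),
    minute_estimated_time: z.number(),
    risk_note: z.string(),
});

declare const llmOutput: string; // hypothetical raw model response

const parsed = Signal.safeParse(JSON.parse(llmOutput));
if (parsed.success) {
    const signal = parsed.data; // fully typed from here on
    console.log(signal.position);
} else {
    // Feed parsed.error.issues back to the model for a corrected retry.
    console.error(parsed.error.issues);
}
```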
+/**
+ * Trading signal outline schema registration.
+ *
+ * Registers a structured outline for trading signal generation with comprehensive
+ * validation rules. This outline enforces a strict schema for AI-generated trading
+ * signals, ensuring all required fields are present and correctly formatted.
+ *
+ * Schema fields:
+ * - position: Trading direction ("long", "short", or "wait")
+ * - price_open: Entry price for the position
+ * - price_stop_loss: Stop-loss price level
+ * - price_take_profit: Take-profit price level
+ * - minute_estimated_time: Estimated time to reach TP (in minutes)
+ * - risk_note: Risk assessment and reasoning (markdown format)
+ *
+ * Validation rules:
+ * 1. All required fields must be present
+ * 2. Prices must be positive numbers
+ * 3. For LONG: SL < entry < TP
+ * 4. For SHORT: TP < entry < SL
+ * 5. Estimated time must be <= 360 minutes (6 hours)
+ * 6. Wait position skips price validations
+ *
+ * @example
+ * ```typescript
+ * import { json } from "agent-swarm-kit";
+ * import { OutlineName } from "./enum/OutlineName";
+ *
+ * const { data } = await json(OutlineName.SignalOutline, [
+ *   { role: "user", content: "Analyze BTC/USDT and decide position" }
+ * ]);
+ *
+ * if (data.position !== "wait") {
+ *   console.log(`Position: ${data.position}`);
+ *   console.log(`Entry: ${data.price_open}`);
+ *   console.log(`SL: ${data.price_stop_loss}`);
+ *   console.log(`TP: ${data.price_take_profit}`);
+ * }
+ * ```
+ */
agentSwarmKit.addOutline({
    outlineName: OutlineName.SignalOutline,
    completion: CompletionName.RunnerOutlineCompletion,
@@ -2864,42 +5178,279 @@ agentSwarmKit.addOutline({
    ],
});

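The validation rules listed in the JSDoc above translate directly into a predicate. A sketch of those six rules in TypeScript; the validators actually registered inside `addOutline` are elided by this diff, so this is an illustration, not the shipped code:

```typescript
interface Signal {
    position: "long" | "short" | "wait";
    price_open: number;
    price_stop_loss: number;
    price_take_profit: number;
    minute_estimated_time: number;
}

// Returns null when valid, otherwise a human-readable reason.
// Rule 1 (all required fields present) is assumed covered by schema parsing.
function checkSignal(s: Signal): string | null {
    if (s.position === "wait") return null; // rule 6: wait skips price checks
    if (s.price_open <= 0 || s.price_stop_loss <= 0 || s.price_take_profit <= 0) {
        return "prices must be positive"; // rule 2
    }
    if (s.minute_estimated_time > 360) {
        return "estimated time exceeds 360 minutes"; // rule 5
    }
    if (s.position === "long" &&
        !(s.price_stop_loss < s.price_open && s.price_open < s.price_take_profit)) {
        return "LONG requires SL < entry < TP"; // rule 3
    }
    if (s.position === "short" &&
        !(s.price_take_profit < s.price_open && s.price_open < s.price_stop_loss)) {
        return "SHORT requires TP < entry < SL"; // rule 4
    }
    return null;
}
```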
+/**
+ * Bootstrap module for agent-swarm-kit validation.
+ *
+ * Validates that all completion and outline names are properly registered
+ * with agent-swarm-kit before the application starts. This ensures that
+ * all referenced completions and outlines exist and are correctly configured.
+ *
+ * Validation checks:
+ * - All CompletionName enum values have corresponding registered handlers
+ * - All OutlineName enum values have corresponding registered schemas
+ * - No duplicate registrations exist
+ *
+ * This file is imported by index.ts to run validation at startup.
+ *
+ * @throws Error if validation fails (missing or duplicate registrations)
+ */
agentSwarmKit.validate({
    CompletionName: CompletionName$1,
    OutlineName: OutlineName$1,
});

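Since `validate()` runs at module load and throws on a missing or duplicate registration, a misconfigured build fails on first import rather than on the first request. A fail-fast startup sketch:

```typescript
// Fail-fast startup sketch: the validate() call above executes when the
// module is first loaded, so a registration problem surfaces here.
try {
    await import("@backtest-kit/ollama");
} catch (error) {
    console.error("completion/outline registration is inconsistent:", error);
    process.exit(1);
}
```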
+/**
+ * Generate structured trading signal from Ollama models.
+ *
+ * Supports token rotation by passing multiple API keys. Automatically enforces
+ * the signal JSON schema defined in Signal.schema.ts.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Ollama model name (e.g., "llama3.3:70b")
+ * @param apiKey - Single API key or array of keys for rotation
+ * @returns Promise resolving to structured trading signal
+ *
+ * @example
+ * ```typescript
+ * import { ollama } from '@backtest-kit/ollama';
+ *
+ * const signal = await ollama(messages, 'llama3.3:70b', ['key1', 'key2']);
+ * console.log(signal.position); // "long" | "short" | "wait"
+ * ```
+ */
const ollama = async (messages, model, apiKey) => {
    return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.OllamaInference, model, apiKey);
};
+/**
+ * Generate structured trading signal from Grok models.
+ *
+ * Uses xAI Grok models through direct API access. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Grok model name (e.g., "grok-beta")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { grok } from '@backtest-kit/ollama';
+ *
+ * const signal = await grok(messages, 'grok-beta', process.env.GROK_API_KEY);
+ * ```
+ */
const grok = async (messages, model, apiKey) => {
    return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.GrokInference, model, apiKey);
};
+/**
+ * Generate structured trading signal from Hugging Face models.
+ *
+ * Uses HuggingFace Router API for model access. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - HuggingFace model name
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ *
+ * @example
+ * ```typescript
+ * import { hf } from '@backtest-kit/ollama';
+ *
+ * const signal = await hf(messages, 'meta-llama/Llama-3-70b', process.env.HF_API_KEY);
+ * ```
+ */
const hf = async (messages, model, apiKey) => {
    return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.HfInference, model, apiKey);
};
+/**
+ * Generate structured trading signal from Claude models.
+ *
+ * Uses Anthropic Claude through OpenAI-compatible API. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Claude model name (e.g., "claude-3-5-sonnet-20241022")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { claude } from '@backtest-kit/ollama';
+ *
+ * const signal = await claude(messages, 'claude-3-5-sonnet-20241022', process.env.ANTHROPIC_API_KEY);
+ * ```
+ */
const claude = async (messages, model, apiKey) => {
    return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.ClaudeInference, model, apiKey);
};
+/**
+ * Generate structured trading signal from OpenAI GPT models.
+ *
+ * Uses official OpenAI SDK with JSON schema enforcement. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - OpenAI model name (e.g., "gpt-4o", "gpt-4-turbo")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { gpt5 } from '@backtest-kit/ollama';
+ *
+ * const signal = await gpt5(messages, 'gpt-4o', process.env.OPENAI_API_KEY);
+ * ```
+ */
const gpt5 = async (messages, model, apiKey) => {
    return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.GPT5Inference, model, apiKey);
};
+/**
+ * Generate structured trading signal from DeepSeek models.
+ *
+ * Uses DeepSeek AI through OpenAI-compatible API. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - DeepSeek model name (e.g., "deepseek-chat")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { deepseek } from '@backtest-kit/ollama';
+ *
+ * const signal = await deepseek(messages, 'deepseek-chat', process.env.DEEPSEEK_API_KEY);
+ * ```
+ */
const deepseek = async (messages, model, apiKey) => {
    return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.DeepseekInference, model, apiKey);
};
+/**
+ * Generate structured trading signal from Mistral AI models.
+ *
+ * Uses Mistral AI through OpenAI-compatible API. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Mistral model name (e.g., "mistral-large-latest")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { mistral } from '@backtest-kit/ollama';
+ *
+ * const signal = await mistral(messages, 'mistral-large-latest', process.env.MISTRAL_API_KEY);
+ * ```
+ */
const mistral = async (messages, model, apiKey) => {
    return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.MistralInference, model, apiKey);
};
+/**
+ * Generate structured trading signal from Perplexity AI models.
+ *
+ * Uses Perplexity AI through OpenAI-compatible API. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Perplexity model name (e.g., "llama-3.1-sonar-huge-128k-online")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { perplexity } from '@backtest-kit/ollama';
+ *
+ * const signal = await perplexity(messages, 'llama-3.1-sonar-huge-128k-online', process.env.PERPLEXITY_API_KEY);
+ * ```
+ */
const perplexity = async (messages, model, apiKey) => {
    return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.PerplexityInference, model, apiKey);
};
+/**
+ * Generate structured trading signal from Cohere models.
+ *
+ * Uses Cohere AI through OpenAI-compatible API. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Cohere model name (e.g., "command-r-plus")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { cohere } from '@backtest-kit/ollama';
+ *
+ * const signal = await cohere(messages, 'command-r-plus', process.env.COHERE_API_KEY);
+ * ```
+ */
const cohere = async (messages, model, apiKey) => {
    return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.CohereInference, model, apiKey);
};
+/**
+ * Generate structured trading signal from Alibaba Cloud Qwen models.
+ *
+ * Uses Alibaba DashScope API through direct HTTP requests. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Qwen model name (e.g., "qwen-max")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { alibaba } from '@backtest-kit/ollama';
+ *
+ * const signal = await alibaba(messages, 'qwen-max', process.env.ALIBABA_API_KEY);
+ * ```
+ */
const alibaba = async (messages, model, apiKey) => {
    return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.AlibabaInference, model, apiKey);
};
+/**
+ * Generate structured trading signal from Zhipu AI GLM-4 models.
+ *
+ * Uses Zhipu AI's GLM-4 through OpenAI-compatible Z.ai API. Does NOT support token rotation.
+ * GLM-4 is a powerful Chinese language model with strong reasoning capabilities.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - GLM-4 model name (e.g., "glm-4-plus", "glm-4-air")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { glm4 } from '@backtest-kit/ollama';
+ *
+ * const signal = await glm4(messages, 'glm-4-plus', process.env.ZAI_API_KEY);
+ * console.log(`Position: ${signal.position}`);
+ * console.log(`Entry: ${signal.price_open}`);
+ * ```
+ */
+const glm4 = async (messages, model, apiKey) => {
+    return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.GLM4Inference, model, apiKey);
+};

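All of these exported helpers share one `(messages, model, apiKey)` signature, which makes provider fallback a one-loop affair. A sketch trying several providers in order (environment variable names are illustrative, not prescribed by the package):

```typescript
import { glm4, alibaba, deepseek } from "@backtest-kit/ollama";

// Provider-fallback sketch; the first provider to answer wins.
const messages = [{ role: "user" as const, content: "Analyze BTC/USDT" }];

const attempts = [
    () => glm4(messages, "glm-4-plus", process.env.ZAI_API_KEY!),
    () => alibaba(messages, "qwen-max", process.env.ALIBABA_API_KEY!),
    () => deepseek(messages, "deepseek-chat", process.env.DEEPSEEK_API_KEY!),
];

let signal;
for (const attempt of attempts) {
    try {
        signal = await attempt();
        break;
    } catch {
        // fall through to the next provider
    }
}
if (!signal) throw new Error("all providers failed");
```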
+/**
+ * Sets custom logger implementation for the framework.
+ *
+ * All log messages from internal services will be forwarded to the provided logger
+ * with automatic context injection.
+ *
+ * @param logger - Custom logger implementing ILogger interface
+ *
+ * @example
+ * ```typescript
+ * setLogger({
+ *   log: (topic, ...args) => console.log(topic, args),
+ *   debug: (topic, ...args) => console.debug(topic, args),
+ *   info: (topic, ...args) => console.info(topic, args),
+ * });
+ * ```
+ */
const setLogger = (logger) => {
    lib.loggerService.setLogger(logger);
};
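Because every internal service logs through the single logger set here, cross-cutting concerns such as timestamps or production filtering live in one place. A sketch of a filtering logger, assuming only the `log`/`debug`/`info` methods shown in the example above and that the export name matches the function:

```typescript
import { setLogger } from "@backtest-kit/ollama";

// Filtering logger sketch: timestamp everything, silence debug in prod.
const isProd = process.env.NODE_ENV === "production";
const stamp = () => new Date().toISOString();

setLogger({
    log: (topic: string, ...args: unknown[]) => console.log(stamp(), topic, ...args),
    debug: (topic: string, ...args: unknown[]) => {
        if (!isProd) console.debug(stamp(), topic, ...args);
    },
    info: (topic: string, ...args: unknown[]) => console.info(stamp(), topic, ...args),
});
```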
@@ -2908,6 +5459,7 @@ exports.alibaba = alibaba;
exports.claude = claude;
exports.cohere = cohere;
exports.deepseek = deepseek;
+exports.glm4 = glm4;
exports.gpt5 = gpt5;
exports.grok = grok;
exports.hf = hf;