@backtest-kit/ollama 0.0.1 → 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
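Version 0.0.2 is largely a documentation release: it adds JSDoc throughout the build output, replaces the bare CC_ENABLE_DEBUG constant with a GLOBAL_CONFIG object, adds a GLM4Inference entry to InferenceName, and threads a `think` option into the Ollama calls. As a rough consumer-side sketch of the surface those comments describe (assuming `setLogger`, `engine`, and `InferenceName` are exported from the package root; the diff only shows internal import paths):

```typescript
// Hedged usage sketch assembled from the JSDoc added in 0.0.2; the root
// exports (setLogger, engine, InferenceName) are assumptions, not confirmed
// by this diff.
import { setLogger, engine, InferenceName } from "@backtest-kit/ollama";

// Replace the default no-op logger with a console-backed implementation.
setLogger({
  log: async (topic, ...args) => console.log(topic, ...args),
  debug: async (topic, ...args) => console.debug(topic, ...args),
  info: async (topic, ...args) => console.info(topic, ...args),
  warn: async (topic, ...args) => console.warn(topic, ...args),
});

// Each public call runs in an isolated context (inference, model, apiKey),
// so concurrent requests can target different providers or keys.
const result = await engine.runnerPublicService.getCompletion(
  { messages: [{ role: "user", content: "Analyze BTC/USDT" }] },
  {
    inference: InferenceName.OllamaInference,
    model: "llama2",
    apiKey: process.env.OLLAMA_API_KEY, // omit for a local Ollama instance
  },
);
console.log(result.content);
```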
package/build/index.mjs CHANGED
@@ -18,20 +18,69 @@ import { Ollama } from 'ollama';
  import { zodResponseFormat } from 'openai/helpers/zod';
  import { z } from 'zod';
 
+ /**
+ * Enumeration of completion strategy types.
+ *
+ * Defines unique identifiers for different completion execution modes.
+ * Used internally for routing completion requests to appropriate handlers.
+ *
+ * @example
+ * ```typescript
+ * import { CompletionName } from '@backtest-kit/ollama';
+ *
+ * const completionType = CompletionName.RunnerCompletion;
+ * ```
+ */
  var CompletionName;
  (function (CompletionName) {
+ /** Standard completion mode (full response at once) */
  CompletionName["RunnerCompletion"] = "runner_completion";
+ /** Streaming completion mode (progressive response chunks) */
  CompletionName["RunnerStreamCompletion"] = "runner_stream_completion";
+ /** Outline completion mode (structured JSON with schema validation) */
  CompletionName["RunnerOutlineCompletion"] = "runner_outline_completion";
  })(CompletionName || (CompletionName = {}));
  var CompletionName$1 = CompletionName;
 
+ /**
+ * Scoped context service for isolated execution contexts.
+ *
+ * Provides context isolation using async local storage through the di-scoped library.
+ * Each operation runs with its own context containing provider, model, and API key configuration.
+ * This enables multi-tenant scenarios where different requests use different AI providers or keys.
+ *
+ * Key features:
+ * - Scoped context isolation per execution
+ * - Support for single or multiple API keys (token rotation)
+ * - Thread-safe context propagation
+ * - Automatic cleanup after execution
+ *
+ * @example
+ * ```typescript
+ * import ContextService from "./services/base/ContextService";
+ *
+ * // Execute operation within scoped context
+ * const result = await ContextService.runInContext(async () => {
+ * // Code here has access to the context
+ * const model = contextService.context.model;
+ * return await someAiOperation();
+ * }, {
+ * inference: InferenceName.GPT5Inference,
+ * model: "gpt-5o-mini",
+ * apiKey: "sk-..."
+ * });
+ * ```
+ */
  const ContextService = scoped(class {
  constructor(context) {
  this.context = context;
  }
  });
 
+ /**
+ * No-operation logger that silently discards all log messages.
+ * Used as default logger before a real logger is configured.
+ */
  const NOOP_LOGGER = {
  log() {
  },
@@ -42,43 +91,181 @@ const NOOP_LOGGER = {
  warn() {
  },
  };
+ /**
+ * Centralized logging service for the Ollama package.
+ *
+ * Provides a unified interface for logging operations across the application.
+ * Uses a delegate pattern to forward log calls to a configured logger implementation.
+ * Defaults to a no-op logger if no logger is set.
+ *
+ * Key features:
+ * - Supports multiple log levels: log, debug, info, warn
+ * - Configurable logger backend via setLogger
+ * - Async logging support
+ * - Safe default (no-op) when unconfigured
+ *
+ * @example
+ * ```typescript
+ * import { LoggerService } from "./services/common/LoggerService";
+ * import { setLogger } from "./function/setup.function";
+ *
+ * // Configure custom logger
+ * setLogger({
+ * log: async (topic, ...args) => console.log(topic, ...args),
+ * debug: async (topic, ...args) => console.debug(topic, ...args),
+ * info: async (topic, ...args) => console.info(topic, ...args),
+ * warn: async (topic, ...args) => console.warn(topic, ...args),
+ * });
+ *
+ * const loggerService = inject<LoggerService>(TYPES.loggerService);
+ * await loggerService.info("Operation completed", { status: "success" });
+ * ```
+ */
  class LoggerService {
  constructor() {
+ /** Internal logger instance, defaults to NOOP_LOGGER */
  this._commonLogger = NOOP_LOGGER;
+ /**
+ * Logs a general message with optional arguments.
+ *
+ * @param topic - Message topic or category
+ * @param args - Additional arguments to log
+ */
  this.log = async (topic, ...args) => {
  await this._commonLogger.log(topic, ...args);
  };
+ /**
+ * Logs a debug message with optional arguments.
+ * Used for detailed diagnostic information.
+ *
+ * @param topic - Message topic or category
+ * @param args - Additional arguments to log
+ */
  this.debug = async (topic, ...args) => {
  await this._commonLogger.debug(topic, ...args);
  };
+ /**
+ * Logs an informational message with optional arguments.
+ * Used for general operational information.
+ *
+ * @param topic - Message topic or category
+ * @param args - Additional arguments to log
+ */
  this.info = async (topic, ...args) => {
  await this._commonLogger.info(topic, ...args);
  };
+ /**
+ * Logs a warning message with optional arguments.
+ * Used for potentially problematic situations.
+ *
+ * @param topic - Message topic or category
+ * @param args - Additional arguments to log
+ */
  this.warn = async (topic, ...args) => {
  await this._commonLogger.warn(topic, ...args);
  };
+ /**
+ * Sets the logger implementation to use for all logging operations.
+ *
+ * @param logger - Logger implementation conforming to ILogger interface
+ *
+ * @example
+ * ```typescript
+ * const logger = new LoggerService();
+ * logger.setLogger({
+ * log: async (topic, ...args) => console.log(topic, ...args),
+ * debug: async (topic, ...args) => console.debug(topic, ...args),
+ * info: async (topic, ...args) => console.info(topic, ...args),
+ * warn: async (topic, ...args) => console.warn(topic, ...args),
+ * });
+ * ```
+ */
  this.setLogger = (logger) => {
  this._commonLogger = logger;
  };
  }
  }
 
+ /**
+ * Dependency injection activator for the Ollama package.
+ *
+ * Creates a scoped DI container using di-kit with the namespace "ollama".
+ * Provides functions for service registration, injection, initialization, and overriding.
+ *
+ * Exported functions:
+ * - provide: Register a service implementation in the container
+ * - inject: Retrieve a service instance from the container
+ * - init: Initialize the DI container (must be called before using services)
+ * - override: Replace an existing service registration with a new implementation
+ *
+ * @example
+ * ```typescript
+ * import { provide, inject, init } from "./core/di";
+ * import { TYPES } from "./core/types";
+ *
+ * // Register service
+ * provide(TYPES.loggerService, () => new LoggerService());
+ *
+ * // Initialize container
+ * init();
+ *
+ * // Inject service
+ * const logger = inject<LoggerService>(TYPES.loggerService);
+ * ```
+ */
  const { provide, inject, init, override } = createActivator("ollama");
 
+ /**
+ * Common service type identifiers.
+ * Services used across the entire application.
+ */
  const commonServices$1 = {
+ /** Logger service for application-wide logging */
  loggerService: Symbol("loggerService"),
  };
+ /**
+ * Base service type identifiers.
+ * Core foundational services.
+ */
  const baseServices$1 = {
+ /** Context service for scoped execution contexts */
  contextService: Symbol('contextService'),
  };
+ /**
+ * Private service type identifiers.
+ * Internal services not exposed in public API.
+ */
  const privateServices$1 = {
+ /** Runner private service for AI provider operations */
  runnerPrivateService: Symbol('runnerPrivateService'),
+ /** Outline private service for structured completions */
  outlinePrivateService: Symbol('outlinePrivateService'),
  };
+ /**
+ * Public service type identifiers.
+ * Services exposed in the public API.
+ */
  const publicServices$1 = {
+ /** Runner public service for context-managed AI operations */
  runnerPublicService: Symbol('runnerPublicService'),
+ /** Outline public service for simplified structured completions */
  outlinePublicService: Symbol('outlinePublicService'),
  };
+ /**
+ * Service type identifier registry for dependency injection.
+ *
+ * Centralizes all Symbol-based type identifiers used for DI container registration.
+ * Organized by service layer: common, base, private, and public services.
+ *
+ * @example
+ * ```typescript
+ * import { inject } from "./di";
+ * import { TYPES } from "./types";
+ * import LoggerService from "../services/common/LoggerService";
+ *
+ * const logger = inject<LoggerService>(TYPES.loggerService);
+ * ```
+ */
  const TYPES = {
  ...commonServices$1,
  ...baseServices$1,
@@ -86,12 +273,44 @@ const TYPES = {
  ...publicServices$1,
  };
 
+ /**
+ * Enumeration of supported JSON schema outlines.
+ *
+ * Defines unique identifiers for structured output schemas used with
+ * LLM providers. Outlines enforce JSON schema validation for critical
+ * data structures like trading signals.
+ *
+ * @example
+ * ```typescript
+ * import { OutlineName } from '@backtest-kit/ollama';
+ *
+ * const outlineName = OutlineName.SignalOutline;
+ * ```
+ */
  var OutlineName;
  (function (OutlineName) {
+ /** Trading signal JSON schema for position, TP/SL, and risk parameters */
  OutlineName["SignalOutline"] = "signal_outline";
  })(OutlineName || (OutlineName = {}));
  var OutlineName$1 = OutlineName;
 
+ /**
+ * Lints and auto-fixes markdown content using markdownlint rules.
+ *
+ * Validates markdown syntax and applies automatic fixes for common issues
+ * like inconsistent list markers, trailing spaces, and heading styles.
+ * Returns the original content if no errors are found or fixes cannot be applied.
+ *
+ * @param content - Raw markdown content to lint
+ * @returns Promise resolving to linted markdown content
+ *
+ * @example
+ * ```typescript
+ * const markdown = "# Title\n\n\n## Subtitle"; // Multiple blank lines
+ * const linted = await toLintMarkdown(markdown);
+ * // Returns: "# Title\n\n## Subtitle" (extra blank line removed)
+ * ```
+ */
  const toLintMarkdown = async (content) => {
  if (!content) {
  return "";
@@ -103,8 +322,28 @@ const toLintMarkdown = async (content) => {
  const value = applyFixes(content, errors);
  return value ? value : content;
  };
- globalThis.toLintMarkdown = toLintMarkdown;
 
+ /**
+ * Converts markdown content to plain text with Telegram-compatible HTML formatting.
+ *
+ * Processes markdown through three stages:
+ * 1. Lints and fixes markdown using markdownlint
+ * 2. Renders markdown to HTML using markdown-it
+ * 3. Sanitizes HTML to Telegram-compatible subset
+ *
+ * Supported tags: b, i, a, code, pre, s, u, tg-spoiler, blockquote, br
+ * Transforms: headings removed, lists to bullets, multiple newlines collapsed
+ *
+ * @param content - Raw markdown content
+ * @returns Promise resolving to sanitized plain text with HTML formatting
+ *
+ * @example
+ * ```typescript
+ * const markdown = "# Title\n**Bold** and *italic*\n- Item 1\n- Item 2";
+ * const plain = await toPlainString(markdown);
+ * // Returns: "Bold and italic\n• Item 1\n• Item 2"
+ * ```
+ */
  const toPlainString = async (content) => {
  if (!content) {
  return "";
@@ -155,9 +394,58 @@ const toPlainString = async (content) => {
  return telegramHtml.replaceAll(/\n[\s\n]*\n/g, "\n").trim();
  };
 
+ /**
+ * Private service for processing structured outline completions.
+ *
+ * Handles the core logic for executing outline-based AI completions with schema validation.
+ * Processes AI responses through the agent-swarm-kit json function to extract and validate
+ * structured trading signal data.
+ *
+ * Key features:
+ * - JSON schema validation using agent-swarm-kit
+ * - Trading signal extraction and transformation
+ * - Type conversion for numeric fields
+ * - Markdown formatting cleanup for notes
+ * - Error handling for validation failures
+ *
+ * @example
+ * ```typescript
+ * const outlinePrivate = inject<OutlinePrivateService>(TYPES.outlinePrivateService);
+ * const signal = await outlinePrivate.getCompletion([
+ * { role: "user", content: "Analyze market" }
+ * ]);
+ * ```
+ */
  class OutlinePrivateService {
  constructor() {
+ /** Logger service for operation tracking */
  this.loggerService = inject(TYPES.loggerService);
+ /**
+ * Processes outline completion messages and extracts structured signal data.
+ *
+ * Sends messages to the AI provider, validates the response against the signal schema,
+ * and transforms the data into a structured format. Returns null if the AI decides
+ * to wait (no position).
+ *
+ * @param messages - Array of conversation messages for the AI
+ * @returns Promise resolving to structured signal data or null if position is "wait"
+ * @throws Error if validation fails or AI returns an error
+ *
+ * @example
+ * ```typescript
+ * const signal = await outlinePrivateService.getCompletion([
+ * { role: "system", content: "Trading analyst role" },
+ * { role: "user", content: "Market analysis data..." }
+ * ]);
+ *
+ * if (signal) {
+ * console.log(`Position: ${signal.position}`);
+ * console.log(`Entry: ${signal.priceOpen}`);
+ * console.log(`SL: ${signal.priceStopLoss}`);
+ * console.log(`TP: ${signal.priceTakeProfit}`);
+ * }
+ * ```
+ */
  this.getCompletion = async (messages) => {
  this.loggerService.log("outlinePrivateService getCompletion", {
  messages,
@@ -182,40 +470,194 @@ class OutlinePrivateService {
  }
  }
 
+ /**
+ * Private service managing AI inference provider registry and execution.
+ *
+ * Coordinates AI operations across multiple inference providers (OpenAI, Claude, Ollama, etc.).
+ * Maintains a registry of provider implementations and instantiates them on-demand.
+ * Uses memoization to cache provider instances for better performance.
+ *
+ * Key features:
+ * - Dynamic provider registration for multiple AI services
+ * - Lazy instantiation with memoization for performance
+ * - Context-aware provider selection based on inference type
+ * - Support for standard, streaming, and structured completions
+ * - Type-safe provider interface
+ *
+ * @example
+ * ```typescript
+ * // Provider registration (typically done at startup)
+ * const runnerPrivate = inject<RunnerPrivateService>(TYPES.runnerPrivateService);
+ * runnerPrivate.registerRunner(InferenceName.ClaudeInference, ClaudeProvider);
+ * runnerPrivate.registerRunner(InferenceName.GPT5Inference, GPT5Provider);
+ *
+ * // Provider usage (automatically selected based on context)
+ * const result = await runnerPrivate.getCompletion({
+ * messages: [{ role: "user", content: "Analyze trade" }]
+ * });
+ * ```
+ */
  class RunnerPrivateService {
  constructor() {
+ /** Context service providing execution context (model, API key, provider) */
  this.contextService = inject(TYPES.contextService);
+ /** Logger service for operation tracking */
  this.loggerService = inject(TYPES.loggerService);
+ /** Registry storing provider class constructors by inference name */
  this._registry = new ToolRegistry("runner_registry");
+ /**
+ * Memoized provider instance getter.
+ * Creates and caches provider instances per inference type.
+ */
  this.getRunner = memoize(([inference]) => `${inference}`, (inference) => {
  const Runner = this._registry.get(inference);
  return new Runner(this.contextService, this.loggerService);
  });
+ /**
+ * Executes a standard AI completion using the provider specified in context.
+ *
+ * @param params - Completion parameters including messages and options
+ * @returns Promise resolving to AI response message
+ *
+ * @example
+ * ```typescript
+ * const result = await runnerPrivateService.getCompletion({
+ * messages: [
+ * { role: "system", content: "You are a trading assistant" },
+ * { role: "user", content: "Analyze BTC market" }
+ * ]
+ * });
+ * ```
+ */
  this.getCompletion = async (params) => {
  this.loggerService.log("runnerPrivateService getCompletion");
  const runner = this.getRunner(this.contextService.context.inference);
  return await runner.getCompletion(params);
  };
+ /**
+ * Executes a streaming AI completion using the provider specified in context.
+ *
+ * @param params - Completion parameters including messages and options
+ * @returns Promise resolving to accumulated AI response message
+ *
+ * @example
+ * ```typescript
+ * const result = await runnerPrivateService.getStreamCompletion({
+ * messages: [{ role: "user", content: "Generate signal" }]
+ * });
+ * ```
+ */
  this.getStreamCompletion = async (params) => {
  this.loggerService.log("runnerPrivateService getStreamCompletion");
  const runner = this.getRunner(this.contextService.context.inference);
  return await runner.getStreamCompletion(params);
  };
+ /**
+ * Executes a structured outline completion using the provider specified in context.
+ *
+ * @param params - Outline completion parameters including messages and schema
+ * @returns Promise resolving to structured AI response
+ *
+ * @example
+ * ```typescript
+ * const signal = await runnerPrivateService.getOutlineCompletion({
+ * messages: [{ role: "user", content: "Trading decision for ETH" }]
+ * });
+ * ```
+ */
  this.getOutlineCompletion = async (params) => {
  this.loggerService.log("runnerPrivateService getOutlineCompletion");
  const runner = this.getRunner(this.contextService.context.inference);
  return await runner.getOutlineCompletion(params);
  };
+ /**
+ * Registers a new AI provider implementation in the registry.
+ *
+ * @param name - Inference provider identifier
+ * @param runner - Provider class constructor
+ *
+ * @example
+ * ```typescript
+ * runnerPrivateService.registerRunner(
+ * InferenceName.ClaudeInference,
+ * ClaudeProvider
+ * );
+ * ```
+ */
  this.registerRunner = (name, runner) => {
  this._registry = this._registry.register(name, runner);
  };
  }
  }
 
+ /**
+ * Public-facing service for structured AI outline completions.
+ *
+ * Provides a simplified interface for executing structured AI completions with schema validation.
+ * Handles context creation and isolation for outline-based operations.
+ * Used for extracting structured data from AI responses (e.g., trading signals).
+ *
+ * Key features:
+ * - Simplified API with automatic context management
+ * - JSON schema validation for structured outputs
+ * - Support for multiple AI providers
+ * - Optional API key parameter with fallback
+ * - Logging integration
+ *
+ * @example
+ * ```typescript
+ * import { engine } from "./lib";
+ * import { InferenceName } from "./enum/InferenceName";
+ *
+ * const signal = await engine.outlinePublicService.getCompletion(
+ * [{ role: "user", content: "Analyze BTC/USDT and decide position" }],
+ * InferenceName.ClaudeInference,
+ * "claude-3-5-sonnet-20240620",
+ * "sk-ant-..."
+ * );
+ *
+ * // Returns structured signal:
+ * // {
+ * // position: "long",
+ * // priceOpen: 50000,
+ * // priceStopLoss: 48000,
+ * // priceTakeProfit: 52000,
+ * // minuteEstimatedTime: 120,
+ * // note: "Strong bullish momentum..."
+ * // }
+ * ```
+ */
  class OutlinePublicService {
  constructor() {
+ /** Logger service for operation tracking */
  this.loggerService = inject(TYPES.loggerService);
+ /** Private service handling outline completion logic */
  this.outlinePrivateService = inject(TYPES.outlinePrivateService);
+ /**
+ * Executes a structured outline completion with schema validation.
+ *
+ * Creates an isolated execution context and processes messages through the AI provider
+ * to generate a structured response conforming to a predefined schema.
+ *
+ * @param messages - Array of conversation messages for the AI
+ * @param inference - AI provider identifier
+ * @param model - Model name/identifier
+ * @param apiKey - Optional API key(s), required for most providers
+ * @returns Promise resolving to structured signal data or null if position is "wait"
+ *
+ * @example
+ * ```typescript
+ * const result = await outlinePublicService.getCompletion(
+ * [
+ * { role: "system", content: "You are a trading analyst" },
+ * { role: "user", content: "Analyze current BTC market" }
+ * ],
+ * InferenceName.DeepseekInference,
+ * "deepseek-chat",
+ * "sk-..."
+ * );
+ * ```
+ */
  this.getCompletion = async (messages, inference, model, apiKey) => {
  this.loggerService.log("outlinePublicService getCompletion", {
  messages,
@@ -234,22 +676,134 @@ class OutlinePublicService {
  }
  }
 
+ /**
+ * Public-facing service for AI inference operations with context management.
+ *
+ * Provides context-scoped access to AI completion operations.
+ * Acts as a facade that wraps RunnerPrivateService methods with context isolation.
+ * Each operation runs within a dedicated execution context to ensure proper API key
+ * and model configuration isolation.
+ *
+ * Key features:
+ * - Context-isolated execution for multi-tenant scenarios
+ * - Support for standard, streaming, and structured (outline) completions
+ * - Automatic context propagation to private service layer
+ * - Logging integration for operation tracking
+ *
+ * @example
+ * ```typescript
+ * import { engine } from "./lib";
+ * import { InferenceName } from "./enum/InferenceName";
+ *
+ * const context = {
+ * inference: InferenceName.ClaudeInference,
+ * model: "claude-3-5-sonnet-20240620",
+ * apiKey: "sk-ant-..."
+ * };
+ *
+ * // Standard completion
+ * const result = await engine.runnerPublicService.getCompletion({
+ * messages: [{ role: "user", content: "Analyze this trade..." }]
+ * }, context);
+ *
+ * // Streaming completion
+ * const stream = await engine.runnerPublicService.getStreamCompletion({
+ * messages: [{ role: "user", content: "Generate signal..." }]
+ * }, context);
+ *
+ * // Structured outline completion
+ * const outline = await engine.runnerPublicService.getOutlineCompletion({
+ * messages: [{ role: "user", content: "Trading decision..." }]
+ * }, context);
+ * ```
+ */
  class RunnerPublicService {
  constructor() {
+ /** Private service handling AI provider operations */
  this.runnerPrivateService = inject(TYPES.runnerPrivateService);
+ /** Logger service for operation tracking */
  this.loggerService = inject(TYPES.loggerService);
+ /**
+ * Executes a standard AI completion within the specified context.
+ *
+ * @param params - Completion parameters including messages and options
+ * @param context - Execution context with inference provider, model, and API key
+ * @returns Promise resolving to AI response message
+ *
+ * @example
+ * ```typescript
+ * const result = await runnerPublicService.getCompletion({
+ * messages: [
+ * { role: "system", content: "You are a trading analyst" },
+ * { role: "user", content: "Analyze BTC/USDT" }
+ * ]
+ * }, {
+ * inference: InferenceName.ClaudeInference,
+ * model: "claude-3-5-sonnet-20240620",
+ * apiKey: "sk-ant-..."
+ * });
+ * ```
+ */
  this.getCompletion = async (params, context) => {
  this.loggerService.log("runnerPublicService getCompletion");
  return await ContextService.runInContext(async () => {
  return await this.runnerPrivateService.getCompletion(params);
  }, context);
  };
+ /**
+ * Executes a streaming AI completion within the specified context.
+ *
+ * Similar to getCompletion but enables streaming mode where supported by the provider.
+ * The response is accumulated and returned as a complete message once streaming finishes.
+ *
+ * @param params - Completion parameters including messages and options
+ * @param context - Execution context with inference provider, model, and API key
+ * @returns Promise resolving to accumulated AI response message
+ *
+ * @example
+ * ```typescript
+ * const result = await runnerPublicService.getStreamCompletion({
+ * messages: [
+ * { role: "user", content: "Generate trading signal for ETH/USDT" }
+ * ]
+ * }, {
+ * inference: InferenceName.GPT5Inference,
+ * model: "gpt-5o-mini",
+ * apiKey: "sk-..."
+ * });
+ * ```
+ */
  this.getStreamCompletion = async (params, context) => {
  this.loggerService.log("runnerPublicService getStreamCompletion");
  return await ContextService.runInContext(async () => {
  return await this.runnerPrivateService.getStreamCompletion(params);
  }, context);
  };
+ /**
+ * Executes a structured outline completion within the specified context.
+ *
+ * Uses structured output (JSON schema validation) to ensure the AI response
+ * conforms to a predefined format. Ideal for extracting structured data
+ * from AI responses (e.g., trading signals with specific fields).
+ *
+ * @param params - Outline completion parameters including messages and schema
+ * @param context - Execution context with inference provider, model, and API key
+ * @returns Promise resolving to structured AI response
+ *
+ * @example
+ * ```typescript
+ * const signal = await runnerPublicService.getOutlineCompletion({
+ * messages: [
+ * { role: "user", content: "Decide position for BTC/USDT" }
+ * ]
+ * }, {
+ * inference: InferenceName.DeepseekInference,
+ * model: "deepseek-chat",
+ * apiKey: "sk-..."
+ * });
+ * // Returns: { position: "long", price_open: 50000, ... }
+ * ```
+ */
  this.getOutlineCompletion = async (params, context) => {
  this.loggerService.log("runnerPublicService getOutlineCompletion");
  return await ContextService.runInContext(async () => {
@@ -259,36 +813,116 @@ class RunnerPublicService {
  }
  }
 
+ /**
+ * Service registration module for dependency injection.
+ *
+ * Registers all service implementations in the DI container during application startup.
+ * Services are organized by layer: common, base, private, and public services.
+ * Each service is registered with a factory function that creates new instances.
+ *
+ * Registration order:
+ * 1. Common services (LoggerService)
+ * 2. Base services (ContextService)
+ * 3. Private services (RunnerPrivateService, OutlinePrivateService)
+ * 4. Public services (RunnerPublicService, OutlinePublicService)
+ *
+ * This file is imported by lib/index.ts to ensure services are registered
+ * before the DI container is initialized.
+ */
+ /**
+ * Register common services.
+ */
  {
  provide(TYPES.loggerService, () => new LoggerService());
  }
+ /**
+ * Register base services.
+ */
  {
  provide(TYPES.contextService, () => new ContextService());
  }
+ /**
+ * Register private services.
+ */
  {
  provide(TYPES.runnerPrivateService, () => new RunnerPrivateService());
  provide(TYPES.outlinePrivateService, () => new OutlinePrivateService());
  }
+ /**
+ * Register public services.
+ */
  {
  provide(TYPES.runnerPublicService, () => new RunnerPublicService());
  provide(TYPES.outlinePublicService, () => new OutlinePublicService());
  }
 
+ /**
+ * Enumeration of supported LLM inference providers.
+ *
+ * Defines unique identifiers for each LLM provider supported by the library.
+ * Used internally for dependency injection and provider resolution.
+ *
+ * @example
+ * ```typescript
+ * import { InferenceName } from '@backtest-kit/ollama';
+ *
+ * const providerName = InferenceName.GPT5Inference;
+ * ```
+ */
  var InferenceName;
  (function (InferenceName) {
+ /** Ollama provider for local/cloud LLM inference */
  InferenceName["OllamaInference"] = "ollama_inference";
+ /** Grok provider by X.AI (api.x.ai) */
  InferenceName["GrokInference"] = "grok_inference";
+ /** Hugging Face Inference API provider */
  InferenceName["HfInference"] = "hf_inference";
+ /** Claude provider by Anthropic (api.anthropic.com) */
  InferenceName["ClaudeInference"] = "claude_inference";
+ /** OpenAI GPT provider (api.openai.com) */
  InferenceName["GPT5Inference"] = "gpt5_inference";
+ /** Z.ai GLM provider (api.z.ai/api/paas/v4) */
+ InferenceName["GLM4Inference"] = "glm4_inference";
+ /** DeepSeek provider (api.deepseek.com) */
  InferenceName["DeepseekInference"] = "deepseek_inference";
+ /** Mistral AI provider (api.mistral.ai) */
  InferenceName["MistralInference"] = "mistral_inference";
+ /** Perplexity AI provider (api.perplexity.ai) */
  InferenceName["PerplexityInference"] = "perplexity_inference";
+ /** Cohere provider (api.cohere.ai) */
  InferenceName["CohereInference"] = "cohere_inference";
+ /** Alibaba Cloud provider (dashscope-intl.aliyuncs.com) */
  InferenceName["AlibabaInference"] = "alibaba_inference";
  })(InferenceName || (InferenceName = {}));
  var InferenceName$1 = InferenceName;
 
+ /**
+ * Creates and caches an OpenAI-compatible client for Grok (xAI) API.
+ *
+ * Uses OpenAI SDK with Grok's API endpoint.
+ * The client instance is cached using singleshot memoization for performance.
+ * Token rotation is not supported - throws error if array of keys is provided.
+ *
+ * Key features:
+ * - OpenAI SDK compatibility layer
+ * - Single API key support only
+ * - Instance caching with singleshot
+ * - Automatic cache clearing on error
+ *
+ * @returns OpenAI client configured for Grok API
+ * @throws Error if API key array is provided (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { getGrok } from "./config/grok";
+ *
+ * const client = getGrok();
+ * const completion = await client.chat.completions.create({
+ * model: "grok-beta",
+ * messages: [{ role: "user", content: "Hello" }]
+ * });
+ * ```
+ */
  const getGrok = singleshot(() => {
  const apiKey = lib.contextService.context.apiKey;
  if (Array.isArray(apiKey)) {
@@ -301,8 +935,54 @@ const getGrok = singleshot(() => {
  });
  });
 
- const CC_ENABLE_DEBUG = "CC_ENABLE_DEBUG" in process.env ? !!parseInt(process.env.CC_ENABLE_DEBUG) : false;
+ /**
+ * Global configuration parameters for the Ollama package.
+ *
+ * Provides runtime configuration via environment variables with sensible defaults.
+ * All configuration values are immutable once initialized.
+ *
+ * Available configurations:
+ * - CC_ENABLE_DEBUG: Enable detailed debug logging
+ * - CC_ENABLE_THINKING: Enable AI extended reasoning mode
+ *
+ * @example
+ * ```typescript
+ * import { GLOBAL_CONFIG } from "./config/params";
+ *
+ * if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
+ * console.log("Debug mode enabled");
+ * }
+ *
+ * if (GLOBAL_CONFIG.CC_ENABLE_THINKING) {
+ * // AI will provide reasoning before responses
+ * }
+ * ```
+ */
+ /**
+ * Global configuration object.
+ * Values are read from environment variables at initialization.
+ */
+ const GLOBAL_CONFIG = {
+ /**
+ * Enable debug mode for detailed logging.
+ * When enabled, additional debug information will be logged.
+ * Can be set via CC_ENABLE_DEBUG environment variable.
+ * Default: false
+ */
+ CC_ENABLE_DEBUG: "CC_ENABLE_DEBUG" in process.env ? !!parseInt(process.env.CC_ENABLE_DEBUG) : false,
+ /**
+ * Enable thinking mode for AI responses.
+ * When enabled, the AI will provide extended reasoning before answering.
+ * Can be set via CC_ENABLE_THINKING environment variable.
+ * Default: false
+ */
+ CC_ENABLE_THINKING: "CC_ENABLE_THINKING" in process.env ? !!parseInt(process.env.CC_ENABLE_THINKING) : false,
+ };
 
+ /**
+ * Custom ChatXAI implementation with simplified token counting.
+ * Estimates tokens as content.length / 4 for compatibility.
+ */
  class CustomChat extends ChatXAI {
  async getNumTokens(content) {
@@ -311,16 +991,54 @@ class CustomChat extends ChatXAI {
  return Math.ceil(content.length / 4);
  }
  }
+ /**
+ * Creates configured ChatXAI instance for Grok streaming.
+ */
  const getChat$1 = (model, apiKey) => new CustomChat({
  apiKey,
  model,
  streaming: true,
  });
- let GrokProvider$1 = class GrokProvider {
+ /**
+ * Provider for xAI Grok models via LangChain ChatXAI.
+ *
+ * Uses LangChain's ChatXAI integration for xAI Grok models.
+ * Provides true token-by-token streaming via LangChain callbacks and OpenAI SDK for standard requests.
+ *
+ * Key features:
+ * - LangChain ChatXAI for true streaming
+ * - OpenAI SDK via getGrok() for standard completion
+ * - Direct xAI API access for outline completion
+ * - Tool calling via bindTools (streaming) or tools parameter (standard)
+ * - Real-time token emission via stream callbacks
+ * - No token rotation support (single API key only)
+ *
+ * @example
+ * ```typescript
+ * const provider = new GrokProvider(contextService, logger);
+ * const response = await provider.getStreamCompletion({
+ * agentName: "grok",
+ * messages: [{ role: "user", content: "Latest AI news?" }],
+ * mode: "direct",
+ * tools: [searchTool],
+ * clientId: "client-888"
+ * });
+ * ```
+ */
+ class GrokProvider {
+ /**
+ * Creates a new GrokProvider instance.
+ */
  constructor(contextService, logger) {
  this.contextService = contextService;
  this.logger = logger;
  }
+ /**
+ * Performs standard completion request via OpenAI SDK.
+ *
+ * @param params - Completion parameters
+ * @returns Promise resolving to assistant's response
+ */
  async getCompletion(params) {
  const grok = getGrok();
  const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -364,11 +1082,19 @@ let GrokProvider$1 = class GrokProvider {
  })),
  };
  // Debug logging
- if (CC_ENABLE_DEBUG) {
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
  await fs.appendFile("./debug_grok_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
  }
  return result;
  }
+ /**
+ * Performs true streaming completion via LangChain ChatXAI.
+ * Emits tokens in real-time as they are generated.
+ *
+ * @param params - Completion parameters
+ * @returns Promise resolving to complete response after streaming
+ * @throws Error if token rotation attempted
+ */
  async getStreamCompletion(params) {
  if (Array.isArray(this.contextService.context.apiKey)) {
  throw new Error("Grok provider does not support token rotation");
@@ -463,11 +1189,19 @@ let GrokProvider$1 = class GrokProvider {
  tool_calls: formattedToolCalls,
  };
  // Debug logging
- if (CC_ENABLE_DEBUG) {
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
  await fs.appendFile("./debug_grok_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
  }
  return result;
  }
+ /**
+ * Performs structured output completion via direct xAI API.
+ * Uses response_format parameter for schema enforcement.
+ *
+ * @param params - Outline completion parameters
+ * @returns Promise resolving to validated JSON string
+ * @throws Error if model returns refusal or token rotation attempted
+ */
  async getOutlineCompletion(params) {
  const { messages: rawMessages, format } = params;
  this.logger.log("grokProvider getOutlineCompletion", {
@@ -510,14 +1244,21 @@ let GrokProvider$1 = class GrokProvider {
  content: json,
  };
  // Debug logging
- if (CC_ENABLE_DEBUG) {
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
  await fs.appendFile("./debug_grok_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
  }
  return result;
  }
- };
+ }
 
+ /**
+ * Maximum number of retry attempts for outline completion.
+ */
  const MAX_ATTEMPTS$5 = 5;
+ /**
+ * Custom ChatOpenAI implementation for HuggingFace with simplified token counting.
+ * Routes requests to HuggingFace Router endpoint.
+ */
  class HuggingFaceChat extends ChatOpenAI {
  async getNumTokens(content) {
@@ -526,6 +1267,9 @@ class HuggingFaceChat extends ChatOpenAI {
  return Math.ceil(content.length / 4);
  }
  }
+ /**
+ * Creates configured HuggingFaceChat instance for streaming.
+ */
  const getChat = (model, apiKey) => new HuggingFaceChat({
  configuration: {
  baseURL: "https://router.huggingface.co/v1",
@@ -534,12 +1278,52 @@ const getChat = (model, apiKey) => new HuggingFaceChat({
  model,
  streaming: true,
  });
+ /**
+ * Creates HuggingFace InferenceClient for standard completion.
+ */
  const getInference = (apiKey) => new InferenceClient(apiKey);
+ /**
+ * Provider for HuggingFace models via HuggingFace Router API.
+ *
+ * Implements HuggingFace API access using both InferenceClient (standard completion)
+ * and LangChain ChatOpenAI (streaming). Supports thinking mode via reasoning_content.
+ * Does NOT support token rotation (single API key only).
+ *
+ * Key features:
+ * - HuggingFace InferenceClient for standard completion
+ * - LangChain ChatOpenAI for true streaming
+ * - Tool calling support with proper message conversion
+ * - Reasoning/thinking content capture (_thinking field)
+ * - Direct API access for outline completion
+ * - No token rotation support
+ *
+ * @example
+ * ```typescript
+ * const provider = new HfProvider(contextService, logger);
+ * const response = await provider.getStreamCompletion({
+ * agentName: "hf-assistant",
+ * messages: [{ role: "user", content: "Explain attention mechanism" }],
+ * mode: "direct",
+ * tools: [codeTool],
+ * clientId: "client-777"
+ * });
+ * ```
+ */
  class HfProvider {
+ /**
+ * Creates a new HfProvider instance.
+ */
  constructor(contextService, logger) {
  this.contextService = contextService;
  this.logger = logger;
  }
+ /**
+ * Performs standard completion using HuggingFace InferenceClient.
+ *
+ * @param params - Completion parameters
+ * @returns Promise resolving to assistant's response
+ * @throws Error if token rotation attempted
+ */
  async getCompletion(params) {
  if (Array.isArray(this.contextService.context.apiKey)) {
  throw new Error("Hf provider does not support token rotation");
@@ -614,7 +1398,7 @@ class HfProvider {
  })),
  };
  // Debug logging
- if (CC_ENABLE_DEBUG) {
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
  await fs.appendFile("./debug_hf_provider.txt", JSON.stringify({
  params,
  answer: result,
@@ -622,6 +1406,14 @@ class HfProvider {
  }
  return result;
  }
+ /**
+ * Performs true streaming completion using LangChain ChatOpenAI.
+ * Emits tokens in real-time via callbacks.
+ *
+ * @param params - Completion parameters
+ * @returns Promise resolving to complete response after streaming
+ * @throws Error if token rotation attempted
+ */
  async getStreamCompletion(params) {
  if (Array.isArray(this.contextService.context.apiKey)) {
  throw new Error("Hf provider does not support token rotation");
@@ -709,7 +1501,7 @@ class HfProvider {
  })),
  };
  // Debug logging
- if (CC_ENABLE_DEBUG) {
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
  await fs.appendFile("./debug_hf_provider_stream.txt", JSON.stringify({
  params,
  answer: result,
@@ -717,6 +1509,14 @@ class HfProvider {
  }
  return result;
  }
+ /**
+ * Performs structured output completion using tool calling with extended retry logic.
+ * Captures reasoning_content as _thinking field in response.
+ *
+ * @param params - Outline completion parameters
+ * @returns Promise resolving to validated JSON string with thinking
+ * @throws Error if model fails after 5 attempts or token rotation attempted
+ */
  async getOutlineCompletion(params) {
  const { messages: rawMessages, format } = params;
  this.logger.log("hfProvider getOutlineCompletion", {
@@ -822,7 +1622,7 @@ class HfProvider {
  content: JSON.stringify(validation.data),
  };
  // Debug logging
- if (CC_ENABLE_DEBUG) {
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
  await fs.appendFile("./debug_hf_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
  }
  return result;
@@ -835,9 +1635,25 @@ class HfProvider {
  }
  }
 
+ /**
+ * Wrapper class for Ollama client with token rotation support.
+ *
+ * Implements round-robin API key rotation for high-volume Ollama usage.
+ * Each request automatically rotates through the provided API keys to
+ * distribute load and avoid rate limiting.
+ *
+ * Key features:
+ * - Round-robin token rotation using RoundRobin from agent-swarm-kit
+ * - Streaming and non-streaming support
+ * - Type-safe method overloads
+ * - Automatic Ollama client creation per token
+ *
+ * @throws Error if no API keys are provided in context
+ */
  class OllamaWrapper {
  constructor(_config) {
  this._config = _config;
+ /** Round-robin chat function factory */
  this._chatFn = RoundRobin.create(lib.contextService.context.apiKey, (token) => {
  const ollama = new Ollama({
  ...this._config,
@@ -858,14 +1674,86 @@ class OllamaWrapper {
  throw new Error("OllamaRotate required apiKey[] to process token rotation");
  }
  }
+ /**
+ * Executes a chat request with automatic token rotation.
+ *
+ * @param request - Chat request configuration
+ * @returns Chat response or async iterable (for streaming)
+ */
  async chat(request) {
  return await this._chatFn(request);
  }
  }
+ /**
+ * Creates and caches an Ollama wrapper with token rotation enabled.
+ *
+ * Requires an array of API keys in the execution context.
+ * The wrapper automatically rotates through keys using round-robin strategy.
+ *
+ * @returns OllamaWrapper instance with token rotation
+ *
+ * @example
+ * ```typescript
+ * import { getOllamaRotate } from "./config/ollama.rotate";
+ *
+ * // Context must have array of API keys
+ * const client = getOllamaRotate();
+ * const response = await client.chat({
+ * model: "llama2",
+ * messages: [{ role: "user", content: "Hello" }]
+ * });
+ * // Next request will use a different API key
+ * ```
+ */
  const getOllamaRotate = singleshot(() => new OllamaWrapper({
  host: "https://ollama.com",
  }));
 
+ /**
+ * Creates and caches an Ollama client with flexible configuration.
+ *
+ * Supports three modes of operation:
+ * 1. Token rotation mode: Array of API keys enables automatic rotation
+ * 2. Cloud mode: Single API key connects to ollama.com
+ * 3. Local mode: No API key connects to local Ollama instance
+ *
+ * The client instance is cached using singleshot memoization for performance.
+ * Automatically selects the appropriate client based on API key configuration.
+ *
+ * Key features:
+ * - Token rotation support for high-volume usage
+ * - Cloud and local Ollama support
+ * - Instance caching with singleshot
+ * - Automatic mode detection
+ *
+ * @returns Ollama client or OllamaWrapper (for token rotation)
+ *
+ * @example
+ * ```typescript
+ * import { getOllama } from "./config/ollama";
+ *
+ * // Local mode (no API key)
+ * const localClient = getOllama();
+ * const response = await localClient.chat({
+ * model: "llama2",
+ * messages: [{ role: "user", content: "Hello" }]
+ * });
+ *
+ * // Cloud mode (single API key)
+ * const cloudClient = getOllama();
+ * const response = await cloudClient.chat({
+ * model: "llama2",
+ * messages: [{ role: "user", content: "Hello" }]
+ * });
+ *
+ * // Token rotation mode (array of API keys)
+ * const rotateClient = getOllama();
+ * const response = await rotateClient.chat({
+ * model: "llama2",
+ * messages: [{ role: "user", content: "Hello" }]
+ * });
+ * ```
+ */
  const getOllama = singleshot(() => {
  const apiKey = lib.contextService.context.apiKey;
  if (Array.isArray(apiKey)) {
@@ -882,12 +1770,98 @@ const getOllama = singleshot(() => {
  });
  });
 
+ /**
+ * Maximum number of retry attempts for outline completion when model fails to use tools correctly.
+ */
  const MAX_ATTEMPTS$4 = 3;
+ /**
+ * Provider for Ollama LLM completions.
+ *
+ * Supports local and remote Ollama models with full tool calling capabilities.
+ * Provides both standard and streaming completion modes, as well as structured
+ * output through the outline completion method.
+ *
+ * Key features:
+ * - Native Ollama protocol support
+ * - Real-time streaming with token-by-token delivery
+ * - Tool calling with automatic retry logic
+ * - JSON schema validation for structured outputs
+ * - Optional thinking mode support (via CC_ENABLE_THINKING)
+ * - Debug logging when CC_ENABLE_DEBUG is enabled
+ *
+ * @example
+ * ```typescript
+ * const provider = new OllamaProvider(contextService, logger);
+ *
+ * // Standard completion
+ * const response = await provider.getCompletion({
+ * agentName: "assistant",
+ * messages: [{ role: "user", content: "Hello!" }],
+ * mode: "direct",
+ * tools: [],
+ * clientId: "client-123"
+ * });
+ *
+ * // Streaming completion
+ * const streamResponse = await provider.getStreamCompletion({
+ * agentName: "assistant",
+ * messages: [{ role: "user", content: "Explain AI" }],
+ * mode: "direct",
+ * tools: [],
+ * clientId: "client-123"
+ * });
+ *
+ * // Structured output with schema enforcement
+ * const outlineResponse = await provider.getOutlineCompletion({
+ * messages: [{ role: "user", content: "Analyze sentiment" }],
+ * format: {
+ * type: "object",
+ * properties: {
+ * sentiment: { type: "string" },
+ * confidence: { type: "number" }
+ * }
+ * }
+ * });
+ * ```
+ */
  class OllamaProvider {
+ /**
+ * Creates a new OllamaProvider instance.
+ *
+ * @param contextService - Context service managing model configuration and API settings
+ * @param logger - Logger instance for tracking provider operations
+ */
  constructor(contextService, logger) {
  this.contextService = contextService;
  this.logger = logger;
  }
+ /**
+ * Performs a standard (non-streaming) completion request to Ollama.
+ *
+ * Sends messages and tools to the Ollama model and returns the complete response.
+ * Supports tool calling with automatic ID generation for tool calls.
+ *
+ * @param params - Completion parameters including messages, tools, and agent configuration
+ * @param params.agentName - Name of the agent making the request
+ * @param params.messages - Conversation history with roles and content
+ * @param params.mode - Completion mode (e.g., "direct", "delegated")
+ * @param params.tools - Available tools for the model to call
+ * @param params.clientId - Client identifier for tracking requests
+ * @returns Promise resolving to the assistant's response message with optional tool calls
+ *
+ * @example
+ * ```typescript
+ * const response = await provider.getCompletion({
+ * agentName: "assistant",
+ * messages: [
+ * { role: "user", content: "What's the weather in Tokyo?" }
+ * ],
+ * mode: "direct",
+ * tools: [weatherTool],
+ * clientId: "client-123"
+ * });
+ * ```
+ */
  async getCompletion(params) {
  const { agentName, messages: rawMessages, mode, tools, clientId } = params;
  const ollama = getOllama();
@@ -908,6 +1882,7 @@ class OllamaProvider {
  })),
  })),
  tools,
+ think: GLOBAL_CONFIG.CC_ENABLE_THINKING,
  });
  const message = response.message;
  const result = {
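The `think` flag added here (and in the two hunks below for the streaming and outline paths) forwards the package-level CC_ENABLE_THINKING toggle to the Ollama SDK's thinking mode. Since GLOBAL_CONFIG is read once at module initialization, the variable has to be set before the package is imported; a minimal sketch, assuming the package root re-exports `engine`:

```typescript
// Sketch: enable thinking mode for every ollama.chat(...) call this package
// makes. CC_ENABLE_THINKING is parsed with parseInt, so use "1", not "true".
process.env.CC_ENABLE_THINKING = "1";
const { engine } = await import("@backtest-kit/ollama"); // root export assumed
```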
@@ -922,11 +1897,40 @@ class OllamaProvider {
  role: response.message.role,
  };
  // Debug logging
- if (CC_ENABLE_DEBUG) {
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
  await fs.appendFile("./debug_ollama_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
  }
  return result;
  }
+ /**
+ * Performs a streaming completion request to Ollama.
+ *
+ * Sends messages and tools to the Ollama model and streams the response token by token.
+ * Emits "llm-new-token" events for each token and "llm-completion" when finished.
+ * Accumulates tool calls and content chunks from the stream.
+ *
+ * @param params - Completion parameters including messages, tools, and agent configuration
+ * @param params.agentName - Name of the agent making the request
+ * @param params.messages - Conversation history with roles and content
+ * @param params.mode - Completion mode (e.g., "direct", "delegated")
+ * @param params.tools - Available tools for the model to call
+ * @param params.clientId - Client identifier for event emission
+ * @returns Promise resolving to the complete assistant's response after streaming finishes
+ *
+ * @example
+ * ```typescript
+ * const response = await provider.getStreamCompletion({
+ * agentName: "assistant",
+ * messages: [
+ * { role: "user", content: "Explain quantum computing" }
+ * ],
+ * mode: "direct",
+ * tools: [],
+ * clientId: "client-123"
+ * });
+ * // Client receives "llm-new-token" events during generation
+ * ```
+ */
  async getStreamCompletion(params) {
  const { agentName, messages: rawMessages, mode, tools, clientId } = params;
  const ollama = getOllama();
@@ -951,6 +1955,7 @@ class OllamaProvider {
  messages,
  tools,
  stream: true,
+ think: GLOBAL_CONFIG.CC_ENABLE_THINKING,
  });
  for await (const chunk of stream) {
  if (chunk.message.tool_calls) {
@@ -982,11 +1987,45 @@ class OllamaProvider {
  })),
  };
  // Debug logging
- if (CC_ENABLE_DEBUG) {
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
  await fs.appendFile("./debug_ollama_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
  }
  return result;
  }
+ /**
+ * Performs structured output completion using JSON schema enforcement via tool calling.
+ *
+ * Forces the model to use a specific tool ("provide_answer") to ensure response
+ * conforms to the provided JSON schema. Implements retry logic with up to MAX_ATTEMPTS
+ * attempts if the model fails to use the tool correctly or returns invalid JSON.
+ *
+ * Uses jsonrepair to fix malformed JSON and validates the output against the schema.
+ * Adds context information to the returned data structure.
+ *
+ * @param params - Outline completion parameters
+ * @param params.messages - Conversation history for context
+ * @param params.format - JSON schema or response format definition
+ * @returns Promise resolving to validated JSON string conforming to the schema
+ * @throws Error if model fails to use tool after MAX_ATTEMPTS attempts
+ *
+ * @example
+ * ```typescript
+ * const response = await provider.getOutlineCompletion({
+ * messages: [
+ * { role: "user", content: "Analyze: 'Great product!'" }
+ * ],
+ * format: {
+ * type: "object",
+ * properties: {
+ * sentiment: { type: "string", enum: ["positive", "negative", "neutral"] },
+ * confidence: { type: "number", minimum: 0, maximum: 1 }
+ * },
+ * required: ["sentiment", "confidence"]
+ * }
+ * });
+ * // response.content = '{"sentiment":"positive","confidence":0.95,"_context":{...}}'
+ * ```
+ */
  async getOutlineCompletion(params) {
  const { messages: rawMessages, format } = params;
  const ollama = getOllama();
@@ -1029,6 +2068,7 @@ class OllamaProvider {
1029
2068
  model: this.contextService.context.model,
1030
2069
  messages,
1031
2070
  tools: [toolDefinition],
2071
+ think: GLOBAL_CONFIG.CC_ENABLE_THINKING,
1032
2072
  });
1033
2073
  const { tool_calls } = response.message;
1034
2074
  if (!tool_calls?.length) {
@@ -1068,7 +2108,7 @@ class OllamaProvider {
1068
2108
  content: JSON.stringify(validation.data),
1069
2109
  };
1070
2110
  // Debug logging
1071
- if (CC_ENABLE_DEBUG) {
2111
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
1072
2112
  await fs.appendFile("./debug_ollama_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1073
2113
  }
1074
2114
  return result;
@@ -1081,6 +2121,33 @@ class OllamaProvider {
1081
2121
  }
1082
2122
  }
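The retry-and-repair flow described for getOutlineCompletion can be sketched in isolation. Assumptions are flagged in comments: `callModel` stands in for the provider's chat call, and real code would validate against the supplied schema rather than stopping at JSON.parse:

```typescript
import { jsonrepair } from "jsonrepair";

const MAX_ATTEMPTS = 5;

// Stand-in for a provider call that may return tool calls.
type ToolCall = { function: { name: string; arguments: string } };
type CallModel = () => Promise<{ tool_calls?: ToolCall[] }>;

async function outlineWithRetry(callModel: CallModel): Promise<string> {
  for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
    const message = await callModel();
    const call = message.tool_calls?.find(
      (c) => c.function.name === "provide_answer"
    );
    if (!call) continue; // model ignored the tool; retry
    try {
      // Repair malformed JSON before parsing; schema validation would follow here.
      const repaired = jsonrepair(call.function.arguments);
      return JSON.stringify(JSON.parse(repaired));
    } catch {
      continue; // unparseable arguments; retry
    }
  }
  throw new Error(`Model failed to use provide_answer after ${MAX_ATTEMPTS} attempts`);
}
```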
1083
2123
 
2124
+ /**
2125
+ * Creates and caches an OpenAI-compatible client for Claude (Anthropic) API.
2126
+ *
2127
+ * Uses OpenAI SDK with Claude's API endpoint for compatibility.
2128
+ * The client instance is cached using singleshot memoization for performance.
2129
+ * Token rotation is not supported - throws error if array of keys is provided.
2130
+ *
2131
+ * Key features:
2132
+ * - OpenAI SDK compatibility layer
2133
+ * - Single API key support only
2134
+ * - Instance caching with singleshot
2135
+ * - Automatic cache clearing on error
2136
+ *
2137
+ * @returns OpenAI client configured for Claude API
2138
+ * @throws Error if API key array is provided (token rotation not supported)
2139
+ *
2140
+ * @example
2141
+ * ```typescript
2142
+ * import { getClaude } from "./config/claude";
2143
+ *
2144
+ * const client = getClaude();
2145
+ * const completion = await client.chat.completions.create({
2146
+ * model: "claude-3-5-sonnet-20240620",
2147
+ * messages: [{ role: "user", content: "Hello" }]
2148
+ * });
2149
+ * ```
2150
+ */
1084
2151
  const getClaude = singleshot(() => {
1085
2152
  const apiKey = lib.contextService.context.apiKey;
1086
2153
  if (Array.isArray(apiKey)) {
@@ -1093,12 +2160,72 @@ const getClaude = singleshot(() => {
1093
2160
  });
1094
2161
  });
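The factories in this file all follow the same shape: cache the client on first call with singleshot, and call `.clear()` before throwing so a bad key array is not cached. The actual `singleshot` implementation is not part of this diff; a minimal sketch of such a memoizer could look like this:

```typescript
// Hypothetical re-implementation of a singleshot-style memoizer.
// The real `singleshot` used by this package may differ in detail.
function singleshot<T>(factory: () => T): (() => T) & { clear: () => void } {
  let cached: { value: T } | null = null;
  const fn = () => {
    if (cached === null) {
      cached = { value: factory() }; // run the factory exactly once
    }
    return cached.value;
  };
  return Object.assign(fn, {
    clear: () => {
      cached = null; // next call re-runs the factory
    },
  });
}

// Usage mirrors the client factories in this file: cache on first call,
// clear on error so a bad configuration is not cached forever.
const getClient = singleshot(() => ({ createdAt: Date.now() }));
getClient();        // runs factory
getClient();        // returns cached instance
getClient.clear();  // forces re-creation on next call
```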
1095
2162
 
2163
+ /**
2164
+ * Maximum number of retry attempts for outline completion when model fails to use tools correctly.
2165
+ */
1096
2166
  const MAX_ATTEMPTS$3 = 5;
1097
- class GrokProvider {
2167
+ /**
2168
+ * Provider for Anthropic Claude models via OpenAI-compatible API.
2169
+ *
2170
+ * Note: In version 0.0.1 this class was named GrokProvider even though it
2171
+ * implements Claude functionality; this release renames it to ClaudeProvider.
2172
+ *
2173
+ * Implements Claude API access through OpenAI-compatible endpoint with full tool calling support.
2174
+ * Supports both standard and simulated streaming modes, as well as structured output
2175
+ * through tool-based schema enforcement.
2176
+ *
2177
+ * Key features:
2178
+ * - Claude API via OpenAI-compatible endpoint
2179
+ * - Tool calling with retry logic and validation
2180
+ * - Simulated streaming (returns complete response)
2181
+ * - JSON schema enforcement via tool calling
2182
+ * - Conditional tool parameter (only adds if tools present)
2183
+ * - Debug logging when CC_ENABLE_DEBUG is enabled
2184
+ *
2185
+ * @example
2186
+ * ```typescript
2187
+ * const provider = new ClaudeProvider(contextService, logger);
2188
+ *
2189
+ * // Standard completion
2190
+ * const response = await provider.getCompletion({
2191
+ * agentName: "claude-assistant",
2192
+ * messages: [{ role: "user", content: "Explain neural networks" }],
2193
+ * mode: "direct",
2194
+ * tools: [searchTool],
2195
+ * clientId: "client-789"
2196
+ * });
2197
+ *
2198
+ * // Structured output with schema validation
2199
+ * const structured = await provider.getOutlineCompletion({
2200
+ * messages: [{ role: "user", content: "Classify: 'Best purchase ever!'" }],
2201
+ * format: {
2202
+ * type: "object",
2203
+ * properties: {
2204
+ * category: { type: "string" },
2205
+ * confidence: { type: "number" }
2206
+ * }
2207
+ * }
2208
+ * });
2209
+ * ```
2210
+ */
2211
+ class ClaudeProvider {
2212
+ /**
2213
+ * Creates a new ClaudeProvider instance.
2214
+ *
2215
+ * @param contextService - Context service managing model configuration and API key
2216
+ * @param logger - Logger instance for tracking provider operations
2217
+ */
1098
2218
  constructor(contextService, logger) {
1099
2219
  this.contextService = contextService;
1100
2220
  this.logger = logger;
1101
2221
  }
2222
+ /**
2223
+ * Performs a standard completion request to Claude via OpenAI-compatible API.
2224
+ * Only adds tools parameter if tools array is non-empty.
2225
+ *
2226
+ * @param params - Completion parameters
2227
+ * @returns Promise resolving to assistant's response message
2228
+ */
1102
2229
  async getCompletion(params) {
1103
2230
  const claude = getClaude();
1104
2231
  const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -1147,11 +2274,17 @@ class GrokProvider {
1147
2274
  })),
1148
2275
  };
1149
2276
  // Debug logging
1150
- if (CC_ENABLE_DEBUG) {
2277
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
1151
2278
  await fs.appendFile("./debug_claude_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1152
2279
  }
1153
2280
  return result;
1154
2281
  }
2282
+ /**
2283
+ * Performs simulated streaming completion (returns complete response, emits completion event).
2284
+ *
2285
+ * @param params - Completion parameters
2286
+ * @returns Promise resolving to the assistant's complete response
2287
+ */
1155
2288
  async getStreamCompletion(params) {
1156
2289
  const openai = getClaude();
1157
2290
  const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -1208,11 +2341,19 @@ class GrokProvider {
1208
2341
  })),
1209
2342
  };
1210
2343
  // Debug logging
1211
- if (CC_ENABLE_DEBUG) {
2344
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
1212
2345
  await fs.appendFile("./debug_gpt5_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1213
2346
  }
1214
2347
  return result;
1215
2348
  }
2349
+ /**
2350
+ * Performs structured output completion using tool calling with retry logic.
2351
+ * Uses tool_choice to force model to use the provide_answer tool.
2352
+ *
2353
+ * @param params - Outline completion parameters
2354
+ * @returns Promise resolving to validated JSON string
2355
+ * @throws Error if model fails after MAX_ATTEMPTS attempts
2356
+ */
1216
2357
  async getOutlineCompletion(params) {
1217
2358
  const { messages: rawMessages, format } = params;
1218
2359
  const claude = getClaude();
@@ -1310,7 +2451,7 @@ class GrokProvider {
1310
2451
  content: JSON.stringify(validation.data),
1311
2452
  };
1312
2453
  // Debug logging
1313
- if (CC_ENABLE_DEBUG) {
2454
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
1314
2455
  await fs.appendFile("./debug_claude_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1315
2456
  }
1316
2457
  return result;
@@ -1323,6 +2464,33 @@ class GrokProvider {
1323
2464
  }
1324
2465
  }
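Claude's outline path pins the model to one tool via `tool_choice`, as the JSDoc above notes. A hedged sketch of that request shape with the OpenAI SDK; the model name and schema here are placeholders, not the package's:

```typescript
import OpenAI from "openai";

// Hypothetical standalone reproduction of the forced-tool request shape.
async function forcedToolAnswer(client: OpenAI) {
  const response = await client.chat.completions.create({
    model: "gpt-4o-mini", // any tool-capable chat model
    messages: [{ role: "user", content: "Classify: 'Best purchase ever!'" }],
    tools: [
      {
        type: "function",
        function: {
          name: "provide_answer",
          description: "Return the structured answer",
          parameters: {
            type: "object",
            properties: {
              category: { type: "string" },
              confidence: { type: "number" },
            },
            required: ["category", "confidence"],
          },
        },
      },
    ],
    // Forces a provide_answer call instead of a prose reply.
    tool_choice: { type: "function", function: { name: "provide_answer" } },
  });
  return response.choices[0].message.tool_calls?.[0]?.function.arguments;
}
```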
1325
2466
 
2467
+ /**
2468
+ * Creates and caches an OpenAI client for OpenAI API.
2469
+ *
2470
+ * Uses the official OpenAI SDK with default settings.
2471
+ * The client instance is cached using singleshot memoization for performance.
2472
+ * Token rotation is not supported - throws error if array of keys is provided.
2473
+ *
2474
+ * Key features:
2475
+ * - Official OpenAI SDK
2476
+ * - Single API key support only
2477
+ * - Instance caching with singleshot
2478
+ * - Automatic cache clearing on error
2479
+ *
2480
+ * @returns OpenAI client configured for OpenAI API
2481
+ * @throws Error if API key array is provided (token rotation not supported)
2482
+ *
2483
+ * @example
2484
+ * ```typescript
2485
+ * import { getOpenAi } from "./config/openai";
2486
+ *
2487
+ * const client = getOpenAi();
2488
+ * const completion = await client.chat.completions.create({
2489
+ * model: "gpt-5o-mini",
2490
+ * messages: [{ role: "user", content: "Hello" }]
2491
+ * });
2492
+ * ```
2493
+ */
1326
2494
  const getOpenAi = singleshot(() => {
1327
2495
  const apiKey = lib.contextService.context.apiKey;
1328
2496
  if (Array.isArray(apiKey)) {
@@ -1334,11 +2502,93 @@ const getOpenAi = singleshot(() => {
1334
2502
  });
1335
2503
  });
1336
2504
 
2505
+ /**
2506
+ * Provider for OpenAI GPT models (GPT-4, GPT-4 Turbo, GPT-3.5, etc.).
2507
+ *
2508
+ * Implements the OpenAI Chat Completions API with full tool calling support.
2509
+ * Uses the official OpenAI SDK for reliable communication with OpenAI's API.
2510
+ * Supports both standard and simulated streaming modes.
2511
+ *
2512
+ * Key features:
2513
+ * - OpenAI Chat Completions API via official SDK
2514
+ * - Tool calling with automatic argument serialization
2515
+ * - Simulated streaming (returns complete response, emits completion event)
2516
+ * - JSON schema enforcement for structured outputs
2517
+ * - Debug logging when CC_ENABLE_DEBUG is enabled
2518
+ *
2519
+ * Note: This provider does not implement true token-by-token streaming.
2520
+ * The getStreamCompletion method returns the complete response and emits
2521
+ * a single completion event to maintain interface compatibility.
2522
+ *
2523
+ * @example
2524
+ * ```typescript
2525
+ * const provider = new GPT5Provider(contextService, logger);
2526
+ *
2527
+ * // Standard completion with GPT-4
2528
+ * const response = await provider.getCompletion({
2529
+ * agentName: "assistant",
2530
+ * messages: [{ role: "user", content: "Explain relativity" }],
2531
+ * mode: "direct",
2532
+ * tools: [],
2533
+ * clientId: "client-123"
2534
+ * });
2535
+ *
2536
+ * // Structured output with JSON schema
2537
+ * const analysis = await provider.getOutlineCompletion({
2538
+ * messages: [{ role: "user", content: "Analyze sentiment" }],
2539
+ * format: {
2540
+ * type: "json_schema",
2541
+ * json_schema: {
2542
+ * schema: {
2543
+ * type: "object",
2544
+ * properties: {
2545
+ * sentiment: { type: "string" },
2546
+ * score: { type: "number" }
2547
+ * }
2548
+ * }
2549
+ * }
2550
+ * }
2551
+ * });
2552
+ * ```
2553
+ */
1337
2554
  class GPT5Provider {
2555
+ /**
2556
+ * Creates a new GPT5Provider instance.
2557
+ *
2558
+ * @param contextService - Context service managing model configuration and API key
2559
+ * @param logger - Logger instance for tracking provider operations
2560
+ */
1338
2561
  constructor(contextService, logger) {
1339
2562
  this.contextService = contextService;
1340
2563
  this.logger = logger;
1341
2564
  }
2565
+ /**
2566
+ * Performs a standard completion request to OpenAI.
2567
+ *
2568
+ * Sends messages and tools to the OpenAI API and returns the complete response.
2569
+ * Automatically serializes tool call arguments to JSON strings for API compatibility.
2570
+ *
2571
+ * @param params - Completion parameters including messages, tools, and agent configuration
2572
+ * @param params.agentName - Name of the agent making the request
2573
+ * @param params.messages - Conversation history with roles and content
2574
+ * @param params.mode - Completion mode (e.g., "direct", "delegated")
2575
+ * @param params.tools - Available tools for the model to call
2576
+ * @param params.clientId - Client identifier for tracking requests
2577
+ * @returns Promise resolving to the assistant's response message with optional tool calls
2578
+ *
2579
+ * @example
2580
+ * ```typescript
2581
+ * const response = await provider.getCompletion({
2582
+ * agentName: "gpt-assistant",
2583
+ * messages: [
2584
+ * { role: "user", content: "Calculate 15% tip on $85" }
2585
+ * ],
2586
+ * mode: "direct",
2587
+ * tools: [calculatorTool],
2588
+ * clientId: "client-456"
2589
+ * });
2590
+ * ```
2591
+ */
1342
2592
  async getCompletion(params) {
1343
2593
  const openai = getOpenAi();
1344
2594
  const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -1380,11 +2630,43 @@ class GPT5Provider {
1380
2630
  })),
1381
2631
  };
1382
2632
  // Debug logging
1383
- if (CC_ENABLE_DEBUG) {
2633
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
1384
2634
  await fs.appendFile("./debug_gpt5_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1385
2635
  }
1386
2636
  return result;
1387
2637
  }
2638
+ /**
2639
+ * Performs a simulated streaming completion request to OpenAI.
2640
+ *
2641
+ * Note: This method does NOT implement true token-by-token streaming.
2642
+ * It performs a standard completion and emits a single "llm-completion"
2643
+ * event with the full response to maintain interface compatibility.
2644
+ *
2645
+ * For true streaming, the OpenAI SDK streaming API would need to be used
2646
+ * with "stream: true" parameter.
2647
+ *
2648
+ * @param params - Completion parameters including messages, tools, and agent configuration
2649
+ * @param params.agentName - Name of the agent making the request
2650
+ * @param params.messages - Conversation history with roles and content
2651
+ * @param params.mode - Completion mode (e.g., "direct", "delegated")
2652
+ * @param params.tools - Available tools for the model to call
2653
+ * @param params.clientId - Client identifier for event emission
2654
+ * @returns Promise resolving to the assistant's complete response
2655
+ *
2656
+ * @example
2657
+ * ```typescript
2658
+ * const response = await provider.getStreamCompletion({
2659
+ * agentName: "gpt-assistant",
2660
+ * messages: [
2661
+ * { role: "user", content: "Write a haiku about coding" }
2662
+ * ],
2663
+ * mode: "direct",
2664
+ * tools: [],
2665
+ * clientId: "client-456"
2666
+ * });
2667
+ * // Client receives single "llm-completion" event with full response
2668
+ * ```
2669
+ */
1388
2670
  async getStreamCompletion(params) {
1389
2671
  const openai = getOpenAi();
1390
2672
  const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -1441,11 +2723,53 @@ class GPT5Provider {
1441
2723
  })),
1442
2724
  };
1443
2725
  // Debug logging
1444
- if (CC_ENABLE_DEBUG) {
2726
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
1445
2727
  await fs.appendFile("./debug_gpt5_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1446
2728
  }
1447
2729
  return result;
1448
2730
  }
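The note above points out that true streaming would require `stream: true`. For contrast, a sketch of what a genuinely streaming variant could look like with the official SDK; this is not what the provider currently does:

```typescript
import OpenAI from "openai";

// True token-by-token streaming, in contrast to the simulated mode above.
async function streamHaiku(client: OpenAI, emit: (token: string) => void) {
  const stream = await client.chat.completions.create({
    model: "gpt-4o-mini", // placeholder model name
    messages: [{ role: "user", content: "Write a haiku about coding" }],
    stream: true,
  });
  let full = "";
  for await (const chunk of stream) {
    const delta = chunk.choices[0]?.delta?.content ?? "";
    full += delta;
    emit(delta); // e.g., forward as an "llm-new-token" event
  }
  return full;
}
```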
2731
+ /**
2732
+ * Performs structured output completion using OpenAI's response_format parameter.
2733
+ *
2734
+ * Uses OpenAI's native JSON schema mode to enforce structured output.
2735
+ * The model is instructed to respond in a specific JSON format matching
2736
+ * the provided schema. Uses jsonrepair to handle any JSON formatting issues.
2737
+ *
2738
+ * @param params - Outline completion parameters
2739
+ * @param params.messages - Conversation history for context
2740
+ * @param params.format - JSON schema or response format definition (supports both formats)
2741
+ * @returns Promise resolving to validated JSON string conforming to the schema
2742
+ * @throws Error if model returns a refusal message
2743
+ *
2744
+ * @example
2745
+ * ```typescript
2746
+ * const response = await provider.getOutlineCompletion({
2747
+ * messages: [
2748
+ * { role: "user", content: "Extract entities from: 'Apple released iPhone in Cupertino'" }
2749
+ * ],
2750
+ * format: {
2751
+ * type: "json_schema",
2752
+ * json_schema: {
2753
+ * schema: {
2754
+ * type: "object",
2755
+ * properties: {
2756
+ * entities: {
2757
+ * type: "array",
2758
+ * items: {
2759
+ * type: "object",
2760
+ * properties: {
2761
+ * text: { type: "string" },
2762
+ * type: { type: "string" }
2763
+ * }
2764
+ * }
2765
+ * }
2766
+ * }
2767
+ * }
2768
+ * }
2769
+ * }
2770
+ * });
2771
+ * ```
2772
+ */
1449
2773
  async getOutlineCompletion(params) {
1450
2774
  const { messages: rawMessages, format } = params;
1451
2775
  const openai = getOpenAi();
@@ -1484,13 +2808,40 @@ class GPT5Provider {
1484
2808
  content: json,
1485
2809
  };
1486
2810
  // Debug logging
1487
- if (CC_ENABLE_DEBUG) {
2811
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
1488
2812
  await fs.appendFile("./debug_gpt5_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1489
2813
  }
1490
2814
  return result;
1491
2815
  }
1492
2816
  }
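This build already imports `zodResponseFormat` from `openai/helpers/zod`, so the `response_format` object used by getOutlineCompletion can also be built from a zod schema. A sketch with a hypothetical sentiment schema:

```typescript
import OpenAI from "openai";
import { zodResponseFormat } from "openai/helpers/zod";
import { z } from "zod";

// Hypothetical schema for illustration.
const Sentiment = z.object({
  sentiment: z.enum(["positive", "negative", "neutral"]),
  confidence: z.number().min(0).max(1),
});

async function analyze(client: OpenAI, text: string) {
  const completion = await client.chat.completions.create({
    model: "gpt-4o-mini", // placeholder model name
    messages: [{ role: "user", content: `Analyze: ${text}` }],
    // Expands to the { type: "json_schema", json_schema: {...} } shape shown above.
    response_format: zodResponseFormat(Sentiment, "sentiment"),
  });
  return Sentiment.parse(
    JSON.parse(completion.choices[0].message.content ?? "{}")
  );
}
```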
1493
2817
 
2818
+ /**
2819
+ * Creates and caches an OpenAI-compatible client for Deepseek API.
2820
+ *
2821
+ * Uses OpenAI SDK with Deepseek's API endpoint.
2822
+ * The client instance is cached using singleshot memoization for performance.
2823
+ * Token rotation is not supported - throws error if array of keys is provided.
2824
+ *
2825
+ * Key features:
2826
+ * - OpenAI SDK compatibility layer
2827
+ * - Single API key support only
2828
+ * - Instance caching with singleshot
2829
+ * - Automatic cache clearing on error
2830
+ *
2831
+ * @returns OpenAI client configured for Deepseek API
2832
+ * @throws Error if API key array is provided (token rotation not supported)
2833
+ *
2834
+ * @example
2835
+ * ```typescript
2836
+ * import { getDeepseek } from "./config/deepseek";
2837
+ *
2838
+ * const client = getDeepseek();
2839
+ * const completion = await client.chat.completions.create({
2840
+ * model: "deepseek-chat",
2841
+ * messages: [{ role: "user", content: "Hello" }]
2842
+ * });
2843
+ * ```
2844
+ */
1494
2845
  const getDeepseek = singleshot(() => {
1495
2846
  const apiKey = lib.contextService.context.apiKey;
1496
2847
  if (Array.isArray(apiKey)) {
@@ -1503,12 +2854,52 @@ const getDeepseek = singleshot(() => {
1503
2854
  });
1504
2855
  });
1505
2856
 
2857
+ /**
2858
+ * Maximum number of retry attempts for outline completion.
2859
+ */
1506
2860
  const MAX_ATTEMPTS$2 = 3;
2861
+ /**
2862
+ * Provider for Deepseek AI models via OpenAI-compatible API.
2863
+ *
2864
+ * Supports Deepseek models through OpenAI-compatible endpoint with tool calling.
2865
+ * Features simulated streaming and structured output via tool-based schema enforcement.
2866
+ *
2867
+ * Key features:
2868
+ * - OpenAI-compatible API endpoint
2869
+ * - Tool calling with conditional inclusion (only if tools present)
2870
+ * - Simulated streaming (returns complete response)
2871
+ * - Schema enforcement via tool calling with retry logic
2872
+ * - Debug logging support
2873
+ *
2874
+ * @example
2875
+ * ```typescript
2876
+ * const provider = new DeepseekProvider(contextService, logger);
2877
+ * const response = await provider.getCompletion({
2878
+ * agentName: "deepseek-assistant",
2879
+ * messages: [{ role: "user", content: "Explain transformers" }],
2880
+ * mode: "direct",
2881
+ * tools: [],
2882
+ * clientId: "client-001"
2883
+ * });
2884
+ * ```
2885
+ */
1507
2886
  class DeepseekProvider {
2887
+ /**
2888
+ * Creates a new DeepseekProvider instance.
2889
+ *
2890
+ * @param contextService - Context service with model configuration
2891
+ * @param logger - Logger for operation tracking
2892
+ */
1508
2893
  constructor(contextService, logger) {
1509
2894
  this.contextService = contextService;
1510
2895
  this.logger = logger;
1511
2896
  }
2897
+ /**
2898
+ * Performs standard completion request to Deepseek.
2899
+ *
2900
+ * @param params - Completion parameters
2901
+ * @returns Promise resolving to assistant's response
2902
+ */
1512
2903
  async getCompletion(params) {
1513
2904
  const deepseek = getDeepseek();
1514
2905
  const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -1557,11 +2948,17 @@ class DeepseekProvider {
1557
2948
  })),
1558
2949
  };
1559
2950
  // Debug logging
1560
- if (CC_ENABLE_DEBUG) {
2951
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
1561
2952
  await fs.appendFile("./debug_deepseek_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1562
2953
  }
1563
2954
  return result;
1564
2955
  }
2956
+ /**
2957
+ * Performs simulated streaming completion.
2958
+ *
2959
+ * @param params - Completion parameters
2960
+ * @returns Promise resolving to complete response
2961
+ */
1565
2962
  async getStreamCompletion(params) {
1566
2963
  const deepseek = getDeepseek();
1567
2964
  const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -1618,11 +3015,18 @@ class DeepseekProvider {
1618
3015
  })),
1619
3016
  };
1620
3017
  // Debug logging
1621
- if (CC_ENABLE_DEBUG) {
3018
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
1622
3019
  await fs.appendFile("./debug_deepseek_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1623
3020
  }
1624
3021
  return result;
1625
3022
  }
3023
+ /**
3024
+ * Performs structured output completion with schema validation.
3025
+ *
3026
+ * @param params - Outline completion parameters
3027
+ * @returns Promise resolving to validated JSON string
3028
+ * @throws Error if model fails after MAX_ATTEMPTS
3029
+ */
1626
3030
  async getOutlineCompletion(params) {
1627
3031
  const { messages: rawMessages, format } = params;
1628
3032
  const deepseek = getDeepseek();
@@ -1720,7 +3124,7 @@ class DeepseekProvider {
1720
3124
  content: JSON.stringify(validation.data),
1721
3125
  };
1722
3126
  // Debug logging
1723
- if (CC_ENABLE_DEBUG) {
3127
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
1724
3128
  await fs.appendFile("./debug_deepseek_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1725
3129
  }
1726
3130
  return result;
@@ -1733,6 +3137,33 @@ class DeepseekProvider {
1733
3137
  }
1734
3138
  }
1735
3139
 
3140
+ /**
3141
+ * Creates and caches an OpenAI-compatible client for Mistral API.
3142
+ *
3143
+ * Uses OpenAI SDK with Mistral's API endpoint.
3144
+ * The client instance is cached using singleshot memoization for performance.
3145
+ * Token rotation is not supported - throws error if array of keys is provided.
3146
+ *
3147
+ * Key features:
3148
+ * - OpenAI SDK compatibility layer
3149
+ * - Single API key support only
3150
+ * - Instance caching with singleshot
3151
+ * - Automatic cache clearing on error
3152
+ *
3153
+ * @returns OpenAI client configured for Mistral API
3154
+ * @throws Error if API key array is provided (token rotation not supported)
3155
+ *
3156
+ * @example
3157
+ * ```typescript
3158
+ * import { getMistral } from "./config/mistral";
3159
+ *
3160
+ * const client = getMistral();
3161
+ * const completion = await client.chat.completions.create({
3162
+ * model: "mistral-large-latest",
3163
+ * messages: [{ role: "user", content: "Hello" }]
3164
+ * });
3165
+ * ```
3166
+ */
1736
3167
  const getMistral = singleshot(() => {
1737
3168
  const apiKey = lib.contextService.context.apiKey;
1738
3169
  if (Array.isArray(apiKey)) {
@@ -1745,12 +3176,52 @@ const getMistral = singleshot(() => {
1745
3176
  });
1746
3177
  });
1747
3178
 
3179
+ /**
3180
+ * Maximum number of retry attempts for outline completion.
3181
+ */
1748
3182
  const MAX_ATTEMPTS$1 = 3;
3183
+ /**
3184
+ * Provider for Mistral AI models via OpenAI-compatible API.
3185
+ *
3186
+ * Implements Mistral API access through OpenAI-compatible endpoint.
3187
+ * Supports tool calling, simulated streaming, and structured output.
3188
+ *
3189
+ * Key features:
3190
+ * - Mistral AI API via OpenAI-compatible endpoint
3191
+ * - Tool calling with conditional inclusion
3192
+ * - Simulated streaming (complete response)
3193
+ * - Schema enforcement via tool calling with retry
3194
+ * - Debug logging support
3195
+ *
3196
+ * @example
3197
+ * ```typescript
3198
+ * const provider = new MistralProvider(contextService, logger);
3199
+ * const response = await provider.getCompletion({
3200
+ * agentName: "mistral-assistant",
3201
+ * messages: [{ role: "user", content: "Summarize quantum physics" }],
3202
+ * mode: "direct",
3203
+ * tools: [],
3204
+ * clientId: "client-555"
3205
+ * });
3206
+ * ```
3207
+ */
1749
3208
  class MistralProvider {
3209
+ /**
3210
+ * Creates a new MistralProvider instance.
3211
+ *
3212
+ * @param contextService - Context service with model configuration
3213
+ * @param logger - Logger for operation tracking
3214
+ */
1750
3215
  constructor(contextService, logger) {
1751
3216
  this.contextService = contextService;
1752
3217
  this.logger = logger;
1753
3218
  }
3219
+ /**
3220
+ * Performs standard completion request to Mistral.
3221
+ *
3222
+ * @param params - Completion parameters
3223
+ * @returns Promise resolving to assistant's response
3224
+ */
1754
3225
  async getCompletion(params) {
1755
3226
  const mistral = getMistral();
1756
3227
  const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -1799,11 +3270,17 @@ class MistralProvider {
1799
3270
  })),
1800
3271
  };
1801
3272
  // Debug logging
1802
- if (CC_ENABLE_DEBUG) {
3273
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
1803
3274
  await fs.appendFile("./debug_mistral_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1804
3275
  }
1805
3276
  return result;
1806
3277
  }
3278
+ /**
3279
+ * Performs simulated streaming completion.
3280
+ *
3281
+ * @param params - Completion parameters
3282
+ * @returns Promise resolving to complete response
3283
+ */
1807
3284
  async getStreamCompletion(params) {
1808
3285
  const mistral = getMistral();
1809
3286
  const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -1860,11 +3337,18 @@ class MistralProvider {
1860
3337
  })),
1861
3338
  };
1862
3339
  // Debug logging
1863
- if (CC_ENABLE_DEBUG) {
3340
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
1864
3341
  await fs.appendFile("./debug_mistral_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1865
3342
  }
1866
3343
  return result;
1867
3344
  }
3345
+ /**
3346
+ * Performs structured output completion with schema validation.
3347
+ *
3348
+ * @param params - Outline completion parameters
3349
+ * @returns Promise resolving to validated JSON string
3350
+ * @throws Error if model fails after MAX_ATTEMPTS
3351
+ */
1868
3352
  async getOutlineCompletion(params) {
1869
3353
  const { messages: rawMessages, format } = params;
1870
3354
  const mistral = getMistral();
@@ -1962,7 +3446,7 @@ class MistralProvider {
1962
3446
  content: JSON.stringify(validation.data),
1963
3447
  };
1964
3448
  // Debug logging
1965
- if (CC_ENABLE_DEBUG) {
3449
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
1966
3450
  await fs.appendFile("./debug_mistral_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1967
3451
  }
1968
3452
  return result;
@@ -1975,6 +3459,33 @@ class MistralProvider {
1975
3459
  }
1976
3460
  }
1977
3461
 
3462
+ /**
3463
+ * Creates and caches an OpenAI-compatible client for Perplexity API.
3464
+ *
3465
+ * Uses OpenAI SDK with Perplexity's API endpoint.
3466
+ * The client instance is cached using singleshot memoization for performance.
3467
+ * Token rotation is not supported - throws error if array of keys is provided.
3468
+ *
3469
+ * Key features:
3470
+ * - OpenAI SDK compatibility layer
3471
+ * - Single API key support only
3472
+ * - Instance caching with singleshot
3473
+ * - Automatic cache clearing on error
3474
+ *
3475
+ * @returns OpenAI client configured for Perplexity API
3476
+ * @throws Error if API key array is provided (token rotation not supported)
3477
+ *
3478
+ * @example
3479
+ * ```typescript
3480
+ * import { getPerplexity } from "./config/perplexity";
3481
+ *
3482
+ * const client = getPerplexity();
3483
+ * const completion = await client.chat.completions.create({
3484
+ * model: "llama-3.1-sonar-large-128k-online",
3485
+ * messages: [{ role: "user", content: "Hello" }]
3486
+ * });
3487
+ * ```
3488
+ */
1978
3489
  const getPerplexity = singleshot(() => {
1979
3490
  const apiKey = lib.contextService.context.apiKey;
1980
3491
  if (Array.isArray(apiKey)) {
@@ -1987,11 +3498,49 @@ const getPerplexity = singleshot(() => {
1987
3498
  });
1988
3499
  });
1989
3500
 
3501
+ /**
3502
+ * Provider for Perplexity AI models via OpenAI-compatible API.
3503
+ *
3504
+ * Implements Perplexity API access with specialized message handling.
3505
+ * Filters and merges consecutive messages to comply with API requirements.
3506
+ * Note: getStreamCompletion returns an error message because streaming is not supported.
3507
+ *
3508
+ * Key features:
3509
+ * - OpenAI-compatible API endpoint
3510
+ * - Message filtering (user/assistant/tool only)
3511
+ * - System message aggregation
3512
+ * - Consecutive message merging (prevents API errors)
3513
+ * - Tool calling support (requires description field)
3514
+ * - Outline completion via response_format
3515
+ * - Streaming not supported (returns error message)
3516
+ *
3517
+ * @example
3518
+ * ```typescript
3519
+ * const provider = new PerplexityProvider(contextService, logger);
3520
+ * const response = await provider.getCompletion({
3521
+ * agentName: "perplexity-assistant",
3522
+ * messages: [{ role: "user", content: "Latest AI research?" }],
3523
+ * mode: "direct",
3524
+ * tools: [searchTool],
3525
+ * clientId: "client-333"
3526
+ * });
3527
+ * ```
3528
+ */
1990
3529
  class PerplexityProvider {
3530
+ /**
3531
+ * Creates a new PerplexityProvider instance.
3532
+ */
1991
3533
  constructor(contextService, logger) {
1992
3534
  this.contextService = contextService;
1993
3535
  this.logger = logger;
1994
3536
  }
3537
+ /**
3538
+ * Performs standard completion with message filtering and merging.
3539
+ * Filters messages to user/assistant/tool only and merges consecutive messages.
3540
+ *
3541
+ * @param params - Completion parameters
3542
+ * @returns Promise resolving to assistant's response
3543
+ */
1995
3544
  async getCompletion(params) {
1996
3545
  const perplexity = getPerplexity();
1997
3546
  const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -2076,11 +3625,18 @@ class PerplexityProvider {
2076
3625
  })),
2077
3626
  };
2078
3627
  // Debug logging
2079
- if (CC_ENABLE_DEBUG) {
3628
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
2080
3629
  await fs.appendFile("./debug_perplexity_provider.txt", JSON.stringify({ params, answer: finalResult }, null, 2) + "\n\n");
2081
3630
  }
2082
3631
  return finalResult;
2083
3632
  }
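The filtering and merging this provider applies before calling the API can be sketched independently. Assumptions: a simplified message type, and newline concatenation when merging (the package's exact join may differ):

```typescript
type Msg = { role: "system" | "user" | "assistant" | "tool"; content: string };

// Collapse system prompts into one leading message and merge consecutive
// same-role messages so the conversation alternates, as the API requires.
function prepareMessages(raw: Msg[]): Msg[] {
  const system = raw
    .filter((m) => m.role === "system")
    .map((m) => m.content)
    .join("\n");
  const rest = raw.filter((m) => m.role !== "system");
  const merged: Msg[] = [];
  for (const msg of rest) {
    const last = merged[merged.length - 1];
    if (last && last.role === msg.role) {
      last.content += "\n" + msg.content; // merge consecutive same-role turns
    } else {
      merged.push({ ...msg });
    }
  }
  return system ? [{ role: "system", content: system }, ...merged] : merged;
}
```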
3633
+ /**
3634
+ * Returns an error message indicating that streaming is not supported.
3635
+ * Perplexity provider does not implement token-by-token streaming.
3636
+ *
3637
+ * @param params - Completion parameters
3638
+ * @returns Promise resolving to error message
3639
+ */
2084
3640
  async getStreamCompletion(params) {
2085
3641
  const { clientId, agentName, mode } = params;
2086
3642
  this.logger.log("perplexityProvider getStreamCompletion", {
@@ -2097,6 +3653,14 @@ class PerplexityProvider {
2097
3653
  };
2098
3654
  return result;
2099
3655
  }
3656
+ /**
3657
+ * Performs structured output completion using response_format.
3658
+ * Filters and merges messages before sending.
3659
+ *
3660
+ * @param params - Outline completion parameters
3661
+ * @returns Promise resolving to validated JSON string
3662
+ * @throws Error if model returns refusal
3663
+ */
2100
3664
  async getOutlineCompletion(params) {
2101
3665
  const { messages: rawMessages, format } = params;
2102
3666
  const perplexity = getPerplexity();
@@ -2168,13 +3732,40 @@ class PerplexityProvider {
2168
3732
  content: json,
2169
3733
  };
2170
3734
  // Debug logging
2171
- if (CC_ENABLE_DEBUG) {
3735
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
2172
3736
  await fs.appendFile("./debug_perplexity_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
2173
3737
  }
2174
3738
  return result;
2175
3739
  }
2176
3740
  }
2177
3741
 
3742
+ /**
3743
+ * Creates and caches an OpenAI-compatible client for Cohere API.
3744
+ *
3745
+ * Uses OpenAI SDK with Cohere's compatibility endpoint.
3746
+ * The client instance is cached using singleshot memoization for performance.
3747
+ * Token rotation is not supported - throws error if array of keys is provided.
3748
+ *
3749
+ * Key features:
3750
+ * - OpenAI SDK compatibility layer
3751
+ * - Single API key support only
3752
+ * - Instance caching with singleshot
3753
+ * - Automatic cache clearing on error
3754
+ *
3755
+ * @returns OpenAI client configured for Cohere API
3756
+ * @throws Error if API key array is provided (token rotation not supported)
3757
+ *
3758
+ * @example
3759
+ * ```typescript
3760
+ * import { getCohere } from "./config/cohere";
3761
+ *
3762
+ * const client = getCohere();
3763
+ * const completion = await client.chat.completions.create({
3764
+ * model: "command-r-plus",
3765
+ * messages: [{ role: "user", content: "Hello" }]
3766
+ * });
3767
+ * ```
3768
+ */
2178
3769
  const getCohere = singleshot(() => {
2179
3770
  const apiKey = lib.contextService.context.apiKey;
2180
3771
  if (Array.isArray(apiKey)) {
@@ -2187,11 +3778,56 @@ const getCohere = singleshot(() => {
2187
3778
  });
2188
3779
  });
2189
3780
 
3781
+ /**
3782
+ * Provider for Cohere AI models via OpenAI-compatible API.
3783
+ *
3784
+ * Implements Cohere API access with specialized message handling for tool calling.
3785
+ * Unlike other providers, includes tool messages in conversation and does NOT merge
3786
+ * consecutive assistant messages (required for proper tool calling flow).
3787
+ *
3788
+ * Key features:
3789
+ * - OpenAI-compatible API endpoint
3790
+ * - Message filtering (user/assistant/tool - includes tool messages)
3791
+ * - System message aggregation
3792
+ * - NO consecutive assistant message merging (breaks tool calling)
3793
+ * - Tool calling support (requires description field)
3794
+ * - Outline completion via response_format
3795
+ * - Simulated streaming
3796
+ *
3797
+ * Important: Cohere requires strict tool_calls -> tool_responses sequence.
3798
+ * Merging assistant messages breaks this flow.
3799
+ *
3800
+ * @example
3801
+ * ```typescript
3802
+ * const provider = new CohereProvider(contextService, logger);
3803
+ * const response = await provider.getCompletion({
3804
+ * agentName: "cohere-assistant",
3805
+ * messages: [
3806
+ * { role: "user", content: "Search for AI papers" },
3807
+ * { role: "assistant", content: "", tool_calls: [searchCall] },
3808
+ * { role: "tool", content: "Results...", tool_call_id: "123" }
3809
+ * ],
3810
+ * mode: "direct",
3811
+ * tools: [searchTool],
3812
+ * clientId: "client-222"
3813
+ * });
3814
+ * ```
3815
+ */
2190
3816
  class CohereProvider {
3817
+ /**
3818
+ * Creates a new CohereProvider instance.
3819
+ */
2191
3820
  constructor(contextService, logger) {
2192
3821
  this.contextService = contextService;
2193
3822
  this.logger = logger;
2194
3823
  }
3824
+ /**
3825
+ * Performs standard completion with Cohere-specific message handling.
3826
+ * Includes tool messages and preserves assistant message sequence.
3827
+ *
3828
+ * @param params - Completion parameters
3829
+ * @returns Promise resolving to assistant's response
3830
+ */
2195
3831
  async getCompletion(params) {
2196
3832
  const cohere = getCohere();
2197
3833
  const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -2263,11 +3899,17 @@ class CohereProvider {
2263
3899
  })),
2264
3900
  };
2265
3901
  // Debug logging
2266
- if (CC_ENABLE_DEBUG) {
3902
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
2267
3903
  await fs.appendFile("./debug_cohere_provider.txt", JSON.stringify({ params, answer: finalResult }, null, 2) + "\n\n");
2268
3904
  }
2269
3905
  return finalResult;
2270
3906
  }
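The sequencing constraint called out above is easiest to see as data. A sketch of the shape Cohere expects, with every assistant tool_calls turn immediately followed by its tool result (identifiers are hypothetical):

```typescript
// Each assistant message carrying tool_calls must be followed directly by the
// matching tool messages; merging assistant turns would break this pairing.
const messages = [
  { role: "user", content: "Search for AI papers" },
  {
    role: "assistant",
    content: "",
    tool_calls: [
      {
        id: "call_1", // hypothetical identifier
        type: "function",
        function: { name: "search", arguments: '{"query":"AI papers"}' },
      },
    ],
  },
  { role: "tool", tool_call_id: "call_1", content: "Results..." },
  { role: "assistant", content: "Here is what I found..." },
];
```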
3907
+ /**
3908
+ * Performs simulated streaming completion with Cohere-specific message handling.
3909
+ *
3910
+ * @param params - Completion parameters
3911
+ * @returns Promise resolving to complete response
3912
+ */
2271
3913
  async getStreamCompletion(params) {
2272
3914
  const cohere = getCohere();
2273
3915
  const { clientId, agentName, messages: rawMessages, mode, tools } = params;
@@ -2347,11 +3989,19 @@ class CohereProvider {
2347
3989
  })),
2348
3990
  };
2349
3991
  // Debug logging
2350
- if (CC_ENABLE_DEBUG) {
3992
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
2351
3993
  await fs.appendFile("./debug_cohere_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
2352
3994
  }
2353
3995
  return result;
2354
3996
  }
3997
+ /**
3998
+ * Performs structured output completion using response_format.
3999
+ * Filters and merges user messages only (preserves assistant sequence).
4000
+ *
4001
+ * @param params - Outline completion parameters
4002
+ * @returns Promise resolving to validated JSON string
4003
+ * @throws Error if model returns refusal
4004
+ */
2355
4005
  async getOutlineCompletion(params) {
2356
4006
  const { messages: rawMessages, format } = params;
2357
4007
  const cohere = getCohere();
@@ -2410,20 +4060,63 @@ class CohereProvider {
2410
4060
  content: json,
2411
4061
  };
2412
4062
  // Debug logging
2413
- if (CC_ENABLE_DEBUG) {
4063
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
2414
4064
  await fs.appendFile("./debug_cohere_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
2415
4065
  }
2416
4066
  return result;
2417
4067
  }
2418
4068
  }
2419
4069
 
4070
+ /**
4071
+ * Maximum number of retry attempts for outline completion.
4072
+ */
2420
4073
  const MAX_ATTEMPTS = 3;
4074
+ /**
4075
+ * Alibaba Cloud DashScope API base URL.
4076
+ */
2421
4077
  const BASE_URL = "https://dashscope-intl.aliyuncs.com/compatible-mode/v1";
4078
+ /**
4079
+ * Provider for Alibaba Cloud Qwen models via DashScope API.
4080
+ *
4081
+ * Implements Alibaba Cloud DashScope API access using fetchApi for HTTP requests.
4082
+ * Supports thinking mode control via enable_thinking parameter.
4083
+ * Does NOT support token rotation (single API key only).
4084
+ *
4085
+ * Key features:
4086
+ * - DashScope OpenAI-compatible endpoint
4087
+ * - Direct fetchApi HTTP requests (no SDK)
4088
+ * - Thinking mode control (enable_thinking parameter)
4089
+ * - Tool calling with conditional inclusion
4090
+ * - Simulated streaming
4091
+ * - No token rotation support
4092
+ *
4093
+ * @example
4094
+ * ```typescript
4095
+ * const provider = new AlibabaProvider(contextService, logger);
4096
+ * const response = await provider.getCompletion({
4097
+ * agentName: "qwen-assistant",
4098
+ * messages: [{ role: "user", content: "Explain blockchain" }],
4099
+ * mode: "direct",
4100
+ * tools: [],
4101
+ * clientId: "client-111"
4102
+ * });
4103
+ * ```
4104
+ */
2422
4105
  class AlibabaProvider {
4106
+ /**
4107
+ * Creates a new AlibabaProvider instance.
4108
+ */
2423
4109
  constructor(contextService, logger) {
2424
4110
  this.contextService = contextService;
2425
4111
  this.logger = logger;
2426
4112
  }
4113
+ /**
4114
+ * Performs standard completion request to Alibaba DashScope.
4115
+ *
4116
+ * @param params - Completion parameters
4117
+ * @returns Promise resolving to assistant's response
4118
+ * @throws Error if token rotation attempted
4119
+ */
2427
4120
  async getCompletion(params) {
2428
4121
  const { clientId, agentName, messages: rawMessages, mode, tools } = params;
2429
4122
  this.logger.log("alibabaProvider getCompletion", {
@@ -2486,11 +4179,18 @@ class AlibabaProvider {
2486
4179
  })),
2487
4180
  };
2488
4181
  // Debug logging
2489
- if (CC_ENABLE_DEBUG) {
4182
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
2490
4183
  await fs.appendFile("./debug_alibaba_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
2491
4184
  }
2492
4185
  return result;
2493
4186
  }
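Because this provider talks to DashScope directly rather than through an SDK, the request is a plain JSON POST. A sketch using global fetch against the documented compatible-mode endpoint; the body fields follow the OpenAI-compatible convention, with enable_thinking as described above:

```typescript
const BASE_URL = "https://dashscope-intl.aliyuncs.com/compatible-mode/v1";

async function qwenCompletion(apiKey: string, model: string, prompt: string) {
  const response = await fetch(`${BASE_URL}/chat/completions`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${apiKey}`,
    },
    body: JSON.stringify({
      model,
      messages: [{ role: "user", content: prompt }],
      enable_thinking: false, // thinking mode control described above
    }),
  });
  if (!response.ok) {
    throw new Error(`DashScope request failed: ${response.status}`);
  }
  const data = await response.json();
  return data.choices[0].message;
}
```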
4187
+ /**
4188
+ * Performs simulated streaming completion.
4189
+ *
4190
+ * @param params - Completion parameters
4191
+ * @returns Promise resolving to complete response
4192
+ * @throws Error if token rotation attempted
4193
+ */
2494
4194
  async getStreamCompletion(params) {
2495
4195
  const { clientId, agentName, messages: rawMessages, mode, tools } = params;
2496
4196
  this.logger.log("alibabaProvider getStreamCompletion", {
@@ -2560,11 +4260,18 @@ class AlibabaProvider {
2560
4260
  })),
2561
4261
  };
2562
4262
  // Debug logging
2563
- if (CC_ENABLE_DEBUG) {
4263
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
2564
4264
  await fs.appendFile("./debug_alibaba_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
2565
4265
  }
2566
4266
  return result;
2567
4267
  }
4268
+ /**
4269
+ * Performs structured output completion using tool calling with retry logic.
4270
+ *
4271
+ * @param params - Outline completion parameters
4272
+ * @returns Promise resolving to validated JSON string
4273
+ * @throws Error if model fails after MAX_ATTEMPTS or token rotation attempted
4274
+ */
2568
4275
  async getOutlineCompletion(params) {
2569
4276
  const { messages: rawMessages, format } = params;
2570
4277
  this.logger.log("alibabaProvider getOutlineCompletion", {
@@ -2674,7 +4381,7 @@ class AlibabaProvider {
2674
4381
  content: JSON.stringify(validation.data),
2675
4382
  };
2676
4383
  // Debug logging
2677
- if (CC_ENABLE_DEBUG) {
4384
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
2678
4385
  await fs.appendFile("./debug_alibaba_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
2679
4386
  }
2680
4387
  return result;
@@ -2687,42 +4394,527 @@ class AlibabaProvider {
2687
4394
  }
2688
4395
  }
2689
4396
 
4397
+ /**
4398
+ * Creates and caches an OpenAI-compatible client for Z.ai GLM-4 API.
4399
+ *
4400
+ * Uses OpenAI SDK with Z.ai's API endpoint for accessing Zhipu AI's GLM-4 models.
4401
+ * The client instance is cached using singleshot memoization for performance.
4402
+ * Token rotation is not supported - throws error if array of keys is provided.
4403
+ *
4404
+ * Key features:
4405
+ * - OpenAI SDK compatibility layer
4406
+ * - Single API key support only
4407
+ * - Instance caching with singleshot
4408
+ * - Automatic cache clearing on error
4409
+ * - Context-based API key retrieval
4410
+ *
4411
+ * @returns OpenAI client configured for Z.ai API
4412
+ * @throws Error if API key array is provided (token rotation not supported)
4413
+ *
4414
+ * @example
4415
+ * ```typescript
4416
+ * import { getZAi } from "./config/zai";
4417
+ *
4418
+ * const client = getZAi();
4419
+ * const completion = await client.chat.completions.create({
4420
+ * model: "glm-4-plus",
4421
+ * messages: [{ role: "user", content: "Hello" }]
4422
+ * });
4423
+ * ```
4424
+ *
4425
+ * @example
4426
+ * ```typescript
4427
+ * // With structured output
4428
+ * const client = getZAi();
4429
+ * const completion = await client.chat.completions.create({
4430
+ * model: "glm-4-plus",
4431
+ * messages: [{ role: "user", content: "Generate trading signal" }],
4432
+ * response_format: {
4433
+ * type: "json_schema",
4434
+ * json_schema: { schema: { type: "object", properties: {...} } }
4435
+ * }
4436
+ * });
4437
+ * ```
4438
+ */
4439
+ const getZAi = singleshot(() => {
4440
+ const apiKey = lib.contextService.context.apiKey;
4441
+ if (Array.isArray(apiKey)) {
4442
+ getZAi.clear();
4443
+ throw new Error("Z.ai provider does not support token rotation");
4444
+ }
4445
+ return new OpenAI({
4446
+ apiKey,
4447
+ baseURL: "https://api.z.ai/api/paas/v4/"
4448
+ });
4449
+ });
4450
+
4451
+ /**
4452
+ * GLM-4 provider implementation for Z.ai API integration.
4453
+ *
4454
+ * Provides access to Zhipu AI's GLM-4 models through OpenAI-compatible API.
4455
+ * Supports standard completions, streaming, and structured (outline) outputs.
4456
+ * Uses the Z.ai API endpoint for model inference.
4457
+ *
4458
+ * Key features:
4459
+ * - OpenAI SDK compatibility layer
4460
+ * - Tool calling support (function calls)
4461
+ * - Streaming completion with event emission
4462
+ * - Structured JSON output with schema validation
4463
+ * - Debug logging to file when enabled
4464
+ * - Message format transformation between agent-swarm-kit and OpenAI formats
4465
+ *
4466
+ * @example
4467
+ * ```typescript
4468
+ * import { GLM4Provider } from "./client/GLM4Provider.client";
4469
+ * import { ContextService } from "./services/base/ContextService";
4470
+ *
4471
+ * const provider = new GLM4Provider(contextService, logger);
4472
+ *
4473
+ * // Standard completion
4474
+ * const result = await provider.getCompletion({
4475
+ * messages: [{ role: "user", content: "Hello" }],
4476
+ * agentName: "test-agent",
4477
+ * clientId: "client-123",
4478
+ * mode: "default"
4479
+ * });
4480
+ *
4481
+ * // Streaming completion
4482
+ * const stream = await provider.getStreamCompletion({
4483
+ * messages: [{ role: "user", content: "Analyze market" }],
4484
+ * agentName: "trader-agent",
4485
+ * clientId: "client-456",
4486
+ * mode: "stream"
4487
+ * });
4488
+ *
4489
+ * // Structured output
4490
+ * const outline = await provider.getOutlineCompletion({
4491
+ * messages: [{ role: "user", content: "Trading decision" }],
4492
+ * format: { type: "object", properties: {...} }
4493
+ * });
4494
+ * ```
4495
+ */
4496
+ class GLM4Provider {
4497
+ /**
4498
+ * Creates a new GLM4Provider instance.
4499
+ *
4500
+ * @param contextService - Context service providing execution context (model, API key)
4501
+ * @param logger - Logger service for operation tracking
4502
+ */
4503
+ constructor(contextService, logger) {
4504
+ this.contextService = contextService;
4505
+ this.logger = logger;
4506
+ }
4507
+ /**
4508
+ * Executes a standard GLM-4 completion request.
4509
+ *
4510
+ * Sends messages to the GLM-4 model and returns the completion response.
4511
+ * Supports tool calling (function calls) and automatically transforms message formats
4512
+ * between agent-swarm-kit and OpenAI formats.
4513
+ *
4514
+ * Key operations:
4515
+ * - Maps agent-swarm-kit messages to OpenAI format
4516
+ * - Handles tool calls with JSON serialization/deserialization
4517
+ * - Logs operation details for debugging
4518
+ * - Optionally writes debug output to file
4519
+ *
4520
+ * @param params - Completion parameters including messages, tools, and context
4521
+ * @param params.messages - Array of conversation messages
4522
+ * @param params.tools - Optional array of function tools available to the model
4523
+ * @param params.agentName - Name of the requesting agent
4524
+ * @param params.clientId - Client session identifier
4525
+ * @param params.mode - Completion mode (e.g., "default", "stream")
4526
+ * @returns Promise resolving to completion message with optional tool calls
4527
+ *
4528
+ * @example
4529
+ * ```typescript
4530
+ * const result = await provider.getCompletion({
4531
+ * messages: [
4532
+ * { role: "system", content: "You are a trading assistant" },
4533
+ * { role: "user", content: "Analyze BTC market" }
4534
+ * ],
4535
+ * tools: [
4536
+ * {
4537
+ * type: "function",
4538
+ * function: {
4539
+ * name: "get_market_data",
4540
+ * parameters: { type: "object", properties: {...} }
4541
+ * }
4542
+ * }
4543
+ * ],
4544
+ * agentName: "trader",
4545
+ * clientId: "session-123",
4546
+ * mode: "default"
4547
+ * });
4548
+ *
4549
+ * console.log(result.content); // Model's text response
4550
+ * console.log(result.tool_calls); // Any function calls requested
4551
+ * ```
4552
+ */
4553
+ async getCompletion(params) {
4554
+ const openai = getZAi();
4555
+ const { clientId, agentName, messages: rawMessages, mode, tools } = params;
4556
+ this.logger.log("glm4Provider getCompletion", {
4557
+ agentName,
4558
+ mode,
4559
+ clientId,
4560
+ context: this.contextService.context,
4561
+ });
4562
+ // Map raw messages to OpenAI format
4563
+ const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
4564
+ role,
4565
+ tool_call_id,
4566
+ content,
4567
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
4568
+ ...rest,
4569
+ function: {
4570
+ name: f.name,
4571
+ arguments: JSON.stringify(f.arguments),
4572
+ },
4573
+ })),
4574
+ }));
4575
+ const { choices: [{ message: { content, role, tool_calls }, },], } = await openai.chat.completions.create({
4576
+ model: this.contextService.context.model,
4577
+ messages: messages,
4578
+ tools: tools,
4579
+ });
4580
+ const result = {
4581
+ content: content,
4582
+ mode,
4583
+ agentName,
4584
+ role,
4585
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
4586
+ ...rest,
4587
+ function: {
4588
+ name: f.name,
4589
+ arguments: JSON.parse(f.arguments),
4590
+ },
4591
+ })),
4592
+ };
4593
+ // Debug logging
4594
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
4595
+ await fs.appendFile("./debug_glm4_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
4596
+ }
4597
+ return result;
4598
+ }
4599
+ /**
4600
+ * Executes a streaming GLM-4 completion request with event emission.
4601
+ *
4602
+ * Similar to getCompletion, but emits a single "llm-completion" event with the
4603
+ * full response to simulate streaming; no token-by-token chunks are sent. The
4604
+ * complete message is returned once the request finishes.
4605
+ *
4606
+ * Key operations:
4607
+ * - Maps agent-swarm-kit messages to OpenAI format
4608
+ * - Formats tools for OpenAI API
4609
+ * - Emits events to client for real-time updates
4610
+ * - Handles tool calls with JSON parsing
4611
+ * - Logs operation details for debugging
4612
+ * - Optionally writes debug output to file
4613
+ *
4614
+ * @param params - Completion parameters including messages, tools, and context
4615
+ * @param params.messages - Array of conversation messages
4616
+ * @param params.tools - Optional array of function tools available to the model
4617
+ * @param params.agentName - Name of the requesting agent
4618
+ * @param params.clientId - Client session identifier for event emission
4619
+ * @param params.mode - Completion mode (typically "stream")
4620
+ * @returns Promise resolving to accumulated completion message
4621
+ *
4622
+ * @example
4623
+ * ```typescript
4624
+ * // Listen for streaming events
4625
+ * listen("llm-completion", (event) => {
4626
+ * console.log("Received chunk:", event.content);
4627
+ * });
4628
+ *
4629
+ * const result = await provider.getStreamCompletion({
4630
+ * messages: [
4631
+ * { role: "user", content: "Generate trading signal for ETH" }
4632
+ * ],
4633
+ * tools: [...],
4634
+ * agentName: "signal-agent",
4635
+ * clientId: "client-789",
4636
+ * mode: "stream"
4637
+ * });
4638
+ *
4639
+ * console.log("Final result:", result.content);
4640
+ * ```
4641
+ */
4642
+ async getStreamCompletion(params) {
4643
+ const openai = getZAi();
4644
+ const { clientId, agentName, messages: rawMessages, mode, tools } = params;
4645
+ this.logger.log("glm4Provider getStreamCompletion", {
4646
+ agentName,
4647
+ mode,
4648
+ clientId,
4649
+ context: this.contextService.context,
4650
+ });
4651
+ // Map raw messages to OpenAI format
4652
+ const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
4653
+ role,
4654
+ tool_call_id,
4655
+ content,
4656
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
4657
+ ...rest,
4658
+ function: {
4659
+ name: f.name,
4660
+ arguments: JSON.stringify(f.arguments),
4661
+ },
4662
+ })),
4663
+ }));
4664
+ // Map tools to OpenAI format
4665
+ const formattedTools = tools?.map(({ type, function: f }) => ({
4666
+ type: type,
4667
+ function: {
4668
+ name: f.name,
4669
+ parameters: f.parameters,
4670
+ },
4671
+ }));
4672
+ const { choices: [{ message: { content, role, tool_calls }, },], } = await openai.chat.completions.create({
4673
+ model: this.contextService.context.model,
4674
+ messages: messages,
4675
+ tools: formattedTools,
4676
+ });
4677
+ // Emit events to mimic streaming behavior
4678
+ if (content) {
4679
+ await event(clientId, "llm-completion", {
4680
+ content: content.trim(),
4681
+ agentName,
4682
+ });
4683
+ }
4684
+ const result = {
4685
+ content: content || "",
4686
+ mode,
4687
+ agentName,
4688
+ role,
4689
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
4690
+ ...rest,
4691
+ function: {
4692
+ name: f.name,
4693
+ arguments: JSON.parse(f.arguments),
4694
+ },
4695
+ })),
4696
+ };
4697
+ // Debug logging
4698
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
4699
+ await fs.appendFile("./debug_glm4_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
4700
+ }
4701
+ return result;
4702
+ }
4703
+ /**
4704
+ * Executes a structured outline completion with JSON schema validation.
4705
+ *
4706
+ * Generates a structured JSON response from GLM-4 that conforms to a provided schema.
4707
+ * Uses OpenAI's response_format parameter to enforce JSON structure.
4708
+ * The response is automatically repaired using jsonrepair if needed.
4709
+ *
4710
+ * Key operations:
4711
+ * - Maps agent-swarm-kit messages to OpenAI format
4712
+ * - Configures JSON schema response format
4713
+ * - Sends request to GLM-4 model
4714
+ * - Validates and repairs JSON response
4715
+ * - Handles refusal messages
4716
+ * - Logs operation details for debugging
4717
+ * - Optionally writes debug output to file
4718
+ *
4719
+ * @param params - Outline completion parameters
4720
+ * @param params.messages - Array of conversation messages
4721
+ * @param params.format - JSON schema format definition or response_format object
4722
+ * @returns Promise resolving to structured JSON message
4723
+ * @throws Error if model refuses to generate response
4724
+ *
4725
+ * @example
4726
+ * ```typescript
4727
+ * const signal = await provider.getOutlineCompletion({
4728
+ * messages: [
4729
+ * { role: "system", content: "Generate trading signals" },
4730
+ * { role: "user", content: "Analyze BTC/USDT" }
4731
+ * ],
4732
+ * format: {
4733
+ * type: "object",
4734
+ * properties: {
4735
+ * position: { type: "string", enum: ["long", "short", "wait"] },
4736
+ * price_open: { type: "number" },
4737
+ * price_stop_loss: { type: "number" },
4738
+ * price_take_profit: { type: "number" }
4739
+ * },
4740
+ * required: ["position", "price_open", "price_stop_loss", "price_take_profit"]
4741
+ * }
4742
+ * });
4743
+ *
4744
+ * const data = JSON.parse(signal.content);
4745
+ * console.log(`Position: ${data.position}`);
4746
+ * console.log(`Entry: ${data.price_open}`);
4747
+ * ```
4748
+ */
4749
+ async getOutlineCompletion(params) {
4750
+ const { messages: rawMessages, format } = params;
4751
+ const openai = getZAi();
4752
+ this.logger.log("glm4Provider getOutlineCompletion", {
4753
+ context: this.contextService.context,
4754
+ });
4755
+ // Map raw messages to OpenAI format
4756
+ const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
4757
+ role,
4758
+ tool_call_id,
4759
+ content,
4760
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
4761
+ ...rest,
4762
+ function: {
4763
+ name: f.name,
4764
+ arguments: JSON.stringify(f.arguments),
4765
+ },
4766
+ })),
4767
+ }));
4768
+ // Extract response format
4769
+ const response_format = "json_schema" in format
4770
+ ? format
4771
+ : { type: "json_schema", json_schema: { schema: format } };
4772
+ const completion = await openai.chat.completions.create({
4773
+ messages: messages,
4774
+ model: this.contextService.context.model,
4775
+ response_format: response_format,
4776
+ });
4777
+ const choice = completion.choices[0];
4778
+ if (choice.message.refusal) {
4779
+ throw new Error(choice.message.refusal);
4780
+ }
4781
+ const json = jsonrepair(choice.message.content || "");
4782
+ const result = {
4783
+ role: "assistant",
4784
+ content: json,
4785
+ };
4786
+ // Debug logging
4787
+ if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
4788
+ await fs.appendFile("./debug_glm4_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
4789
+ }
4790
+ return result;
4791
+ }
4792
+ }
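getOutlineCompletion accepts either a full response_format object or a bare JSON schema and normalizes both to the former, as the ternary in the method shows. The same branch in isolation:

```typescript
// Normalizes either input shape to OpenAI's response_format, as above.
type JsonSchemaFormat = { type: "json_schema"; json_schema: { schema: object } };

function toResponseFormat(format: object): JsonSchemaFormat {
  return "json_schema" in format
    ? (format as JsonSchemaFormat)
    : { type: "json_schema", json_schema: { schema: format } };
}

// Bare schema in, full response_format out:
toResponseFormat({ type: "object", properties: { position: { type: "string" } } });
// => { type: "json_schema", json_schema: { schema: {...} } }
```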
4793
+
4794
+ /**
4795
+ * Main library entry point for the Ollama package.
4796
+ *
4797
+ * Initializes the dependency injection container, registers all AI providers,
4798
+ * and exports the engine object containing all services.
4799
+ *
4800
+ * The engine provides access to:
4801
+ * - Common services (logger)
4802
+ * - Base services (context)
4803
+ * - Private services (runner and outline private services)
4804
+ * - Public services (runner and outline public services)
4805
+ *
4806
+ * Registered AI providers:
4807
+ * - Ollama (local and cloud)
4808
+ * - OpenAI (GPT-5)
4809
+ * - Claude (Anthropic)
4810
+ * - Deepseek
4811
+ * - Mistral
4812
+ * - Perplexity
4813
+ * - Cohere
4814
+ * - Grok (xAI)
4815
+ * - Alibaba
4816
+ * - Hugging Face
+ * - GLM-4 (Z.ai)
4817
+ *
4818
+ * @example
4819
+ * ```typescript
4820
+ * import { engine } from "./lib";
4821
+ *
4822
+ * // Access logger
4823
+ * engine.loggerService.info("Application started");
4824
+ *
4825
+ * // Use public service for AI completion
4826
+ * const result = await engine.runnerPublicService.getCompletion(
4827
+ * { messages: [...] },
4828
+ * { inference: "claude", model: "claude-3-5-sonnet", apiKey: "..." }
4829
+ * );
4830
+ * ```
4831
+ */
4832
+ /**
4833
+ * Common service instances.
4834
+ */
  const commonServices = {
      loggerService: inject(TYPES.loggerService),
  };
+ /**
+ * Base service instances.
+ */
  const baseServices = {
      contextService: inject(TYPES.contextService),
  };
+ /**
+ * Private service instances.
+ */
  const privateServices = {
      runnerPrivateService: inject(TYPES.runnerPrivateService),
      outlinePrivateService: inject(TYPES.outlinePrivateService),
  };
+ /**
+ * Public service instances.
+ */
  const publicServices = {
      runnerPublicService: inject(TYPES.runnerPublicService),
      outlinePublicService: inject(TYPES.outlinePublicService),
  };
+ /**
+ * Main engine object containing all services.
+ * Provides unified access to the entire service layer.
+ */
  const engine = {
      ...commonServices,
      ...baseServices,
      ...privateServices,
      ...publicServices,
  };
+ // Initialize DI container
  init();
+ /**
+ * Register all AI provider implementations.
+ */
  {
      engine.runnerPrivateService.registerRunner(InferenceName.OllamaInference, OllamaProvider);
-     engine.runnerPrivateService.registerRunner(InferenceName.GrokInference, GrokProvider$1);
+     engine.runnerPrivateService.registerRunner(InferenceName.GrokInference, GrokProvider);
      engine.runnerPrivateService.registerRunner(InferenceName.HfInference, HfProvider);
-     engine.runnerPrivateService.registerRunner(InferenceName.ClaudeInference, GrokProvider);
+     engine.runnerPrivateService.registerRunner(InferenceName.ClaudeInference, ClaudeProvider);
      engine.runnerPrivateService.registerRunner(InferenceName.GPT5Inference, GPT5Provider);
      engine.runnerPrivateService.registerRunner(InferenceName.DeepseekInference, DeepseekProvider);
      engine.runnerPrivateService.registerRunner(InferenceName.MistralInference, MistralProvider);
      engine.runnerPrivateService.registerRunner(InferenceName.PerplexityInference, PerplexityProvider);
      engine.runnerPrivateService.registerRunner(InferenceName.CohereInference, CohereProvider);
      engine.runnerPrivateService.registerRunner(InferenceName.AlibabaInference, AlibabaProvider);
+     engine.runnerPrivateService.registerRunner(InferenceName.GLM4Inference, GLM4Provider);
  }
+ // Make engine globally accessible for debugging
  Object.assign(globalThis, { engine });
  var lib = engine;
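
Worth noting in the hunk above: 0.0.1 had registered GrokProvider under InferenceName.ClaudeInference, and 0.0.2 corrects that mapping to ClaudeProvider while also adding the new GLM4Provider. The same registerRunner call is the extension point for custom backends; a hedged sketch follows, where MyProvider's method shape is inferred from the GLM4 provider above and "my_inference" is a hypothetical name, not a documented API:

```typescript
// Sketch only: registering a custom backend the same way the built-ins
// are registered above. The getCompletion shape is inferred from the
// providers in this file; the real interface is not spelled out here.
class MyProvider {
    async getCompletion(params: { messages: Array<{ role: string; content: string }> }) {
        // Call your own backend here and return an assistant message.
        return { role: "assistant", content: "stub response" };
    }
}

// "my_inference" is hypothetical; real registrations use InferenceName members.
engine.runnerPrivateService.registerRunner("my_inference", MyProvider);
```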
 
+ /**
+ * Outline runner completion handler registration.
+ *
+ * Registers a structured outline completion handler with agent-swarm-kit.
+ * This completion type enforces JSON schema validation on AI responses,
+ * ensuring they conform to a predefined structure. Essential for extracting
+ * structured data from AI responses (e.g., trading signals with specific fields).
+ *
+ * Key features:
+ * - JSON schema validation enabled (json: true)
+ * - Structured output enforcement
+ * - Type-safe response parsing
+ * - Automatic validation with retry on failure
+ * - Delegates to RunnerPrivateService
+ *
+ * @example
+ * ```typescript
+ * import { completion } from "agent-swarm-kit";
+ * import { CompletionName } from "./enum/CompletionName";
+ *
+ * const result = await completion(CompletionName.RunnerOutlineCompletion, {
+ *   messages: [
+ *     { role: "user", content: "Decide trading position" }
+ *   ]
+ * });
+ * // Returns structured data validated against the schema
+ * ```
+ */
  addCompletion({
      completionName: CompletionName.RunnerOutlineCompletion,
      getCompletion: async (params) => {
@@ -2731,6 +4923,33 @@ addCompletion({
      json: true,
  });
 
+ /**
+ * Streaming runner completion handler registration.
+ *
+ * Registers a streaming AI completion handler with agent-swarm-kit.
+ * This completion type enables real-time token streaming from AI providers
+ * that support it (OpenAI, Claude, etc.), with automatic accumulation into
+ * a complete response.
+ *
+ * Key features:
+ * - Streaming completion mode for real-time responses
+ * - Automatic response accumulation
+ * - Delegates to RunnerPrivateService
+ * - Supports streaming-capable AI providers
+ *
+ * @example
+ * ```typescript
+ * import { completion } from "agent-swarm-kit";
+ * import { CompletionName } from "./enum/CompletionName";
+ *
+ * const result = await completion(CompletionName.RunnerStreamCompletion, {
+ *   messages: [
+ *     { role: "user", content: "Generate trading analysis" }
+ *   ]
+ * });
+ * // The full response is accumulated from the stream
+ * ```
+ */
  addCompletion({
      completionName: CompletionName.RunnerStreamCompletion,
      getCompletion: async (params) => {
@@ -2738,6 +4957,32 @@ addCompletion({
      },
  });
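
The "automatic accumulation" the streaming handler describes can be pictured as a simple fold over chunks. A minimal sketch under our own assumptions; the real accumulation lives inside RunnerPrivateService, whose body is elided from this diff:

```typescript
// Illustrative only: accumulate streamed chunks into one assistant message.
// The chunk shape and function name here are assumptions, not package API.
async function accumulate(stream: AsyncIterable<{ content: string }>) {
    let content = "";
    for await (const chunk of stream) {
        content += chunk.content; // append each streamed token/fragment
    }
    return { role: "assistant" as const, content };
}
```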
 
+ /**
+ * Standard runner completion handler registration.
+ *
+ * Registers a non-streaming AI completion handler with agent-swarm-kit.
+ * This completion type is used for standard request-response AI interactions
+ * where the full response is returned at once.
+ *
+ * Key features:
+ * - Standard (non-streaming) completion mode
+ * - Delegates to RunnerPrivateService
+ * - Supports all registered AI providers
+ * - Context-aware provider selection
+ *
+ * @example
+ * ```typescript
+ * import { completion } from "agent-swarm-kit";
+ * import { CompletionName } from "./enum/CompletionName";
+ *
+ * const result = await completion(CompletionName.RunnerCompletion, {
+ *   messages: [
+ *     { role: "system", content: "You are a trading assistant" },
+ *     { role: "user", content: "Analyze BTC/USDT" }
+ *   ]
+ * });
+ * ```
+ */
  addCompletion({
      completionName: CompletionName.RunnerCompletion,
      getCompletion: async (params) => {
@@ -2745,6 +4990,35 @@ addCompletion({
      },
  });
 
+ /**
+ * Zod schema for trading signal structured output.
+ *
+ * Defines the JSON schema used for LLM-generated trading signals with
+ * comprehensive field descriptions and validation rules. Used with outline
+ * completion to enforce structured output from language models.
+ *
+ * Fields:
+ * - position: Trading direction (long/short/wait)
+ * - price_open: Entry price in USD
+ * - price_stop_loss: Stop-loss price in USD
+ * - price_take_profit: Take-profit price in USD
+ * - minute_estimated_time: Estimated hold duration in minutes
+ * - risk_note: Detailed risk assessment with specific metrics
+ *
+ * @example
+ * ```typescript
+ * import { SignalSchema } from './schema/Signal.schema';
+ *
+ * const signal = SignalSchema.parse({
+ *   position: 'long',
+ *   price_open: 50000,
+ *   price_stop_loss: 49000,
+ *   price_take_profit: 52000,
+ *   minute_estimated_time: 120,
+ *   risk_note: 'RSI oversold at 32%, volume spike +45%'
+ * });
+ * ```
+ */
  const SignalSchema = z.object({
      position: z
          .enum(["long", "short", "wait"])
@@ -2766,6 +5040,46 @@ const SignalSchema = z.object({
          .describe(str.newline("Description of current market situation risks:", "", "Analyze and specify applicable risks:", "1. Whale manipulations (volume spikes, long shadows, pin bars, candle engulfing, false breakouts)", "2. Order book (order book walls, spoofing, bid/ask imbalance, low liquidity)", "3. P&L history (recurring mistakes on similar patterns)", "4. Time factors (trading session, low liquidity, upcoming events)", "5. Correlations (overall market trend, conflicting trends across timeframes)", "6. Technical risks (indicator divergences, weak volumes, critical levels)", "7. Gaps and anomalies (price gaps, unfilled gaps, movements without volume)", "", "Provide SPECIFIC numbers, percentages and probabilities.")),
  });
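
Since the build imports zodResponseFormat from 'openai/helpers/zod' (see the imports at the top of this file), a Zod schema like this can be turned into the json_schema envelope that the outline providers pass through unchanged. A sketch; whether the package wires it exactly this way is not visible in this hunk, and SignalSketch below is a trimmed stand-in for SignalSchema:

```typescript
import { z } from "zod";
import { zodResponseFormat } from "openai/helpers/zod";

// Trimmed stand-in for SignalSchema, just to keep the sketch self-contained.
const SignalSketch = z.object({
    position: z.enum(["long", "short", "wait"]),
    price_open: z.number(),
});

// zodResponseFormat produces { type: "json_schema", json_schema: { name, schema, ... } },
// i.e. an already-wrapped envelope that the `"json_schema" in format` branch accepts as-is.
const response_format = zodResponseFormat(SignalSketch, "signal");
```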
 
+ /**
+ * Trading signal outline schema registration.
+ *
+ * Registers a structured outline for trading signal generation with comprehensive
+ * validation rules. This outline enforces a strict schema for AI-generated trading
+ * signals, ensuring all required fields are present and correctly formatted.
+ *
+ * Schema fields:
+ * - position: Trading direction ("long", "short", or "wait")
+ * - price_open: Entry price for the position
+ * - price_stop_loss: Stop-loss price level
+ * - price_take_profit: Take-profit price level
+ * - minute_estimated_time: Estimated time to reach TP (in minutes)
+ * - risk_note: Risk assessment and reasoning (markdown format)
+ *
+ * Validation rules (sketched in code after this registration):
+ * 1. All required fields must be present
+ * 2. Prices must be positive numbers
+ * 3. For LONG: SL < entry < TP
+ * 4. For SHORT: TP < entry < SL
+ * 5. Estimated time must be <= 360 minutes (6 hours)
+ * 6. A "wait" position skips the price validations
+ *
+ * @example
+ * ```typescript
+ * import { json } from "agent-swarm-kit";
+ * import { OutlineName } from "./enum/OutlineName";
+ *
+ * const { data } = await json(OutlineName.SignalOutline, [
+ *   { role: "user", content: "Analyze BTC/USDT and decide position" }
+ * ]);
+ *
+ * if (data.position !== "wait") {
+ *   console.log(`Position: ${data.position}`);
+ *   console.log(`Entry: ${data.price_open}`);
+ *   console.log(`SL: ${data.price_stop_loss}`);
+ *   console.log(`TP: ${data.price_take_profit}`);
+ * }
+ * ```
+ */
  addOutline({
      outlineName: OutlineName.SignalOutline,
      completion: CompletionName.RunnerOutlineCompletion,
@@ -2862,44 +5176,281 @@ addOutline({
      ],
  });
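
The ordering rules above are easy to get wrong in prose, so here is how they read as a predicate. This is a minimal sketch of the checks exactly as documented; the actual validator callbacks registered through addOutline are elided from this diff, and the Signal type here simply mirrors SignalSchema's fields:

```typescript
type Signal = {
    position: "long" | "short" | "wait";
    price_open: number;
    price_stop_loss: number;
    price_take_profit: number;
    minute_estimated_time: number;
};

// Sketch of the documented rules; returns an error string or null if valid.
function checkSignal(s: Signal): string | null {
    if (s.position === "wait") return null; // rule 6: wait skips price checks
    const prices = [s.price_open, s.price_stop_loss, s.price_take_profit];
    if (prices.some((p) => !(p > 0))) return "prices must be positive"; // rule 2
    if (s.position === "long" && !(s.price_stop_loss < s.price_open && s.price_open < s.price_take_profit))
        return "long requires SL < entry < TP"; // rule 3
    if (s.position === "short" && !(s.price_take_profit < s.price_open && s.price_open < s.price_stop_loss))
        return "short requires TP < entry < SL"; // rule 4
    if (s.minute_estimated_time > 360) return "estimated time exceeds 360 minutes"; // rule 5
    return null;
}
```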
 
+ /**
+ * Bootstrap module for agent-swarm-kit validation.
+ *
+ * Validates that all completion and outline names are properly registered
+ * with agent-swarm-kit before the application starts. This ensures that
+ * all referenced completions and outlines exist and are correctly configured.
+ *
+ * Validation checks:
+ * - All CompletionName enum values have corresponding registered handlers
+ * - All OutlineName enum values have corresponding registered schemas
+ * - No duplicate registrations exist
+ *
+ * This file is imported by index.ts to run validation at startup.
+ *
+ * @throws Error if validation fails (missing or duplicate registrations)
+ */
  validate({
      CompletionName: CompletionName$1,
      OutlineName: OutlineName$1,
  });
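
Because validate throws when a referenced completion or outline is missing or doubly registered, an application embedding this bootstrap can fail fast before any completion is attempted. A sketch of such a guard; the try/catch wrapper is our suggestion, not part of the package:

```typescript
// Suggested startup guard (not package code): surface registration
// mismatches immediately instead of at the first completion call.
try {
    validate({ CompletionName: CompletionName$1, OutlineName: OutlineName$1 });
} catch (err) {
    console.error("agent-swarm-kit registration mismatch:", err);
    process.exit(1);
}
```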
 
+ /**
+ * Generate structured trading signal from Ollama models.
+ *
+ * Supports token rotation by passing multiple API keys. Automatically enforces
+ * the signal JSON schema defined in Signal.schema.ts.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Ollama model name (e.g., "llama3.3:70b")
+ * @param apiKey - Single API key or array of keys for rotation
+ * @returns Promise resolving to structured trading signal
+ *
+ * @example
+ * ```typescript
+ * import { ollama } from '@backtest-kit/ollama';
+ *
+ * const signal = await ollama(messages, 'llama3.3:70b', ['key1', 'key2']);
+ * console.log(signal.position); // "long" | "short" | "wait"
+ * ```
+ */
  const ollama = async (messages, model, apiKey) => {
      return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.OllamaInference, model, apiKey);
  };
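
Only this Ollama entry point accepts an array of keys; every wrapper below throws if given one. The rotation itself happens inside the context/provider layer, which this hunk elides; a minimal round-robin sketch, with all names ours:

```typescript
// Illustrative round-robin key rotation; the package's actual rotation
// logic lives in the provider layer and is not shown in this diff.
function makeKeyRotator(keys: string[]) {
    let next = 0;
    return () => {
        const key = keys[next];
        next = (next + 1) % keys.length; // advance cursor, wrap around
        return key;
    };
}

const nextKey = makeKeyRotator(["key1", "key2"]);
nextKey(); // "key1"
nextKey(); // "key2"
nextKey(); // "key1" again
```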
+ /**
+ * Generate structured trading signal from Grok models.
+ *
+ * Uses xAI Grok models through direct API access. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Grok model name (e.g., "grok-beta")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { grok } from '@backtest-kit/ollama';
+ *
+ * const signal = await grok(messages, 'grok-beta', process.env.GROK_API_KEY);
+ * ```
+ */
  const grok = async (messages, model, apiKey) => {
      return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.GrokInference, model, apiKey);
  };
+ /**
+ * Generate structured trading signal from Hugging Face models.
+ *
+ * Uses HuggingFace Router API for model access. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - HuggingFace model name
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ *
+ * @example
+ * ```typescript
+ * import { hf } from '@backtest-kit/ollama';
+ *
+ * const signal = await hf(messages, 'meta-llama/Llama-3-70b', process.env.HF_API_KEY);
+ * ```
+ */
  const hf = async (messages, model, apiKey) => {
      return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.HfInference, model, apiKey);
  };
+ /**
+ * Generate structured trading signal from Claude models.
+ *
+ * Uses Anthropic Claude through OpenAI-compatible API. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Claude model name (e.g., "claude-3-5-sonnet-20241022")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { claude } from '@backtest-kit/ollama';
+ *
+ * const signal = await claude(messages, 'claude-3-5-sonnet-20241022', process.env.ANTHROPIC_API_KEY);
+ * ```
+ */
  const claude = async (messages, model, apiKey) => {
      return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.ClaudeInference, model, apiKey);
  };
+ /**
+ * Generate structured trading signal from OpenAI GPT models.
+ *
+ * Uses official OpenAI SDK with JSON schema enforcement. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - OpenAI model name (e.g., "gpt-4o", "gpt-4-turbo")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { gpt5 } from '@backtest-kit/ollama';
+ *
+ * const signal = await gpt5(messages, 'gpt-4o', process.env.OPENAI_API_KEY);
+ * ```
+ */
  const gpt5 = async (messages, model, apiKey) => {
      return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.GPT5Inference, model, apiKey);
  };
+ /**
+ * Generate structured trading signal from DeepSeek models.
+ *
+ * Uses DeepSeek AI through OpenAI-compatible API. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - DeepSeek model name (e.g., "deepseek-chat")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { deepseek } from '@backtest-kit/ollama';
+ *
+ * const signal = await deepseek(messages, 'deepseek-chat', process.env.DEEPSEEK_API_KEY);
+ * ```
+ */
  const deepseek = async (messages, model, apiKey) => {
      return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.DeepseekInference, model, apiKey);
  };
+ /**
+ * Generate structured trading signal from Mistral AI models.
+ *
+ * Uses Mistral AI through OpenAI-compatible API. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Mistral model name (e.g., "mistral-large-latest")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { mistral } from '@backtest-kit/ollama';
+ *
+ * const signal = await mistral(messages, 'mistral-large-latest', process.env.MISTRAL_API_KEY);
+ * ```
+ */
  const mistral = async (messages, model, apiKey) => {
      return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.MistralInference, model, apiKey);
  };
+ /**
+ * Generate structured trading signal from Perplexity AI models.
+ *
+ * Uses Perplexity AI through OpenAI-compatible API. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Perplexity model name (e.g., "llama-3.1-sonar-huge-128k-online")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { perplexity } from '@backtest-kit/ollama';
+ *
+ * const signal = await perplexity(messages, 'llama-3.1-sonar-huge-128k-online', process.env.PERPLEXITY_API_KEY);
+ * ```
+ */
  const perplexity = async (messages, model, apiKey) => {
      return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.PerplexityInference, model, apiKey);
  };
+ /**
+ * Generate structured trading signal from Cohere models.
+ *
+ * Uses Cohere AI through OpenAI-compatible API. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Cohere model name (e.g., "command-r-plus")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { cohere } from '@backtest-kit/ollama';
+ *
+ * const signal = await cohere(messages, 'command-r-plus', process.env.COHERE_API_KEY);
+ * ```
+ */
  const cohere = async (messages, model, apiKey) => {
      return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.CohereInference, model, apiKey);
  };
+ /**
+ * Generate structured trading signal from Alibaba Cloud Qwen models.
+ *
+ * Uses Alibaba DashScope API through direct HTTP requests. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Qwen model name (e.g., "qwen-max")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { alibaba } from '@backtest-kit/ollama';
+ *
+ * const signal = await alibaba(messages, 'qwen-max', process.env.ALIBABA_API_KEY);
+ * ```
+ */
  const alibaba = async (messages, model, apiKey) => {
      return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.AlibabaInference, model, apiKey);
  };
+ /**
+ * Generate structured trading signal from Zhipu AI GLM-4 models.
+ *
+ * Uses Zhipu AI's GLM-4 through the OpenAI-compatible Z.ai API. Does NOT support token rotation.
+ * GLM-4 is a powerful Chinese language model with strong reasoning capabilities.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - GLM-4 model name (e.g., "glm-4-plus", "glm-4-air")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { glm4 } from '@backtest-kit/ollama';
+ *
+ * const signal = await glm4(messages, 'glm-4-plus', process.env.ZAI_API_KEY);
+ * console.log(`Position: ${signal.position}`);
+ * console.log(`Entry: ${signal.price_open}`);
+ * ```
+ */
+ const glm4 = async (messages, model, apiKey) => {
+     return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.GLM4Inference, model, apiKey);
+ };
 
+ /**
+ * Sets a custom logger implementation for the framework.
+ *
+ * All log messages from internal services will be forwarded to the provided logger
+ * with automatic context injection.
+ *
+ * @param logger - Custom logger implementing the ILogger interface
+ *
+ * @example
+ * ```typescript
+ * setLogger({
+ *   log: (topic, ...args) => console.log(topic, args),
+ *   debug: (topic, ...args) => console.debug(topic, args),
+ *   info: (topic, ...args) => console.info(topic, args),
+ * });
+ * ```
+ */
  const setLogger = (logger) => {
      lib.loggerService.setLogger(logger);
  };
 
- export { alibaba, claude, cohere, deepseek, gpt5, grok, hf, engine as lib, mistral, ollama, perplexity, setLogger };
+ export { alibaba, claude, cohere, deepseek, glm4, gpt5, grok, hf, engine as lib, mistral, ollama, perplexity, setLogger };