@backtest-kit/ollama 0.0.1 → 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/types.d.ts CHANGED
@@ -1,6 +1,25 @@
  import { IOutlineMessage, ISwarmCompletionArgs, ISwarmMessage, IOutlineCompletionArgs } from 'agent-swarm-kit';
  import * as di_scoped from 'di-scoped';

+ /**
+ * Generate structured trading signal from Ollama models.
+ *
+ * Supports token rotation by passing multiple API keys. Automatically enforces
+ * the signal JSON schema defined in Signal.schema.ts.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Ollama model name (e.g., "llama3.3:70b")
+ * @param apiKey - Single API key or array of keys for rotation
+ * @returns Promise resolving to structured trading signal
+ *
+ * @example
+ * ```typescript
+ * import { ollama } from '@backtest-kit/ollama';
+ *
+ * const signal = await ollama(messages, 'llama3.3:70b', ['key1', 'key2']);
+ * console.log(signal.position); // "long" | "short" | "wait"
+ * ```
+ */
  declare const ollama: (messages: IOutlineMessage[], model: string, apiKey?: string | string[]) => Promise<{
  id: string;
  position: "long" | "short";
@@ -10,6 +29,24 @@ declare const ollama: (messages: IOutlineMessage[], model: string, apiKey?: stri
  note: string;
  priceOpen: number;
  }>;
+ /**
+ * Generate structured trading signal from Grok models.
+ *
+ * Uses xAI Grok models through direct API access. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Grok model name (e.g., "grok-beta")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { grok } from '@backtest-kit/ollama';
+ *
+ * const signal = await grok(messages, 'grok-beta', process.env.GROK_API_KEY);
+ * ```
+ */
  declare const grok: (messages: IOutlineMessage[], model: string, apiKey?: string | string[]) => Promise<{
  id: string;
  position: "long" | "short";
@@ -19,6 +56,23 @@ declare const grok: (messages: IOutlineMessage[], model: string, apiKey?: string
  note: string;
  priceOpen: number;
  }>;
+ /**
+ * Generate structured trading signal from Hugging Face models.
+ *
+ * Uses HuggingFace Router API for model access. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - HuggingFace model name
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ *
+ * @example
+ * ```typescript
+ * import { hf } from '@backtest-kit/ollama';
+ *
+ * const signal = await hf(messages, 'meta-llama/Llama-3-70b', process.env.HF_API_KEY);
+ * ```
+ */
  declare const hf: (messages: IOutlineMessage[], model: string, apiKey?: string | string[]) => Promise<{
  id: string;
  position: "long" | "short";
@@ -28,6 +82,24 @@ declare const hf: (messages: IOutlineMessage[], model: string, apiKey?: string |
  note: string;
  priceOpen: number;
  }>;
+ /**
+ * Generate structured trading signal from Claude models.
+ *
+ * Uses Anthropic Claude through OpenAI-compatible API. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Claude model name (e.g., "claude-3-5-sonnet-20241022")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { claude } from '@backtest-kit/ollama';
+ *
+ * const signal = await claude(messages, 'claude-3-5-sonnet-20241022', process.env.ANTHROPIC_API_KEY);
+ * ```
+ */
  declare const claude: (messages: IOutlineMessage[], model: string, apiKey?: string | string[]) => Promise<{
  id: string;
  position: "long" | "short";
@@ -37,6 +109,24 @@ declare const claude: (messages: IOutlineMessage[], model: string, apiKey?: stri
  note: string;
  priceOpen: number;
  }>;
+ /**
+ * Generate structured trading signal from OpenAI GPT models.
+ *
+ * Uses official OpenAI SDK with JSON schema enforcement. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - OpenAI model name (e.g., "gpt-4o", "gpt-4-turbo")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { gpt5 } from '@backtest-kit/ollama';
+ *
+ * const signal = await gpt5(messages, 'gpt-4o', process.env.OPENAI_API_KEY);
+ * ```
+ */
  declare const gpt5: (messages: IOutlineMessage[], model: string, apiKey?: string | string[]) => Promise<{
  id: string;
  position: "long" | "short";
@@ -46,6 +136,24 @@ declare const gpt5: (messages: IOutlineMessage[], model: string, apiKey?: string
  note: string;
  priceOpen: number;
  }>;
+ /**
+ * Generate structured trading signal from DeepSeek models.
+ *
+ * Uses DeepSeek AI through OpenAI-compatible API. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - DeepSeek model name (e.g., "deepseek-chat")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { deepseek } from '@backtest-kit/ollama';
+ *
+ * const signal = await deepseek(messages, 'deepseek-chat', process.env.DEEPSEEK_API_KEY);
+ * ```
+ */
  declare const deepseek: (messages: IOutlineMessage[], model: string, apiKey?: string | string[]) => Promise<{
  id: string;
  position: "long" | "short";
@@ -55,6 +163,24 @@ declare const deepseek: (messages: IOutlineMessage[], model: string, apiKey?: st
  note: string;
  priceOpen: number;
  }>;
+ /**
+ * Generate structured trading signal from Mistral AI models.
+ *
+ * Uses Mistral AI through OpenAI-compatible API. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Mistral model name (e.g., "mistral-large-latest")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { mistral } from '@backtest-kit/ollama';
+ *
+ * const signal = await mistral(messages, 'mistral-large-latest', process.env.MISTRAL_API_KEY);
+ * ```
+ */
  declare const mistral: (messages: IOutlineMessage[], model: string, apiKey?: string | string[]) => Promise<{
  id: string;
  position: "long" | "short";
@@ -64,6 +190,24 @@ declare const mistral: (messages: IOutlineMessage[], model: string, apiKey?: str
  note: string;
  priceOpen: number;
  }>;
+ /**
+ * Generate structured trading signal from Perplexity AI models.
+ *
+ * Uses Perplexity AI through OpenAI-compatible API. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Perplexity model name (e.g., "llama-3.1-sonar-huge-128k-online")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { perplexity } from '@backtest-kit/ollama';
+ *
+ * const signal = await perplexity(messages, 'llama-3.1-sonar-huge-128k-online', process.env.PERPLEXITY_API_KEY);
+ * ```
+ */
  declare const perplexity: (messages: IOutlineMessage[], model: string, apiKey?: string | string[]) => Promise<{
  id: string;
  position: "long" | "short";
@@ -73,6 +217,24 @@ declare const perplexity: (messages: IOutlineMessage[], model: string, apiKey?:
  note: string;
  priceOpen: number;
  }>;
+ /**
+ * Generate structured trading signal from Cohere models.
+ *
+ * Uses Cohere AI through OpenAI-compatible API. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Cohere model name (e.g., "command-r-plus")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { cohere } from '@backtest-kit/ollama';
+ *
+ * const signal = await cohere(messages, 'command-r-plus', process.env.COHERE_API_KEY);
+ * ```
+ */
  declare const cohere: (messages: IOutlineMessage[], model: string, apiKey?: string | string[]) => Promise<{
  id: string;
  position: "long" | "short";
@@ -82,6 +244,24 @@ declare const cohere: (messages: IOutlineMessage[], model: string, apiKey?: stri
  note: string;
  priceOpen: number;
  }>;
+ /**
+ * Generate structured trading signal from Alibaba Cloud Qwen models.
+ *
+ * Uses Alibaba DashScope API through direct HTTP requests. Does NOT support token rotation.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - Qwen model name (e.g., "qwen-max")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { alibaba } from '@backtest-kit/ollama';
+ *
+ * const signal = await alibaba(messages, 'qwen-max', process.env.ALIBABA_API_KEY);
+ * ```
+ */
  declare const alibaba: (messages: IOutlineMessage[], model: string, apiKey?: string | string[]) => Promise<{
  id: string;
  position: "long" | "short";
@@ -91,34 +271,197 @@ declare const alibaba: (messages: IOutlineMessage[], model: string, apiKey?: str
  note: string;
  priceOpen: number;
  }>;
+ /**
+ * Generate structured trading signal from Zhipu AI GLM-4 models.
+ *
+ * Uses Zhipu AI's GLM-4 through OpenAI-compatible Z.ai API. Does NOT support token rotation.
+ * GLM-4 is a powerful Chinese language model with strong reasoning capabilities.
+ *
+ * @param messages - Array of outline messages (user/assistant/system)
+ * @param model - GLM-4 model name (e.g., "glm-4-plus", "glm-4-air")
+ * @param apiKey - Single API key (token rotation not supported)
+ * @returns Promise resolving to structured trading signal
+ * @throws Error if apiKey is an array (token rotation not supported)
+ *
+ * @example
+ * ```typescript
+ * import { glm4 } from '@backtest-kit/ollama';
+ *
+ * const signal = await glm4(messages, 'glm-4-plus', process.env.ZAI_API_KEY);
+ * console.log(`Position: ${signal.position}`);
+ * console.log(`Entry: ${signal.priceOpen}`);
+ * ```
+ */
+ declare const glm4: (messages: IOutlineMessage[], model: string, apiKey?: string | string[]) => Promise<{
+ id: string;
+ position: "long" | "short";
+ minuteEstimatedTime: number;
+ priceStopLoss: number;
+ priceTakeProfit: number;
+ note: string;
+ priceOpen: number;
+ }>;

+ /**
+ * Logger interface for application logging.
+ *
+ * Provides four logging levels for diagnostic output during LLM operations.
+ * Can be implemented with console.log, winston, pino, or any other logging backend.
+ *
+ * @example
+ * ```typescript
+ * import { setLogger } from '@backtest-kit/ollama';
+ *
+ * setLogger({
+ * log: console.log,
+ * debug: console.debug,
+ * info: console.info,
+ * warn: console.warn,
+ * });
+ * ```
+ */
  interface ILogger {
+ /**
+ * General logging for standard messages.
+ *
+ * @param topic - Log topic/category for filtering
+ * @param args - Additional arguments to log
+ */
  log(topic: string, ...args: any[]): void;
+ /**
+ * Debug-level logging for detailed diagnostic information.
+ *
+ * @param topic - Log topic/category for filtering
+ * @param args - Additional arguments to log
+ */
  debug(topic: string, ...args: any[]): void;
+ /**
+ * Info-level logging for informational messages.
+ *
+ * @param topic - Log topic/category for filtering
+ * @param args - Additional arguments to log
+ */
  info(topic: string, ...args: any[]): void;
+ /**
+ * Warning-level logging for non-critical issues.
+ *
+ * @param topic - Log topic/category for filtering
+ * @param args - Additional arguments to log
+ */
  warn(topic: string, ...args: any[]): void;
  }

+ /**
+ * Sets custom logger implementation for the framework.
+ *
+ * All log messages from internal services will be forwarded to the provided logger
+ * with automatic context injection.
+ *
+ * @param logger - Custom logger implementing ILogger interface
+ *
+ * @example
+ * ```typescript
+ * setLogger({
+ * log: (topic, ...args) => console.log(topic, args),
+ * debug: (topic, ...args) => console.debug(topic, args),
+ * info: (topic, ...args) => console.info(topic, args),
+ * });
+ * ```
+ */
  declare const setLogger: (logger: ILogger) => void;

+ /**
+ * Enumeration of supported LLM inference providers.
+ *
+ * Defines unique identifiers for each LLM provider supported by the library.
+ * Used internally for dependency injection and provider resolution.
+ *
+ * @example
+ * ```typescript
+ * import { InferenceName } from '@backtest-kit/ollama';
+ *
+ * const providerName = InferenceName.GPT5Inference;
+ * ```
+ */
  declare enum InferenceName {
+ /** Ollama provider for local/cloud LLM inference */
  OllamaInference = "ollama_inference",
+ /** Grok provider by X.AI (api.x.ai) */
  GrokInference = "grok_inference",
+ /** Hugging Face Inference API provider */
  HfInference = "hf_inference",
+ /** Claude provider by Anthropic (api.anthropic.com) */
  ClaudeInference = "claude_inference",
+ /** OpenAI GPT provider (api.openai.com) */
  GPT5Inference = "gpt5_inference",
+ /** Z.ai GPT Provider (api.z.ai/api/paas/v4) */
+ GLM4Inference = "glm4_inference",
+ /** DeepSeek provider (api.deepseek.com) */
  DeepseekInference = "deepseek_inference",
+ /** Mistral AI provider (api.mistral.ai) */
  MistralInference = "mistral_inference",
+ /** Perplexity AI provider (api.perplexity.ai) */
  PerplexityInference = "perplexity_inference",
+ /** Cohere provider (api.cohere.ai) */
  CohereInference = "cohere_inference",
+ /** Alibaba Cloud provider (dashscope-intl.aliyuncs.com) */
  AlibabaInference = "alibaba_inference"
  }

+ /**
+ * Execution context for AI inference operations.
+ *
+ * Encapsulates the configuration needed to execute an AI completion request:
+ * - inference: Which AI provider to use (OpenAI, Claude, Ollama, etc.)
+ * - model: The specific model to use (e.g., "gpt-4", "claude-3-5-sonnet")
+ * - apiKey: Authentication credential(s) for the provider
+ *
+ * @example
+ * ```typescript
+ * const context: IContext = {
+ * inference: InferenceName.ClaudeInference,
+ * model: "claude-3-5-sonnet-20240620",
+ * apiKey: "sk-ant-..."
+ * };
+ * ```
+ */
  interface IContext {
+ /** AI inference provider identifier */
  inference: InferenceName;
+ /** Model name/identifier for the provider */
  model: string;
+ /** API key or array of keys for token rotation */
  apiKey: string | string[];
  }
+ /**
+ * Scoped context service for isolated execution contexts.
+ *
+ * Provides context isolation using async local storage through the di-scoped library.
+ * Each operation runs with its own context containing provider, model, and API key configuration.
+ * This enables multi-tenant scenarios where different requests use different AI providers or keys.
+ *
+ * Key features:
+ * - Scoped context isolation per execution
+ * - Support for single or multiple API keys (token rotation)
+ * - Thread-safe context propagation
+ * - Automatic cleanup after execution
+ *
+ * @example
+ * ```typescript
+ * import ContextService from "./services/base/ContextService";
+ *
+ * // Execute operation within scoped context
+ * const result = await ContextService.runInContext(async () => {
+ * // Code here has access to the context
+ * const model = contextService.context.model;
+ * return await someAiOperation();
+ * }, {
+ * inference: InferenceName.GPT5Inference,
+ * model: "gpt-5o-mini",
+ * apiKey: "sk-..."
+ * });
+ * ```
+ */
  declare const ContextService: (new () => {
  readonly context: IContext;
  }) & Omit<{
@@ -126,45 +469,420 @@ declare const ContextService: (new () => {
  readonly context: IContext;
  };
  }, "prototype"> & di_scoped.IScopedClassRun<[context: IContext]>;
+ /**
+ * Type alias for ContextService instances.
+ */
  type TContextService = InstanceType<typeof ContextService>;

+ /**
+ * Provider interface for LLM inference operations.
+ *
+ * Defines the contract for LLM providers (OpenAI, Claude, DeepSeek, etc.)
+ * to support completion, streaming, and structured output generation.
+ * All providers must implement these three methods.
+ *
+ * @example
+ * ```typescript
+ * class CustomProvider implements IProvider {
+ * async getCompletion(params: ISwarmCompletionArgs): Promise<ISwarmMessage> {
+ * // Return full completion
+ * }
+ * async getStreamCompletion(params: ISwarmCompletionArgs): Promise<ISwarmMessage> {
+ * // Return streamed completion
+ * }
+ * async getOutlineCompletion(params: IOutlineCompletionArgs): Promise<IOutlineMessage> {
+ * // Return structured JSON output
+ * }
+ * }
+ * ```
+ */
  interface IProvider {
+ /**
+ * Generate a standard completion from the LLM.
+ *
+ * @param params - Completion parameters (messages, model, temperature, etc.)
+ * @returns Promise resolving to completion message
+ */
  getCompletion(params: ISwarmCompletionArgs): Promise<ISwarmMessage>;
+ /**
+ * Generate a streaming completion from the LLM.
+ *
+ * @param params - Completion parameters (messages, model, temperature, etc.)
+ * @returns Promise resolving to completion message (streamed)
+ */
  getStreamCompletion(params: ISwarmCompletionArgs): Promise<ISwarmMessage>;
+ /**
+ * Generate a structured JSON completion using JSON schema enforcement.
+ *
+ * Used for trading signals and other structured outputs where format
+ * validation is critical. The outline parameter defines the JSON schema.
+ *
+ * @param params - Outline completion parameters with JSON schema
+ * @returns Promise resolving to structured message
+ */
  getOutlineCompletion(params: IOutlineCompletionArgs): Promise<IOutlineMessage>;
  }

+ /**
+ * Type for AI provider class constructor.
+ * Each provider must accept a context service and logger in its constructor.
+ */
  type RunnerClass = new (contextService: TContextService, logger: ILogger) => IProvider;
+ /**
+ * Private service managing AI inference provider registry and execution.
+ *
+ * Coordinates AI operations across multiple inference providers (OpenAI, Claude, Ollama, etc.).
+ * Maintains a registry of provider implementations and instantiates them on-demand.
+ * Uses memoization to cache provider instances for better performance.
+ *
+ * Key features:
+ * - Dynamic provider registration for multiple AI services
+ * - Lazy instantiation with memoization for performance
+ * - Context-aware provider selection based on inference type
+ * - Support for standard, streaming, and structured completions
+ * - Type-safe provider interface
+ *
+ * @example
+ * ```typescript
+ * // Provider registration (typically done at startup)
+ * const runnerPrivate = inject<RunnerPrivateService>(TYPES.runnerPrivateService);
+ * runnerPrivate.registerRunner(InferenceName.ClaudeInference, ClaudeProvider);
+ * runnerPrivate.registerRunner(InferenceName.GPT5Inference, GPT5Provider);
+ *
+ * // Provider usage (automatically selected based on context)
+ * const result = await runnerPrivate.getCompletion({
+ * messages: [{ role: "user", content: "Analyze trade" }]
+ * });
+ * ```
+ */
  declare class RunnerPrivateService implements IProvider {
+ /** Context service providing execution context (model, API key, provider) */
  private readonly contextService;
+ /** Logger service for operation tracking */
  private readonly loggerService;
+ /** Registry storing provider class constructors by inference name */
  private _registry;
+ /**
+ * Memoized provider instance getter.
+ * Creates and caches provider instances per inference type.
+ */
  private getRunner;
+ /**
+ * Executes a standard AI completion using the provider specified in context.
+ *
+ * @param params - Completion parameters including messages and options
+ * @returns Promise resolving to AI response message
+ *
+ * @example
+ * ```typescript
+ * const result = await runnerPrivateService.getCompletion({
+ * messages: [
+ * { role: "system", content: "You are a trading assistant" },
+ * { role: "user", content: "Analyze BTC market" }
+ * ]
+ * });
+ * ```
+ */
  getCompletion: (params: ISwarmCompletionArgs) => Promise<ISwarmMessage>;
+ /**
+ * Executes a streaming AI completion using the provider specified in context.
+ *
+ * @param params - Completion parameters including messages and options
+ * @returns Promise resolving to accumulated AI response message
+ *
+ * @example
+ * ```typescript
+ * const result = await runnerPrivateService.getStreamCompletion({
+ * messages: [{ role: "user", content: "Generate signal" }]
+ * });
+ * ```
+ */
  getStreamCompletion: (params: ISwarmCompletionArgs) => Promise<ISwarmMessage>;
+ /**
+ * Executes a structured outline completion using the provider specified in context.
+ *
+ * @param params - Outline completion parameters including messages and schema
+ * @returns Promise resolving to structured AI response
+ *
+ * @example
+ * ```typescript
+ * const signal = await runnerPrivateService.getOutlineCompletion({
+ * messages: [{ role: "user", content: "Trading decision for ETH" }]
+ * });
+ * ```
+ */
  getOutlineCompletion: (params: IOutlineCompletionArgs) => Promise<IOutlineMessage>;
+ /**
+ * Registers a new AI provider implementation in the registry.
+ *
+ * @param name - Inference provider identifier
+ * @param runner - Provider class constructor
+ *
+ * @example
+ * ```typescript
+ * runnerPrivateService.registerRunner(
+ * InferenceName.ClaudeInference,
+ * ClaudeProvider
+ * );
+ * ```
+ */
  registerRunner: (name: InferenceName, runner: RunnerClass) => void;
  }

+ /**
+ * Public-facing service for AI inference operations with context management.
+ *
+ * Provides context-scoped access to AI completion operations.
+ * Acts as a facade that wraps RunnerPrivateService methods with context isolation.
+ * Each operation runs within a dedicated execution context to ensure proper API key
+ * and model configuration isolation.
+ *
+ * Key features:
+ * - Context-isolated execution for multi-tenant scenarios
+ * - Support for standard, streaming, and structured (outline) completions
+ * - Automatic context propagation to private service layer
+ * - Logging integration for operation tracking
+ *
+ * @example
+ * ```typescript
+ * import { engine } from "./lib";
+ * import { InferenceName } from "./enum/InferenceName";
+ *
+ * const context = {
+ * inference: InferenceName.ClaudeInference,
+ * model: "claude-3-5-sonnet-20240620",
+ * apiKey: "sk-ant-..."
+ * };
+ *
+ * // Standard completion
+ * const result = await engine.runnerPublicService.getCompletion({
+ * messages: [{ role: "user", content: "Analyze this trade..." }]
+ * }, context);
+ *
+ * // Streaming completion
+ * const stream = await engine.runnerPublicService.getStreamCompletion({
+ * messages: [{ role: "user", content: "Generate signal..." }]
+ * }, context);
+ *
+ * // Structured outline completion
+ * const outline = await engine.runnerPublicService.getOutlineCompletion({
+ * messages: [{ role: "user", content: "Trading decision..." }]
+ * }, context);
+ * ```
+ */
  declare class RunnerPublicService {
+ /** Private service handling AI provider operations */
  private readonly runnerPrivateService;
+ /** Logger service for operation tracking */
  private readonly loggerService;
+ /**
+ * Executes a standard AI completion within the specified context.
+ *
+ * @param params - Completion parameters including messages and options
+ * @param context - Execution context with inference provider, model, and API key
+ * @returns Promise resolving to AI response message
+ *
+ * @example
+ * ```typescript
+ * const result = await runnerPublicService.getCompletion({
+ * messages: [
+ * { role: "system", content: "You are a trading analyst" },
+ * { role: "user", content: "Analyze BTC/USDT" }
+ * ]
+ * }, {
+ * inference: InferenceName.ClaudeInference,
+ * model: "claude-3-5-sonnet-20240620",
+ * apiKey: "sk-ant-..."
+ * });
+ * ```
+ */
  getCompletion: (params: ISwarmCompletionArgs, context: IContext) => Promise<ISwarmMessage>;
+ /**
+ * Executes a streaming AI completion within the specified context.
+ *
+ * Similar to getCompletion but enables streaming mode where supported by the provider.
+ * The response is accumulated and returned as a complete message once streaming finishes.
+ *
+ * @param params - Completion parameters including messages and options
+ * @param context - Execution context with inference provider, model, and API key
+ * @returns Promise resolving to accumulated AI response message
+ *
+ * @example
+ * ```typescript
+ * const result = await runnerPublicService.getStreamCompletion({
+ * messages: [
+ * { role: "user", content: "Generate trading signal for ETH/USDT" }
+ * ]
+ * }, {
+ * inference: InferenceName.GPT5Inference,
+ * model: "gpt-5o-mini",
+ * apiKey: "sk-..."
+ * });
+ * ```
+ */
  getStreamCompletion: (params: ISwarmCompletionArgs, context: IContext) => Promise<ISwarmMessage>;
+ /**
+ * Executes a structured outline completion within the specified context.
+ *
+ * Uses structured output (JSON schema validation) to ensure the AI response
+ * conforms to a predefined format. Ideal for extracting structured data
+ * from AI responses (e.g., trading signals with specific fields).
+ *
+ * @param params - Outline completion parameters including messages and schema
+ * @param context - Execution context with inference provider, model, and API key
+ * @returns Promise resolving to structured AI response
+ *
+ * @example
+ * ```typescript
+ * const signal = await runnerPublicService.getOutlineCompletion({
+ * messages: [
+ * { role: "user", content: "Decide position for BTC/USDT" }
+ * ]
+ * }, {
+ * inference: InferenceName.DeepseekInference,
+ * model: "deepseek-chat",
+ * apiKey: "sk-..."
+ * });
+ * // Returns: { position: "long", price_open: 50000, ... }
+ * ```
+ */
  getOutlineCompletion: (params: IOutlineCompletionArgs, context: IContext) => Promise<IOutlineMessage>;
  }

+ /**
+ * Centralized logging service for the Ollama package.
+ *
+ * Provides a unified interface for logging operations across the application.
+ * Uses a delegate pattern to forward log calls to a configured logger implementation.
+ * Defaults to a no-op logger if no logger is set.
+ *
+ * Key features:
+ * - Supports multiple log levels: log, debug, info, warn
+ * - Configurable logger backend via setLogger
+ * - Async logging support
+ * - Safe default (no-op) when unconfigured
+ *
+ * @example
+ * ```typescript
+ * import { LoggerService } from "./services/common/LoggerService";
+ * import { setLogger } from "./function/setup.function";
+ *
+ * // Configure custom logger
+ * setLogger({
+ * log: async (topic, ...args) => console.log(topic, ...args),
+ * debug: async (topic, ...args) => console.debug(topic, ...args),
+ * info: async (topic, ...args) => console.info(topic, ...args),
+ * warn: async (topic, ...args) => console.warn(topic, ...args),
+ * });
+ *
+ * const loggerService = inject<LoggerService>(TYPES.loggerService);
+ * await loggerService.info("Operation completed", { status: "success" });
+ * ```
+ */
  declare class LoggerService implements ILogger {
+ /** Internal logger instance, defaults to NOOP_LOGGER */
  private _commonLogger;
+ /**
+ * Logs a general message with optional arguments.
+ *
+ * @param topic - Message topic or category
+ * @param args - Additional arguments to log
+ */
  log: (topic: string, ...args: any[]) => Promise<void>;
+ /**
+ * Logs a debug message with optional arguments.
+ * Used for detailed diagnostic information.
+ *
+ * @param topic - Message topic or category
+ * @param args - Additional arguments to log
+ */
  debug: (topic: string, ...args: any[]) => Promise<void>;
+ /**
+ * Logs an informational message with optional arguments.
+ * Used for general operational information.
+ *
+ * @param topic - Message topic or category
+ * @param args - Additional arguments to log
+ */
  info: (topic: string, ...args: any[]) => Promise<void>;
+ /**
+ * Logs a warning message with optional arguments.
+ * Used for potentially problematic situations.
+ *
+ * @param topic - Message topic or category
+ * @param args - Additional arguments to log
+ */
  warn: (topic: string, ...args: any[]) => Promise<void>;
+ /**
+ * Sets the logger implementation to use for all logging operations.
+ *
+ * @param logger - Logger implementation conforming to ILogger interface
+ *
+ * @example
+ * ```typescript
+ * const logger = new LoggerService();
+ * logger.setLogger({
+ * log: async (topic, ...args) => console.log(topic, ...args),
+ * debug: async (topic, ...args) => console.debug(topic, ...args),
+ * info: async (topic, ...args) => console.info(topic, ...args),
+ * warn: async (topic, ...args) => console.warn(topic, ...args),
+ * });
+ * ```
+ */
  setLogger: (logger: ILogger) => void;
  }

+ /**
+ * Private service for processing structured outline completions.
+ *
+ * Handles the core logic for executing outline-based AI completions with schema validation.
+ * Processes AI responses through the agent-swarm-kit json function to extract and validate
+ * structured trading signal data.
+ *
+ * Key features:
+ * - JSON schema validation using agent-swarm-kit
+ * - Trading signal extraction and transformation
+ * - Type conversion for numeric fields
+ * - Markdown formatting cleanup for notes
+ * - Error handling for validation failures
+ *
+ * @example
+ * ```typescript
+ * const outlinePrivate = inject<OutlinePrivateService>(TYPES.outlinePrivateService);
+ * const signal = await outlinePrivate.getCompletion([
+ * { role: "user", content: "Analyze market" }
+ * ]);
+ * ```
+ */
  declare class OutlinePrivateService {
+ /** Logger service for operation tracking */
  private readonly loggerService;
+ /**
+ * Processes outline completion messages and extracts structured signal data.
+ *
+ * Sends messages to the AI provider, validates the response against the signal schema,
+ * and transforms the data into a structured format. Returns null if the AI decides
+ * to wait (no position).
+ *
+ * @param messages - Array of conversation messages for the AI
+ * @returns Promise resolving to structured signal data or null if position is "wait"
+ * @throws Error if validation fails or AI returns an error
+ *
+ * @example
+ * ```typescript
+ * const signal = await outlinePrivateService.getCompletion([
+ * { role: "system", content: "Trading analyst role" },
+ * { role: "user", content: "Market analysis data..." }
+ * ]);
+ *
+ * if (signal) {
+ * console.log(`Position: ${signal.position}`);
+ * console.log(`Entry: ${signal.priceOpen}`);
+ * console.log(`SL: ${signal.priceStopLoss}`);
+ * console.log(`TP: ${signal.priceTakeProfit}`);
+ * }
+ * ```
+ */
  getCompletion: (messages: IOutlineMessage[]) => Promise<{
  id: string;
  position: "long" | "short";
@@ -176,9 +894,73 @@ declare class OutlinePrivateService {
  }>;
  }

+ /**
+ * Public-facing service for structured AI outline completions.
+ *
+ * Provides a simplified interface for executing structured AI completions with schema validation.
+ * Handles context creation and isolation for outline-based operations.
+ * Used for extracting structured data from AI responses (e.g., trading signals).
+ *
+ * Key features:
+ * - Simplified API with automatic context management
+ * - JSON schema validation for structured outputs
+ * - Support for multiple AI providers
+ * - Optional API key parameter with fallback
+ * - Logging integration
+ *
+ * @example
+ * ```typescript
+ * import { engine } from "./lib";
+ * import { InferenceName } from "./enum/InferenceName";
+ *
+ * const signal = await engine.outlinePublicService.getCompletion(
+ * [{ role: "user", content: "Analyze BTC/USDT and decide position" }],
+ * InferenceName.ClaudeInference,
+ * "claude-3-5-sonnet-20240620",
+ * "sk-ant-..."
+ * );
+ *
+ * // Returns structured signal:
+ * // {
+ * // position: "long",
+ * // priceOpen: 50000,
+ * // priceStopLoss: 48000,
+ * // priceTakeProfit: 52000,
+ * // minuteEstimatedTime: 120,
+ * // note: "Strong bullish momentum..."
+ * // }
+ * ```
+ */
  declare class OutlinePublicService {
+ /** Logger service for operation tracking */
  private readonly loggerService;
+ /** Private service handling outline completion logic */
  private readonly outlinePrivateService;
+ /**
+ * Executes a structured outline completion with schema validation.
+ *
+ * Creates an isolated execution context and processes messages through the AI provider
+ * to generate a structured response conforming to a predefined schema.
+ *
+ * @param messages - Array of conversation messages for the AI
+ * @param inference - AI provider identifier
+ * @param model - Model name/identifier
+ * @param apiKey - Optional API key(s), required for most providers
+ * @returns Promise resolving to structured signal data or null if position is "wait"
+ *
+ * @example
+ * ```typescript
+ * const result = await outlinePublicService.getCompletion(
+ * [
+ * { role: "system", content: "You are a trading analyst" },
+ * { role: "user", content: "Analyze current BTC market" }
+ * ],
+ * InferenceName.DeepseekInference,
+ * "deepseek-chat",
+ * "sk-..."
+ * );
+ * ```
+ */
  getCompletion: (messages: IOutlineMessage[], inference: InferenceName, model: string, apiKey?: string | string[]) => Promise<{
  id: string;
  position: "long" | "short";
@@ -190,6 +972,10 @@ declare class OutlinePublicService {
  }>;
  }

+ /**
+ * Main engine object containing all services.
+ * Provides unified access to the entire service layer.
+ */
  declare const engine: {
  runnerPublicService: RunnerPublicService;
  outlinePublicService: OutlinePublicService;
@@ -201,4 +987,4 @@ declare const engine: {
  loggerService: LoggerService;
  };

- export { alibaba, claude, cohere, deepseek, gpt5, grok, hf, engine as lib, mistral, ollama, perplexity, setLogger };
+ export { alibaba, claude, cohere, deepseek, glm4, gpt5, grok, hf, engine as lib, mistral, ollama, perplexity, setLogger };
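
For reference, a short end-to-end sketch of the two headline additions this diff documents: the newly exported glm4 function and the key-array rotation documented on ollama. It is assembled from the JSDoc examples above; the message contents, the 'key1'/'key2' placeholders, and the ZAI_API_KEY environment variable are illustrative, not part of the package.

```typescript
import { glm4, ollama, setLogger } from '@backtest-kit/ollama';

// Route the package's internal logs to the console
// (any ILogger-compatible backend works).
setLogger({
  log: console.log,
  debug: console.debug,
  info: console.info,
  warn: console.warn,
});

// Outline messages follow the user/assistant/system shape
// described in the JSDoc above (illustrative content).
const messages = [
  { role: 'system', content: 'You are a trading analyst' },
  { role: 'user', content: 'Analyze BTC/USDT and decide position' },
];

// New in 0.0.3: GLM-4 via the OpenAI-compatible Z.ai API (single key only;
// passing an array throws, per the @throws note above).
const glmSignal = await glm4(messages, 'glm-4-plus', process.env.ZAI_API_KEY);
console.log(glmSignal.position, glmSignal.priceOpen, glmSignal.priceStopLoss);

// ollama additionally accepts an array of keys for token rotation.
const ollamaSignal = await ollama(messages, 'llama3.3:70b', ['key1', 'key2']);
console.log(ollamaSignal.position); // "long" | "short" | "wait"
```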