@backtest-kit/ollama 0.0.1

This diff shows the content of publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,2905 @@
1
+ import { json, event, validateToolArguments, RoundRobin, addCompletion, addOutline, validate } from 'agent-swarm-kit';
2
+ import { scoped } from 'di-scoped';
3
+ import { createActivator } from 'di-kit';
4
+ import MarkdownIt from 'markdown-it';
5
+ import sanitizeHtml from 'sanitize-html';
6
+ import { lint } from 'markdownlint/promise';
7
+ import { applyFixes } from 'markdownlint';
8
+ import { ToolRegistry, memoize, singleshot, randomString, fetchApi, str } from 'functools-kit';
9
+ import OpenAI from 'openai';
10
+ import { AIMessage, SystemMessage, HumanMessage, ToolMessage } from '@langchain/core/messages';
11
+ import { ChatXAI } from '@langchain/xai';
12
+ import { jsonrepair } from 'jsonrepair';
13
+ import fs from 'fs/promises';
14
+ import { InferenceClient } from '@huggingface/inference';
15
+ import { ChatOpenAI } from '@langchain/openai';
16
+ import { get, set } from 'lodash-es';
17
+ import { Ollama } from 'ollama';
18
+ import { zodResponseFormat } from 'openai/helpers/zod';
19
+ import { z } from 'zod';
20
+
21
+ var CompletionName;
22
+ (function (CompletionName) {
23
+ CompletionName["RunnerCompletion"] = "runner_completion";
24
+ CompletionName["RunnerStreamCompletion"] = "runner_stream_completion";
25
+ CompletionName["RunnerOutlineCompletion"] = "runner_outline_completion";
26
+ })(CompletionName || (CompletionName = {}));
27
+ var CompletionName$1 = CompletionName;
28
+
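These IIFE blocks are the compiled form of TypeScript string enums, so consumers see plain string constants rather than an enum type. A minimal illustrative sketch of what that means downstream:

```js
// The enum object is just a string map; the registries below match
// on the string values themselves.
console.log(CompletionName$1.RunnerCompletion); // "runner_completion"
console.log(CompletionName$1.RunnerStreamCompletion === "runner_stream_completion"); // true
```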
29
+ const ContextService = scoped(class {
30
+ constructor(context) {
31
+ this.context = context;
32
+ }
33
+ });
34
+
35
+ const NOOP_LOGGER = {
36
+ log() {
37
+ },
38
+ debug() {
39
+ },
40
+ info() {
41
+ },
42
+ warn() {
43
+ },
44
+ };
45
+ class LoggerService {
46
+ constructor() {
47
+ this._commonLogger = NOOP_LOGGER;
48
+ this.log = async (topic, ...args) => {
49
+ await this._commonLogger.log(topic, ...args);
50
+ };
51
+ this.debug = async (topic, ...args) => {
52
+ await this._commonLogger.debug(topic, ...args);
53
+ };
54
+ this.info = async (topic, ...args) => {
55
+ await this._commonLogger.info(topic, ...args);
56
+ };
57
+ this.warn = async (topic, ...args) => {
58
+ await this._commonLogger.warn(topic, ...args);
59
+ };
60
+ this.setLogger = (logger) => {
61
+ this._commonLogger = logger;
62
+ };
63
+ }
64
+ }
65
+
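LoggerService starts with the no-op sink above and only forwards once a host installs a real logger. A minimal sketch, assuming any object with log/debug/info/warn methods (the console adapter here is illustrative, not part of the package):

```js
const loggerService = new LoggerService();
// Install a console-backed sink; each method may be sync or async,
// since the wrappers above simply await the result.
loggerService.setLogger({
  log: (topic, ...args) => console.log(topic, ...args),
  debug: (topic, ...args) => console.debug(topic, ...args),
  info: (topic, ...args) => console.info(topic, ...args),
  warn: (topic, ...args) => console.warn(topic, ...args),
});
await loggerService.info("runner boot", { ok: true });
```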
66
+ const { provide, inject, init, override } = createActivator("ollama");
67
+
68
+ const commonServices$1 = {
69
+ loggerService: Symbol("loggerService"),
70
+ };
71
+ const baseServices$1 = {
72
+ contextService: Symbol('contextService'),
73
+ };
74
+ const privateServices$1 = {
75
+ runnerPrivateService: Symbol('runnerPrivateService'),
76
+ outlinePrivateService: Symbol('outlinePrivateService'),
77
+ };
78
+ const publicServices$1 = {
79
+ runnerPublicService: Symbol('runnerPublicService'),
80
+ outlinePublicService: Symbol('outlinePublicService'),
81
+ };
82
+ const TYPES = {
83
+ ...commonServices$1,
84
+ ...baseServices$1,
85
+ ...privateServices$1,
86
+ ...publicServices$1,
87
+ };
88
+
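The TYPES map collects a Symbol token for every service; registration and resolution go through the provide/inject pair created by createActivator above. A condensed sketch of the pattern exactly as it is used later in this diff:

```js
// Registration: a lazy factory is bound to a token (see the bare
// { provide(...) } blocks further down the bundle).
provide(TYPES.loggerService, () => new LoggerService());

// Resolution: a consumer pulls its dependency by token.
// ExampleService is hypothetical; the real consumers are the
// private/public services below.
class ExampleService {
  loggerService = inject(TYPES.loggerService);
}
```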
89
+ var OutlineName;
90
+ (function (OutlineName) {
91
+ OutlineName["SignalOutline"] = "signal_outline";
92
+ })(OutlineName || (OutlineName = {}));
93
+ var OutlineName$1 = OutlineName;
94
+
95
+ const toLintMarkdown = async (content) => {
96
+ if (!content) {
97
+ return "";
98
+ }
99
+ const { content: errors } = await lint({ strings: { content } });
100
+ if (!errors.length) {
101
+ return content;
102
+ }
103
+ const value = applyFixes(content, errors);
104
+ return value ? value : content;
105
+ };
106
+ globalThis.toLintMarkdown = toLintMarkdown;
107
+
108
+ const toPlainString = async (content) => {
109
+ if (!content) {
110
+ return "";
111
+ }
112
+ const markdown = await toLintMarkdown(content);
113
+ const md = new MarkdownIt({
114
+ html: false,
115
+ breaks: true,
116
+ linkify: true,
117
+ typographer: true,
118
+ });
119
+ let telegramHtml = md.render(markdown);
120
+ telegramHtml = sanitizeHtml(telegramHtml, {
121
+ allowedTags: [
122
+ "b",
123
+ "i",
124
+ "a",
125
+ "code",
126
+ "pre",
127
+ "s",
128
+ "u",
129
+ "tg-spoiler",
130
+ "blockquote",
131
+ "br",
132
+ ],
133
+ allowedAttributes: {
134
+ a: ["href"],
135
+ },
136
+ transformTags: {
137
+ h1: "",
138
+ h2: "",
139
+ h3: "",
140
+ h4: "",
141
+ h5: "",
142
+ h6: "",
143
+ a: "",
144
+ strong: "",
145
+ em: "",
146
+ p: () => "",
147
+ ul: () => "",
148
+ li: () => "• ",
149
+ ol: () => "",
150
+ hr: () => "\n",
151
+ br: () => "\n",
152
+ div: () => "",
153
+ },
154
+ });
155
+ return telegramHtml.replaceAll(/\n[\s\n]*\n/g, "\n").trim();
156
+ };
157
+
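Together, toLintMarkdown and toPlainString form the output-normalization path: lint-fix the markdown, render it with MarkdownIt, sanitize down to a Telegram-friendly tag subset, then collapse the remainder to plain text. A usage sketch (the output shown is approximate, not a verbatim transcript):

```js
// Headings and emphasis are unwrapped, list items become bullet
// lines, and runs of blank lines collapse.
const note = await toPlainString("# Risk\n\n- **tight** stop\n- wide target");
// note ≈ "Risk\n• tight stop\n• wide target"
```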
158
+ class OutlinePrivateService {
159
+ constructor() {
160
+ this.loggerService = inject(TYPES.loggerService);
161
+ this.getCompletion = async (messages) => {
162
+ this.loggerService.log("outlinePrivateService getCompletion", {
163
+ messages,
164
+ });
165
+ const { data, resultId, error } = await json(OutlineName$1.SignalOutline, messages);
166
+ if (error) {
167
+ throw new Error(error);
168
+ }
169
+ if (data.position === "wait") {
170
+ return null;
171
+ }
172
+ return {
173
+ id: resultId,
174
+ position: data.position,
175
+ minuteEstimatedTime: +data.minute_estimated_time,
176
+ priceStopLoss: +data.price_stop_loss,
177
+ priceTakeProfit: +data.price_take_profit,
178
+ note: await toPlainString(data.risk_note),
179
+ priceOpen: +data.price_open,
180
+ };
181
+ };
182
+ }
183
+ }
184
+
185
+ class RunnerPrivateService {
186
+ constructor() {
187
+ this.contextService = inject(TYPES.contextService);
188
+ this.loggerService = inject(TYPES.loggerService);
189
+ this._registry = new ToolRegistry("runner_registry");
190
+ this.getRunner = memoize(([inference]) => `${inference}`, (inference) => {
191
+ const Runner = this._registry.get(inference);
192
+ return new Runner(this.contextService, this.loggerService);
193
+ });
194
+ this.getCompletion = async (params) => {
195
+ this.loggerService.log("runnerPrivateService getCompletion");
196
+ const runner = this.getRunner(this.contextService.context.inference);
197
+ return await runner.getCompletion(params);
198
+ };
199
+ this.getStreamCompletion = async (params) => {
200
+ this.loggerService.log("runnerPrivateService getStreamCompletion");
201
+ const runner = this.getRunner(this.contextService.context.inference);
202
+ return await runner.getStreamCompletion(params);
203
+ };
204
+ this.getOutlineCompletion = async (params) => {
205
+ this.loggerService.log("runnerPrivateService getOutlineCompletion");
206
+ const runner = this.getRunner(this.contextService.context.inference);
207
+ return await runner.getOutlineCompletion(params);
208
+ };
209
+ this.registerRunner = (name, runner) => {
210
+ this._registry = this._registry.register(name, runner);
211
+ };
212
+ }
213
+ }
214
+
215
+ class OutlinePublicService {
216
+ constructor() {
217
+ this.loggerService = inject(TYPES.loggerService);
218
+ this.outlinePrivateService = inject(TYPES.outlinePrivateService);
219
+ this.getCompletion = async (messages, inference, model, apiKey) => {
220
+ this.loggerService.log("outlinePublicService getCompletion", {
221
+ messages,
222
+ model,
223
+ apiKey,
224
+ inference,
225
+ });
226
+ return await ContextService.runInContext(async () => {
227
+ return await this.outlinePrivateService.getCompletion(messages);
228
+ }, {
229
+ apiKey: apiKey,
230
+ inference,
231
+ model,
232
+ });
233
+ };
234
+ }
235
+ }
236
+
237
+ class RunnerPublicService {
238
+ constructor() {
239
+ this.runnerPrivateService = inject(TYPES.runnerPrivateService);
240
+ this.loggerService = inject(TYPES.loggerService);
241
+ this.getCompletion = async (params, context) => {
242
+ this.loggerService.log("runnerPublicService getCompletion");
243
+ return await ContextService.runInContext(async () => {
244
+ return await this.runnerPrivateService.getCompletion(params);
245
+ }, context);
246
+ };
247
+ this.getStreamCompletion = async (params, context) => {
248
+ this.loggerService.log("runnerPublicService getStreamCompletion");
249
+ return await ContextService.runInContext(async () => {
250
+ return await this.runnerPrivateService.getStreamCompletion(params);
251
+ }, context);
252
+ };
253
+ this.getOutlineCompletion = async (params, context) => {
254
+ this.loggerService.log("runnerPublicService getOutlineCompletion");
255
+ return await ContextService.runInContext(async () => {
256
+ return await this.runnerPrivateService.getOutlineCompletion(params);
257
+ }, context);
258
+ };
259
+ }
260
+ }
261
+
262
+ {
263
+ provide(TYPES.loggerService, () => new LoggerService());
264
+ }
265
+ {
266
+ provide(TYPES.contextService, () => new ContextService());
267
+ }
268
+ {
269
+ provide(TYPES.runnerPrivateService, () => new RunnerPrivateService());
270
+ provide(TYPES.outlinePrivateService, () => new OutlinePrivateService());
271
+ }
272
+ {
273
+ provide(TYPES.runnerPublicService, () => new RunnerPublicService());
274
+ provide(TYPES.outlinePublicService, () => new OutlinePublicService());
275
+ }
276
+
277
+ var InferenceName;
278
+ (function (InferenceName) {
279
+ InferenceName["OllamaInference"] = "ollama_inference";
280
+ InferenceName["GrokInference"] = "grok_inference";
281
+ InferenceName["HfInference"] = "hf_inference";
282
+ InferenceName["ClaudeInference"] = "claude_inference";
283
+ InferenceName["GPT5Inference"] = "gpt5_inference";
284
+ InferenceName["DeepseekInference"] = "deepseek_inference";
285
+ InferenceName["MistralInference"] = "mistral_inference";
286
+ InferenceName["PerplexityInference"] = "perplexity_inference";
287
+ InferenceName["CohereInference"] = "cohere_inference";
288
+ InferenceName["AlibabaInference"] = "alibaba_inference";
289
+ })(InferenceName || (InferenceName = {}));
290
+ var InferenceName$1 = InferenceName;
291
+
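Routing is driven entirely by these inference strings: RunnerPrivateService.getRunner looks the value up in its ToolRegistry and memoizes one provider instance per name. A hedged call sketch (how runnerPublicService is obtained from the container is outside this excerpt):

```js
// Sketch: run one completion through the Ollama-backed runner.
// `params` follows the shape the providers destructure below:
// { clientId, agentName, messages, mode, tools }.
const result = await runnerPublicService.getCompletion(params, {
  inference: InferenceName$1.OllamaInference,
  model: "llama3.1",                  // illustrative model id
  apiKey: process.env.OLLAMA_API_KEY, // string, or string[] for rotation
});
```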
292
+ const getGrok = singleshot(() => {
293
+ const apiKey = lib.contextService.context.apiKey;
294
+ if (Array.isArray(apiKey)) {
295
+ getGrok.clear();
296
+ throw new Error("Grok provider does not support token rotation");
297
+ }
298
+ return new OpenAI({
299
+ baseURL: "https://api.x.ai/v1",
300
+ apiKey: apiKey,
301
+ });
302
+ });
303
+
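getGrok memoizes a single OpenAI-compatible client via singleshot; when the context carries an apiKey array it evicts the memo with getGrok.clear() before throwing, so a later call with a usable key is not served the cached failure. The same clear-then-throw guard recurs in the Claude, OpenAI, Deepseek, and Mistral factories below. A hedged usage sketch:

```js
// Inside a request context (see ContextService.runInContext below):
const grok = getGrok(); // throws if context.apiKey is an array
const reply = await grok.chat.completions.create({
  model: "grok-2-latest", // illustrative model id
  messages: [{ role: "user", content: "ping" }],
});
```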
304
+ const CC_ENABLE_DEBUG = "CC_ENABLE_DEBUG" in process.env ? !!parseInt(process.env.CC_ENABLE_DEBUG) : false;
305
+
306
+ class CustomChat extends ChatXAI {
307
+ async getNumTokens(content) {
308
+ if (typeof content !== "string") {
309
+ return 0;
310
+ }
311
+ return Math.ceil(content.length / 4);
312
+ }
313
+ }
314
+ const getChat$1 = (model, apiKey) => new CustomChat({
315
+ apiKey,
316
+ model,
317
+ streaming: true,
318
+ });
319
+ let GrokProvider$1 = class GrokProvider {
320
+ constructor(contextService, logger) {
321
+ this.contextService = contextService;
322
+ this.logger = logger;
323
+ }
324
+ async getCompletion(params) {
325
+ const grok = getGrok();
326
+ const { clientId, agentName, messages: rawMessages, mode, tools } = params;
327
+ this.logger.log("grokProvider getCompletion", {
328
+ agentName,
329
+ mode,
330
+ clientId,
331
+ context: this.contextService.context,
332
+ });
333
+ const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
334
+ role,
335
+ tool_call_id,
336
+ content,
337
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
338
+ ...rest,
339
+ function: {
340
+ name: f.name,
341
+ arguments: JSON.stringify(f.arguments),
342
+ },
343
+ })),
344
+ }));
345
+ const { choices: [{ message: { content, role, tool_calls }, },], } = await grok.chat.completions.create({
346
+ model: this.contextService.context.model,
347
+ messages: messages,
348
+ tools: tools,
349
+ response_format: {
350
+ type: "text",
351
+ },
352
+ });
353
+ const result = {
354
+ content: content,
355
+ mode,
356
+ agentName,
357
+ role,
358
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
359
+ ...rest,
360
+ function: {
361
+ name: f.name,
362
+ arguments: JSON.parse(f.arguments),
363
+ },
364
+ })),
365
+ };
366
+ // Debug logging
367
+ if (CC_ENABLE_DEBUG) {
368
+ await fs.appendFile("./debug_grok_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
369
+ }
370
+ return result;
371
+ }
372
+ async getStreamCompletion(params) {
373
+ if (Array.isArray(this.contextService.context.apiKey)) {
374
+ throw new Error("Grok provider does not support token rotation");
375
+ }
376
+ const chat = getChat$1(this.contextService.context.model, this.contextService.context.apiKey);
377
+ const { agentName, messages: rawMessages, mode, tools: rawTools, clientId, } = params;
378
+ this.logger.log("grokProvider getStreamCompletion", {
379
+ agentName,
380
+ mode,
381
+ clientId,
382
+ context: this.contextService.context,
383
+ });
384
+ // Validate and format tools
385
+ const tools = rawTools?.map(({ type, function: f }) => ({
386
+ type: "function",
387
+ function: {
388
+ name: f.name,
389
+ description: f.description || "",
390
+ parameters: f.parameters || { type: "object", properties: {} },
391
+ },
392
+ }));
393
+ // Bind tools to chat instance if tools are provided
394
+ const chatInstance = tools?.length ? chat.bindTools(tools) : chat;
395
+ // Map raw messages to LangChain messages
396
+ const messages = rawMessages.map(({ role, tool_calls, tool_call_id, content }) => {
397
+ if (role === "assistant") {
398
+ return new AIMessage({
399
+ content,
400
+ tool_calls: tool_calls?.map(({ function: f, id }) => ({
401
+ id: id || randomString(),
402
+ name: f.name,
403
+ args: f.arguments,
404
+ })),
405
+ });
406
+ }
407
+ if (role === "system") {
408
+ return new SystemMessage({ content });
409
+ }
410
+ if (role === "user") {
411
+ return new HumanMessage({ content });
412
+ }
413
+ if (role === "developer") {
414
+ return new SystemMessage({ content });
415
+ }
416
+ if (role === "tool") {
417
+ return new ToolMessage({
418
+ tool_call_id: tool_call_id || randomString(),
419
+ content,
420
+ });
421
+ }
422
+ throw new Error(`Unsupported message role: ${role}`);
423
+ });
424
+ let textContent = "";
425
+ let toolCalls = [];
426
+ // Handle streaming response
427
+ const stream = await chatInstance.stream(messages);
428
+ // Aggregate tool calls and content from stream, emit chunks
429
+ for await (const chunk of stream) {
430
+ if (chunk.content) {
431
+ textContent += chunk.content;
432
+ await event(clientId, "llm-new-token", chunk.content); // Emit content chunk
433
+ }
434
+ if (chunk.tool_calls?.length) {
435
+ toolCalls = [...toolCalls, ...chunk.tool_calls];
436
+ }
437
+ }
438
+ // Process content if it's an array of parts
439
+ const finalContent = Array.isArray(textContent)
440
+ ? textContent
441
+ .filter((part) => part.type === "text")
442
+ .map((c) => c.text)
443
+ .join("")
444
+ : textContent;
445
+ await event(clientId, "llm-completion", {
446
+ content: finalContent.trim(),
447
+ agentName,
448
+ });
449
+ // Format tool calls for return
450
+ const formattedToolCalls = toolCalls.map(({ name, id, args }) => ({
451
+ id: id || randomString(),
452
+ type: "function",
453
+ function: {
454
+ name,
455
+ arguments: args,
456
+ },
457
+ }));
458
+ const result = {
459
+ content: finalContent,
460
+ mode,
461
+ agentName,
462
+ role: "assistant",
463
+ tool_calls: formattedToolCalls,
464
+ };
465
+ // Debug logging
466
+ if (CC_ENABLE_DEBUG) {
467
+ await fs.appendFile("./debug_grok_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
468
+ }
469
+ return result;
470
+ }
471
+ async getOutlineCompletion(params) {
472
+ const { messages: rawMessages, format } = params;
473
+ this.logger.log("grokProvider getOutlineCompletion", {
474
+ context: this.contextService.context,
475
+ });
476
+ if (Array.isArray(this.contextService.context.apiKey)) {
477
+ throw new Error("Grok provider does not support token rotation");
478
+ }
479
+ const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
480
+ role,
481
+ tool_call_id,
482
+ content,
483
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
484
+ ...rest,
485
+ function: {
486
+ name: f.name,
487
+ arguments: JSON.stringify(f.arguments),
488
+ },
489
+ })),
490
+ }));
491
+ const { choices: [{ message: { refusal, content }, },], } = await fetchApi("https://api.x.ai/v1/chat/completions", {
492
+ method: "POST",
493
+ headers: {
494
+ "Content-Type": "application/json",
495
+ Authorization: `Bearer ${this.contextService.context.apiKey}`,
496
+ },
497
+ body: JSON.stringify({
498
+ messages,
499
+ model: this.contextService.context.model,
500
+ max_tokens: 5000,
501
+ response_format: format,
502
+ }),
503
+ });
504
+ if (refusal) {
505
+ throw new Error(refusal);
506
+ }
507
+ const json = jsonrepair(content);
508
+ const result = {
509
+ role: "assistant",
510
+ content: json,
511
+ };
512
+ // Debug logging
513
+ if (CC_ENABLE_DEBUG) {
514
+ await fs.appendFile("./debug_grok_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
515
+ }
516
+ return result;
517
+ }
518
+ };
519
+
520
+ const MAX_ATTEMPTS$5 = 5;
521
+ class HuggingFaceChat extends ChatOpenAI {
522
+ async getNumTokens(content) {
523
+ if (typeof content !== "string") {
524
+ return 0;
525
+ }
526
+ return Math.ceil(content.length / 4);
527
+ }
528
+ }
529
+ const getChat = (model, apiKey) => new HuggingFaceChat({
530
+ configuration: {
531
+ baseURL: "https://router.huggingface.co/v1",
532
+ apiKey,
533
+ },
534
+ model,
535
+ streaming: true,
536
+ });
537
+ const getInference = (apiKey) => new InferenceClient(apiKey);
538
+ class HfProvider {
539
+ constructor(contextService, logger) {
540
+ this.contextService = contextService;
541
+ this.logger = logger;
542
+ }
543
+ async getCompletion(params) {
544
+ if (Array.isArray(this.contextService.context.apiKey)) {
545
+ throw new Error("Hf provider does not support token rotation");
546
+ }
547
+ const inference = getInference(this.contextService.context.apiKey);
548
+ const { agentName, clientId, messages: rawMessages, mode, tools: rawTools } = params;
549
+ this.logger.log("hfProvider getCompletion", {
550
+ agentName,
551
+ mode,
552
+ clientId,
553
+ context: this.contextService.context,
554
+ });
555
+ const messages = rawMessages.map(({ role, content, tool_calls, tool_call_id }) => {
556
+ if (role === "tool") {
557
+ return {
558
+ role: "tool",
559
+ content,
560
+ tool_call_id: tool_call_id,
561
+ };
562
+ }
563
+ if (role === "assistant" && tool_calls) {
564
+ return {
565
+ role: "assistant",
566
+ content,
567
+ tool_calls: tool_calls.map((tc) => ({
568
+ id: tc.id,
569
+ type: tc.type,
570
+ function: {
571
+ name: tc.function.name,
572
+ arguments: typeof tc.function.arguments === "string"
573
+ ? tc.function.arguments
574
+ : JSON.stringify(tc.function.arguments),
575
+ },
576
+ })),
577
+ };
578
+ }
579
+ return {
580
+ role: role,
581
+ content,
582
+ };
583
+ });
584
+ const tools = rawTools?.map(({ function: f }) => ({
585
+ type: "function",
586
+ function: {
587
+ name: f.name,
588
+ description: f.description,
589
+ parameters: f.parameters,
590
+ },
591
+ }));
592
+ const completion = await inference.chatCompletion({
593
+ model: this.contextService.context.model,
594
+ messages,
595
+ ...(tools && { tools }),
596
+ });
597
+ const choice = completion.choices[0];
598
+ const text = choice.message.content || "";
599
+ const tool_calls = choice.message.tool_calls || [];
600
+ const result = {
601
+ content: text,
602
+ mode,
603
+ agentName: agentName,
604
+ role: "assistant",
605
+ tool_calls: tool_calls.map(({ id, type, function: f }) => ({
606
+ id: id,
607
+ type: type,
608
+ function: {
609
+ name: f.name,
610
+ arguments: typeof f.arguments === "string"
611
+ ? JSON.parse(f.arguments)
612
+ : f.arguments,
613
+ },
614
+ })),
615
+ };
616
+ // Debug logging
617
+ if (CC_ENABLE_DEBUG) {
618
+ await fs.appendFile("./debug_hf_provider.txt", JSON.stringify({
619
+ params,
620
+ answer: result,
621
+ }, null, 2) + "\n\n");
622
+ }
623
+ return result;
624
+ }
625
+ async getStreamCompletion(params) {
626
+ if (Array.isArray(this.contextService.context.apiKey)) {
627
+ throw new Error("Hf provider does not support token rotation");
628
+ }
629
+ const chat = getChat(this.contextService.context.model, this.contextService.context.apiKey);
630
+ const { agentName, messages: rawMessages, mode, tools: rawTools, clientId, } = params;
631
+ this.logger.log("hfProvider getStreamCompletion", {
632
+ agentName,
633
+ mode,
634
+ clientId,
635
+ context: this.contextService.context,
636
+ });
637
+ const tools = rawTools?.map(({ type, function: f }) => ({
638
+ type: type,
639
+ function: {
640
+ name: f.name,
641
+ parameters: f.parameters,
642
+ },
643
+ }));
644
+ const chatInstance = tools ? chat.bindTools(tools) : chat;
645
+ const { content, tool_calls } = await chatInstance.invoke(rawMessages.map(({ role, tool_calls, tool_call_id, content }) => {
646
+ if (role === "assistant") {
647
+ return new AIMessage({
648
+ tool_calls: tool_calls?.map(({ function: f, id }) => ({
649
+ id: id,
650
+ name: f.name,
651
+ args: f.arguments,
652
+ })),
653
+ content,
654
+ });
655
+ }
656
+ if (role === "system") {
657
+ return new SystemMessage({
658
+ content,
659
+ });
660
+ }
661
+ if (role === "user") {
662
+ return new HumanMessage({
663
+ content,
664
+ });
665
+ }
666
+ if (role === "developer") {
667
+ return new SystemMessage({
668
+ content,
669
+ });
670
+ }
671
+ if (role === "tool") {
672
+ return new ToolMessage({
673
+ tool_call_id: tool_call_id,
674
+ content,
675
+ });
676
+ }
677
+ return "";
678
+ }), {
679
+ callbacks: [
680
+ {
681
+ handleLLMNewToken(token) {
682
+ event(clientId, "llm-new-token", token);
683
+ },
684
+ },
685
+ ],
686
+ });
687
+ const text = typeof content === "string"
688
+ ? content
689
+ : content
690
+ .filter((part) => part.type === "text")
691
+ .map((c) => c.text)
692
+ .join("");
693
+ await event(clientId, "llm-completion", {
694
+ content: text.trim(),
695
+ agentName,
696
+ });
697
+ const result = {
698
+ content: text,
699
+ mode,
700
+ agentName,
701
+ role: "assistant",
702
+ tool_calls: tool_calls?.map(({ name, id, args }) => ({
703
+ id: id ?? randomString(),
704
+ type: "function",
705
+ function: {
706
+ name,
707
+ arguments: args,
708
+ },
709
+ })),
710
+ };
711
+ // Debug logging
712
+ if (CC_ENABLE_DEBUG) {
713
+ await fs.appendFile("./debug_hf_provider_stream.txt", JSON.stringify({
714
+ params,
715
+ answer: result,
716
+ }, null, 2) + "\n\n");
717
+ }
718
+ return result;
719
+ }
720
+ async getOutlineCompletion(params) {
721
+ const { messages: rawMessages, format } = params;
722
+ this.logger.log("hfProvider getOutlineCompletion", {
723
+ context: this.contextService.context,
724
+ });
725
+ if (Array.isArray(this.contextService.context.apiKey)) {
726
+ throw new Error("Hf provider does not support token rotation");
727
+ }
728
+ // Create tool definition based on format schema
729
+ const schema = "json_schema" in format
730
+ ? get(format, "json_schema.schema", format)
731
+ : format;
732
+ const toolDefinition = {
733
+ type: "function",
734
+ function: {
735
+ name: "provide_answer",
736
+ description: "Provide the answer in the required format",
737
+ parameters: schema,
738
+ },
739
+ };
740
+ // Add system instruction for tool usage
741
+ const systemMessage = {
742
+ role: "system",
743
+ content: "You MUST use the provide_answer tool to deliver the answer. Do NOT reply with plain text. ALWAYS call the provide_answer tool with the correct parameters.",
744
+ };
745
+ const messages = [
746
+ systemMessage,
747
+ ...rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
748
+ role,
749
+ tool_call_id,
750
+ content,
751
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
752
+ ...rest,
753
+ function: {
754
+ name: f.name,
755
+ arguments: JSON.stringify(f.arguments),
756
+ },
757
+ })),
758
+ })),
759
+ ];
760
+ let attempt = 0;
761
+ const addToolRequestMessage = singleshot(() => {
762
+ messages.push({
763
+ role: "user",
764
+ content: "Please use the provide_answer tool to deliver the answer. Do not reply with plain text.",
765
+ });
766
+ });
767
+ while (attempt < MAX_ATTEMPTS$5) {
768
+ const { choices: [{ message }], } = await fetchApi("https://router.huggingface.co/v1/chat/completions", {
769
+ method: "POST",
770
+ headers: {
771
+ "Content-Type": "application/json",
772
+ Authorization: `Bearer ${this.contextService.context.apiKey}`,
773
+ },
774
+ body: JSON.stringify({
775
+ messages,
776
+ model: this.contextService.context.model,
777
+ tools: [toolDefinition],
778
+ tool_choice: {
779
+ type: "function",
780
+ function: { name: "provide_answer" },
781
+ },
782
+ }),
783
+ });
784
+ const { refusal, tool_calls, reasoning_content } = message;
785
+ if (refusal) {
786
+ console.error(`Attempt ${attempt + 1}: Model sent a refusal`);
787
+ attempt++;
788
+ continue;
789
+ }
790
+ if (!tool_calls?.length) {
791
+ console.error(`Attempt ${attempt + 1}: Model did not use the tool, adding user message`);
792
+ addToolRequestMessage();
793
+ attempt++;
794
+ continue;
795
+ }
796
+ if (tool_calls && tool_calls.length > 0) {
797
+ const toolCall = tool_calls[0];
798
+ if (toolCall.function?.name === "provide_answer") {
799
+ // Parse JSON with repair
800
+ let parsedArguments;
801
+ try {
802
+ const json = jsonrepair(toolCall.function.arguments);
803
+ parsedArguments = JSON.parse(json);
804
+ }
805
+ catch (error) {
806
+ console.error(`Attempt ${attempt + 1}: Failed to parse tool arguments:`, error);
807
+ addToolRequestMessage();
808
+ attempt++;
809
+ continue;
810
+ }
811
+ const validation = validateToolArguments(parsedArguments, schema);
812
+ if (!validation.success) {
813
+ console.error(`Attempt ${attempt + 1}: ${validation.error}`);
814
+ addToolRequestMessage();
815
+ attempt++;
816
+ continue;
817
+ }
818
+ set(validation.data, "_thinking", reasoning_content);
819
+ set(validation.data, "_context", this.contextService.context);
820
+ const result = {
821
+ role: "assistant",
822
+ content: JSON.stringify(validation.data),
823
+ };
824
+ // Debug logging
825
+ if (CC_ENABLE_DEBUG) {
826
+ await fs.appendFile("./debug_hf_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
827
+ }
828
+ return result;
829
+ }
830
+ }
831
+ console.error(`Attempt ${attempt + 1}: Model called a tool other than provide_answer`);
832
+ attempt++;
833
+ }
834
+ throw new Error("Model failed to use tool after maximum attempts");
835
+ }
836
+ }
837
+
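HfProvider's outline path establishes the forced-tool pattern that the Ollama, Claude, and Deepseek providers repeat below: demand a provide_answer call, repair and parse its JSON arguments, validate them against the schema, and retry up to a cap. A condensed skeleton of that shared loop (callProvider stands in for each provider's transport and is hypothetical):

```js
while (attempt < MAX_ATTEMPTS) {
  const message = await callProvider(messages); // provider-specific request
  const call = message.tool_calls?.[0];
  if (call?.function?.name !== "provide_answer") {
    addToolRequestMessage(); // singleshot: nudge the model once
    attempt++;
    continue;
  }
  // jsonrepair tolerates the malformed JSON models sometimes emit.
  const args = JSON.parse(jsonrepair(call.function.arguments));
  const validation = validateToolArguments(args, schema);
  if (!validation.success) {
    addToolRequestMessage();
    attempt++;
    continue;
  }
  return { role: "assistant", content: JSON.stringify(validation.data) };
}
throw new Error("Model failed to use tool after maximum attempts");
```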
838
+ class OllamaWrapper {
839
+ constructor(_config) {
840
+ this._config = _config;
841
+ this._chatFn = RoundRobin.create(lib.contextService.context.apiKey, (token) => {
842
+ const ollama = new Ollama({
843
+ ...this._config,
844
+ headers: {
845
+ Authorization: `Bearer ${token}`,
846
+ },
847
+ });
848
+ return async (request) => {
849
+ if (request.stream === true) { // both branches resolve to the same call; the split mirrors ollama.chat's stream/non-stream overloads
850
+ return await ollama.chat(request);
851
+ }
852
+ else {
853
+ return await ollama.chat(request);
854
+ }
855
+ };
856
+ });
857
+ if (!lib.contextService.context.apiKey) {
858
+ throw new Error("OllamaRotate required apiKey[] to process token rotation");
859
+ }
860
+ }
861
+ async chat(request) {
862
+ return await this._chatFn(request);
863
+ }
864
+ }
865
+ const getOllamaRotate = singleshot(() => new OllamaWrapper({
866
+ host: "https://ollama.com",
867
+ }));
868
+
869
+ const getOllama = singleshot(() => {
870
+ const apiKey = lib.contextService.context.apiKey;
871
+ if (Array.isArray(apiKey)) {
872
+ return getOllamaRotate();
873
+ }
874
+ if (!apiKey) {
875
+ return new Ollama();
876
+ }
877
+ return new Ollama({
878
+ host: "https://ollama.com",
879
+ headers: {
880
+ Authorization: `Bearer ${apiKey}`,
881
+ },
882
+ });
883
+ });
884
+
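getOllama therefore resolves to one of three client shapes, keyed purely on context.apiKey: an array routes through the RoundRobin wrapper above, an absent key falls back to a local Ollama daemon, and a single key targets ollama.com with a bearer header. A sketch of the resulting call, with illustrative values:

```js
// context.apiKey: ["k1", "k2"] -> rotated hosted clients
// context.apiKey: undefined    -> local daemon, default host
// context.apiKey: "k1"         -> hosted, single bearer token
const ollama = getOllama();
const res = await ollama.chat({
  model: "llama3.1", // illustrative model id
  messages: [{ role: "user", content: "hello" }],
});
```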
885
+ const MAX_ATTEMPTS$4 = 3;
886
+ class OllamaProvider {
887
+ constructor(contextService, logger) {
888
+ this.contextService = contextService;
889
+ this.logger = logger;
890
+ }
891
+ async getCompletion(params) {
892
+ const { agentName, messages: rawMessages, mode, tools, clientId } = params;
893
+ const ollama = getOllama();
894
+ this.logger.log("ollamaProvider getCompletion", {
895
+ agentName,
896
+ mode,
897
+ clientId,
898
+ context: this.contextService.context,
899
+ });
900
+ const messages = [...rawMessages];
901
+ const response = await ollama.chat({
902
+ model: this.contextService.context.model,
903
+ messages: messages.map((message) => ({
904
+ content: message.content,
905
+ role: message.role,
906
+ tool_calls: message.tool_calls?.map((call) => ({
907
+ function: call.function,
908
+ })),
909
+ })),
910
+ tools,
911
+ });
912
+ const message = response.message;
913
+ const result = {
914
+ ...message,
915
+ tool_calls: response.message.tool_calls?.map((call) => ({
916
+ function: call.function,
917
+ type: "function",
918
+ id: randomString(),
919
+ })),
920
+ mode,
921
+ agentName,
922
+ role: response.message.role,
923
+ };
924
+ // Debug logging
925
+ if (CC_ENABLE_DEBUG) {
926
+ await fs.appendFile("./debug_ollama_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
927
+ }
928
+ return result;
929
+ }
930
+ async getStreamCompletion(params) {
931
+ const { agentName, messages: rawMessages, mode, tools, clientId } = params;
932
+ const ollama = getOllama();
933
+ this.logger.log("ollamaProvider getStreamCompletion", {
934
+ agentName,
935
+ mode,
936
+ clientId,
937
+ context: this.contextService.context,
938
+ });
939
+ const messages = rawMessages.map((message) => ({
940
+ content: message.content,
941
+ role: message.role,
942
+ tool_calls: message.tool_calls?.map((call) => ({
943
+ function: call.function,
944
+ })),
945
+ }));
946
+ let content = "";
947
+ let toolCalls = [];
948
+ // Stream the response
949
+ const stream = await ollama.chat({
950
+ model: this.contextService.context.model,
951
+ messages,
952
+ tools,
953
+ stream: true,
954
+ });
955
+ for await (const chunk of stream) {
956
+ if (chunk.message.tool_calls) {
957
+ // Accumulate tool calls
958
+ for (const tool of chunk.message.tool_calls) {
959
+ toolCalls.push(tool);
960
+ }
961
+ }
962
+ else if (chunk.message.content) {
963
+ // Stream content tokens
964
+ content += chunk.message.content;
965
+ await event(clientId, "llm-new-token", chunk.message.content);
966
+ }
967
+ }
968
+ // Send completion event
969
+ await event(clientId, "llm-completion", {
970
+ content: content.trim(),
971
+ agentName,
972
+ });
973
+ const result = {
974
+ content,
975
+ mode,
976
+ agentName,
977
+ role: "assistant",
978
+ tool_calls: toolCalls.map((call) => ({
979
+ function: call.function,
980
+ type: "function",
981
+ id: randomString(),
982
+ })),
983
+ };
984
+ // Debug logging
985
+ if (CC_ENABLE_DEBUG) {
986
+ await fs.appendFile("./debug_ollama_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
987
+ }
988
+ return result;
989
+ }
990
+ async getOutlineCompletion(params) {
991
+ const { messages: rawMessages, format } = params;
992
+ const ollama = getOllama();
993
+ this.logger.log("ollamaProvider getOutlineCompletion", {
994
+ context: this.contextService.context,
995
+ });
996
+ // Create tool definition based on format schema
997
+ const schema = "json_schema" in format
998
+ ? get(format, "json_schema.schema", format)
999
+ : format;
1000
+ const toolDefinition = {
1001
+ type: "function",
1002
+ function: {
1003
+ name: "provide_answer",
1004
+ description: "Provide the answer in the required format",
1005
+ parameters: schema,
1006
+ },
1007
+ };
1008
+ // Add system instruction for tool usage
1009
+ const systemMessage = {
1010
+ role: "system",
1011
+ content: "You MUST use the provide_answer tool to deliver the answer. Do NOT reply with plain text. ALWAYS call the provide_answer tool with the correct parameters.",
1012
+ };
1013
+ const messages = [
1014
+ systemMessage,
1015
+ ...rawMessages.map(({ role, content }) => ({
1016
+ role,
1017
+ content,
1018
+ })),
1019
+ ];
1020
+ let attempt = 0;
1021
+ const addToolRequestMessage = singleshot(() => {
1022
+ messages.push({
1023
+ role: "user",
1024
+ content: "Please use the provide_answer tool to deliver the answer. Do not reply with plain text.",
1025
+ });
1026
+ });
1027
+ while (attempt < MAX_ATTEMPTS$4) {
1028
+ const response = await ollama.chat({
1029
+ model: this.contextService.context.model,
1030
+ messages,
1031
+ tools: [toolDefinition],
1032
+ });
1033
+ const { tool_calls } = response.message;
1034
+ if (!tool_calls?.length) {
1035
+ console.error(`Attempt ${attempt + 1}: Model did not use the tool, adding user message`);
1036
+ addToolRequestMessage();
1037
+ attempt++;
1038
+ continue;
1039
+ }
1040
+ if (tool_calls && tool_calls.length > 0) {
1041
+ const toolCall = tool_calls[0];
1042
+ if (toolCall.function?.name === "provide_answer") {
1043
+ // Parse JSON with repair
1044
+ let parsedArguments;
1045
+ try {
1046
+ const argumentsString = typeof toolCall.function.arguments === 'string'
1047
+ ? toolCall.function.arguments
1048
+ : JSON.stringify(toolCall.function.arguments);
1049
+ const json = jsonrepair(argumentsString);
1050
+ parsedArguments = JSON.parse(json);
1051
+ }
1052
+ catch (error) {
1053
+ console.error(`Attempt ${attempt + 1}: Failed to parse tool arguments:`, error);
1054
+ addToolRequestMessage();
1055
+ attempt++;
1056
+ continue;
1057
+ }
1058
+ const validation = validateToolArguments(parsedArguments, schema);
1059
+ if (!validation.success) {
1060
+ console.error(`Attempt ${attempt + 1}: ${validation.error}`);
1061
+ addToolRequestMessage();
1062
+ attempt++;
1063
+ continue;
1064
+ }
1065
+ set(validation.data, "_context", this.contextService.context);
1066
+ const result = {
1067
+ role: "assistant",
1068
+ content: JSON.stringify(validation.data),
1069
+ };
1070
+ // Debug logging
1071
+ if (CC_ENABLE_DEBUG) {
1072
+ await fs.appendFile("./debug_ollama_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1073
+ }
1074
+ return result;
1075
+ }
1076
+ }
1077
+ console.error(`Attempt ${attempt + 1}: Model called a tool other than provide_answer`);
1078
+ attempt++;
1079
+ }
1080
+ throw new Error("Model failed to use tool after maximum attempts");
1081
+ }
1082
+ }
1083
+
1084
+ const getClaude = singleshot(() => {
1085
+ const apiKey = lib.contextService.context.apiKey;
1086
+ if (Array.isArray(apiKey)) {
1087
+ getClaude.clear();
1088
+ throw new Error("Claude provider does not support token rotation");
1089
+ }
1090
+ return new OpenAI({
1091
+ baseURL: "https://api.anthropic.com/v1/",
1092
+ apiKey,
1093
+ });
1094
+ });
1095
+
1096
+ const MAX_ATTEMPTS$3 = 5;
1097
+ class GrokProvider { // NOTE: this is the Claude-backed provider; the class name is a carry-over from the Grok provider it was adapted from
1098
+ constructor(contextService, logger) {
1099
+ this.contextService = contextService;
1100
+ this.logger = logger;
1101
+ }
1102
+ async getCompletion(params) {
1103
+ const claude = getClaude();
1104
+ const { clientId, agentName, messages: rawMessages, mode, tools } = params;
1105
+ this.logger.log("claudeProvider getCompletion", {
1106
+ agentName,
1107
+ mode,
1108
+ clientId,
1109
+ context: this.contextService.context,
1110
+ });
1111
+ const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
1112
+ role,
1113
+ tool_call_id,
1114
+ content,
1115
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1116
+ ...rest,
1117
+ function: {
1118
+ name: f.name,
1119
+ arguments: JSON.stringify(f.arguments),
1120
+ },
1121
+ })),
1122
+ }));
1123
+ // Prepare request options
1124
+ const requestOptions = {
1125
+ model: this.contextService.context.model,
1126
+ messages: messages,
1127
+ response_format: {
1128
+ type: "text",
1129
+ },
1130
+ };
1131
+ // Only add tools if they exist and have at least one item
1132
+ if (tools && tools.length > 0) {
1133
+ requestOptions.tools = tools;
1134
+ }
1135
+ const { choices: [{ message: { content, role, tool_calls }, },], } = await claude.chat.completions.create(requestOptions);
1136
+ const result = {
1137
+ content: content,
1138
+ mode,
1139
+ agentName,
1140
+ role,
1141
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1142
+ ...rest,
1143
+ function: {
1144
+ name: f.name,
1145
+ arguments: JSON.parse(f.arguments),
1146
+ },
1147
+ })),
1148
+ };
1149
+ // Debug logging
1150
+ if (CC_ENABLE_DEBUG) {
1151
+ await fs.appendFile("./debug_claude_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1152
+ }
1153
+ return result;
1154
+ }
1155
+ async getStreamCompletion(params) {
1156
+ const openai = getClaude();
1157
+ const { clientId, agentName, messages: rawMessages, mode, tools } = params;
1158
+ this.logger.log("claudeProvider getStreamCompletion", {
1159
+ agentName,
1160
+ mode,
1161
+ clientId,
1162
+ context: this.contextService.context,
1163
+ });
1164
+ // Map raw messages to OpenAI format
1165
+ const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
1166
+ role,
1167
+ tool_call_id,
1168
+ content,
1169
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1170
+ ...rest,
1171
+ function: {
1172
+ name: f.name,
1173
+ arguments: JSON.stringify(f.arguments),
1174
+ },
1175
+ })),
1176
+ }));
1177
+ // Map tools to OpenAI format
1178
+ const formattedTools = tools?.map(({ type, function: f }) => ({
1179
+ type: type,
1180
+ function: {
1181
+ name: f.name,
1182
+ parameters: f.parameters,
1183
+ },
1184
+ }));
1185
+ const { choices: [{ message: { content, role, tool_calls }, },], } = await openai.chat.completions.create({
1186
+ model: this.contextService.context.model,
1187
+ messages: messages,
1188
+ tools: formattedTools,
1189
+ });
1190
+ // Emit events to mimic streaming behavior
1191
+ if (content) {
1192
+ await event(clientId, "llm-completion", {
1193
+ content: content.trim(),
1194
+ agentName,
1195
+ });
1196
+ }
1197
+ const result = {
1198
+ content: content || "",
1199
+ mode,
1200
+ agentName,
1201
+ role,
1202
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1203
+ ...rest,
1204
+ function: {
1205
+ name: f.name,
1206
+ arguments: JSON.parse(f.arguments),
1207
+ },
1208
+ })),
1209
+ };
1210
+ // Debug logging
1211
+ if (CC_ENABLE_DEBUG) {
1212
+ await fs.appendFile("./debug_gpt5_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1213
+ }
1214
+ return result;
1215
+ }
1216
+ async getOutlineCompletion(params) {
1217
+ const { messages: rawMessages, format } = params;
1218
+ const claude = getClaude();
1219
+ this.logger.log("claudeProvider getOutlineCompletion", {
1220
+ context: this.contextService.context,
1221
+ });
1222
+ // Create tool definition based on format schema
1223
+ const schema = "json_schema" in format
1224
+ ? get(format, "json_schema.schema", format)
1225
+ : format;
1226
+ const toolDefinition = {
1227
+ type: "function",
1228
+ function: {
1229
+ name: "provide_answer",
1230
+ description: "Provide the answer in the required format",
1231
+ parameters: schema,
1232
+ },
1233
+ };
1234
+ // Add system instruction for tool usage
1235
+ const systemMessage = {
1236
+ role: "system",
1237
+ content: "You MUST use the provide_answer tool to deliver the answer. Do NOT reply with plain text. ALWAYS call the provide_answer tool with the correct parameters.",
1238
+ };
1239
+ const messages = [
1240
+ systemMessage,
1241
+ ...rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
1242
+ role,
1243
+ tool_call_id,
1244
+ content,
1245
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1246
+ ...rest,
1247
+ function: {
1248
+ name: f.name,
1249
+ arguments: JSON.stringify(f.arguments),
1250
+ },
1251
+ })),
1252
+ })),
1253
+ ];
1254
+ let attempt = 0;
1255
+ const addToolRequestMessage = singleshot(() => {
1256
+ messages.push({
1257
+ role: "user",
1258
+ content: "Please use the provide_answer tool to deliver the answer. Do not reply with plain text.",
1259
+ });
1260
+ });
1261
+ while (attempt < MAX_ATTEMPTS$3) {
1262
+ // Prepare request options
1263
+ const requestOptions = {
1264
+ model: this.contextService.context.model,
1265
+ messages: messages,
1266
+ tools: [toolDefinition],
1267
+ tool_choice: {
1268
+ type: "function",
1269
+ function: { name: "provide_answer" },
1270
+ },
1271
+ };
1272
+ const { choices: [{ message }], } = await claude.chat.completions.create(requestOptions);
1273
+ const { refusal, tool_calls } = message;
1274
+ if (refusal) {
1275
+ console.error(`Attempt ${attempt + 1}: Model sent a refusal`);
1276
+ attempt++;
1277
+ continue;
1278
+ }
1279
+ if (!tool_calls?.length) {
1280
+ console.error(`Attempt ${attempt + 1}: Model did not use the tool, adding user message`);
1281
+ addToolRequestMessage();
1282
+ attempt++;
1283
+ continue;
1284
+ }
1285
+ if (tool_calls && tool_calls.length > 0) {
1286
+ const toolCall = tool_calls[0];
1287
+ if (toolCall.function?.name === "provide_answer") {
1288
+ // Parse JSON with repair
1289
+ let parsedArguments;
1290
+ try {
1291
+ const json = jsonrepair(toolCall.function.arguments);
1292
+ parsedArguments = JSON.parse(json);
1293
+ }
1294
+ catch (error) {
1295
+ console.error(`Attempt ${attempt + 1}: Failed to parse tool arguments:`, error);
1296
+ addToolRequestMessage();
1297
+ attempt++;
1298
+ continue;
1299
+ }
1300
+ const validation = validateToolArguments(parsedArguments, schema);
1301
+ if (!validation.success) {
1302
+ console.error(`Attempt ${attempt + 1}: ${validation.error}`);
1303
+ addToolRequestMessage();
1304
+ attempt++;
1305
+ continue;
1306
+ }
1307
+ set(validation.data, "_context", this.contextService.context);
1308
+ const result = {
1309
+ role: "assistant",
1310
+ content: JSON.stringify(validation.data),
1311
+ };
1312
+ // Debug logging
1313
+ if (CC_ENABLE_DEBUG) {
1314
+ await fs.appendFile("./debug_claude_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1315
+ }
1316
+ return result;
1317
+ }
1318
+ }
1319
+ console.error(`Attempt ${attempt + 1}: Model called a tool other than provide_answer`);
1320
+ attempt++;
1321
+ }
1322
+ throw new Error("Model failed to use tool after maximum attempts");
1323
+ }
1324
+ }
1325
+
1326
+ const getOpenAi = singleshot(() => {
1327
+ const apiKey = lib.contextService.context.apiKey;
1328
+ if (Array.isArray(apiKey)) {
1329
+ getOpenAi.clear();
1330
+ throw new Error("OpenAI provider does not support token rotation");
1331
+ }
1332
+ return new OpenAI({
1333
+ apiKey: apiKey,
1334
+ });
1335
+ });
1336
+
1337
+ class GPT5Provider {
1338
+ constructor(contextService, logger) {
1339
+ this.contextService = contextService;
1340
+ this.logger = logger;
1341
+ }
1342
+ async getCompletion(params) {
1343
+ const openai = getOpenAi();
1344
+ const { clientId, agentName, messages: rawMessages, mode, tools } = params;
1345
+ this.logger.log("gpt5Provider getCompletion", {
1346
+ agentName,
1347
+ mode,
1348
+ clientId,
1349
+ context: this.contextService.context,
1350
+ });
1351
+ // Map raw messages to OpenAI format
1352
+ const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
1353
+ role,
1354
+ tool_call_id,
1355
+ content,
1356
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1357
+ ...rest,
1358
+ function: {
1359
+ name: f.name,
1360
+ arguments: JSON.stringify(f.arguments),
1361
+ },
1362
+ })),
1363
+ }));
1364
+ const { choices: [{ message: { content, role, tool_calls }, },], } = await openai.chat.completions.create({
1365
+ model: this.contextService.context.model,
1366
+ messages: messages,
1367
+ tools: tools,
1368
+ });
1369
+ const result = {
1370
+ content: content,
1371
+ mode,
1372
+ agentName,
1373
+ role,
1374
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1375
+ ...rest,
1376
+ function: {
1377
+ name: f.name,
1378
+ arguments: JSON.parse(f.arguments),
1379
+ },
1380
+ })),
1381
+ };
1382
+ // Debug logging
1383
+ if (CC_ENABLE_DEBUG) {
1384
+ await fs.appendFile("./debug_gpt5_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1385
+ }
1386
+ return result;
1387
+ }
1388
+ async getStreamCompletion(params) {
1389
+ const openai = getOpenAi();
1390
+ const { clientId, agentName, messages: rawMessages, mode, tools } = params;
1391
+ this.logger.log("gpt5Provider getStreamCompletion", {
1392
+ agentName,
1393
+ mode,
1394
+ clientId,
1395
+ context: this.contextService.context,
1396
+ });
1397
+ // Map raw messages to OpenAI format
1398
+ const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
1399
+ role,
1400
+ tool_call_id,
1401
+ content,
1402
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1403
+ ...rest,
1404
+ function: {
1405
+ name: f.name,
1406
+ arguments: JSON.stringify(f.arguments),
1407
+ },
1408
+ })),
1409
+ }));
1410
+ // Map tools to OpenAI format
1411
+ const formattedTools = tools?.map(({ type, function: f }) => ({
1412
+ type: type,
1413
+ function: {
1414
+ name: f.name,
1415
+ parameters: f.parameters,
1416
+ },
1417
+ }));
1418
+ const { choices: [{ message: { content, role, tool_calls }, },], } = await openai.chat.completions.create({
1419
+ model: this.contextService.context.model,
1420
+ messages: messages,
1421
+ tools: formattedTools,
1422
+ });
1423
+ // Emit events to mimic streaming behavior
1424
+ if (content) {
1425
+ await event(clientId, "llm-completion", {
1426
+ content: content.trim(),
1427
+ agentName,
1428
+ });
1429
+ }
1430
+ const result = {
1431
+ content: content || "",
1432
+ mode,
1433
+ agentName,
1434
+ role,
1435
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1436
+ ...rest,
1437
+ function: {
1438
+ name: f.name,
1439
+ arguments: JSON.parse(f.arguments),
1440
+ },
1441
+ })),
1442
+ };
1443
+ // Debug logging
1444
+ if (CC_ENABLE_DEBUG) {
1445
+ await fs.appendFile("./debug_gpt5_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1446
+ }
1447
+ return result;
1448
+ }
1449
+ async getOutlineCompletion(params) {
1450
+ const { messages: rawMessages, format } = params;
1451
+ const openai = getOpenAi();
1452
+ this.logger.log("gpt5Provider getOutlineCompletion", {
1453
+ context: this.contextService.context,
1454
+ });
1455
+ // Map raw messages to OpenAI format
1456
+ const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
1457
+ role,
1458
+ tool_call_id,
1459
+ content,
1460
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1461
+ ...rest,
1462
+ function: {
1463
+ name: f.name,
1464
+ arguments: JSON.stringify(f.arguments),
1465
+ },
1466
+ })),
1467
+ }));
1468
+ // Extract response format
1469
+ const response_format = "json_schema" in format
1470
+ ? format
1471
+ : { type: "json_schema", json_schema: { schema: format } };
1472
+ const completion = await openai.chat.completions.create({
1473
+ messages: messages,
1474
+ model: this.contextService.context.model,
1475
+ response_format: response_format,
1476
+ });
1477
+ const choice = completion.choices[0];
1478
+ if (choice.message.refusal) {
1479
+ throw new Error(choice.message.refusal);
1480
+ }
1481
+ const json = jsonrepair(choice.message.content || "");
1482
+ const result = {
1483
+ role: "assistant",
1484
+ content: json,
1485
+ };
1486
+ // Debug logging
1487
+ if (CC_ENABLE_DEBUG) {
1488
+ await fs.appendFile("./debug_gpt5_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1489
+ }
1490
+ return result;
1491
+ }
1492
+ }
1493
+
1494
+ const getDeepseek = singleshot(() => {
1495
+ const apiKey = lib.contextService.context.apiKey;
1496
+ if (Array.isArray(apiKey)) {
1497
+ getDeepseek.clear();
1498
+ throw new Error("Deepseek provider does not support token rotation");
1499
+ }
1500
+ return new OpenAI({
1501
+ baseURL: "https://api.deepseek.com",
1502
+ apiKey: apiKey,
1503
+ });
1504
+ });
1505
+
1506
+ const MAX_ATTEMPTS$2 = 3;
1507
+ class DeepseekProvider {
1508
+ constructor(contextService, logger) {
1509
+ this.contextService = contextService;
1510
+ this.logger = logger;
1511
+ }
1512
+ async getCompletion(params) {
1513
+ const deepseek = getDeepseek();
1514
+ const { clientId, agentName, messages: rawMessages, mode, tools } = params;
1515
+ this.logger.log("deepseekProvider getCompletion", {
1516
+ agentName,
1517
+ mode,
1518
+ clientId,
1519
+ context: this.contextService.context,
1520
+ });
1521
+ // Map raw messages to OpenAI format
1522
+ const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
1523
+ role,
1524
+ tool_call_id,
1525
+ content,
1526
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1527
+ ...rest,
1528
+ function: {
1529
+ name: f.name,
1530
+ arguments: JSON.stringify(f.arguments),
1531
+ },
1532
+ })),
1533
+ }));
1534
+ const formattedTools = tools?.map(({ type, function: f }) => ({
1535
+ type: type,
1536
+ function: {
1537
+ name: f.name,
1538
+ parameters: f.parameters,
1539
+ },
1540
+ }));
1541
+ const { choices: [{ message: { content, role, tool_calls }, },], } = await deepseek.chat.completions.create({
1542
+ model: this.contextService.context.model,
1543
+ messages: messages,
1544
+ tools: formattedTools?.length ? formattedTools : undefined,
1545
+ });
1546
+ const result = {
1547
+ content: content,
1548
+ mode,
1549
+ agentName,
1550
+ role,
1551
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1552
+ ...rest,
1553
+ function: {
1554
+ name: f.name,
1555
+ arguments: JSON.parse(f.arguments),
1556
+ },
1557
+ })),
1558
+ };
1559
+ // Debug logging
1560
+ if (CC_ENABLE_DEBUG) {
1561
+ await fs.appendFile("./debug_deepseek_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1562
+ }
1563
+ return result;
1564
+ }
1565
+ async getStreamCompletion(params) {
1566
+ const deepseek = getDeepseek();
1567
+ const { clientId, agentName, messages: rawMessages, mode, tools } = params;
1568
+ this.logger.log("deepseekProvider getStreamCompletion", {
1569
+ agentName,
1570
+ mode,
1571
+ clientId,
1572
+ context: this.contextService.context,
1573
+ });
1574
+ // Map raw messages to OpenAI format
1575
+ const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
1576
+ role,
1577
+ tool_call_id,
1578
+ content,
1579
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1580
+ ...rest,
1581
+ function: {
1582
+ name: f.name,
1583
+ arguments: JSON.stringify(f.arguments),
1584
+ },
1585
+ })),
1586
+ }));
1587
+ // Map tools to OpenAI format
1588
+ const formattedTools = tools?.map(({ type, function: f }) => ({
1589
+ type: type,
1590
+ function: {
1591
+ name: f.name,
1592
+ parameters: f.parameters,
1593
+ },
1594
+ }));
1595
+ const { choices: [{ message: { content, role, tool_calls }, },], } = await deepseek.chat.completions.create({
1596
+ model: this.contextService.context.model,
1597
+ messages: messages,
1598
+ tools: formattedTools?.length ? formattedTools : undefined,
1599
+ });
1600
+ // Emit events to mimic streaming behavior
1601
+ if (content) {
1602
+ await event(clientId, "llm-completion", {
1603
+ content: content.trim(),
1604
+ agentName,
1605
+ });
1606
+ }
1607
+ const result = {
1608
+ content: content || "",
1609
+ mode,
1610
+ agentName,
1611
+ role,
1612
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1613
+ ...rest,
1614
+ function: {
1615
+ name: f.name,
1616
+ arguments: JSON.parse(f.arguments),
1617
+ },
1618
+ })),
1619
+ };
1620
+ // Debug logging
1621
+ if (CC_ENABLE_DEBUG) {
1622
+ await fs.appendFile("./debug_deepseek_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1623
+ }
1624
+ return result;
1625
+ }
1626
+ async getOutlineCompletion(params) {
1627
+ const { messages: rawMessages, format } = params;
1628
+ const deepseek = getDeepseek();
1629
+ this.logger.log("deepseekProvider getOutlineCompletion", {
1630
+ context: this.contextService.context,
1631
+ });
1632
+ // Create tool definition based on format schema
1633
+ const schema = "json_schema" in format
1634
+ ? get(format, "json_schema.schema", format)
1635
+ : format;
1636
+ const toolDefinition = {
1637
+ type: "function",
1638
+ function: {
1639
+ name: "provide_answer",
1640
+ description: "Provide the answer in the required format",
1641
+ parameters: schema,
1642
+ },
1643
+ };
1644
+ // Add system instruction for tool usage
1645
+ const systemMessage = {
1646
+ role: "system",
1647
+ content: "You MUST use the provide_answer tool to deliver the answer. Do NOT reply with plain text. ALWAYS call the provide_answer tool with the correct parameters.",
1648
+ };
1649
+ const messages = [
1650
+ systemMessage,
1651
+ ...rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
1652
+ role,
1653
+ tool_call_id,
1654
+ content,
1655
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1656
+ ...rest,
1657
+ function: {
1658
+ name: f.name,
1659
+ arguments: JSON.stringify(f.arguments),
1660
+ },
1661
+ })),
1662
+ })),
1663
+ ];
1664
+ let attempt = 0;
1665
+ const addToolRequestMessage = singleshot(() => {
1666
+ messages.push({
1667
+ role: "user",
1668
+ content: "Please use the provide_answer tool to deliver the answer. Do not reply with plain text.",
1669
+ });
1670
+ });
1671
+ while (attempt < MAX_ATTEMPTS$2) {
1672
+ // Prepare request options
1673
+ const requestOptions = {
1674
+ model: this.contextService.context.model,
1675
+ messages: messages,
1676
+ tools: [toolDefinition],
1677
+ tool_choice: {
1678
+ type: "function",
1679
+ function: { name: "provide_answer" },
1680
+ },
1681
+ };
1682
+ const { choices: [{ message }], } = await deepseek.chat.completions.create(requestOptions);
1683
+ const { refusal, tool_calls } = message;
1684
+ if (refusal) {
1685
+ console.error(`Attempt ${attempt + 1}: Model sent a refusal`);
1686
+ attempt++;
1687
+ continue;
1688
+ }
1689
+ if (!tool_calls?.length) {
1690
+ console.error(`Attempt ${attempt + 1}: Model did not use the tool, adding user message`);
1691
+ addToolRequestMessage();
1692
+ attempt++;
1693
+ continue;
1694
+ }
1695
+ if (tool_calls && tool_calls.length > 0) {
1696
+ const toolCall = tool_calls[0];
1697
+ if (toolCall.function?.name === "provide_answer") {
1698
+ // Parse JSON with repair
1699
+ let parsedArguments;
1700
+ try {
1701
+ const json = jsonrepair(toolCall.function.arguments);
1702
+ parsedArguments = JSON.parse(json);
1703
+ }
1704
+ catch (error) {
1705
+ console.error(`Attempt ${attempt + 1}: Failed to parse tool arguments:`, error);
1706
+ addToolRequestMessage();
1707
+ attempt++;
1708
+ continue;
1709
+ }
1710
+ const validation = validateToolArguments(parsedArguments, schema);
1711
+ if (!validation.success) {
1712
+ console.error(`Attempt ${attempt + 1}: ${validation.error}`);
1713
+ addToolRequestMessage();
1714
+ attempt++;
1715
+ continue;
1716
+ }
1717
+ set(validation.data, "_context", this.contextService.context);
1718
+ const result = {
1719
+ role: "assistant",
1720
+ content: JSON.stringify(validation.data),
1721
+ };
1722
+ // Debug logging
1723
+ if (CC_ENABLE_DEBUG) {
1724
+ await fs.appendFile("./debug_deepseek_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1725
+ }
1726
+ return result;
1727
+ }
1728
+ }
1729
+ console.error(`Attempt ${attempt + 1}: Model called a tool other than provide_answer`);
1730
+ attempt++;
1731
+ }
1732
+ throw new Error("Model failed to use tool after maximum attempts");
1733
+ }
1734
+ }
1735
+
1736
+ const getMistral = singleshot(() => {
1737
+ const apiKey = lib.contextService.context.apiKey;
1738
+ if (Array.isArray(apiKey)) {
1739
+ getMistral.clear();
1740
+ throw new Error("Mistral provider does not support token rotation");
1741
+ }
1742
+ return new OpenAI({
1743
+ baseURL: "https://api.mistral.ai/v1",
1744
+ apiKey: apiKey,
1745
+ });
1746
+ });
1747
+
1748
+ const MAX_ATTEMPTS$1 = 3;
1749
+ class MistralProvider {
1750
+ constructor(contextService, logger) {
1751
+ this.contextService = contextService;
1752
+ this.logger = logger;
1753
+ }
1754
+ async getCompletion(params) {
1755
+ const mistral = getMistral();
1756
+ const { clientId, agentName, messages: rawMessages, mode, tools } = params;
1757
+ this.logger.log("mistralProvider getCompletion", {
1758
+ agentName,
1759
+ mode,
1760
+ clientId,
1761
+ context: this.contextService.context,
1762
+ });
1763
+ // Map raw messages to OpenAI format
1764
+ const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
1765
+ role,
1766
+ tool_call_id,
1767
+ content,
1768
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1769
+ ...rest,
1770
+ function: {
1771
+ name: f.name,
1772
+ arguments: JSON.stringify(f.arguments),
1773
+ },
1774
+ })),
1775
+ }));
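+ // Tool-call arguments are objects in the swarm history but must be JSON strings for the OpenAI-compatible API; they are parsed back in the result below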
1776
+ const formattedTools = tools?.map(({ type, function: f }) => ({
1777
+ type: type,
1778
+ function: {
1779
+ name: f.name,
1780
+ parameters: f.parameters,
1781
+ },
1782
+ }));
1783
+ const { choices: [{ message: { content, role, tool_calls }, },], } = await mistral.chat.completions.create({
1784
+ model: this.contextService.context.model,
1785
+ messages: messages,
1786
+ tools: formattedTools?.length ? formattedTools : undefined,
1787
+ });
1788
+ const result = {
1789
+ content: content,
1790
+ mode,
1791
+ agentName,
1792
+ role,
1793
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1794
+ ...rest,
1795
+ function: {
1796
+ name: f.name,
1797
+ arguments: JSON.parse(f.arguments),
1798
+ },
1799
+ })),
1800
+ };
1801
+ // Debug logging
1802
+ if (CC_ENABLE_DEBUG) {
1803
+ await fs.appendFile("./debug_mistral_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1804
+ }
1805
+ return result;
1806
+ }
1807
+ async getStreamCompletion(params) {
1808
+ const mistral = getMistral();
1809
+ const { clientId, agentName, messages: rawMessages, mode, tools } = params;
1810
+ this.logger.log("mistralProvider getStreamCompletion", {
1811
+ agentName,
1812
+ mode,
1813
+ clientId,
1814
+ context: this.contextService.context,
1815
+ });
1816
+ // Map raw messages to OpenAI format
1817
+ const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
1818
+ role,
1819
+ tool_call_id,
1820
+ content,
1821
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1822
+ ...rest,
1823
+ function: {
1824
+ name: f.name,
1825
+ arguments: JSON.stringify(f.arguments),
1826
+ },
1827
+ })),
1828
+ }));
1829
+ // Map tools to OpenAI format
1830
+ const formattedTools = tools?.map(({ type, function: f }) => ({
1831
+ type: type,
1832
+ function: {
1833
+ name: f.name,
1834
+ parameters: f.parameters,
1835
+ },
1836
+ }));
1837
+ const { choices: [{ message: { content, role, tool_calls }, },], } = await mistral.chat.completions.create({
1838
+ model: this.contextService.context.model,
1839
+ messages: messages,
1840
+ tools: formattedTools?.length ? formattedTools : undefined,
1841
+ });
1842
+ // Emit events to mimic streaming behavior
1843
+ if (content) {
1844
+ await event(clientId, "llm-completion", {
1845
+ content: content.trim(),
1846
+ agentName,
1847
+ });
1848
+ }
1849
+ const result = {
1850
+ content: content || "",
1851
+ mode,
1852
+ agentName,
1853
+ role,
1854
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1855
+ ...rest,
1856
+ function: {
1857
+ name: f.name,
1858
+ arguments: JSON.parse(f.arguments),
1859
+ },
1860
+ })),
1861
+ };
1862
+ // Debug logging
1863
+ if (CC_ENABLE_DEBUG) {
1864
+ await fs.appendFile("./debug_mistral_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1865
+ }
1866
+ return result;
1867
+ }
1868
+ async getOutlineCompletion(params) {
1869
+ const { messages: rawMessages, format } = params;
1870
+ const mistral = getMistral();
1871
+ this.logger.log("mistralProvider getOutlineCompletion", {
1872
+ context: this.contextService.context,
1873
+ });
1874
+ // Create tool definition based on format schema
1875
+ const schema = "json_schema" in format
1876
+ ? get(format, "json_schema.schema", format)
1877
+ : format;
1878
+ const toolDefinition = {
1879
+ type: "function",
1880
+ function: {
1881
+ name: "provide_answer",
1882
+ description: "Предоставить ответ в требуемом формате",
1883
+ parameters: schema,
1884
+ },
1885
+ };
1886
+ // Add system instruction for tool usage
1887
+ const systemMessage = {
1888
+ role: "system",
1889
+ content: "ОБЯЗАТЕЛЬНО используй инструмент provide_answer для предоставления ответа. НЕ отвечай обычным текстом. ВСЕГДА вызывай инструмент provide_answer с правильными параметрами.",
1890
+ };
1891
+ const messages = [
1892
+ systemMessage,
1893
+ ...rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
1894
+ role,
1895
+ tool_call_id,
1896
+ content,
1897
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
1898
+ ...rest,
1899
+ function: {
1900
+ name: f.name,
1901
+ arguments: JSON.stringify(f.arguments),
1902
+ },
1903
+ })),
1904
+ })),
1905
+ ];
1906
+ let attempt = 0;
1907
+ const addToolRequestMessage = singleshot(() => {
1908
+ messages.push({
1909
+ role: "user",
1910
+ content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
1911
+ });
1912
+ });
1913
+ while (attempt < MAX_ATTEMPTS$1) {
1914
+ // Prepare request options
1915
+ const requestOptions = {
1916
+ model: this.contextService.context.model,
1917
+ messages: messages,
1918
+ tools: [toolDefinition],
1919
+ tool_choice: {
1920
+ type: "function",
1921
+ function: { name: "provide_answer" },
1922
+ },
1923
+ };
1924
+ const { choices: [{ message }], } = await mistral.chat.completions.create(requestOptions);
1925
+ const { refusal, tool_calls } = message;
1926
+ if (refusal) {
1927
+ console.error(`Attempt ${attempt + 1}: Model sent a refusal`);
1928
+ attempt++;
1929
+ continue;
1930
+ }
1931
+ if (!tool_calls?.length) {
1932
+ console.error(`Attempt ${attempt + 1}: Model did not use the tool, adding a user message`);
1933
+ addToolRequestMessage();
1934
+ attempt++;
1935
+ continue;
1936
+ }
1937
+ if (tool_calls && tool_calls.length > 0) {
1938
+ const toolCall = tool_calls[0];
1939
+ if (toolCall.function?.name === "provide_answer") {
1940
+ // Parse JSON with repair
1941
+ let parsedArguments;
1942
+ try {
1943
+ const json = jsonrepair(toolCall.function.arguments);
1944
+ parsedArguments = JSON.parse(json);
1945
+ }
1946
+ catch (error) {
1947
+ console.error(`Attempt ${attempt + 1}: Failed to parse tool arguments:`, error);
1948
+ addToolRequestMessage();
1949
+ attempt++;
1950
+ continue;
1951
+ }
1952
+ const validation = validateToolArguments(parsedArguments, schema);
1953
+ if (!validation.success) {
1954
+ console.error(`Attempt ${attempt + 1}: ${validation.error}`);
1955
+ addToolRequestMessage();
1956
+ attempt++;
1957
+ continue;
1958
+ }
1959
+ set(validation.data, "_context", this.contextService.context);
1960
+ const result = {
1961
+ role: "assistant",
1962
+ content: JSON.stringify(validation.data),
1963
+ };
1964
+ // Debug logging
1965
+ if (CC_ENABLE_DEBUG) {
1966
+ await fs.appendFile("./debug_mistral_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
1967
+ }
1968
+ return result;
1969
+ }
1970
+ }
1971
+ console.error(`Attempt ${attempt + 1}: Model called an unexpected tool`);
1972
+ attempt++;
1973
+ }
1974
+ throw new Error("Model failed to use tool after maximum attempts");
1975
+ }
1976
+ }
1977
+
1978
+ const getPerplexity = singleshot(() => {
1979
+ const apiKey = lib.contextService.context.apiKey;
1980
+ if (Array.isArray(apiKey)) {
1981
+ getPerplexity.clear();
1982
+ throw new Error("Perplexity provider does not support token rotation");
1983
+ }
1984
+ return new OpenAI({
1985
+ baseURL: "https://api.perplexity.ai",
1986
+ apiKey: apiKey,
1987
+ });
1988
+ });
1989
+
1990
+ class PerplexityProvider {
1991
+ constructor(contextService, logger) {
1992
+ this.contextService = contextService;
1993
+ this.logger = logger;
1994
+ }
1995
+ async getCompletion(params) {
1996
+ const perplexity = getPerplexity();
1997
+ const { clientId, agentName, messages: rawMessages, mode, tools } = params;
1998
+ this.logger.log("perplexityProvider getCompletion", {
1999
+ agentName,
2000
+ mode,
2001
+ clientId,
2002
+ context: this.contextService.context,
2003
+ });
2004
+ // Keep only user and assistant messages; system prompts are collected into a single message below
2005
+ const messages = rawMessages
2006
+ .filter(({ role }) => role === "user" || role === "assistant")
2007
+ .map(({ role, tool_call_id, tool_calls, content }) => ({
2008
+ role,
2009
+ tool_call_id,
2010
+ content,
2011
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
2012
+ ...rest,
2013
+ function: {
2014
+ name: f.name,
2015
+ arguments: JSON.stringify(f.arguments),
2016
+ },
2017
+ })),
2018
+ }));
2019
+ const systemPrompt = rawMessages
2020
+ .filter(({ role }) => role === "system")
2021
+ .reduce((acm, { content }) => str.newline(acm, content), "");
2022
+ if (systemPrompt) {
2023
+ messages.unshift({
2024
+ role: "system",
2025
+ content: systemPrompt,
2026
+ });
2027
+ }
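+ // Perplexity expects user/assistant turns to alternate after the system prompt, so same-role runs are collapsed below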
2028
+ // Merge consecutive assistant messages
2029
+ for (let i = messages.length - 1; i > 0; i--) {
2030
+ if (messages[i].role === "assistant" &&
2031
+ messages[i - 1].role === "assistant") {
2032
+ messages[i - 1].content = str.newline(messages[i - 1].content, messages[i].content);
2033
+ // Merge tool_calls if they exist
2034
+ if (messages[i].tool_calls || messages[i - 1].tool_calls) {
2035
+ messages[i - 1].tool_calls = [
2036
+ ...(messages[i - 1].tool_calls || []),
2037
+ ...(messages[i].tool_calls || []),
2038
+ ];
2039
+ }
2040
+ messages.splice(i, 1);
2041
+ }
2042
+ }
2043
+ // Merge consecutive user messages
2044
+ for (let i = messages.length - 1; i > 0; i--) {
2045
+ if (messages[i].role === "user" && messages[i - 1].role === "user") {
2046
+ messages[i - 1].content = str.newline(messages[i - 1].content, messages[i].content);
2047
+ messages.splice(i, 1);
2048
+ }
2049
+ }
2050
+ const formattedTools = tools?.map(({ type, function: f }) => ({
2051
+ type: type,
2052
+ function: {
2053
+ name: f.name,
2054
+ description: f.description ?? "", // Perplexity API requires description
2055
+ parameters: f.parameters,
2056
+ },
2057
+ }));
2058
+ const result = await perplexity.chat.completions.create({
2059
+ model: this.contextService.context.model,
2060
+ messages: messages,
2061
+ tools: formattedTools?.length ? formattedTools : undefined,
2062
+ tool_choice: "auto",
2063
+ });
2064
+ const { choices: [{ message: { content, role, tool_calls }, },], } = result;
2065
+ const finalResult = {
2066
+ content: content,
2067
+ mode,
2068
+ agentName,
2069
+ role,
2070
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
2071
+ ...rest,
2072
+ function: {
2073
+ name: f.name,
2074
+ arguments: JSON.parse(f.arguments),
2075
+ },
2076
+ })),
2077
+ };
2078
+ // Debug logging
2079
+ if (CC_ENABLE_DEBUG) {
2080
+ await fs.appendFile("./debug_perplexity_provider.txt", JSON.stringify({ params, answer: finalResult }, null, 2) + "\n\n");
2081
+ }
2082
+ return finalResult;
2083
+ }
2084
+ async getStreamCompletion(params) {
2085
+ const { clientId, agentName, mode } = params;
2086
+ this.logger.log("perplexityProvider getStreamCompletion", {
2087
+ agentName,
2088
+ mode,
2089
+ clientId,
2090
+ context: this.contextService.context,
2091
+ });
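+ // No real streaming here: this path does not support tool calling, so return a fixed assistant message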
2092
+ const result = {
2093
+ content: "Выбранная в настройках языковая модель не поддерживает tool_calling",
2094
+ mode,
2095
+ agentName,
2096
+ role: "assistant",
2097
+ };
2098
+ return result;
2099
+ }
2100
+ async getOutlineCompletion(params) {
2101
+ const { messages: rawMessages, format } = params;
2102
+ const perplexity = getPerplexity();
2103
+ this.logger.log("perplexityProvider getOutlineCompletion", {
2104
+ context: this.contextService.context,
2105
+ });
2106
+ // Keep only user and assistant messages, as in GPT5Provider
2107
+ const messages = rawMessages
2108
+ .filter(({ role }) => role === "user" || role === "assistant")
2109
+ .map(({ role, tool_call_id, tool_calls, content }) => ({
2110
+ role,
2111
+ tool_call_id,
2112
+ content,
2113
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
2114
+ ...rest,
2115
+ function: {
2116
+ name: f.name,
2117
+ arguments: JSON.stringify(f.arguments),
2118
+ },
2119
+ })),
2120
+ }));
2121
+ const systemPrompt = rawMessages
2122
+ .filter(({ role }) => role === "system")
2123
+ .reduce((acm, { content }) => str.newline(acm, content), "");
2124
+ if (systemPrompt) {
2125
+ messages.unshift({
2126
+ role: "system",
2127
+ content: systemPrompt,
2128
+ });
2129
+ }
2130
+ // Merge consecutive assistant messages
2131
+ for (let i = messages.length - 1; i > 0; i--) {
2132
+ if (messages[i].role === "assistant" &&
2133
+ messages[i - 1].role === "assistant") {
2134
+ messages[i - 1].content = str.newline(messages[i - 1].content, messages[i].content);
2135
+ // Merge tool_calls if they exist
2136
+ if (messages[i].tool_calls || messages[i - 1].tool_calls) {
2137
+ messages[i - 1].tool_calls = [
2138
+ ...(messages[i - 1].tool_calls || []),
2139
+ ...(messages[i].tool_calls || []),
2140
+ ];
2141
+ }
2142
+ messages.splice(i, 1);
2143
+ }
2144
+ }
2145
+ // Merge consecutive user messages
2146
+ for (let i = messages.length - 1; i > 0; i--) {
2147
+ if (messages[i].role === "user" && messages[i - 1].role === "user") {
2148
+ messages[i - 1].content = str.newline(messages[i - 1].content, messages[i].content);
2149
+ messages.splice(i, 1);
2150
+ }
2151
+ }
2152
+ // Accept either a full response_format object or a bare JSON schema (wrapped below), as in GPT5Provider
2153
+ const response_format = "json_schema" in format
2154
+ ? format
2155
+ : { type: "json_schema", json_schema: { schema: format } };
2156
+ const completion = await perplexity.chat.completions.create({
2157
+ messages: messages,
2158
+ model: this.contextService.context.model,
2159
+ response_format: response_format,
2160
+ });
2161
+ const choice = completion.choices[0];
2162
+ if (choice.message.refusal) {
2163
+ throw new Error(choice.message.refusal);
2164
+ }
2165
+ const json = jsonrepair(choice.message.content || "");
2166
+ const result = {
2167
+ role: "assistant",
2168
+ content: json,
2169
+ };
2170
+ // Debug logging
2171
+ if (CC_ENABLE_DEBUG) {
2172
+ await fs.appendFile("./debug_perplexity_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
2173
+ }
2174
+ return result;
2175
+ }
2176
+ }
2177
+
2178
+ const getCohere = singleshot(() => {
2179
+ const apiKey = lib.contextService.context.apiKey;
2180
+ if (Array.isArray(apiKey)) {
2181
+ getCohere.clear();
2182
+ throw new Error("Cohere provider does not support token rotation");
2183
+ }
2184
+ return new OpenAI({
2185
+ baseURL: "https://api.cohere.ai/compatibility/v1",
2186
+ apiKey: apiKey,
2187
+ });
2188
+ });
2189
+
2190
+ class CohereProvider {
2191
+ constructor(contextService, logger) {
2192
+ this.contextService = contextService;
2193
+ this.logger = logger;
2194
+ }
2195
+ async getCompletion(params) {
2196
+ const cohere = getCohere();
2197
+ const { clientId, agentName, messages: rawMessages, mode, tools } = params;
2198
+ this.logger.log("cohereProvider getCompletion", {
2199
+ agentName,
2200
+ mode,
2201
+ clientId,
2202
+ context: this.contextService.context,
2203
+ });
2204
+ // Filter messages - INCLUDE TOOL MESSAGES for Cohere, which needs tool results in the history
2205
+ const messages = rawMessages
2206
+ .filter(({ role }) => role === "user" || role === "assistant" || role === "tool")
2207
+ .map(({ role, tool_call_id, tool_calls, content }) => ({
2208
+ role,
2209
+ tool_call_id,
2210
+ content,
2211
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
2212
+ ...rest,
2213
+ function: {
2214
+ name: f.name,
2215
+ arguments: JSON.stringify(f.arguments),
2216
+ },
2217
+ })),
2218
+ }));
2219
+ const systemPrompt = rawMessages
2220
+ .filter(({ role }) => role === "system")
2221
+ .reduce((acm, { content }) => str.newline(acm, content), "");
2222
+ if (systemPrompt) {
2223
+ messages.unshift({
2224
+ role: "system",
2225
+ content: systemPrompt,
2226
+ });
2227
+ }
2228
+ // DO NOT merge consecutive assistant messages in Cohere - breaks tool calling flow
2229
+ // Cohere requires strict tool_calls -> tool_responses sequence
2230
+ // Only merge consecutive user messages (safe)
2231
+ for (let i = messages.length - 1; i > 0; i--) {
2232
+ if (messages[i].role === "user" && messages[i - 1].role === "user") {
2233
+ messages[i - 1].content = str.newline(messages[i - 1].content, messages[i].content);
2234
+ messages.splice(i, 1);
2235
+ }
2236
+ }
2237
+ const formattedTools = tools?.map(({ type, function: f }) => ({
2238
+ type: type,
2239
+ function: {
2240
+ name: f.name,
2241
+ description: f.description ?? "", // Cohere API requires description
2242
+ parameters: f.parameters,
2243
+ },
2244
+ }));
2245
+ const result = await cohere.chat.completions.create({
2246
+ model: this.contextService.context.model,
2247
+ messages: messages,
2248
+ tools: formattedTools?.length ? formattedTools : undefined,
2249
+ tool_choice: "auto",
2250
+ });
2251
+ const { choices: [{ message: { content, role, tool_calls }, },], } = result;
2252
+ const finalResult = {
2253
+ content: content,
2254
+ mode,
2255
+ agentName,
2256
+ role,
2257
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
2258
+ ...rest,
2259
+ function: {
2260
+ name: f.name,
2261
+ arguments: JSON.parse(f.arguments),
2262
+ },
2263
+ })),
2264
+ };
2265
+ // Debug logging
2266
+ if (CC_ENABLE_DEBUG) {
2267
+ await fs.appendFile("./debug_cohere_provider.txt", JSON.stringify({ params, answer: finalResult }, null, 2) + "\n\n");
2268
+ }
2269
+ return finalResult;
2270
+ }
2271
+ async getStreamCompletion(params) {
2272
+ const cohere = getCohere();
2273
+ const { clientId, agentName, messages: rawMessages, mode, tools } = params;
2274
+ this.logger.log("cohereProvider getStreamCompletion", {
2275
+ agentName,
2276
+ mode,
2277
+ clientId,
2278
+ context: this.contextService.context,
2279
+ });
2280
+ // Filter messages - INCLUDE TOOL MESSAGES for Cohere, which needs tool results in the history
2281
+ const messages = rawMessages
2282
+ .filter(({ role }) => role === "user" || role === "assistant" || role === "tool")
2283
+ .map(({ role, tool_call_id, tool_calls, content }) => ({
2284
+ role,
2285
+ tool_call_id,
2286
+ content,
2287
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
2288
+ ...rest,
2289
+ function: {
2290
+ name: f.name,
2291
+ arguments: JSON.stringify(f.arguments),
2292
+ },
2293
+ })),
2294
+ }));
2295
+ const systemPrompt = rawMessages
2296
+ .filter(({ role }) => role === "system")
2297
+ .reduce((acm, { content }) => str.newline(acm, content), "");
2298
+ if (systemPrompt) {
2299
+ messages.unshift({
2300
+ role: "system",
2301
+ content: systemPrompt,
2302
+ });
2303
+ }
2304
+ // DO NOT merge consecutive assistant messages in Cohere - breaks tool calling flow
2305
+ // Cohere requires strict tool_calls -> tool_responses sequence
2306
+ // Merge consecutive user messages
2307
+ for (let i = messages.length - 1; i > 0; i--) {
2308
+ if (messages[i].role === "user" && messages[i - 1].role === "user") {
2309
+ messages[i - 1].content = str.newline(messages[i - 1].content, messages[i].content);
2310
+ messages.splice(i, 1);
2311
+ }
2312
+ }
2313
+ // Map tools to OpenAI format
2314
+ const formattedTools = tools?.map(({ type, function: f }) => ({
2315
+ type: type,
2316
+ function: {
2317
+ name: f.name,
2318
+ description: f.description ?? "", // Cohere API requires description
2319
+ parameters: f.parameters,
2320
+ },
2321
+ }));
2322
+ const completion = await cohere.chat.completions.create({
2323
+ model: this.contextService.context.model,
2324
+ messages: messages,
2325
+ tools: formattedTools?.length ? formattedTools : undefined,
2326
+ tool_choice: "auto",
2327
+ });
2328
+ const { choices: [{ message: { content, role, tool_calls }, },], } = completion;
2329
+ // Emit events to mimic streaming behavior
2330
+ if (content) {
2331
+ await event(clientId, "llm-completion", {
2332
+ content: content.trim(),
2333
+ agentName,
2334
+ });
2335
+ }
2336
+ const result = {
2337
+ content: content || "",
2338
+ mode,
2339
+ agentName,
2340
+ role,
2341
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
2342
+ ...rest,
2343
+ function: {
2344
+ name: f.name,
2345
+ arguments: JSON.parse(f.arguments),
2346
+ },
2347
+ })),
2348
+ };
2349
+ // Debug logging
2350
+ if (CC_ENABLE_DEBUG) {
2351
+ await fs.appendFile("./debug_cohere_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
2352
+ }
2353
+ return result;
2354
+ }
2355
+ async getOutlineCompletion(params) {
2356
+ const { messages: rawMessages, format } = params;
2357
+ const cohere = getCohere();
2358
+ this.logger.log("cohereProvider getOutlineCompletion", {
2359
+ context: this.contextService.context,
2360
+ });
2361
+ // Keep only user and assistant messages, as in GPT5Provider
2362
+ const messages = rawMessages
2363
+ .filter(({ role }) => role === "user" || role === "assistant")
2364
+ .map(({ role, tool_call_id, tool_calls, content }) => ({
2365
+ role,
2366
+ tool_call_id,
2367
+ content,
2368
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
2369
+ ...rest,
2370
+ function: {
2371
+ name: f.name,
2372
+ arguments: JSON.stringify(f.arguments),
2373
+ },
2374
+ })),
2375
+ }));
2376
+ const systemPrompt = rawMessages
2377
+ .filter(({ role }) => role === "system")
2378
+ .reduce((acm, { content }) => str.newline(acm, content), "");
2379
+ if (systemPrompt) {
2380
+ messages.unshift({
2381
+ role: "system",
2382
+ content: systemPrompt,
2383
+ });
2384
+ }
2385
+ // DO NOT merge consecutive assistant messages in Cohere - breaks tool calling flow
2386
+ // Cohere requires strict tool_calls -> tool_responses sequence
2387
+ // Merge consecutive user messages
2388
+ for (let i = messages.length - 1; i > 0; i--) {
2389
+ if (messages[i].role === "user" && messages[i - 1].role === "user") {
2390
+ messages[i - 1].content = str.newline(messages[i - 1].content, messages[i].content);
2391
+ messages.splice(i, 1);
2392
+ }
2393
+ }
2394
+ // Accept either a full response_format object or a bare JSON schema (wrapped below), as in GPT5Provider
2395
+ const response_format = "json_schema" in format
2396
+ ? format
2397
+ : { type: "json_schema", json_schema: { schema: format } };
2398
+ const completion = await cohere.chat.completions.create({
2399
+ messages: messages,
2400
+ model: this.contextService.context.model,
2401
+ response_format: response_format,
2402
+ });
2403
+ const choice = completion.choices[0];
2404
+ if (choice.message.refusal) {
2405
+ throw new Error(choice.message.refusal);
2406
+ }
2407
+ const json = jsonrepair(choice.message.content || "");
2408
+ const result = {
2409
+ role: "assistant",
2410
+ content: json,
2411
+ };
2412
+ // Debug logging
2413
+ if (CC_ENABLE_DEBUG) {
2414
+ await fs.appendFile("./debug_cohere_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
2415
+ }
2416
+ return result;
2417
+ }
2418
+ }
2419
+
2420
+ const MAX_ATTEMPTS = 3;
2421
+ const BASE_URL = "https://dashscope-intl.aliyuncs.com/compatible-mode/v1";
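+ // DashScope's OpenAI-compatible endpoint; requests are sent via fetchApi with Qwen's non-standard enable_thinking flag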
2422
+ class AlibabaProvider {
2423
+ constructor(contextService, logger) {
2424
+ this.contextService = contextService;
2425
+ this.logger = logger;
2426
+ }
2427
+ async getCompletion(params) {
2428
+ const { clientId, agentName, messages: rawMessages, mode, tools } = params;
2429
+ this.logger.log("alibabaProvider getCompletion", {
2430
+ agentName,
2431
+ mode,
2432
+ clientId,
2433
+ context: this.contextService.context,
2434
+ });
2435
+ if (Array.isArray(this.contextService.context.apiKey)) {
2436
+ throw new Error("Alibaba provider does not support token rotation");
2437
+ }
2438
+ // Map raw messages to OpenAI format
2439
+ const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
2440
+ role,
2441
+ tool_call_id,
2442
+ content,
2443
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
2444
+ ...rest,
2445
+ function: {
2446
+ name: f.name,
2447
+ arguments: JSON.stringify(f.arguments),
2448
+ },
2449
+ })),
2450
+ }));
2451
+ const formattedTools = tools?.map(({ type, function: f }) => ({
2452
+ type: type,
2453
+ function: {
2454
+ name: f.name,
2455
+ parameters: f.parameters,
2456
+ },
2457
+ }));
2458
+ // Prepare request body with enable_thinking parameter
2459
+ const requestBody = {
2460
+ model: this.contextService.context.model,
2461
+ messages: messages,
2462
+ tools: formattedTools?.length ? formattedTools : undefined,
2463
+ enable_thinking: false,
2464
+ };
2465
+ // Use fetchApi from functools-kit
2466
+ const responseData = await fetchApi(`${BASE_URL}/chat/completions`, {
2467
+ method: "POST",
2468
+ headers: {
2469
+ "Content-Type": "application/json",
2470
+ "Authorization": `Bearer ${this.contextService.context.apiKey}`,
2471
+ },
2472
+ body: JSON.stringify(requestBody),
2473
+ });
2474
+ const { choices: [{ message: { content, role, tool_calls }, },], } = responseData;
2475
+ const result = {
2476
+ content: content,
2477
+ mode,
2478
+ agentName,
2479
+ role,
2480
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
2481
+ ...rest,
2482
+ function: {
2483
+ name: f.name,
2484
+ arguments: JSON.parse(f.arguments),
2485
+ },
2486
+ })),
2487
+ };
2488
+ // Debug logging
2489
+ if (CC_ENABLE_DEBUG) {
2490
+ await fs.appendFile("./debug_alibaba_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
2491
+ }
2492
+ return result;
2493
+ }
2494
+ async getStreamCompletion(params) {
2495
+ const { clientId, agentName, messages: rawMessages, mode, tools } = params;
2496
+ this.logger.log("alibabaProvider getStreamCompletion", {
2497
+ agentName,
2498
+ mode,
2499
+ clientId,
2500
+ context: this.contextService.context,
2501
+ });
2502
+ if (Array.isArray(this.contextService.context.apiKey)) {
2503
+ throw new Error("Alibaba provider does not support token rotation");
2504
+ }
2505
+ // Map raw messages to OpenAI format
2506
+ const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
2507
+ role,
2508
+ tool_call_id,
2509
+ content,
2510
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
2511
+ ...rest,
2512
+ function: {
2513
+ name: f.name,
2514
+ arguments: JSON.stringify(f.arguments),
2515
+ },
2516
+ })),
2517
+ }));
2518
+ const formattedTools = tools?.map(({ type, function: f }) => ({
2519
+ type: type,
2520
+ function: {
2521
+ name: f.name,
2522
+ parameters: f.parameters,
2523
+ },
2524
+ }));
2525
+ // Prepare request body with enable_thinking parameter
2526
+ const requestBody = {
2527
+ model: this.contextService.context.model,
2528
+ messages: messages,
2529
+ tools: formattedTools?.length ? formattedTools : undefined,
2530
+ enable_thinking: false,
2531
+ };
2532
+ // Use fetchApi from functools-kit
2533
+ const responseData = await fetchApi(`${BASE_URL}/chat/completions`, {
2534
+ method: "POST",
2535
+ headers: {
2536
+ "Content-Type": "application/json",
2537
+ "Authorization": `Bearer ${this.contextService.context.apiKey}`,
2538
+ },
2539
+ body: JSON.stringify(requestBody),
2540
+ });
2541
+ const { choices: [{ message: { content, role, tool_calls }, },], } = responseData;
2542
+ // Emit events to mimic streaming behavior
2543
+ if (content) {
2544
+ await event(clientId, "llm-completion", {
2545
+ content: content.trim(),
2546
+ agentName,
2547
+ });
2548
+ }
2549
+ const result = {
2550
+ content: content,
2551
+ mode,
2552
+ agentName,
2553
+ role,
2554
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
2555
+ ...rest,
2556
+ function: {
2557
+ name: f.name,
2558
+ arguments: JSON.parse(f.arguments),
2559
+ },
2560
+ })),
2561
+ };
2562
+ // Debug logging
2563
+ if (CC_ENABLE_DEBUG) {
2564
+ await fs.appendFile("./debug_alibaba_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
2565
+ }
2566
+ return result;
2567
+ }
2568
+ async getOutlineCompletion(params) {
2569
+ const { messages: rawMessages, format } = params;
2570
+ this.logger.log("alibabaProvider getOutlineCompletion", {
2571
+ context: this.contextService.context,
2572
+ });
2573
+ if (Array.isArray(this.contextService.context.apiKey)) {
2574
+ throw new Error("Alibaba provider does not support token rotation");
2575
+ }
2576
+ // Create tool definition based on format schema
2577
+ const schema = "json_schema" in format
2578
+ ? get(format, "json_schema.schema", format)
2579
+ : format;
2580
+ const toolDefinition = {
2581
+ type: "function",
2582
+ function: {
2583
+ name: "provide_answer",
2584
+ description: "Предоставить ответ в требуемом формате",
2585
+ parameters: schema,
2586
+ },
2587
+ };
2588
+ // Add system instruction for tool usage
2589
+ const systemMessage = {
2590
+ role: "system",
2591
+ content: "ОБЯЗАТЕЛЬНО используй инструмент provide_answer для предоставления ответа. НЕ отвечай обычным текстом. ВСЕГДА вызывай инструмент provide_answer с правильными параметрами.",
2592
+ };
2593
+ const messages = [
2594
+ systemMessage,
2595
+ ...rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
2596
+ role,
2597
+ tool_call_id,
2598
+ content,
2599
+ tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
2600
+ ...rest,
2601
+ function: {
2602
+ name: f.name,
2603
+ arguments: JSON.stringify(f.arguments),
2604
+ },
2605
+ })),
2606
+ })),
2607
+ ];
2608
+ let attempt = 0;
2609
+ const addToolRequestMessage = singleshot(() => {
2610
+ messages.push({
2611
+ role: "user",
2612
+ content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
2613
+ });
2614
+ });
2615
+ while (attempt < MAX_ATTEMPTS) {
2616
+ // Prepare request body with enable_thinking parameter
2617
+ const requestBody = {
2618
+ model: this.contextService.context.model,
2619
+ messages: messages,
2620
+ tools: [toolDefinition],
2621
+ tool_choice: {
2622
+ type: "function",
2623
+ function: { name: "provide_answer" },
2624
+ },
2625
+ enable_thinking: false,
2626
+ };
2627
+ // Use fetchApi from functools-kit
2628
+ const responseData = await fetchApi(`${BASE_URL}/chat/completions`, {
2629
+ method: "POST",
2630
+ headers: {
2631
+ "Content-Type": "application/json",
2632
+ "Authorization": `Bearer ${this.contextService.context.apiKey}`,
2633
+ },
2634
+ body: JSON.stringify(requestBody),
2635
+ });
2636
+ const { choices: [{ message }], } = responseData;
2637
+ const { refusal, tool_calls } = message;
2638
+ if (refusal) {
2639
+ console.error(`Attempt ${attempt + 1}: Model sent a refusal`);
2640
+ attempt++;
2641
+ continue;
2642
+ }
2643
+ if (!tool_calls?.length) {
2644
+ console.error(`Attempt ${attempt + 1}: Model did not use the tool, adding a user message`);
2645
+ addToolRequestMessage();
2646
+ attempt++;
2647
+ continue;
2648
+ }
2649
+ if (tool_calls && tool_calls.length > 0) {
2650
+ const toolCall = tool_calls[0];
2651
+ if (toolCall.function?.name === "provide_answer") {
2652
+ // Parse JSON with repair
2653
+ let parsedArguments;
2654
+ try {
2655
+ const json = jsonrepair(toolCall.function.arguments);
2656
+ parsedArguments = JSON.parse(json);
2657
+ }
2658
+ catch (error) {
2659
+ console.error(`Attempt ${attempt + 1}: Failed to parse tool arguments:`, error);
2660
+ addToolRequestMessage();
2661
+ attempt++;
2662
+ continue;
2663
+ }
2664
+ const validation = validateToolArguments(parsedArguments, schema);
2665
+ if (!validation.success) {
2666
+ console.error(`Attempt ${attempt + 1}: ${validation.error}`);
2667
+ addToolRequestMessage();
2668
+ attempt++;
2669
+ continue;
2670
+ }
2671
+ set(validation.data, "_context", this.contextService.context);
2672
+ const result = {
2673
+ role: "assistant",
2674
+ content: JSON.stringify(validation.data),
2675
+ };
2676
+ // Debug logging
2677
+ if (CC_ENABLE_DEBUG) {
2678
+ await fs.appendFile("./debug_alibaba_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
2679
+ }
2680
+ return result;
2681
+ }
2682
+ }
2683
+ console.error(`Attempt ${attempt + 1}: Model called an unexpected tool`);
2684
+ attempt++;
2685
+ }
2686
+ throw new Error("Model failed to use tool after maximum attempts");
2687
+ }
2688
+ }
2689
+
2690
+ const commonServices = {
2691
+ loggerService: inject(TYPES.loggerService),
2692
+ };
2693
+ const baseServices = {
2694
+ contextService: inject(TYPES.contextService),
2695
+ };
2696
+ const privateServices = {
2697
+ runnerPrivateService: inject(TYPES.runnerPrivateService),
2698
+ outlinePrivateService: inject(TYPES.outlinePrivateService),
2699
+ };
2700
+ const publicServices = {
2701
+ runnerPublicService: inject(TYPES.runnerPublicService),
2702
+ outlinePublicService: inject(TYPES.outlinePublicService),
2703
+ };
2704
+ const engine = {
2705
+ ...commonServices,
2706
+ ...baseServices,
2707
+ ...privateServices,
2708
+ ...publicServices,
2709
+ };
2710
+ init();
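+ // Register a provider implementation for each supported InferenceName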
2711
+ {
2712
+ engine.runnerPrivateService.registerRunner(InferenceName.OllamaInference, OllamaProvider);
2713
+ engine.runnerPrivateService.registerRunner(InferenceName.GrokInference, GrokProvider$1);
2714
+ engine.runnerPrivateService.registerRunner(InferenceName.HfInference, HfProvider);
2715
+ engine.runnerPrivateService.registerRunner(InferenceName.ClaudeInference, GrokProvider);
2716
+ engine.runnerPrivateService.registerRunner(InferenceName.GPT5Inference, GPT5Provider);
2717
+ engine.runnerPrivateService.registerRunner(InferenceName.DeepseekInference, DeepseekProvider);
2718
+ engine.runnerPrivateService.registerRunner(InferenceName.MistralInference, MistralProvider);
2719
+ engine.runnerPrivateService.registerRunner(InferenceName.PerplexityInference, PerplexityProvider);
2720
+ engine.runnerPrivateService.registerRunner(InferenceName.CohereInference, CohereProvider);
2721
+ engine.runnerPrivateService.registerRunner(InferenceName.AlibabaInference, AlibabaProvider);
2722
+ }
2723
+ Object.assign(globalThis, { engine });
2724
+ var lib = engine;
2725
+
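+ // Register the swarm completions: outline (JSON mode), stream, and plain chat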
2726
+ addCompletion({
2727
+ completionName: CompletionName.RunnerOutlineCompletion,
2728
+ getCompletion: async (params) => {
2729
+ return await engine.runnerPrivateService.getOutlineCompletion(params);
2730
+ },
2731
+ json: true,
2732
+ });
2733
+
2734
+ addCompletion({
2735
+ completionName: CompletionName.RunnerStreamCompletion,
2736
+ getCompletion: async (params) => {
2737
+ return await engine.runnerPrivateService.getStreamCompletion(params);
2738
+ },
2739
+ });
2740
+
2741
+ addCompletion({
2742
+ completionName: CompletionName.RunnerCompletion,
2743
+ getCompletion: async (params) => {
2744
+ return await engine.runnerPrivateService.getCompletion(params);
2745
+ },
2746
+ });
2747
+
2748
+ const SignalSchema = z.object({
2749
+ position: z
2750
+ .enum(["long", "short", "wait"])
2751
+ .describe(str.newline("Position direction (ALWAYS required):", "long: market shows consistent bullish signals, uptrend or growth potential", "short: market shows consistent bearish signals, downtrend or decline potential", "wait: conflicting signals between timeframes OR unfavorable trading conditions")),
2752
+ price_open: z
2753
+ .number()
2754
+ .describe(str.newline("Position opening price in USD", "Use the current market price at the time of analysis")),
2755
+ price_stop_loss: z
2756
+ .number()
2757
+ .describe(str.newline("Stop-loss price in USD", "For LONG: price below price_open (protection against decline)", "For SHORT: price above price_open (protection against rise)", "NEVER set SL in 'empty space' without technical justification")),
2758
+ price_take_profit: z
2759
+ .number()
2760
+ .describe(str.newline("Take-profit price in USD", "For LONG: price above price_open (growth target)", "For SHORT: price below price_open (decline target)", "NEVER set TP based on trend without technical justification")),
2761
+ minute_estimated_time: z
2762
+ .number()
2763
+ .describe(str.newline("Estimated time to reach Take Profit in minutes", "Calculated based on HONEST technical analysis, using:", "ATR, ADX, MACD, Momentum, Slope and other metrics")),
2764
+ risk_note: z
2765
+ .string()
2766
+ .describe(str.newline("Description of current market situation risks:", "", "Analyze and specify applicable risks:", "1. Whale manipulations (volume spikes, long shadows, pin bars, candle engulfing, false breakouts)", "2. Order book (order book walls, spoofing, bid/ask imbalance, low liquidity)", "3. P&L history (recurring mistakes on similar patterns)", "4. Time factors (trading session, low liquidity, upcoming events)", "5. Correlations (overall market trend, conflicting trends across timeframes)", "6. Technical risks (indicator divergences, weak volumes, critical levels)", "7. Gaps and anomalies (price gaps, unfilled gaps, movements without volume)", "", "Provide SPECIFIC numbers, percentages and probabilities.")),
2767
+ });
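+ // Illustrative payload that satisfies SignalSchema and its validators (values are hypothetical):
+ // { position: "long", price_open: 65000, price_stop_loss: 64200,
+ //   price_take_profit: 66500, minute_estimated_time: 120,
+ //   risk_note: "Bid/ask imbalance 2:1; low weekend liquidity; ATR 0.8%" }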
2768
+
2769
+ addOutline({
2770
+ outlineName: OutlineName.SignalOutline,
2771
+ completion: CompletionName.RunnerOutlineCompletion,
2772
+ format: zodResponseFormat(SignalSchema, "position_open_decision"),
2773
+ getOutlineHistory: async ({ history, param: messages = [] }) => {
2774
+ await history.push(messages);
2775
+ },
2776
+ validations: [
2777
+ {
2778
+ validate: ({ data }) => {
2779
+ if (!data.position) {
2780
+ throw new Error("The position field is not filled");
2781
+ }
2782
+ },
2783
+ docDescription: "Validates that position direction (long/short/wait) is specified.",
2784
+ },
2785
+ {
2786
+ validate: ({ data }) => {
2787
+ if (!data.risk_note) {
2788
+ throw new Error("The risk_note field is not filled");
2789
+ }
2790
+ },
2791
+ docDescription: "Validates that risk description is provided.",
2792
+ },
2793
+ {
2794
+ validate: ({ data }) => {
2795
+ if (!data.price_open || data.price_open <= 0) {
2796
+ throw new Error("The price_open field must contain a positive price");
2797
+ }
2798
+ },
2799
+ docDescription: "Validates that opening price is specified and positive.",
2800
+ },
2801
+ {
2802
+ validate: ({ data }) => {
2803
+ if (data.position !== "wait" &&
2804
+ (!data.price_stop_loss || data.price_stop_loss <= 0)) {
2805
+ throw new Error("When position='long' or 'short', the price_stop_loss field is required and must be positive");
2806
+ }
2807
+ },
2808
+ docDescription: "Validates that stop-loss is specified when opening a position.",
2809
+ },
2810
+ {
2811
+ validate: ({ data }) => {
2812
+ if (data.position !== "wait" &&
2813
+ (!data.price_take_profit || data.price_take_profit <= 0)) {
2814
+ throw new Error("When position='long' or 'short', the price_take_profit field is required and must be positive");
2815
+ }
2816
+ },
2817
+ docDescription: "Validates that take-profit is specified when opening a position.",
2818
+ },
2819
+ {
2820
+ validate: ({ data }) => {
2821
+ if (data.position === "long") {
2822
+ if (data.price_stop_loss >= data.price_open) {
2823
+ throw new Error("For LONG position, price_stop_loss must be below price_open");
2824
+ }
2825
+ if (data.price_take_profit <= data.price_open) {
2826
+ throw new Error("For LONG position, price_take_profit must be above price_open");
2827
+ }
2828
+ }
2829
+ },
2830
+ docDescription: "Validates price correctness for LONG position.",
2831
+ },
2832
+ {
2833
+ validate: ({ data }) => {
2834
+ if (data.position === "short") {
2835
+ if (data.price_stop_loss <= data.price_open) {
2836
+ throw new Error("For SHORT position, price_stop_loss must be above price_open");
2837
+ }
2838
+ if (data.price_take_profit >= data.price_open) {
2839
+ throw new Error("For SHORT position, price_take_profit must be below price_open");
2840
+ }
2841
+ }
2842
+ },
2843
+ docDescription: "Validates price correctness for SHORT position.",
2844
+ },
2845
+ {
2846
+ validate: ({ data }) => {
2847
+ if (data.position !== "wait" &&
2848
+ (!data.minute_estimated_time || data.minute_estimated_time <= 0)) {
2849
+ throw new Error("When position='long' or 'short', the minute_estimated_time field is required and must be positive");
2850
+ }
2851
+ },
2852
+ docDescription: "Validates that estimated time to TP is specified when opening a position.",
2853
+ },
2854
+ {
2855
+ validate: ({ data }) => {
2856
+ if (data.position !== "wait" && data.minute_estimated_time > 360) {
2857
+ throw new Error("Estimated time to reach TP exceeds 6 hours (360 minutes). Use position='wait' for low volatility conditions");
2858
+ }
2859
+ },
2860
+ docDescription: "Validates that estimated time to reach TP does not exceed 6 hours.",
2861
+ },
2862
+ ],
2863
+ });
2864
+
2865
+ validate({
2866
+ CompletionName: CompletionName$1,
2867
+ OutlineName: OutlineName$1,
2868
+ });
2869
+
2870
+ const ollama = async (messages, model, apiKey) => {
2871
+ return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.OllamaInference, model, apiKey);
2872
+ };
2873
+ const grok = async (messages, model, apiKey) => {
2874
+ return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.GrokInference, model, apiKey);
2875
+ };
2876
+ const hf = async (messages, model, apiKey) => {
2877
+ return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.HfInference, model, apiKey);
2878
+ };
2879
+ const claude = async (messages, model, apiKey) => {
2880
+ return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.ClaudeInference, model, apiKey);
2881
+ };
2882
+ const gpt5 = async (messages, model, apiKey) => {
2883
+ return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.GPT5Inference, model, apiKey);
2884
+ };
2885
+ const deepseek = async (messages, model, apiKey) => {
2886
+ return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.DeepseekInference, model, apiKey);
2887
+ };
2888
+ const mistral = async (messages, model, apiKey) => {
2889
+ return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.MistralInference, model, apiKey);
2890
+ };
2891
+ const perplexity = async (messages, model, apiKey) => {
2892
+ return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.PerplexityInference, model, apiKey);
2893
+ };
2894
+ const cohere = async (messages, model, apiKey) => {
2895
+ return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.CohereInference, model, apiKey);
2896
+ };
2897
+ const alibaba = async (messages, model, apiKey) => {
2898
+ return await lib.outlinePublicService.getCompletion(messages, InferenceName$1.AlibabaInference, model, apiKey);
2899
+ };
2900
+
2901
+ const setLogger = (logger) => {
2902
+ lib.loggerService.setLogger(logger);
2903
+ };
2904
+
2905
+ export { alibaba, claude, cohere, deepseek, gpt5, grok, hf, engine as lib, mistral, ollama, perplexity, setLogger };
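+
+ // Usage sketch (hypothetical model name, key, and message shape; each exported helper
+ // takes (messages, model, apiKey) and resolves through outlinePublicService):
+ //
+ //   import { ollama, setLogger } from '@backtest-kit/ollama';
+ //   setLogger(console);
+ //   const answer = await ollama(
+ //     [{ role: "user", content: "Analyze BTCUSDT and decide on a position" }],
+ //     "llama3.1",
+ //     "",
+ //   );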