@backtest-kit/ollama 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,2918 @@
1
+ 'use strict';
2
+
3
+ var agentSwarmKit = require('agent-swarm-kit');
4
+ var diScoped = require('di-scoped');
5
+ var diKit = require('di-kit');
6
+ var MarkdownIt = require('markdown-it');
7
+ var sanitizeHtml = require('sanitize-html');
8
+ var promise = require('markdownlint/promise');
9
+ var markdownlint = require('markdownlint');
10
+ var functoolsKit = require('functools-kit');
11
+ var OpenAI = require('openai');
12
+ var messages = require('@langchain/core/messages');
13
+ var xai = require('@langchain/xai');
14
+ var jsonrepair = require('jsonrepair');
15
+ var fs = require('fs/promises');
16
+ var inference = require('@huggingface/inference');
17
+ var openai = require('@langchain/openai');
18
+ var lodashEs = require('lodash-es');
19
+ var ollama$1 = require('ollama');
20
+ var zod$1 = require('openai/helpers/zod');
21
+ var zod = require('zod');
22
+
23
// String-enum of completion identifiers registered with agent-swarm-kit.
// (TypeScript string enums produce no reverse mapping, only forward keys.)
var CompletionName;
(function (names) {
    names.RunnerCompletion = "runner_completion";
    names.RunnerStreamCompletion = "runner_stream_completion";
    names.RunnerOutlineCompletion = "runner_outline_completion";
})(CompletionName || (CompletionName = {}));
var CompletionName$1 = CompletionName;
30
+
31
// Request-scoped DI holder: di-scoped's `scoped` wraps the class so each
// `ContextService.runInContext(fn, context)` call (used by the public services
// below) constructs a fresh instance carrying that call's execution context
// (inference name, model, apiKey).
const ContextService = diScoped.scoped(class {
    constructor(context) {
        this.context = context;
    }
});
36
+
37
// Logger that swallows everything; installed until a real backend is injected.
const NOOP_LOGGER = {
    log: () => {},
    debug: () => {},
    info: () => {},
    warn: () => {},
};
/**
 * Facade that forwards log/debug/info/warn calls to a pluggable backend.
 * Starts out wired to NOOP_LOGGER so logging is always safe to call,
 * even before setLogger() has been invoked.
 */
class LoggerService {
    _commonLogger = NOOP_LOGGER;
    log = async (topic, ...args) => {
        await this._commonLogger.log(topic, ...args);
    };
    debug = async (topic, ...args) => {
        await this._commonLogger.debug(topic, ...args);
    };
    info = async (topic, ...args) => {
        await this._commonLogger.info(topic, ...args);
    };
    warn = async (topic, ...args) => {
        await this._commonLogger.warn(topic, ...args);
    };
    // Swap in a concrete logger implementation at runtime.
    setLogger = (logger) => {
        this._commonLogger = logger;
    };
}
67
+
68
+ const { provide, inject, init, override } = diKit.createActivator("ollama");
69
+
70
// DI symbol keys, grouped by service layer.
const commonServices$1 = {
    loggerService: Symbol("loggerService"),
};
const baseServices$1 = {
    contextService: Symbol("contextService"),
};
const privateServices$1 = {
    runnerPrivateService: Symbol("runnerPrivateService"),
    outlinePrivateService: Symbol("outlinePrivateService"),
};
const publicServices$1 = {
    runnerPublicService: Symbol("runnerPublicService"),
    outlinePublicService: Symbol("outlinePublicService"),
};
// Flat lookup map consumed by the provide()/inject() calls throughout the module.
const TYPES = Object.assign(
    {},
    commonServices$1,
    baseServices$1,
    privateServices$1,
    publicServices$1,
);
90
+
91
// String-enum of outline (structured JSON) schema identifiers.
var OutlineName;
(function (names) {
    names.SignalOutline = "signal_outline";
})(OutlineName || (OutlineName = {}));
var OutlineName$1 = OutlineName;
96
+
97
/**
 * Lint a markdown string and apply whatever auto-fixes markdownlint can
 * produce. Returns "" for empty/nullish input and the original text when
 * there is nothing to fix (or the fixer yields a falsy result).
 */
const toLintMarkdown = async (content) => {
    if (!content) {
        return "";
    }
    const lintResult = await promise.lint({ strings: { content } });
    const errors = lintResult.content;
    if (errors.length === 0) {
        return content;
    }
    // applyFixes may return a falsy value when nothing was fixable.
    const fixed = markdownlint.applyFixes(content, errors);
    return fixed || content;
};
// NOTE(review): exported onto globalThis — presumably consumed by external
// tooling; confirm the global is actually used before removing.
globalThis.toLintMarkdown = toLintMarkdown;
109
+
110
/**
 * Convert (possibly messy) markdown into Telegram-safe plain-ish text:
 * lint/fix the markdown, render it to HTML with markdown-it, then strip
 * everything except Telegram's supported HTML subset and collapse blank lines.
 *
 * @param {string} content - Raw markdown; falsy input yields "".
 * @returns {Promise<string>} Sanitized, trimmed text.
 */
const toPlainString = async (content) => {
    if (!content) {
        return "";
    }
    // Normalize the markdown first so the renderer sees valid input.
    const markdown = await toLintMarkdown(content);
    const md = new MarkdownIt({
        html: false,
        breaks: true,
        linkify: true,
        typographer: true,
    });
    let telegramHtml = md.render(markdown);
    telegramHtml = sanitizeHtml(telegramHtml, {
        // Whitelist mirrors Telegram's supported HTML tags.
        allowedTags: [
            "b",
            "i",
            "a",
            "code",
            "pre",
            "s",
            "u",
            "tg-spoiler",
            "blockquote",
            "br",
        ],
        allowedAttributes: {
            a: ["href"],
        },
        // NOTE(review): transformTags mixes "" (rename-to-empty) and functions
        // returning "" — this relies on sanitize-html's handling of an empty
        // tag name to effectively drop the wrapper while keeping inner text;
        // confirm against the sanitize-html version in use.
        transformTags: {
            h1: "",
            h2: "",
            h3: "",
            h4: "",
            h5: "",
            h6: "",
            a: "",
            strong: "",
            em: "",
            p: () => "",
            ul: () => "",
            li: () => "• ",
            ol: () => "",
            hr: () => "\n",
            br: () => "\n",
            div: () => "",
        },
    });
    // Collapse runs of blank/whitespace-only lines into single newlines.
    return telegramHtml.replaceAll(/\n[\s\n]*\n/g, "\n").trim();
};
159
+
160
/**
 * Private service that requests a structured trading-signal outline from
 * agent-swarm-kit and normalizes the snake_case payload into the camelCase
 * signal DTO. Returns null when the model answers "wait" (no signal yet).
 */
class OutlinePrivateService {
    constructor() {
        this.loggerService = inject(TYPES.loggerService);
        this.getCompletion = async (messages) => {
            this.loggerService.log("outlinePrivateService getCompletion", {
                messages,
            });
            const outline = await agentSwarmKit.json(OutlineName$1.SignalOutline, messages);
            if (outline.error) {
                throw new Error(outline.error);
            }
            const { data, resultId } = outline;
            // "wait" means the model declined to produce a signal right now.
            if (data.position === "wait") {
                return null;
            }
            return {
                id: resultId,
                position: data.position,
                minuteEstimatedTime: Number(data.minute_estimated_time),
                priceStopLoss: Number(data.price_stop_loss),
                priceTakeProfit: Number(data.price_take_profit),
                note: await toPlainString(data.risk_note),
                priceOpen: Number(data.price_open),
            };
        };
    }
}
186
+
187
/**
 * Routes completion requests to the runner registered for the inference
 * provider named in the current execution context. Runner instances are
 * memoized per inference name so each provider is constructed once.
 */
class RunnerPrivateService {
    constructor() {
        this.contextService = inject(TYPES.contextService);
        this.loggerService = inject(TYPES.loggerService);
        this._registry = new functoolsKit.ToolRegistry("runner_registry");
        // One lazily-created runner instance per inference name.
        this.getRunner = functoolsKit.memoize(([inference]) => `${inference}`, (inference) => {
            const RunnerCtor = this._registry.get(inference);
            return new RunnerCtor(this.contextService, this.loggerService);
        });
        this.getCompletion = async (params) => {
            this.loggerService.log("runnerPrivateService getCompletion");
            const runner = this.getRunner(this.contextService.context.inference);
            return await runner.getCompletion(params);
        };
        this.getStreamCompletion = async (params) => {
            this.loggerService.log("runnerPrivateService getStreamCompletion");
            const runner = this.getRunner(this.contextService.context.inference);
            return await runner.getStreamCompletion(params);
        };
        this.getOutlineCompletion = async (params) => {
            this.loggerService.log("runnerPrivateService getOutlineCompletion");
            const runner = this.getRunner(this.contextService.context.inference);
            return await runner.getOutlineCompletion(params);
        };
        // ToolRegistry.register is copy-on-write: it returns a new registry.
        this.registerRunner = (name, runner) => {
            this._registry = this._registry.register(name, runner);
        };
    }
}
216
+
217
/**
 * Public wrapper around OutlinePrivateService: establishes the scoped
 * execution context (inference/model/apiKey) before delegating.
 */
class OutlinePublicService {
    constructor() {
        this.loggerService = inject(TYPES.loggerService);
        this.outlinePrivateService = inject(TYPES.outlinePrivateService);
        this.getCompletion = async (messages, inference, model, apiKey) => {
            // SECURITY FIX: the raw apiKey used to be logged here verbatim,
            // leaking credentials into log sinks. Log only whether one exists.
            this.loggerService.log("outlinePublicService getCompletion", {
                messages,
                model,
                hasApiKey: apiKey != null,
                inference,
            });
            return await ContextService.runInContext(async () => {
                return await this.outlinePrivateService.getCompletion(messages);
            }, {
                apiKey: apiKey,
                inference,
                model,
            });
        };
    }
}
238
+
239
/**
 * Public runner facade: wraps every private-service call in the caller's
 * execution context so downstream services can read it via ContextService.
 */
class RunnerPublicService {
    constructor() {
        this.runnerPrivateService = inject(TYPES.runnerPrivateService);
        this.loggerService = inject(TYPES.loggerService);
        this.getCompletion = async (params, context) => {
            this.loggerService.log("runnerPublicService getCompletion");
            const run = async () => await this.runnerPrivateService.getCompletion(params);
            return await ContextService.runInContext(run, context);
        };
        this.getStreamCompletion = async (params, context) => {
            this.loggerService.log("runnerPublicService getStreamCompletion");
            const run = async () => await this.runnerPrivateService.getStreamCompletion(params);
            return await ContextService.runInContext(run, context);
        };
        this.getOutlineCompletion = async (params, context) => {
            this.loggerService.log("runnerPublicService getOutlineCompletion");
            const run = async () => await this.runnerPrivateService.getOutlineCompletion(params);
            return await ContextService.runInContext(run, context);
        };
    }
}
263
+
264
// DI wiring: register a factory for every service symbol.
// Common services
provide(TYPES.loggerService, () => new LoggerService());
// Base services
provide(TYPES.contextService, () => new ContextService());
// Private services
provide(TYPES.runnerPrivateService, () => new RunnerPrivateService());
provide(TYPES.outlinePrivateService, () => new OutlinePrivateService());
// Public services
provide(TYPES.runnerPublicService, () => new RunnerPublicService());
provide(TYPES.outlinePublicService, () => new OutlinePublicService());
278
+
279
// String-enum of every supported inference backend identifier.
var InferenceName;
(function (names) {
    names.OllamaInference = "ollama_inference";
    names.GrokInference = "grok_inference";
    names.HfInference = "hf_inference";
    names.ClaudeInference = "claude_inference";
    names.GPT5Inference = "gpt5_inference";
    names.DeepseekInference = "deepseek_inference";
    names.MistralInference = "mistral_inference";
    names.PerplexityInference = "perplexity_inference";
    names.CohereInference = "cohere_inference";
    names.AlibabaInference = "alibaba_inference";
})(InferenceName || (InferenceName = {}));
var InferenceName$1 = InferenceName;
293
+
294
/**
 * Lazily build (and cache via singleshot) an OpenAI client pointed at the
 * x.ai endpoint. Token rotation (an apiKey array) is not supported; the
 * singleshot cache is cleared before throwing so a later call made with a
 * single key can succeed.
 */
const getGrok = functoolsKit.singleshot(() => {
    const { apiKey } = lib.contextService.context;
    if (Array.isArray(apiKey)) {
        getGrok.clear();
        throw new Error("Grok provider does not support token rotation");
    }
    return new OpenAI({
        baseURL: "https://api.x.ai/v1",
        apiKey: apiKey,
    });
});
305
+
306
+ const CC_ENABLE_DEBUG = "CC_ENABLE_DEBUG" in process.env ? !!parseInt(process.env.CC_ENABLE_DEBUG) : false;
307
+
308
/**
 * ChatXAI subclass that replaces token counting with a cheap heuristic
 * (~4 characters per token; estimate only — avoids loading a tokenizer).
 * Non-string content counts as 0 tokens.
 */
class CustomChat extends xai.ChatXAI {
    async getNumTokens(content) {
        if (typeof content !== "string") {
            return 0;
        }
        return Math.ceil(content.length / 4);
    }
}
// Streaming chat factory used by the Grok stream path.
const getChat$1 = (model, apiKey) => new CustomChat({
    apiKey,
    model,
    streaming: true,
});
321
/**
 * Grok (x.ai) inference runner. Three entry points:
 * - getCompletion: one-shot chat via the OpenAI-compatible SDK client.
 * - getStreamCompletion: token streaming via LangChain's ChatXAI, emitting
 *   "llm-new-token" / "llm-completion" events to the client.
 * - getOutlineCompletion: structured-JSON completion via a raw REST call.
 * Token rotation (apiKey arrays) is unsupported on every path.
 */
let GrokProvider$1 = class GrokProvider {
    constructor(contextService, logger) {
        this.contextService = contextService;
        this.logger = logger;
    }
    /** One-shot completion; tool-call arguments are JSON round-tripped. */
    async getCompletion(params) {
        const grok = getGrok();
        const { clientId, agentName, messages: rawMessages, mode, tools } = params;
        this.logger.log("grokProvider getCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        // Outgoing tool-call arguments must be JSON strings for the wire format.
        const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
            role,
            tool_call_id,
            content,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.stringify(f.arguments),
                },
            })),
        }));
        const { choices: [{ message: { content, role, tool_calls }, },], } = await grok.chat.completions.create({
            model: this.contextService.context.model,
            messages: messages,
            tools: tools,
            response_format: {
                type: "text",
            },
        });
        const result = {
            content: content,
            mode,
            agentName,
            role,
            // Incoming tool-call arguments are parsed back into objects.
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.parse(f.arguments),
                },
            })),
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_grok_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
        }
        return result;
    }
    /** Streaming completion; aggregates chunks and emits per-token events. */
    async getStreamCompletion(params) {
        if (Array.isArray(this.contextService.context.apiKey)) {
            throw new Error("Grok provider does not support token rotation");
        }
        const chat = getChat$1(this.contextService.context.model, this.contextService.context.apiKey);
        const { agentName, messages: rawMessages, mode, tools: rawTools, clientId, } = params;
        this.logger.log("grokProvider getStreamCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        // Validate and format tools
        const tools = rawTools?.map(({ type, function: f }) => ({
            type: "function",
            function: {
                name: f.name,
                description: f.description || "",
                parameters: f.parameters || { type: "object", properties: {} },
            },
        }));
        // Bind tools to chat instance if tools are provided
        const chatInstance = tools?.length ? chat.bindTools(tools) : chat;
        // Map raw messages to LangChain messages
        const messages$1 = rawMessages.map(({ role, tool_calls, tool_call_id, content }) => {
            if (role === "assistant") {
                return new messages.AIMessage({
                    content,
                    tool_calls: tool_calls?.map(({ function: f, id }) => ({
                        id: id || functoolsKit.randomString(),
                        name: f.name,
                        args: f.arguments,
                    })),
                });
            }
            if (role === "system") {
                return new messages.SystemMessage({ content });
            }
            if (role === "user") {
                return new messages.HumanMessage({ content });
            }
            if (role === "developer") {
                // "developer" role is treated as a system message.
                return new messages.SystemMessage({ content });
            }
            if (role === "tool") {
                return new messages.ToolMessage({
                    tool_call_id: tool_call_id || functoolsKit.randomString(),
                    content,
                });
            }
            throw new Error(`Unsupported message role: ${role}`);
        });
        let textContent = "";
        let toolCalls = [];
        // Handle streaming response
        const stream = await chatInstance.stream(messages$1);
        // Aggregate tool calls and content from stream, emit chunks
        for await (const chunk of stream) {
            if (chunk.content) {
                textContent += chunk.content;
                await agentSwarmKit.event(clientId, "llm-new-token", chunk.content); // Emit content chunk
            }
            if (chunk.tool_calls?.length) {
                toolCalls = [...toolCalls, ...chunk.tool_calls];
            }
        }
        // Process content if it's an array of parts
        const finalContent = Array.isArray(textContent)
            ? textContent
                .filter((part) => part.type === "text")
                .map((c) => c.text)
                .join("")
            : textContent;
        await agentSwarmKit.event(clientId, "llm-completion", {
            content: finalContent.trim(),
            agentName,
        });
        // Format tool calls for return
        const formattedToolCalls = toolCalls.map(({ name, id, args }) => ({
            id: id || functoolsKit.randomString(),
            type: "function",
            function: {
                name,
                arguments: args,
            },
        }));
        const result = {
            content: finalContent,
            mode,
            agentName,
            role: "assistant",
            tool_calls: formattedToolCalls,
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_grok_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
        }
        return result;
    }
    /** Structured-JSON completion via raw REST; repairs malformed JSON output. */
    async getOutlineCompletion(params) {
        const { messages: rawMessages, format } = params;
        this.logger.log("grokProvider getOutlineCompletion", {
            context: this.contextService.context,
        });
        if (Array.isArray(this.contextService.context.apiKey)) {
            throw new Error("Grok provider does not support token rotation");
        }
        const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
            role,
            tool_call_id,
            content,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.stringify(f.arguments),
                },
            })),
        }));
        const { choices: [{ message: { refusal, content }, },], } = await functoolsKit.fetchApi("https://api.x.ai/v1/chat/completions", {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                Authorization: `Bearer ${this.contextService.context.apiKey}`,
            },
            body: JSON.stringify({
                messages,
                // FIX: previously this sent `context: this.contextService.context`
                // and no `model` field — the x.ai chat completions API requires
                // `model`, and sending the whole context leaked the apiKey into
                // the request body.
                model: this.contextService.context.model,
                max_tokens: 5000,
                response_format: format,
            }),
        });
        if (refusal) {
            throw new Error(refusal);
        }
        // Repair any malformed JSON the model emitted before returning it.
        const json = jsonrepair.jsonrepair(content);
        const result = {
            role: "assistant",
            content: json,
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_grok_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
        }
        return result;
    }
};
521
+
522
// Retry budget for the HF outline tool-call loop below.
const MAX_ATTEMPTS$5 = 5;
/**
 * ChatOpenAI subclass pointed at the HuggingFace router; token counting is
 * replaced with a cheap ~4-chars-per-token estimate (no tokenizer download).
 * Non-string content counts as 0 tokens.
 */
class HuggingFaceChat extends openai.ChatOpenAI {
    async getNumTokens(content) {
        if (typeof content !== "string") {
            return 0;
        }
        return Math.ceil(content.length / 4);
    }
}
// Streaming chat factory for the HF stream path (router.huggingface.co).
const getChat = (model, apiKey) => new HuggingFaceChat({
    configuration: {
        baseURL: "https://router.huggingface.co/v1",
        apiKey,
    },
    model,
    streaming: true,
});
// Low-level HF inference client used by the non-streaming path.
const getInference = (apiKey) => new inference.InferenceClient(apiKey);
540
/**
 * HuggingFace inference runner. Three entry points:
 * - getCompletion: one-shot chat via @huggingface/inference chatCompletion.
 * - getStreamCompletion: token streaming via LangChain ChatOpenAI against the
 *   HF router, emitting "llm-new-token" / "llm-completion" events.
 * - getOutlineCompletion: structured output forced through a provide_answer
 *   tool call, with up to MAX_ATTEMPTS$5 retries and JSON repair.
 * Token rotation (apiKey arrays) is unsupported on every path.
 */
class HfProvider {
    constructor(contextService, logger) {
        this.contextService = contextService;
        this.logger = logger;
    }
    /** One-shot chat completion; normalizes tool-call argument encodings. */
    async getCompletion(params) {
        if (Array.isArray(this.contextService.context.apiKey)) {
            throw new Error("Hf provider does not support token rotation");
        }
        // NOTE: this local `inference` shadows the module-level
        // @huggingface/inference import used by getInference above.
        const inference = getInference(this.contextService.context.apiKey);
        const { agentName, clientId, messages: rawMessages, mode, tools: rawTools } = params;
        this.logger.log("hfProvider getCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        // Re-shape messages for the HF API; tool-call arguments go out as strings.
        const messages = rawMessages.map(({ role, content, tool_calls, tool_call_id }) => {
            if (role === "tool") {
                return {
                    role: "tool",
                    content,
                    tool_call_id: tool_call_id,
                };
            }
            if (role === "assistant" && tool_calls) {
                return {
                    role: "assistant",
                    content,
                    tool_calls: tool_calls.map((tc) => ({
                        id: tc.id,
                        type: tc.type,
                        function: {
                            name: tc.function.name,
                            arguments: typeof tc.function.arguments === "string"
                                ? tc.function.arguments
                                : JSON.stringify(tc.function.arguments),
                        },
                    })),
                };
            }
            return {
                role: role,
                content,
            };
        });
        const tools = rawTools?.map(({ function: f }) => ({
            type: "function",
            function: {
                name: f.name,
                description: f.description,
                parameters: f.parameters,
            },
        }));
        const completion = await inference.chatCompletion({
            model: this.contextService.context.model,
            messages,
            ...(tools && { tools }),
        });
        const choice = completion.choices[0];
        const text = choice.message.content || "";
        const tool_calls = choice.message.tool_calls || [];
        const result = {
            content: text,
            mode,
            agentName: agentName,
            role: "assistant",
            // Incoming tool-call arguments are parsed back into objects.
            tool_calls: tool_calls.map(({ id, type, function: f }) => ({
                id: id,
                type: type,
                function: {
                    name: f.name,
                    arguments: typeof f.arguments === "string"
                        ? JSON.parse(f.arguments)
                        : f.arguments,
                },
            })),
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_hf_provider.txt", JSON.stringify({
                params,
                answer: result,
            }, null, 2) + "\n\n");
        }
        return result;
    }
    /** Streaming completion via LangChain; emits per-token events. */
    async getStreamCompletion(params) {
        if (Array.isArray(this.contextService.context.apiKey)) {
            throw new Error("Hf provider does not support token rotation");
        }
        const chat = getChat(this.contextService.context.model, this.contextService.context.apiKey);
        const { agentName, messages: rawMessages, mode, tools: rawTools, clientId, } = params;
        this.logger.log("hfProvider getStreamCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        const tools = rawTools?.map(({ type, function: f }) => ({
            type: type,
            function: {
                name: f.name,
                parameters: f.parameters,
            },
        }));
        const chatInstance = tools ? chat.bindTools(tools) : chat;
        const { content, tool_calls } = await chatInstance.invoke(rawMessages.map(({ role, tool_calls, tool_call_id, content }) => {
            if (role === "assistant") {
                return new messages.AIMessage({
                    tool_calls: tool_calls?.map(({ function: f, id }) => ({
                        id: id,
                        name: f.name,
                        args: f.arguments,
                    })),
                    content,
                });
            }
            if (role === "system") {
                return new messages.SystemMessage({
                    content,
                });
            }
            if (role === "user") {
                return new messages.HumanMessage({
                    content,
                });
            }
            if (role === "developer") {
                // "developer" role is treated as a system message.
                return new messages.SystemMessage({
                    content,
                });
            }
            if (role === "tool") {
                return new messages.ToolMessage({
                    tool_call_id: tool_call_id,
                    content,
                });
            }
            // NOTE(review): unknown roles silently become "" here, while the
            // Grok runner throws for the same case — confirm which behavior
            // is intended.
            return "";
        }), {
            callbacks: [
                {
                    handleLLMNewToken(token) {
                        agentSwarmKit.event(clientId, "llm-new-token", token);
                    },
                },
            ],
        });
        // Content may be a string or an array of typed parts; keep text parts only.
        const text = typeof content === "string"
            ? content
            : content
                .filter((part) => part.type === "text")
                .map((c) => c.text)
                .join("");
        await agentSwarmKit.event(clientId, "llm-completion", {
            content: text.trim(),
            agentName,
        });
        const result = {
            content: text,
            mode,
            agentName,
            role: "assistant",
            tool_calls: tool_calls?.map(({ name, id, args }) => ({
                id: id ?? functoolsKit.randomString(),
                type: "function",
                function: {
                    name,
                    arguments: args,
                },
            })),
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_hf_provider_stream.txt", JSON.stringify({
                params,
                answer: result,
            }, null, 2) + "\n\n");
        }
        return result;
    }
    /**
     * Structured output via a forced provide_answer tool call: retries up to
     * MAX_ATTEMPTS$5 times, repairing malformed JSON and re-prompting once
     * (singleshot) when the model replies with plain text instead of a tool call.
     */
    async getOutlineCompletion(params) {
        const { messages: rawMessages, format } = params;
        this.logger.log("hfProvider getOutlineCompletion", {
            context: this.contextService.context,
        });
        if (Array.isArray(this.contextService.context.apiKey)) {
            throw new Error("Hf provider does not support token rotation");
        }
        // Create tool definition based on format schema
        const schema = "json_schema" in format
            ? lodashEs.get(format, "json_schema.schema", format)
            : format;
        const toolDefinition = {
            type: "function",
            function: {
                name: "provide_answer",
                description: "Предоставить ответ в требуемом формате",
                parameters: schema,
            },
        };
        // Add system instruction for tool usage
        const systemMessage = {
            role: "system",
            content: "ОБЯЗАТЕЛЬНО используй инструмент provide_answer для предоставления ответа. НЕ отвечай обычным текстом. ВСЕГДА вызывай инструмент provide_answer с правильными параметрами.",
        };
        const messages = [
            systemMessage,
            ...rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
                role,
                tool_call_id,
                content,
                tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                    ...rest,
                    function: {
                        name: f.name,
                        arguments: JSON.stringify(f.arguments),
                    },
                })),
            })),
        ];
        let attempt = 0;
        // The nudge message is appended at most once across all retries.
        const addToolRequestMessage = functoolsKit.singleshot(() => {
            messages.push({
                role: "user",
                content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
            });
        });
        while (attempt < MAX_ATTEMPTS$5) {
            const { choices: [{ message }], } = await functoolsKit.fetchApi("https://router.huggingface.co/v1/chat/completions", {
                method: "POST",
                headers: {
                    "Content-Type": "application/json",
                    Authorization: `Bearer ${this.contextService.context.apiKey}`,
                },
                body: JSON.stringify({
                    messages,
                    model: this.contextService.context.model,
                    tools: [toolDefinition],
                    tool_choice: {
                        type: "function",
                        function: { name: "provide_answer" },
                    },
                }),
            });
            const { refusal, tool_calls, reasoning_content } = message;
            if (refusal) {
                console.error(`Attempt ${attempt + 1}: Model send refusal`);
                attempt++;
                continue;
            }
            if (!tool_calls?.length) {
                console.error(`Attempt ${attempt + 1}: Model did not use tool, adding user message`);
                addToolRequestMessage();
                attempt++;
                continue;
            }
            if (tool_calls && tool_calls.length > 0) {
                const toolCall = tool_calls[0];
                if (toolCall.function?.name === "provide_answer") {
                    // Parse JSON with repair
                    let parsedArguments;
                    try {
                        const json = jsonrepair.jsonrepair(toolCall.function.arguments);
                        parsedArguments = JSON.parse(json);
                    }
                    catch (error) {
                        console.error(`Attempt ${attempt + 1}: Failed to parse tool arguments:`, error);
                        addToolRequestMessage();
                        attempt++;
                        continue;
                    }
                    const validation = agentSwarmKit.validateToolArguments(parsedArguments, schema);
                    if (!validation.success) {
                        console.error(`Attempt ${attempt + 1}: ${validation.error}`);
                        addToolRequestMessage();
                        attempt++;
                        continue;
                    }
                    // NOTE(review): _context embeds the full execution context —
                    // including the apiKey — into the returned payload; confirm
                    // downstream consumers never persist or expose this.
                    lodashEs.set(validation.data, "_thinking", reasoning_content);
                    lodashEs.set(validation.data, "_context", this.contextService.context);
                    const result = {
                        role: "assistant",
                        content: JSON.stringify(validation.data),
                    };
                    // Debug logging
                    if (CC_ENABLE_DEBUG) {
                        await fs.appendFile("./debug_hf_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
                    }
                    return result;
                }
            }
            console.error(`Attempt ${attempt + 1}: Model send refusal`);
            attempt++;
        }
        throw new Error("Model failed to use tool after maximum attempts");
    }
}
839
+
840
/**
 * Ollama client wrapper that rotates through an array of API keys: each key
 * gets its own Ollama instance and agentSwarmKit.RoundRobin alternates
 * between them on successive chat() calls.
 */
class OllamaWrapper {
    constructor(_config) {
        this._config = _config;
        // FIX: validate the key list BEFORE wiring the round-robin over it.
        // The original checked after RoundRobin.create had already consumed
        // the (possibly missing) apiKey, producing a confusing failure instead
        // of this explicit error.
        if (!lib.contextService.context.apiKey) {
            throw new Error("OllamaRotate required apiKey[] to process token rotation");
        }
        this._chatFn = agentSwarmKit.RoundRobin.create(lib.contextService.context.apiKey, (token) => {
            const ollama = new ollama$1.Ollama({
                ...this._config,
                headers: {
                    Authorization: `Bearer ${token}`,
                },
            });
            // FIX: the original branched on request.stream but both branches
            // were identical; ollama.chat dispatches on request.stream itself.
            return async (request) => await ollama.chat(request);
        });
    }
    /** Delegate a chat request to the next client in the rotation. */
    async chat(request) {
        return await this._chatFn(request);
    }
}
// Single shared rotating client against the hosted ollama.com endpoint.
const getOllamaRotate = functoolsKit.singleshot(() => new OllamaWrapper({
    host: "https://ollama.com",
}));
870
+
871
/**
 * Resolve the Ollama client for the current context exactly once (singleshot):
 * - apiKey array  -> rotating wrapper over the hosted ollama.com endpoint
 * - no apiKey     -> local default Ollama instance
 * - single apiKey -> hosted ollama.com client with bearer auth
 */
const getOllama = functoolsKit.singleshot(() => {
    const { apiKey } = lib.contextService.context;
    if (Array.isArray(apiKey)) {
        return getOllamaRotate();
    }
    if (!apiKey) {
        return new ollama$1.Ollama();
    }
    return new ollama$1.Ollama({
        host: "https://ollama.com",
        headers: {
            Authorization: `Bearer ${apiKey}`,
        },
    });
});
886
+
887
+ const MAX_ATTEMPTS$4 = 3;
888
+ class OllamaProvider {
889
+ constructor(contextService, logger) {
890
+ this.contextService = contextService;
891
+ this.logger = logger;
892
+ }
893
+ async getCompletion(params) {
894
+ const { agentName, messages: rawMessages, mode, tools, clientId } = params;
895
+ const ollama = getOllama();
896
+ this.logger.log("ollamaProvider getCompletion", {
897
+ agentName,
898
+ mode,
899
+ clientId,
900
+ context: this.contextService.context,
901
+ });
902
+ const messages = [...rawMessages];
903
+ const response = await ollama.chat({
904
+ model: this.contextService.context.model,
905
+ messages: messages.map((message) => ({
906
+ content: message.content,
907
+ role: message.role,
908
+ tool_calls: message.tool_calls?.map((call) => ({
909
+ function: call.function,
910
+ })),
911
+ })),
912
+ tools,
913
+ });
914
+ const message = response.message;
915
+ const result = {
916
+ ...message,
917
+ tool_calls: response.message.tool_calls?.map((call) => ({
918
+ function: call.function,
919
+ type: "function",
920
+ id: functoolsKit.randomString(),
921
+ })),
922
+ mode,
923
+ agentName,
924
+ role: response.message.role,
925
+ };
926
+ // Debug logging
927
+ if (CC_ENABLE_DEBUG) {
928
+ await fs.appendFile("./debug_ollama_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
929
+ }
930
+ return result;
931
+ }
932
+ async getStreamCompletion(params) {
933
+ const { agentName, messages: rawMessages, mode, tools, clientId } = params;
934
+ const ollama = getOllama();
935
+ this.logger.log("ollamaProvider getStreamCompletion", {
936
+ agentName,
937
+ mode,
938
+ clientId,
939
+ context: this.contextService.context,
940
+ });
941
+ const messages = rawMessages.map((message) => ({
942
+ content: message.content,
943
+ role: message.role,
944
+ tool_calls: message.tool_calls?.map((call) => ({
945
+ function: call.function,
946
+ })),
947
+ }));
948
+ let content = "";
949
+ let toolCalls = [];
950
+ // Stream the response
951
+ const stream = await ollama.chat({
952
+ model: this.contextService.context.model,
953
+ messages,
954
+ tools,
955
+ stream: true,
956
+ });
957
+ for await (const chunk of stream) {
958
+ if (chunk.message.tool_calls) {
959
+ // Accumulate tool calls
960
+ for (const tool of chunk.message.tool_calls) {
961
+ toolCalls.push(tool);
962
+ }
963
+ }
964
+ else if (chunk.message.content) {
965
+ // Stream content tokens
966
+ content += chunk.message.content;
967
+ await agentSwarmKit.event(clientId, "llm-new-token", chunk.message.content);
968
+ }
969
+ }
970
+ // Send completion event
971
+ await agentSwarmKit.event(clientId, "llm-completion", {
972
+ content: content.trim(),
973
+ agentName,
974
+ });
975
+ const result = {
976
+ content,
977
+ mode,
978
+ agentName,
979
+ role: "assistant",
980
+ tool_calls: toolCalls.map((call) => ({
981
+ function: call.function,
982
+ type: "function",
983
+ id: functoolsKit.randomString(),
984
+ })),
985
+ };
986
+ // Debug logging
987
+ if (CC_ENABLE_DEBUG) {
988
+ await fs.appendFile("./debug_ollama_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
989
+ }
990
+ return result;
991
+ }
992
    /**
     * Forces the model to emit JSON matching `format` by exposing a single
     * `provide_answer` tool and retrying until the model calls that tool with
     * arguments that validate against the schema.
     *
     * Retries up to MAX_ATTEMPTS$4 times; a one-time (singleshot) user message
     * nudging the model to use the tool is appended on the first failure.
     *
     * @param {Object} params - Outline request; `messages` is the raw history,
     *   `format` is either a bare JSON schema or an OpenAI-style wrapper with a
     *   `json_schema.schema` property.
     * @returns {Promise<{role: string, content: string}>} Assistant message whose
     *   content is the validated tool arguments serialized to JSON (with the
     *   current execution context attached under `_context`).
     * @throws {Error} When the model never produces a valid tool call.
     */
    async getOutlineCompletion(params) {
        const { messages: rawMessages, format } = params;
        const ollama = getOllama();
        this.logger.log("ollamaProvider getOutlineCompletion", {
            context: this.contextService.context,
        });
        // Create tool definition based on format schema
        const schema = "json_schema" in format
            ? lodashEs.get(format, "json_schema.schema", format)
            : format;
        const toolDefinition = {
            type: "function",
            function: {
                name: "provide_answer",
                description: "Предоставить ответ в требуемом формате",
                parameters: schema,
            },
        };
        // Add system instruction for tool usage
        const systemMessage = {
            role: "system",
            content: "ОБЯЗАТЕЛЬНО используй инструмент provide_answer для предоставления ответа. НЕ отвечай обычным текстом. ВСЕГДА вызывай инструмент provide_answer с правильными параметрами.",
        };
        // Only role/content are forwarded to the outline request; prior
        // tool-call history is intentionally dropped here.
        const messages = [
            systemMessage,
            ...rawMessages.map(({ role, content }) => ({
                role,
                content,
            })),
        ];
        let attempt = 0;
        // singleshot: the nudge message is appended at most once across retries.
        const addToolRequestMessage = functoolsKit.singleshot(() => {
            messages.push({
                role: "user",
                content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
            });
        });
        while (attempt < MAX_ATTEMPTS$4) {
            const response = await ollama.chat({
                model: this.contextService.context.model,
                messages,
                tools: [toolDefinition],
            });
            const { tool_calls } = response.message;
            if (!tool_calls?.length) {
                console.error(`Attempt ${attempt + 1}: Model did not use tool, adding user message`);
                addToolRequestMessage();
                attempt++;
                continue;
            }
            // Always truthy here (guarded above); kept to preserve the original flow.
            if (tool_calls && tool_calls.length > 0) {
                const toolCall = tool_calls[0];
                if (toolCall.function?.name === "provide_answer") {
                    // Parse JSON with repair
                    let parsedArguments;
                    try {
                        // Ollama may return arguments as an object or as a string;
                        // normalize to a string before repairing.
                        const argumentsString = typeof toolCall.function.arguments === 'string'
                            ? toolCall.function.arguments
                            : JSON.stringify(toolCall.function.arguments);
                        const json = jsonrepair.jsonrepair(argumentsString);
                        parsedArguments = JSON.parse(json);
                    }
                    catch (error) {
                        console.error(`Attempt ${attempt + 1}: Failed to parse tool arguments:`, error);
                        addToolRequestMessage();
                        attempt++;
                        continue;
                    }
                    const validation = agentSwarmKit.validateToolArguments(parsedArguments, schema);
                    if (!validation.success) {
                        console.error(`Attempt ${attempt + 1}: ${validation.error}`);
                        addToolRequestMessage();
                        attempt++;
                        continue;
                    }
                    // Attach the resolved execution context to the validated payload.
                    lodashEs.set(validation.data, "_context", this.contextService.context);
                    const result = {
                        role: "assistant",
                        content: JSON.stringify(validation.data),
                    };
                    // Debug logging
                    if (CC_ENABLE_DEBUG) {
                        await fs.appendFile("./debug_ollama_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
                    }
                    return result;
                }
            }
            // Reached when the tool call targeted an unexpected function name.
            console.error(`Attempt ${attempt + 1}: Model send refusal`);
            attempt++;
        }
        throw new Error("Model failed to use tool after maximum attempts");
    }
1084
+ }
1085
+
1086
/**
 * Lazily builds a singleton OpenAI-compatible client pointed at the Anthropic
 * API. Cached via singleshot; when the configured apiKey is an array (token
 * rotation), the cache is cleared and an error is thrown, since rotation is
 * not supported by this provider.
 */
const getClaude = functoolsKit.singleshot(() => {
    const { apiKey } = lib.contextService.context;
    if (!Array.isArray(apiKey)) {
        return new OpenAI({
            baseURL: "https://api.anthropic.com/v1/",
            apiKey,
        });
    }
    // Reset the singleshot cache so a later call can retry with a fixed key.
    getClaude.clear();
    throw new Error("Claude provider does not support token rotation");
});
1097
+
1098
// Maximum retries when forcing the Claude-backed provider to answer via the
// provide_answer tool in getOutlineCompletion.
const MAX_ATTEMPTS$3 = 5;
1099
/**
 * Completion provider backed by Anthropic's OpenAI-compatible endpoint
 * (see getClaude). NOTE(review): the class is named GrokProvider but every
 * log message and the underlying client target Claude — likely a copy-paste
 * artifact; the class name is kept unchanged because callers reference it.
 *
 * Fix applied: getStreamCompletion previously wrote its debug artifact to
 * "./debug_gpt5_provider_stream.txt" (copied from GPT5Provider); it now
 * writes to "./debug_claude_provider_stream.txt", consistent with the other
 * debug files of this class.
 */
class GrokProvider {
    /**
     * @param {Object} contextService - Scoped service exposing model/apiKey context.
     * @param {Object} logger - Logger used for structured trace output.
     */
    constructor(contextService, logger) {
        this.contextService = contextService;
        this.logger = logger;
    }
    /**
     * Single-shot chat completion with optional tool calling.
     * Tool-call arguments are stringified on the way in (OpenAI wire format)
     * and parsed back to plain objects on the way out.
     * @param {Object} params - { clientId, agentName, messages, mode, tools }.
     * @returns {Promise<Object>} Assistant message enriched with mode/agentName.
     */
    async getCompletion(params) {
        const claude = getClaude();
        const { clientId, agentName, messages: rawMessages, mode, tools } = params;
        this.logger.log("claudeProvider getCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
            role,
            tool_call_id,
            content,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.stringify(f.arguments),
                },
            })),
        }));
        // Prepare request options
        const requestOptions = {
            model: this.contextService.context.model,
            messages: messages,
            response_format: {
                type: "text",
            },
        };
        // Only add tools if they exist and have at least one item
        if (tools && tools.length > 0) {
            requestOptions.tools = tools;
        }
        const { choices: [{ message: { content, role, tool_calls }, },], } = await claude.chat.completions.create(requestOptions);
        const result = {
            content: content,
            mode,
            agentName,
            role,
            // Parse stringified arguments back into plain objects for callers.
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.parse(f.arguments),
                },
            })),
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_claude_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
        }
        return result;
    }
    /**
     * "Streaming" completion: performs one non-streaming request and emits a
     * single llm-completion event to mimic streaming — no per-token events.
     * @param {Object} params - { clientId, agentName, messages, mode, tools }.
     * @returns {Promise<Object>} Assistant message (content defaults to "").
     */
    async getStreamCompletion(params) {
        const claude = getClaude();
        const { clientId, agentName, messages: rawMessages, mode, tools } = params;
        this.logger.log("claudeProvider getStreamCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        // Map raw messages to OpenAI format
        const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
            role,
            tool_call_id,
            content,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.stringify(f.arguments),
                },
            })),
        }));
        // Map tools to OpenAI format
        // NOTE(review): only name/parameters are forwarded; tool descriptions
        // appear to be dropped here — confirm this is intentional.
        const formattedTools = tools?.map(({ type, function: f }) => ({
            type: type,
            function: {
                name: f.name,
                parameters: f.parameters,
            },
        }));
        const { choices: [{ message: { content, role, tool_calls }, },], } = await claude.chat.completions.create({
            model: this.contextService.context.model,
            messages: messages,
            tools: formattedTools,
        });
        // Emit events to mimic streaming behavior
        if (content) {
            await agentSwarmKit.event(clientId, "llm-completion", {
                content: content.trim(),
                agentName,
            });
        }
        const result = {
            content: content || "",
            mode,
            agentName,
            role,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.parse(f.arguments),
                },
            })),
        };
        // Debug logging
        // FIX: was "./debug_gpt5_provider_stream.txt" — wrong provider's debug
        // artifact; keep all Claude debug output together.
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_claude_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
        }
        return result;
    }
    /**
     * Structured (outline) completion via a forced `provide_answer` tool call,
     * retried up to MAX_ATTEMPTS$3 times with jsonrepair + schema validation.
     * @param {Object} params - { messages, format } where format is a JSON
     *   schema or an OpenAI-style `json_schema` wrapper.
     * @returns {Promise<{role: string, content: string}>} Validated arguments
     *   serialized as JSON (with `_context` attached).
     * @throws {Error} When the model never produces a valid tool call.
     */
    async getOutlineCompletion(params) {
        const { messages: rawMessages, format } = params;
        const claude = getClaude();
        this.logger.log("claudeProvider getOutlineCompletion", {
            context: this.contextService.context,
        });
        // Create tool definition based on format schema
        const schema = "json_schema" in format
            ? lodashEs.get(format, "json_schema.schema", format)
            : format;
        const toolDefinition = {
            type: "function",
            function: {
                name: "provide_answer",
                description: "Предоставить ответ в требуемом формате",
                parameters: schema,
            },
        };
        // Add system instruction for tool usage
        const systemMessage = {
            role: "system",
            content: "ОБЯЗАТЕЛЬНО используй инструмент provide_answer для предоставления ответа. НЕ отвечай обычным текстом. ВСЕГДА вызывай инструмент provide_answer с правильными параметрами.",
        };
        const messages = [
            systemMessage,
            ...rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
                role,
                tool_call_id,
                content,
                tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                    ...rest,
                    function: {
                        name: f.name,
                        arguments: JSON.stringify(f.arguments),
                    },
                })),
            })),
        ];
        let attempt = 0;
        // singleshot: the nudge message is appended at most once across retries.
        const addToolRequestMessage = functoolsKit.singleshot(() => {
            messages.push({
                role: "user",
                content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
            });
        });
        while (attempt < MAX_ATTEMPTS$3) {
            // Prepare request options
            const requestOptions = {
                model: this.contextService.context.model,
                messages: messages,
                tools: [toolDefinition],
                tool_choice: {
                    type: "function",
                    function: { name: "provide_answer" },
                },
            };
            const { choices: [{ message }], } = await claude.chat.completions.create(requestOptions);
            const { refusal, tool_calls } = message;
            if (refusal) {
                console.error(`Attempt ${attempt + 1}: Model send refusal`);
                attempt++;
                continue;
            }
            if (!tool_calls?.length) {
                console.error(`Attempt ${attempt + 1}: Model did not use tool, adding user message`);
                addToolRequestMessage();
                attempt++;
                continue;
            }
            if (tool_calls && tool_calls.length > 0) {
                const toolCall = tool_calls[0];
                if (toolCall.function?.name === "provide_answer") {
                    // Parse JSON with repair
                    let parsedArguments;
                    try {
                        const json = jsonrepair.jsonrepair(toolCall.function.arguments);
                        parsedArguments = JSON.parse(json);
                    }
                    catch (error) {
                        console.error(`Attempt ${attempt + 1}: Failed to parse tool arguments:`, error);
                        addToolRequestMessage();
                        attempt++;
                        continue;
                    }
                    const validation = agentSwarmKit.validateToolArguments(parsedArguments, schema);
                    if (!validation.success) {
                        console.error(`Attempt ${attempt + 1}: ${validation.error}`);
                        addToolRequestMessage();
                        attempt++;
                        continue;
                    }
                    // Attach the resolved execution context to the validated payload.
                    lodashEs.set(validation.data, "_context", this.contextService.context);
                    const result = {
                        role: "assistant",
                        content: JSON.stringify(validation.data),
                    };
                    // Debug logging
                    if (CC_ENABLE_DEBUG) {
                        await fs.appendFile("./debug_claude_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
                    }
                    return result;
                }
            }
            // Reached when the tool call targeted an unexpected function name.
            console.error(`Attempt ${attempt + 1}: Model send refusal`);
            attempt++;
        }
        throw new Error("Model failed to use tool after maximum attempts");
    }
}
1327
+
1328
/**
 * Lazily builds a singleton OpenAI client from the configured apiKey.
 * Cached via singleshot; when the apiKey is an array (token rotation), the
 * cache is cleared and an error is thrown — rotation is unsupported here.
 */
const getOpenAi = functoolsKit.singleshot(() => {
    const { apiKey } = lib.contextService.context;
    if (!Array.isArray(apiKey)) {
        return new OpenAI({ apiKey });
    }
    // Reset the singleshot cache so a later call can retry with a fixed key.
    getOpenAi.clear();
    throw new Error("OpenAI provider does not support token rotation");
});
1338
+
1339
/**
 * Completion provider backed by the official OpenAI API (see getOpenAi).
 * Implements the same tri-method provider interface used elsewhere in this
 * bundle: getCompletion, getStreamCompletion, getOutlineCompletion.
 */
class GPT5Provider {
    /**
     * @param {Object} contextService - Scoped service exposing model/apiKey context.
     * @param {Object} logger - Logger used for structured trace output.
     */
    constructor(contextService, logger) {
        this.contextService = contextService;
        this.logger = logger;
    }
    /**
     * Single-shot chat completion with optional tool calling.
     * Tool-call arguments are stringified on the way in (OpenAI wire format)
     * and parsed back to plain objects on the way out.
     * @param {Object} params - { clientId, agentName, messages, mode, tools }.
     * @returns {Promise<Object>} Assistant message enriched with mode/agentName.
     */
    async getCompletion(params) {
        const openai = getOpenAi();
        const { clientId, agentName, messages: rawMessages, mode, tools } = params;
        this.logger.log("gpt5Provider getCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        // Map raw messages to OpenAI format
        const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
            role,
            tool_call_id,
            content,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.stringify(f.arguments),
                },
            })),
        }));
        const { choices: [{ message: { content, role, tool_calls }, },], } = await openai.chat.completions.create({
            model: this.contextService.context.model,
            messages: messages,
            tools: tools,
        });
        const result = {
            content: content,
            mode,
            agentName,
            role,
            // Parse stringified arguments back into plain objects for callers.
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.parse(f.arguments),
                },
            })),
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_gpt5_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
        }
        return result;
    }
    /**
     * "Streaming" completion: performs a single non-streaming request, then
     * emits one llm-completion event to mimic streaming behavior — no
     * per-token events are produced by this provider.
     * @param {Object} params - { clientId, agentName, messages, mode, tools }.
     * @returns {Promise<Object>} Assistant message (content defaults to "").
     */
    async getStreamCompletion(params) {
        const openai = getOpenAi();
        const { clientId, agentName, messages: rawMessages, mode, tools } = params;
        this.logger.log("gpt5Provider getStreamCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        // Map raw messages to OpenAI format
        const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
            role,
            tool_call_id,
            content,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.stringify(f.arguments),
                },
            })),
        }));
        // Map tools to OpenAI format
        // NOTE(review): only name/parameters are forwarded; tool descriptions
        // appear to be dropped here — confirm this is intentional.
        const formattedTools = tools?.map(({ type, function: f }) => ({
            type: type,
            function: {
                name: f.name,
                parameters: f.parameters,
            },
        }));
        const { choices: [{ message: { content, role, tool_calls }, },], } = await openai.chat.completions.create({
            model: this.contextService.context.model,
            messages: messages,
            tools: formattedTools,
        });
        // Emit events to mimic streaming behavior
        if (content) {
            await agentSwarmKit.event(clientId, "llm-completion", {
                content: content.trim(),
                agentName,
            });
        }
        const result = {
            content: content || "",
            mode,
            agentName,
            role,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.parse(f.arguments),
                },
            })),
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_gpt5_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
        }
        return result;
    }
    /**
     * Structured (outline) completion using OpenAI's native response_format
     * json_schema support — unlike the tool-forcing retry loop used by
     * providers whose APIs lack structured output.
     * @param {Object} params - { messages, format } where format is a JSON
     *   schema or a ready-made { type: "json_schema", json_schema } wrapper.
     * @returns {Promise<{role: string, content: string}>} JSON content,
     *   repaired via jsonrepair before being returned.
     * @throws {Error} When the model returns a refusal.
     */
    async getOutlineCompletion(params) {
        const { messages: rawMessages, format } = params;
        const openai = getOpenAi();
        this.logger.log("gpt5Provider getOutlineCompletion", {
            context: this.contextService.context,
        });
        // Map raw messages to OpenAI format
        const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
            role,
            tool_call_id,
            content,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.stringify(f.arguments),
                },
            })),
        }));
        // Extract response format
        const response_format = "json_schema" in format
            ? format
            : { type: "json_schema", json_schema: { schema: format } };
        const completion = await openai.chat.completions.create({
            messages: messages,
            model: this.contextService.context.model,
            response_format: response_format,
        });
        const choice = completion.choices[0];
        if (choice.message.refusal) {
            throw new Error(choice.message.refusal);
        }
        // Repair potentially malformed JSON before handing it to callers.
        const json = jsonrepair.jsonrepair(choice.message.content || "");
        const result = {
            role: "assistant",
            content: json,
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_gpt5_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
        }
        return result;
    }
}
1495
+
1496
/**
 * Lazily builds a singleton client for the Deepseek API (OpenAI-compatible
 * wire protocol). Cached via singleshot; when the apiKey is an array (token
 * rotation), the cache is cleared and an error is thrown.
 */
const getDeepseek = functoolsKit.singleshot(() => {
    const { apiKey } = lib.contextService.context;
    if (!Array.isArray(apiKey)) {
        return new OpenAI({
            baseURL: "https://api.deepseek.com",
            apiKey,
        });
    }
    // Reset the singleshot cache so a later call can retry with a fixed key.
    getDeepseek.clear();
    throw new Error("Deepseek provider does not support token rotation");
});
1507
+
1508
// Maximum retries when forcing the Deepseek provider to answer via the
// provide_answer tool in getOutlineCompletion.
const MAX_ATTEMPTS$2 = 3;
1509
/**
 * Completion provider backed by the Deepseek API via its OpenAI-compatible
 * endpoint (see getDeepseek). Implements getCompletion, getStreamCompletion
 * and getOutlineCompletion like the sibling providers in this bundle.
 */
class DeepseekProvider {
    /**
     * @param {Object} contextService - Scoped service exposing model/apiKey context.
     * @param {Object} logger - Logger used for structured trace output.
     */
    constructor(contextService, logger) {
        this.contextService = contextService;
        this.logger = logger;
    }
    /**
     * Single-shot chat completion with optional tool calling. Tool-call
     * arguments are stringified on the way in and parsed back on the way out.
     * Tools are omitted from the request entirely when the list is empty.
     * @param {Object} params - { clientId, agentName, messages, mode, tools }.
     * @returns {Promise<Object>} Assistant message enriched with mode/agentName.
     */
    async getCompletion(params) {
        const deepseek = getDeepseek();
        const { clientId, agentName, messages: rawMessages, mode, tools } = params;
        this.logger.log("deepseekProvider getCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        // Map raw messages to OpenAI format
        const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
            role,
            tool_call_id,
            content,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.stringify(f.arguments),
                },
            })),
        }));
        // NOTE(review): only name/parameters are forwarded; tool descriptions
        // appear to be dropped here — confirm this is intentional.
        const formattedTools = tools?.map(({ type, function: f }) => ({
            type: type,
            function: {
                name: f.name,
                parameters: f.parameters,
            },
        }));
        const { choices: [{ message: { content, role, tool_calls }, },], } = await deepseek.chat.completions.create({
            model: this.contextService.context.model,
            messages: messages,
            tools: formattedTools?.length ? formattedTools : undefined,
        });
        const result = {
            content: content,
            mode,
            agentName,
            role,
            // Parse stringified arguments back into plain objects for callers.
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.parse(f.arguments),
                },
            })),
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_deepseek_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
        }
        return result;
    }
    /**
     * "Streaming" completion: performs a single non-streaming request and
     * emits one llm-completion event to mimic streaming — no per-token events.
     * @param {Object} params - { clientId, agentName, messages, mode, tools }.
     * @returns {Promise<Object>} Assistant message (content defaults to "").
     */
    async getStreamCompletion(params) {
        const deepseek = getDeepseek();
        const { clientId, agentName, messages: rawMessages, mode, tools } = params;
        this.logger.log("deepseekProvider getStreamCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        // Map raw messages to OpenAI format
        const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
            role,
            tool_call_id,
            content,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.stringify(f.arguments),
                },
            })),
        }));
        // Map tools to OpenAI format
        const formattedTools = tools?.map(({ type, function: f }) => ({
            type: type,
            function: {
                name: f.name,
                parameters: f.parameters,
            },
        }));
        const { choices: [{ message: { content, role, tool_calls }, },], } = await deepseek.chat.completions.create({
            model: this.contextService.context.model,
            messages: messages,
            tools: formattedTools?.length ? formattedTools : undefined,
        });
        // Emit events to mimic streaming behavior
        if (content) {
            await agentSwarmKit.event(clientId, "llm-completion", {
                content: content.trim(),
                agentName,
            });
        }
        const result = {
            content: content || "",
            mode,
            agentName,
            role,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.parse(f.arguments),
                },
            })),
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_deepseek_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
        }
        return result;
    }
    /**
     * Structured (outline) completion via a forced `provide_answer` tool call,
     * retried up to MAX_ATTEMPTS$2 times with jsonrepair + schema validation.
     * @param {Object} params - { messages, format } where format is a JSON
     *   schema or an OpenAI-style `json_schema` wrapper.
     * @returns {Promise<{role: string, content: string}>} Validated arguments
     *   serialized as JSON (with `_context` attached).
     * @throws {Error} When the model never produces a valid tool call.
     */
    async getOutlineCompletion(params) {
        const { messages: rawMessages, format } = params;
        const deepseek = getDeepseek();
        this.logger.log("deepseekProvider getOutlineCompletion", {
            context: this.contextService.context,
        });
        // Create tool definition based on format schema
        const schema = "json_schema" in format
            ? lodashEs.get(format, "json_schema.schema", format)
            : format;
        const toolDefinition = {
            type: "function",
            function: {
                name: "provide_answer",
                description: "Предоставить ответ в требуемом формате",
                parameters: schema,
            },
        };
        // Add system instruction for tool usage
        const systemMessage = {
            role: "system",
            content: "ОБЯЗАТЕЛЬНО используй инструмент provide_answer для предоставления ответа. НЕ отвечай обычным текстом. ВСЕГДА вызывай инструмент provide_answer с правильными параметрами.",
        };
        const messages = [
            systemMessage,
            ...rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
                role,
                tool_call_id,
                content,
                tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                    ...rest,
                    function: {
                        name: f.name,
                        arguments: JSON.stringify(f.arguments),
                    },
                })),
            })),
        ];
        let attempt = 0;
        // singleshot: the nudge message is appended at most once across retries.
        const addToolRequestMessage = functoolsKit.singleshot(() => {
            messages.push({
                role: "user",
                content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
            });
        });
        while (attempt < MAX_ATTEMPTS$2) {
            // Prepare request options
            const requestOptions = {
                model: this.contextService.context.model,
                messages: messages,
                tools: [toolDefinition],
                tool_choice: {
                    type: "function",
                    function: { name: "provide_answer" },
                },
            };
            const { choices: [{ message }], } = await deepseek.chat.completions.create(requestOptions);
            const { refusal, tool_calls } = message;
            if (refusal) {
                console.error(`Attempt ${attempt + 1}: Model send refusal`);
                attempt++;
                continue;
            }
            if (!tool_calls?.length) {
                console.error(`Attempt ${attempt + 1}: Model did not use tool, adding user message`);
                addToolRequestMessage();
                attempt++;
                continue;
            }
            // Always truthy here (guarded above); kept to preserve the original flow.
            if (tool_calls && tool_calls.length > 0) {
                const toolCall = tool_calls[0];
                if (toolCall.function?.name === "provide_answer") {
                    // Parse JSON with repair
                    let parsedArguments;
                    try {
                        const json = jsonrepair.jsonrepair(toolCall.function.arguments);
                        parsedArguments = JSON.parse(json);
                    }
                    catch (error) {
                        console.error(`Attempt ${attempt + 1}: Failed to parse tool arguments:`, error);
                        addToolRequestMessage();
                        attempt++;
                        continue;
                    }
                    const validation = agentSwarmKit.validateToolArguments(parsedArguments, schema);
                    if (!validation.success) {
                        console.error(`Attempt ${attempt + 1}: ${validation.error}`);
                        addToolRequestMessage();
                        attempt++;
                        continue;
                    }
                    // Attach the resolved execution context to the validated payload.
                    lodashEs.set(validation.data, "_context", this.contextService.context);
                    const result = {
                        role: "assistant",
                        content: JSON.stringify(validation.data),
                    };
                    // Debug logging
                    if (CC_ENABLE_DEBUG) {
                        await fs.appendFile("./debug_deepseek_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
                    }
                    return result;
                }
            }
            // Reached when the tool call targeted an unexpected function name.
            console.error(`Attempt ${attempt + 1}: Model send refusal`);
            attempt++;
        }
        throw new Error("Model failed to use tool after maximum attempts");
    }
}
1737
+
1738
/**
 * Lazily builds a singleton client for the Mistral API (OpenAI-compatible
 * wire protocol). Cached via singleshot; when the apiKey is an array (token
 * rotation), the cache is cleared and an error is thrown.
 */
const getMistral = functoolsKit.singleshot(() => {
    const { apiKey } = lib.contextService.context;
    if (!Array.isArray(apiKey)) {
        return new OpenAI({
            baseURL: "https://api.mistral.ai/v1",
            apiKey,
        });
    }
    // Reset the singleshot cache so a later call can retry with a fixed key.
    getMistral.clear();
    throw new Error("Mistral provider does not support token rotation");
});
1749
+
1750
// Maximum retries when forcing the Mistral provider to answer via the
// provide_answer tool in getOutlineCompletion.
const MAX_ATTEMPTS$1 = 3;
1751
/**
 * Completion provider backed by the Mistral API via its OpenAI-compatible
 * endpoint (see getMistral). Implements getCompletion, getStreamCompletion
 * and getOutlineCompletion like the sibling providers in this bundle.
 */
class MistralProvider {
    /**
     * @param {Object} contextService - Scoped service exposing model/apiKey context.
     * @param {Object} logger - Logger used for structured trace output.
     */
    constructor(contextService, logger) {
        this.contextService = contextService;
        this.logger = logger;
    }
    /**
     * Single-shot chat completion with optional tool calling. Tool-call
     * arguments are stringified on the way in and parsed back on the way out.
     * Tools are omitted from the request entirely when the list is empty.
     * @param {Object} params - { clientId, agentName, messages, mode, tools }.
     * @returns {Promise<Object>} Assistant message enriched with mode/agentName.
     */
    async getCompletion(params) {
        const mistral = getMistral();
        const { clientId, agentName, messages: rawMessages, mode, tools } = params;
        this.logger.log("mistralProvider getCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        // Map raw messages to OpenAI format
        const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
            role,
            tool_call_id,
            content,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.stringify(f.arguments),
                },
            })),
        }));
        // NOTE(review): only name/parameters are forwarded; tool descriptions
        // appear to be dropped here — confirm this is intentional.
        const formattedTools = tools?.map(({ type, function: f }) => ({
            type: type,
            function: {
                name: f.name,
                parameters: f.parameters,
            },
        }));
        const { choices: [{ message: { content, role, tool_calls }, },], } = await mistral.chat.completions.create({
            model: this.contextService.context.model,
            messages: messages,
            tools: formattedTools?.length ? formattedTools : undefined,
        });
        const result = {
            content: content,
            mode,
            agentName,
            role,
            // Parse stringified arguments back into plain objects for callers.
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.parse(f.arguments),
                },
            })),
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_mistral_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
        }
        return result;
    }
    /**
     * "Streaming" completion: performs a single non-streaming request and
     * emits one llm-completion event to mimic streaming — no per-token events.
     * @param {Object} params - { clientId, agentName, messages, mode, tools }.
     * @returns {Promise<Object>} Assistant message (content defaults to "").
     */
    async getStreamCompletion(params) {
        const mistral = getMistral();
        const { clientId, agentName, messages: rawMessages, mode, tools } = params;
        this.logger.log("mistralProvider getStreamCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        // Map raw messages to OpenAI format
        const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
            role,
            tool_call_id,
            content,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.stringify(f.arguments),
                },
            })),
        }));
        // Map tools to OpenAI format
        const formattedTools = tools?.map(({ type, function: f }) => ({
            type: type,
            function: {
                name: f.name,
                parameters: f.parameters,
            },
        }));
        const { choices: [{ message: { content, role, tool_calls }, },], } = await mistral.chat.completions.create({
            model: this.contextService.context.model,
            messages: messages,
            tools: formattedTools?.length ? formattedTools : undefined,
        });
        // Emit events to mimic streaming behavior
        if (content) {
            await agentSwarmKit.event(clientId, "llm-completion", {
                content: content.trim(),
                agentName,
            });
        }
        const result = {
            content: content || "",
            mode,
            agentName,
            role,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.parse(f.arguments),
                },
            })),
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_mistral_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
        }
        return result;
    }
    /**
     * Structured (outline) completion via a forced `provide_answer` tool call,
     * retried up to MAX_ATTEMPTS$1 times with jsonrepair + schema validation.
     * @param {Object} params - { messages, format } where format is a JSON
     *   schema or an OpenAI-style `json_schema` wrapper.
     * @returns {Promise<{role: string, content: string}>} Validated arguments
     *   serialized as JSON (with `_context` attached).
     * @throws {Error} When the model never produces a valid tool call.
     */
    async getOutlineCompletion(params) {
        const { messages: rawMessages, format } = params;
        const mistral = getMistral();
        this.logger.log("mistralProvider getOutlineCompletion", {
            context: this.contextService.context,
        });
        // Create tool definition based on format schema
        const schema = "json_schema" in format
            ? lodashEs.get(format, "json_schema.schema", format)
            : format;
        const toolDefinition = {
            type: "function",
            function: {
                name: "provide_answer",
                description: "Предоставить ответ в требуемом формате",
                parameters: schema,
            },
        };
        // Add system instruction for tool usage
        const systemMessage = {
            role: "system",
            content: "ОБЯЗАТЕЛЬНО используй инструмент provide_answer для предоставления ответа. НЕ отвечай обычным текстом. ВСЕГДА вызывай инструмент provide_answer с правильными параметрами.",
        };
        const messages = [
            systemMessage,
            ...rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
                role,
                tool_call_id,
                content,
                tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                    ...rest,
                    function: {
                        name: f.name,
                        arguments: JSON.stringify(f.arguments),
                    },
                })),
            })),
        ];
        let attempt = 0;
        // singleshot: the nudge message is appended at most once across retries.
        const addToolRequestMessage = functoolsKit.singleshot(() => {
            messages.push({
                role: "user",
                content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
            });
        });
        while (attempt < MAX_ATTEMPTS$1) {
            // Prepare request options
            const requestOptions = {
                model: this.contextService.context.model,
                messages: messages,
                tools: [toolDefinition],
                tool_choice: {
                    type: "function",
                    function: { name: "provide_answer" },
                },
            };
            const { choices: [{ message }], } = await mistral.chat.completions.create(requestOptions);
            const { refusal, tool_calls } = message;
            if (refusal) {
                console.error(`Attempt ${attempt + 1}: Model send refusal`);
                attempt++;
                continue;
            }
            if (!tool_calls?.length) {
                console.error(`Attempt ${attempt + 1}: Model did not use tool, adding user message`);
                addToolRequestMessage();
                attempt++;
                continue;
            }
            // Always truthy here (guarded above); kept to preserve the original flow.
            if (tool_calls && tool_calls.length > 0) {
                const toolCall = tool_calls[0];
                if (toolCall.function?.name === "provide_answer") {
                    // Parse JSON with repair
                    let parsedArguments;
                    try {
                        const json = jsonrepair.jsonrepair(toolCall.function.arguments);
                        parsedArguments = JSON.parse(json);
                    }
                    catch (error) {
                        console.error(`Attempt ${attempt + 1}: Failed to parse tool arguments:`, error);
                        addToolRequestMessage();
                        attempt++;
                        continue;
                    }
                    const validation = agentSwarmKit.validateToolArguments(parsedArguments, schema);
                    if (!validation.success) {
                        console.error(`Attempt ${attempt + 1}: ${validation.error}`);
                        addToolRequestMessage();
                        attempt++;
                        continue;
                    }
                    // Attach the resolved execution context to the validated payload.
                    lodashEs.set(validation.data, "_context", this.contextService.context);
                    const result = {
                        role: "assistant",
                        content: JSON.stringify(validation.data),
                    };
                    // Debug logging
                    if (CC_ENABLE_DEBUG) {
                        await fs.appendFile("./debug_mistral_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
                    }
                    return result;
                }
            }
            // Reached when the tool call targeted an unexpected function name.
            console.error(`Attempt ${attempt + 1}: Model send refusal`);
            attempt++;
        }
        throw new Error("Model failed to use tool after maximum attempts");
    }
}
1979
+
1980
// Lazily builds (and memoizes, via singleshot) the OpenAI-compatible client
// pointed at the Perplexity API. An array apiKey means the caller configured
// token rotation, which this provider cannot honor: the memoized value is
// cleared so the next call re-evaluates, then an error is thrown.
const getPerplexity = functoolsKit.singleshot(() => {
    const { apiKey } = lib.contextService.context;
    if (!Array.isArray(apiKey)) {
        return new OpenAI({
            baseURL: "https://api.perplexity.ai",
            apiKey,
        });
    }
    // Reset the singleshot cache so a corrected config can retry later.
    getPerplexity.clear();
    throw new Error("Perplexity provider does not support token rotation");
});
1991
+
1992
/**
 * Completion runner backed by the Perplexity OpenAI-compatible chat API.
 *
 * All three entry points normalize the swarm-kit message history into the
 * OpenAI wire format: non-system messages are kept (user/assistant only),
 * system messages are concatenated into a single leading system prompt, and
 * consecutive same-role messages are merged because the upstream API expects
 * alternating roles.
 */
class PerplexityProvider {
    /**
     * @param contextService - scoped context carrying { model, apiKey, ... }
     * @param logger - LoggerService used for structured call logging
     */
    constructor(contextService, logger) {
        this.contextService = contextService;
        this.logger = logger;
    }
    /**
     * Single (non-streaming) chat completion with optional tool calling.
     * Returns { content, mode, agentName, role, tool_calls? } where tool-call
     * arguments have been JSON.parse'd back into objects for the caller.
     */
    async getCompletion(params) {
        const perplexity = getPerplexity();
        const { clientId, agentName, messages: rawMessages, mode, tools } = params;
        this.logger.log("perplexityProvider getCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        // Keep only user/assistant turns; tool-call arguments are serialized
        // to strings because the OpenAI wire format requires string arguments.
        const messages = rawMessages
            .filter(({ role }) => role === "user" || role === "assistant")
            .map(({ role, tool_call_id, tool_calls, content }) => ({
            role,
            tool_call_id,
            content,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.stringify(f.arguments),
                },
            })),
        }));
        // Collapse every system message into one newline-joined system prompt.
        const systemPrompt = rawMessages
            .filter(({ role }) => role === "system")
            .reduce((acm, { content }) => functoolsKit.str.newline(acm, content), "");
        if (systemPrompt) {
            messages.unshift({
                role: "system",
                content: systemPrompt,
            });
        }
        // Merge consecutive assistant messages (iterating backwards so splice
        // does not disturb unvisited indices).
        for (let i = messages.length - 1; i > 0; i--) {
            if (messages[i].role === "assistant" &&
                messages[i - 1].role === "assistant") {
                messages[i - 1].content = functoolsKit.str.newline(messages[i - 1].content, messages[i].content);
                // Merge tool_calls if they exist
                if (messages[i].tool_calls || messages[i - 1].tool_calls) {
                    messages[i - 1].tool_calls = [
                        ...(messages[i - 1].tool_calls || []),
                        ...(messages[i].tool_calls || []),
                    ];
                }
                messages.splice(i, 1);
            }
        }
        // Merge consecutive user messages
        for (let i = messages.length - 1; i > 0; i--) {
            if (messages[i].role === "user" && messages[i - 1].role === "user") {
                messages[i - 1].content = functoolsKit.str.newline(messages[i - 1].content, messages[i].content);
                messages.splice(i, 1);
            }
        }
        const formattedTools = tools?.map(({ type, function: f }) => ({
            type: type,
            function: {
                name: f.name,
                description: f.description ?? "", // Perplexity API requires description
                parameters: f.parameters,
            },
        }));
        const result = await perplexity.chat.completions.create({
            model: this.contextService.context.model,
            messages: messages,
            tools: formattedTools?.length ? formattedTools : undefined,
            tool_choice: "auto",
        });
        const { choices: [{ message: { content, role, tool_calls }, },], } = result;
        const finalResult = {
            content: content,
            mode,
            agentName,
            role,
            // Deserialize tool-call arguments back into plain objects.
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.parse(f.arguments),
                },
            })),
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_perplexity_provider.txt", JSON.stringify({ params, answer: finalResult }, null, 2) + "\n\n");
        }
        return finalResult;
    }
    /**
     * Streaming is not implemented for Perplexity: this stub returns a fixed
     * assistant message (Russian text: "the model selected in settings does
     * not support tool_calling") without calling the API at all.
     */
    async getStreamCompletion(params) {
        const { clientId, agentName, mode } = params;
        this.logger.log("perplexityProvider getStreamCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        const result = {
            content: "Выбранная в настройках языковая модель не поддерживает tool_calling",
            mode,
            agentName,
            role: "assistant",
        };
        return result;
    }
    /**
     * Structured-output completion: sends the history with a json_schema
     * response_format and returns the (jsonrepair'ed) JSON string as the
     * assistant content. Throws if the model refuses.
     */
    async getOutlineCompletion(params) {
        const { messages: rawMessages, format } = params;
        const perplexity = getPerplexity();
        this.logger.log("perplexityProvider getOutlineCompletion", {
            context: this.contextService.context,
        });
        // Same history normalization as getCompletion (see above).
        const messages = rawMessages
            .filter(({ role }) => role === "user" || role === "assistant")
            .map(({ role, tool_call_id, tool_calls, content }) => ({
            role,
            tool_call_id,
            content,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.stringify(f.arguments),
                },
            })),
        }));
        const systemPrompt = rawMessages
            .filter(({ role }) => role === "system")
            .reduce((acm, { content }) => functoolsKit.str.newline(acm, content), "");
        if (systemPrompt) {
            messages.unshift({
                role: "system",
                content: systemPrompt,
            });
        }
        // Merge consecutive assistant messages
        for (let i = messages.length - 1; i > 0; i--) {
            if (messages[i].role === "assistant" &&
                messages[i - 1].role === "assistant") {
                messages[i - 1].content = functoolsKit.str.newline(messages[i - 1].content, messages[i].content);
                // Merge tool_calls if they exist
                if (messages[i].tool_calls || messages[i - 1].tool_calls) {
                    messages[i - 1].tool_calls = [
                        ...(messages[i - 1].tool_calls || []),
                        ...(messages[i].tool_calls || []),
                    ];
                }
                messages.splice(i, 1);
            }
        }
        // Merge consecutive user messages
        for (let i = messages.length - 1; i > 0; i--) {
            if (messages[i].role === "user" && messages[i - 1].role === "user") {
                messages[i - 1].content = functoolsKit.str.newline(messages[i - 1].content, messages[i].content);
                messages.splice(i, 1);
            }
        }
        // Accept either a ready-made response_format or a bare JSON schema.
        const response_format = "json_schema" in format
            ? format
            : { type: "json_schema", json_schema: { schema: format } };
        const completion = await perplexity.chat.completions.create({
            messages: messages,
            model: this.contextService.context.model,
            response_format: response_format,
        });
        const choice = completion.choices[0];
        if (choice.message.refusal) {
            throw new Error(choice.message.refusal);
        }
        // jsonrepair tolerates slightly malformed model output.
        const json = jsonrepair.jsonrepair(choice.message.content || "");
        const result = {
            role: "assistant",
            content: json,
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_perplexity_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
        }
        return result;
    }
}
2179
+
2180
// Memoized factory for the OpenAI-compatible client targeting Cohere's
// compatibility endpoint. Token rotation (array apiKey) is unsupported:
// the cached value is invalidated before throwing so a later call with a
// fixed config can succeed.
const getCohere = functoolsKit.singleshot(() => {
    const { apiKey } = lib.contextService.context;
    if (!Array.isArray(apiKey)) {
        return new OpenAI({
            baseURL: "https://api.cohere.ai/compatibility/v1",
            apiKey,
        });
    }
    // Drop the singleshot cache so the factory is not stuck on the error.
    getCohere.clear();
    throw new Error("Cohere provider does not support token rotation");
});
2191
+
2192
/**
 * Completion runner backed by Cohere's OpenAI-compatibility API.
 *
 * Unlike the Perplexity provider, the chat paths here KEEP tool-role
 * messages and deliberately do NOT merge consecutive assistant messages,
 * because Cohere enforces a strict tool_calls -> tool_responses sequence
 * that merging would break. Only consecutive user messages are merged.
 */
class CohereProvider {
    /**
     * @param contextService - scoped context carrying { model, apiKey, ... }
     * @param logger - LoggerService used for structured call logging
     */
    constructor(contextService, logger) {
        this.contextService = contextService;
        this.logger = logger;
    }
    /**
     * Single chat completion with optional tool calling. Tool-call arguments
     * are serialized to strings on the way out and parsed back on the way in.
     */
    async getCompletion(params) {
        const cohere = getCohere();
        const { clientId, agentName, messages: rawMessages, mode, tools } = params;
        this.logger.log("cohereProvider getCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        // Filter and sort messages - INCLUDE TOOL MESSAGES for Cohere
        const messages = rawMessages
            .filter(({ role }) => role === "user" || role === "assistant" || role === "tool")
            .map(({ role, tool_call_id, tool_calls, content }) => ({
            role,
            tool_call_id,
            content,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.stringify(f.arguments),
                },
            })),
        }));
        // Collapse all system messages into a single leading system prompt.
        const systemPrompt = rawMessages
            .filter(({ role }) => role === "system")
            .reduce((acm, { content }) => functoolsKit.str.newline(acm, content), "");
        if (systemPrompt) {
            messages.unshift({
                role: "system",
                content: systemPrompt,
            });
        }
        // DO NOT merge consecutive assistant messages in Cohere - breaks tool calling flow
        // Cohere requires strict tool_calls -> tool_responses sequence
        // Only merge consecutive user messages (safe)
        for (let i = messages.length - 1; i > 0; i--) {
            if (messages[i].role === "user" && messages[i - 1].role === "user") {
                messages[i - 1].content = functoolsKit.str.newline(messages[i - 1].content, messages[i].content);
                messages.splice(i, 1);
            }
        }
        const formattedTools = tools?.map(({ type, function: f }) => ({
            type: type,
            function: {
                name: f.name,
                description: f.description ?? "", // Cohere API requires description
                parameters: f.parameters,
            },
        }));
        const result = await cohere.chat.completions.create({
            model: this.contextService.context.model,
            messages: messages,
            tools: formattedTools?.length ? formattedTools : undefined,
            tool_choice: "auto",
        });
        const { choices: [{ message: { content, role, tool_calls }, },], } = result;
        const finalResult = {
            content: content,
            mode,
            agentName,
            role,
            // Deserialize tool-call arguments back into plain objects.
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.parse(f.arguments),
                },
            })),
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_cohere_provider.txt", JSON.stringify({ params, answer: finalResult }, null, 2) + "\n\n");
        }
        return finalResult;
    }
    /**
     * Pseudo-streaming completion: performs one non-streaming request, then
     * emits the full content as a single "llm-completion" event to mimic a
     * stream for the client before returning the normal result shape.
     */
    async getStreamCompletion(params) {
        const cohere = getCohere();
        const { clientId, agentName, messages: rawMessages, mode, tools } = params;
        this.logger.log("cohereProvider getStreamCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        // Filter and sort messages - INCLUDE TOOL MESSAGES for Cohere
        const messages = rawMessages
            .filter(({ role }) => role === "user" || role === "assistant" || role === "tool")
            .map(({ role, tool_call_id, tool_calls, content }) => ({
            role,
            tool_call_id,
            content,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.stringify(f.arguments),
                },
            })),
        }));
        const systemPrompt = rawMessages
            .filter(({ role }) => role === "system")
            .reduce((acm, { content }) => functoolsKit.str.newline(acm, content), "");
        if (systemPrompt) {
            messages.unshift({
                role: "system",
                content: systemPrompt,
            });
        }
        // DO NOT merge consecutive assistant messages in Cohere - breaks tool calling flow
        // Cohere requires strict tool_calls -> tool_responses sequence
        // Merge consecutive user messages
        for (let i = messages.length - 1; i > 0; i--) {
            if (messages[i].role === "user" && messages[i - 1].role === "user") {
                messages[i - 1].content = functoolsKit.str.newline(messages[i - 1].content, messages[i].content);
                messages.splice(i, 1);
            }
        }
        // Map tools to OpenAI format
        const formattedTools = tools?.map(({ type, function: f }) => ({
            type: type,
            function: {
                name: f.name,
                description: f.description ?? "", // Cohere API requires description
                parameters: f.parameters,
            },
        }));
        const completion = await cohere.chat.completions.create({
            model: this.contextService.context.model,
            messages: messages,
            tools: formattedTools?.length ? formattedTools : undefined,
            tool_choice: "auto",
        });
        const { choices: [{ message: { content, role, tool_calls }, },], } = completion;
        // Emit events to mimic streaming behavior
        if (content) {
            await agentSwarmKit.event(clientId, "llm-completion", {
                content: content.trim(),
                agentName,
            });
        }
        const result = {
            content: content || "",
            mode,
            agentName,
            role,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.parse(f.arguments),
                },
            })),
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_cohere_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
        }
        return result;
    }
    /**
     * Structured-output completion via json_schema response_format.
     * Note: unlike the chat paths, tool-role messages are filtered OUT here
     * (user/assistant only). Returns jsonrepair'ed JSON as assistant content;
     * throws on model refusal.
     */
    async getOutlineCompletion(params) {
        const { messages: rawMessages, format } = params;
        const cohere = getCohere();
        this.logger.log("cohereProvider getOutlineCompletion", {
            context: this.contextService.context,
        });
        // Filter and sort messages like GPT5Provider
        const messages = rawMessages
            .filter(({ role }) => role === "user" || role === "assistant")
            .map(({ role, tool_call_id, tool_calls, content }) => ({
            role,
            tool_call_id,
            content,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.stringify(f.arguments),
                },
            })),
        }));
        const systemPrompt = rawMessages
            .filter(({ role }) => role === "system")
            .reduce((acm, { content }) => functoolsKit.str.newline(acm, content), "");
        if (systemPrompt) {
            messages.unshift({
                role: "system",
                content: systemPrompt,
            });
        }
        // DO NOT merge consecutive assistant messages in Cohere - breaks tool calling flow
        // Cohere requires strict tool_calls -> tool_responses sequence
        // Merge consecutive user messages
        for (let i = messages.length - 1; i > 0; i--) {
            if (messages[i].role === "user" && messages[i - 1].role === "user") {
                messages[i - 1].content = functoolsKit.str.newline(messages[i - 1].content, messages[i].content);
                messages.splice(i, 1);
            }
        }
        // Accept either a ready-made response_format or a bare JSON schema.
        const response_format = "json_schema" in format
            ? format
            : { type: "json_schema", json_schema: { schema: format } };
        const completion = await cohere.chat.completions.create({
            messages: messages,
            model: this.contextService.context.model,
            response_format: response_format,
        });
        const choice = completion.choices[0];
        if (choice.message.refusal) {
            throw new Error(choice.message.refusal);
        }
        const json = jsonrepair.jsonrepair(choice.message.content || "");
        const result = {
            role: "assistant",
            content: json,
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_cohere_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
        }
        return result;
    }
}
2421
+
2422
// Maximum retries for the outline (structured output) tool-forcing loop.
const MAX_ATTEMPTS = 3;
// DashScope (Alibaba) OpenAI-compatible endpoint.
const BASE_URL = "https://dashscope-intl.aliyuncs.com/compatible-mode/v1";
/**
 * Completion runner for Alibaba DashScope. Talks to the HTTP API directly
 * via functoolsKit.fetchApi (no OpenAI client) so it can pass the
 * DashScope-specific `enable_thinking: false` flag. No message merging or
 * system-prompt collapsing is performed here — the raw history is forwarded
 * as-is (only tool-call arguments are (de)serialized).
 */
class AlibabaProvider {
    /**
     * @param contextService - scoped context carrying { model, apiKey, ... }
     * @param logger - LoggerService used for structured call logging
     */
    constructor(contextService, logger) {
        this.contextService = contextService;
        this.logger = logger;
    }
    /** Single chat completion with optional tool calling. */
    async getCompletion(params) {
        const { clientId, agentName, messages: rawMessages, mode, tools } = params;
        this.logger.log("alibabaProvider getCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        // Array apiKey means token rotation was configured; not supported here.
        if (Array.isArray(this.contextService.context.apiKey)) {
            throw new Error("Alibaba provider does not support token rotation");
        }
        // Map raw messages to OpenAI format
        const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
            role,
            tool_call_id,
            content,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.stringify(f.arguments),
                },
            })),
        }));
        // NOTE(review): tool descriptions are dropped here, unlike the
        // Perplexity/Cohere providers — confirm DashScope accepts that.
        const formattedTools = tools?.map(({ type, function: f }) => ({
            type: type,
            function: {
                name: f.name,
                parameters: f.parameters,
            },
        }));
        // Prepare request body with enable_thinking parameter
        const requestBody = {
            model: this.contextService.context.model,
            messages: messages,
            tools: formattedTools?.length ? formattedTools : undefined,
            enable_thinking: false,
        };
        // Use fetchApi from functools-kit
        const responseData = await functoolsKit.fetchApi(`${BASE_URL}/chat/completions`, {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "Authorization": `Bearer ${this.contextService.context.apiKey}`,
            },
            body: JSON.stringify(requestBody),
        });
        const { choices: [{ message: { content, role, tool_calls }, },], } = responseData;
        const result = {
            content: content,
            mode,
            agentName,
            role,
            // Deserialize tool-call arguments back into plain objects.
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.parse(f.arguments),
                },
            })),
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_alibaba_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
        }
        return result;
    }
    /**
     * Pseudo-streaming completion: one non-streaming request, whose content
     * is then emitted as a single "llm-completion" event before returning.
     */
    async getStreamCompletion(params) {
        const { clientId, agentName, messages: rawMessages, mode, tools } = params;
        this.logger.log("alibabaProvider getStreamCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        if (Array.isArray(this.contextService.context.apiKey)) {
            throw new Error("Alibaba provider does not support token rotation");
        }
        // Map raw messages to OpenAI format
        const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
            role,
            tool_call_id,
            content,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.stringify(f.arguments),
                },
            })),
        }));
        const formattedTools = tools?.map(({ type, function: f }) => ({
            type: type,
            function: {
                name: f.name,
                parameters: f.parameters,
            },
        }));
        // Prepare request body with enable_thinking parameter
        const requestBody = {
            model: this.contextService.context.model,
            messages: messages,
            tools: formattedTools?.length ? formattedTools : undefined,
            enable_thinking: false,
        };
        // Use fetchApi from functools-kit
        const responseData = await functoolsKit.fetchApi(`${BASE_URL}/chat/completions`, {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "Authorization": `Bearer ${this.contextService.context.apiKey}`,
            },
            body: JSON.stringify(requestBody),
        });
        const { choices: [{ message: { content, role, tool_calls }, },], } = responseData;
        // Emit events to mimic streaming behavior
        if (content) {
            await agentSwarmKit.event(clientId, "llm-completion", {
                content: content.trim(),
                agentName,
            });
        }
        // NOTE(review): content may be null here (Cohere's stream path uses
        // `content || ""`); confirm downstream tolerates a null content.
        const result = {
            content: content,
            mode,
            agentName,
            role,
            tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                ...rest,
                function: {
                    name: f.name,
                    arguments: JSON.parse(f.arguments),
                },
            })),
        };
        // Debug logging
        if (CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_alibaba_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
        }
        return result;
    }
    /**
     * Structured output via forced tool calling: the schema is exposed as a
     * "provide_answer" tool, tool_choice is pinned to it, and the loop retries
     * up to MAX_ATTEMPTS when the model refuses, skips the tool, or returns
     * arguments that fail parsing/validation. On success the validated data
     * (with the runtime context attached under `_context`) is returned as
     * JSON assistant content.
     */
    async getOutlineCompletion(params) {
        const { messages: rawMessages, format } = params;
        this.logger.log("alibabaProvider getOutlineCompletion", {
            context: this.contextService.context,
        });
        if (Array.isArray(this.contextService.context.apiKey)) {
            throw new Error("Alibaba provider does not support token rotation");
        }
        // Create tool definition based on format schema
        const schema = "json_schema" in format
            ? lodashEs.get(format, "json_schema.schema", format)
            : format;
        const toolDefinition = {
            type: "function",
            function: {
                name: "provide_answer",
                description: "Предоставить ответ в требуемом формате",
                parameters: schema,
            },
        };
        // Add system instruction for tool usage
        const systemMessage = {
            role: "system",
            content: "ОБЯЗАТЕЛЬНО используй инструмент provide_answer для предоставления ответа. НЕ отвечай обычным текстом. ВСЕГДА вызывай инструмент provide_answer с правильными параметрами.",
        };
        const messages = [
            systemMessage,
            ...rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
                role,
                tool_call_id,
                content,
                tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
                    ...rest,
                    function: {
                        name: f.name,
                        arguments: JSON.stringify(f.arguments),
                    },
                })),
            })),
        ];
        let attempt = 0;
        // singleshot: the reminder message is appended at most once across retries.
        const addToolRequestMessage = functoolsKit.singleshot(() => {
            messages.push({
                role: "user",
                content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
            });
        });
        while (attempt < MAX_ATTEMPTS) {
            // Prepare request body with enable_thinking parameter
            const requestBody = {
                model: this.contextService.context.model,
                messages: messages,
                tools: [toolDefinition],
                tool_choice: {
                    type: "function",
                    function: { name: "provide_answer" },
                },
                enable_thinking: false,
            };
            // Use fetchApi from functools-kit
            const responseData = await functoolsKit.fetchApi(`${BASE_URL}/chat/completions`, {
                method: "POST",
                headers: {
                    "Content-Type": "application/json",
                    "Authorization": `Bearer ${this.contextService.context.apiKey}`,
                },
                body: JSON.stringify(requestBody),
            });
            const { choices: [{ message }], } = responseData;
            const { refusal, tool_calls } = message;
            if (refusal) {
                console.error(`Attempt ${attempt + 1}: Model send refusal`);
                attempt++;
                continue;
            }
            if (!tool_calls?.length) {
                console.error(`Attempt ${attempt + 1}: Model did not use tool, adding user message`);
                addToolRequestMessage();
                attempt++;
                continue;
            }
            // NOTE(review): this guard is always true after the !tool_calls?.length
            // check above — kept for safety/symmetry with the Mistral provider.
            if (tool_calls && tool_calls.length > 0) {
                const toolCall = tool_calls[0];
                if (toolCall.function?.name === "provide_answer") {
                    // Parse JSON with repair
                    let parsedArguments;
                    try {
                        const json = jsonrepair.jsonrepair(toolCall.function.arguments);
                        parsedArguments = JSON.parse(json);
                    }
                    catch (error) {
                        console.error(`Attempt ${attempt + 1}: Failed to parse tool arguments:`, error);
                        addToolRequestMessage();
                        attempt++;
                        continue;
                    }
                    const validation = agentSwarmKit.validateToolArguments(parsedArguments, schema);
                    if (!validation.success) {
                        console.error(`Attempt ${attempt + 1}: ${validation.error}`);
                        addToolRequestMessage();
                        attempt++;
                        continue;
                    }
                    // Attach the runtime context so downstream consumers can read it.
                    lodashEs.set(validation.data, "_context", this.contextService.context);
                    const result = {
                        role: "assistant",
                        content: JSON.stringify(validation.data),
                    };
                    // Debug logging
                    if (CC_ENABLE_DEBUG) {
                        await fs.appendFile("./debug_alibaba_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
                    }
                    return result;
                }
            }
            // Reached when the tool called was not "provide_answer"; the log
            // text mirrors the refusal branch even though no refusal occurred.
            console.error(`Attempt ${attempt + 1}: Model send refusal`);
            attempt++;
        }
        throw new Error("Model failed to use tool after maximum attempts");
    }
}
2691
+
2692
// --- Dependency-injection wiring -----------------------------------------
// Service groups resolved lazily from the DI container via inject(TYPES.*).
const commonServices = {
    loggerService: inject(TYPES.loggerService),
};
const baseServices = {
    contextService: inject(TYPES.contextService),
};
const privateServices = {
    runnerPrivateService: inject(TYPES.runnerPrivateService),
    outlinePrivateService: inject(TYPES.outlinePrivateService),
};
const publicServices = {
    runnerPublicService: inject(TYPES.runnerPublicService),
    outlinePublicService: inject(TYPES.outlinePublicService),
};
// Aggregate facade exposing every service under one object.
const engine = {
    ...commonServices,
    ...baseServices,
    ...privateServices,
    ...publicServices,
};
init();
// Register one provider class per inference backend.
{
    engine.runnerPrivateService.registerRunner(InferenceName.OllamaInference, OllamaProvider);
    engine.runnerPrivateService.registerRunner(InferenceName.GrokInference, GrokProvider$1);
    engine.runnerPrivateService.registerRunner(InferenceName.HfInference, HfProvider);
    // NOTE(review): ClaudeInference is registered with `GrokProvider` — this
    // looks like a bundler name-dedupe artifact (GrokProvider / GrokProvider$1
    // come from two modules that both declared a class with the same name);
    // verify against the pre-bundle sources that this is the Claude runner.
    engine.runnerPrivateService.registerRunner(InferenceName.ClaudeInference, GrokProvider);
    engine.runnerPrivateService.registerRunner(InferenceName.GPT5Inference, GPT5Provider);
    engine.runnerPrivateService.registerRunner(InferenceName.DeepseekInference, DeepseekProvider);
    engine.runnerPrivateService.registerRunner(InferenceName.MistralInference, MistralProvider);
    engine.runnerPrivateService.registerRunner(InferenceName.PerplexityInference, PerplexityProvider);
    engine.runnerPrivateService.registerRunner(InferenceName.CohereInference, CohereProvider);
    engine.runnerPrivateService.registerRunner(InferenceName.AlibabaInference, AlibabaProvider);
}
// Expose the engine globally (used e.g. by the provider factories via `lib`).
Object.assign(globalThis, { engine });
var lib = engine;
2727
+
2728
// Register the three runner completions with agent-swarm-kit; each simply
// delegates to the corresponding runnerPrivateService method.

// Structured (JSON) outline completion.
agentSwarmKit.addCompletion({
    completionName: CompletionName.RunnerOutlineCompletion,
    getCompletion: async (params) => {
        return await engine.runnerPrivateService.getOutlineCompletion(params);
    },
    json: true,
});

// Streaming (or pseudo-streaming, provider-dependent) completion.
agentSwarmKit.addCompletion({
    completionName: CompletionName.RunnerStreamCompletion,
    getCompletion: async (params) => {
        return await engine.runnerPrivateService.getStreamCompletion(params);
    },
});

// Plain single-shot completion.
agentSwarmKit.addCompletion({
    completionName: CompletionName.RunnerCompletion,
    getCompletion: async (params) => {
        return await engine.runnerPrivateService.getCompletion(params);
    },
});
2749
+
2750
// Zod schema for a trading signal decision. The .describe() strings double
// as prompt text sent to the model (via zodResponseFormat), so they must not
// be reworded casually.
const SignalSchema = zod.z.object({
    // Required direction: "long" / "short" / "wait".
    position: zod.z
        .enum(["long", "short", "wait"])
        .describe(functoolsKit.str.newline("Position direction (ALWAYS required):", "long: market shows consistent bullish signals, uptrend or growth potential", "short: market shows consistent bearish signals, downtrend or decline potential", "wait: conflicting signals between timeframes OR unfavorable trading conditions")),
    // Entry price in USD.
    price_open: zod.z
        .number()
        .describe(functoolsKit.str.newline("Position opening price in USD", "Use the current market price at the time of analysis")),
    // Stop-loss: below entry for long, above for short.
    price_stop_loss: zod.z
        .number()
        .describe(functoolsKit.str.newline("Stop-loss price in USD", "For LONG: price below price_open (protection against decline)", "For SHORT: price above price_open (protection against rise)", "NEVER set SL in 'empty space' without technical justification")),
    // Take-profit: above entry for long, below for short.
    price_take_profit: zod.z
        .number()
        .describe(functoolsKit.str.newline("Take-profit price in USD", "For LONG: price above price_open (growth target)", "For SHORT: price below price_open (decline target)", "NEVER set TP based on trend without technical justification")),
    // Expected minutes until TP is reached (validated <= 360 downstream).
    minute_estimated_time: zod.z
        .number()
        .describe(functoolsKit.str.newline("Estimated time to reach Take Profit in minutes", "Calculated based on HONEST technical analysis, using:", "ATR, ADX, MACD, Momentum, Slope and other metrics")),
    // Free-text risk assessment the model must fill in.
    risk_note: zod.z
        .string()
        .describe(functoolsKit.str.newline("Description of current market situation risks:", "", "Analyze and specify applicable risks:", "1. Whale manipulations (volume spikes, long shadows, pin bars, candle engulfing, false breakouts)", "2. Order book (order book walls, spoofing, bid/ask imbalance, low liquidity)", "3. P&L history (recurring mistakes on similar patterns)", "4. Time factors (trading session, low liquidity, upcoming events)", "5. Correlations (overall market trend, conflicting trends across timeframes)", "6. Technical risks (indicator divergences, weak volumes, critical levels)", "7. Gaps and anomalies (price gaps, unfilled gaps, movements without volume)", "", "Provide SPECIFIC numbers, percentages and probabilities.")),
});
2770
+
2771
// Register the signal-decision outline: SignalSchema is converted to an
// OpenAI response_format, the raw param messages are pushed into history,
// and each validator enforces one invariant of a well-formed trade signal.
agentSwarmKit.addOutline({
    outlineName: OutlineName.SignalOutline,
    completion: CompletionName.RunnerOutlineCompletion,
    format: zod$1.zodResponseFormat(SignalSchema, "position_open_decision"),
    getOutlineHistory: async ({ history, param: messages = [] }) => {
        await history.push(messages);
    },
    validations: [
        {
            validate: ({ data }) => {
                if (!data.position) {
                    throw new Error("The position field is not filled");
                }
            },
            docDescription: "Validates that position direction (long/short/wait) is specified.",
        },
        {
            validate: ({ data }) => {
                if (!data.risk_note) {
                    throw new Error("The risk_note field is not filled");
                }
            },
            docDescription: "Validates that risk description is provided.",
        },
        {
            validate: ({ data }) => {
                if (!data.price_open || data.price_open <= 0) {
                    throw new Error("The price_open field must contain a positive price");
                }
            },
            docDescription: "Validates that opening price is specified and positive.",
        },
        {
            // SL is only mandatory when an actual position is opened.
            validate: ({ data }) => {
                if (data.position !== "wait" &&
                    (!data.price_stop_loss || data.price_stop_loss <= 0)) {
                    throw new Error("When position='long' or 'short', the price_stop_loss field is required and must be positive");
                }
            },
            docDescription: "Validates that stop-loss is specified when opening a position.",
        },
        {
            // TP is only mandatory when an actual position is opened.
            validate: ({ data }) => {
                if (data.position !== "wait" &&
                    (!data.price_take_profit || data.price_take_profit <= 0)) {
                    throw new Error("When position='long' or 'short', the price_take_profit field is required and must be positive");
                }
            },
            docDescription: "Validates that take-profit is specified when opening a position.",
        },
        {
            // For LONG: SL strictly below entry, TP strictly above entry.
            validate: ({ data }) => {
                if (data.position === "long") {
                    if (data.price_stop_loss >= data.price_open) {
                        throw new Error("For LONG position, price_stop_loss must be below price_open");
                    }
                    if (data.price_take_profit <= data.price_open) {
                        throw new Error("For LONG position, price_take_profit must be above price_open");
                    }
                }
            },
            docDescription: "Validates price correctness for LONG position.",
        },
        {
            // For SHORT: SL strictly above entry, TP strictly below entry.
            validate: ({ data }) => {
                if (data.position === "short") {
                    if (data.price_stop_loss <= data.price_open) {
                        throw new Error("For SHORT position, price_stop_loss must be above price_open");
                    }
                    if (data.price_take_profit >= data.price_open) {
                        throw new Error("For SHORT position, price_take_profit must be below price_open");
                    }
                }
            },
            docDescription: "Validates price correctness for SHORT position.",
        },
        {
            validate: ({ data }) => {
                if (data.position !== "wait" &&
                    (!data.minute_estimated_time || data.minute_estimated_time <= 0)) {
                    throw new Error("When position='long' or 'short', the minute_estimated_time field is required and must be positive");
                }
            },
            docDescription: "Validates that estimated time to TP is specified when opening a position.",
        },
        {
            // Cap the horizon at 6 hours; longer forecasts must become "wait".
            validate: ({ data }) => {
                if (data.position !== "wait" && data.minute_estimated_time > 360) {
                    throw new Error("Estimated time to reach TP exceeds 6 hours (360 minutes). Use position='wait' for low volatility conditions");
                }
            },
            docDescription: "Validates that estimated time to reach TP does not exceed 6 hours.",
        },
    ],
});
2866
+
2867
// Cross-check that every completion and outline name declared in this bundle
// has a matching registration inside agent-swarm-kit before first use.
agentSwarmKit.validate({ CompletionName: CompletionName$1, OutlineName: OutlineName$1 });
2871
+
2872
/**
 * Creates a completion helper bound to a single inference backend.
 *
 * Fixes tenfold copy-paste duplication: every public provider helper shares
 * the exact same shape and differs only in the backend identifier passed to
 * the outline service.
 *
 * @param {string} inferenceName - Backend identifier from InferenceName$1.
 * @returns {(messages: unknown, model: string, apiKey: string) => Promise<unknown>}
 *   Async helper resolving with the outline completion for that backend.
 */
const createInferenceCompletion = (inferenceName) =>
    async (messages, model, apiKey) =>
        await lib.outlinePublicService.getCompletion(messages, inferenceName, model, apiKey);

// Provider-specific entry points; each targets a different inference backend
// while keeping the original (messages, model, apiKey) signature.
const ollama = createInferenceCompletion(InferenceName$1.OllamaInference);
const grok = createInferenceCompletion(InferenceName$1.GrokInference);
const hf = createInferenceCompletion(InferenceName$1.HfInference);
const claude = createInferenceCompletion(InferenceName$1.ClaudeInference);
const gpt5 = createInferenceCompletion(InferenceName$1.GPT5Inference);
const deepseek = createInferenceCompletion(InferenceName$1.DeepseekInference);
const mistral = createInferenceCompletion(InferenceName$1.MistralInference);
const perplexity = createInferenceCompletion(InferenceName$1.PerplexityInference);
const cohere = createInferenceCompletion(InferenceName$1.CohereInference);
const alibaba = createInferenceCompletion(InferenceName$1.AlibabaInference);
2902
+
2903
/**
 * Replaces the bundle's internal logger with a user-supplied implementation.
 *
 * @param {{ log: Function, debug: Function, info: Function, warn: Function }} logger
 *   Logger object forwarded verbatim to the internal logger service.
 */
function setLogger(logger) {
    lib.loggerService.setLogger(logger);
}
2906
+
2907
// Public API of the bundle: provider helpers, the logger hook, and the
// engine itself (exposed under the `lib` name).
Object.assign(exports, {
    alibaba,
    claude,
    cohere,
    deepseek,
    gpt5,
    grok,
    hf,
    lib: engine,
    mistral,
    ollama,
    perplexity,
    setLogger,
});