@backtest-kit/ollama 3.0.6 → 3.0.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/build/index.cjs +359 -12
- package/build/index.mjs +359 -13
- package/package.json +16 -14
- package/types.d.ts +25 -2
package/build/index.cjs
CHANGED
|
@@ -20,6 +20,7 @@ var inference = require('@huggingface/inference');
|
|
|
20
20
|
var openai = require('@langchain/openai');
|
|
21
21
|
var lodashEs = require('lodash-es');
|
|
22
22
|
var ollama$1 = require('ollama');
|
|
23
|
+
var Groq = require('groq-sdk');
|
|
23
24
|
|
|
24
25
|
var _documentCurrentScript = typeof document !== 'undefined' ? document.currentScript : null;
|
|
25
26
|
/**
|
|
@@ -2405,6 +2406,8 @@ var InferenceName;
|
|
|
2405
2406
|
InferenceName["CohereInference"] = "cohere_inference";
|
|
2406
2407
|
/** Alibaba Cloud provider (dashscope-intl.aliyuncs.com) */
|
|
2407
2408
|
InferenceName["AlibabaInference"] = "alibaba_inference";
|
|
2409
|
+
/** Groq provider (api.groq.com) */
|
|
2410
|
+
InferenceName["GroqInference"] = "groq_inference";
|
|
2408
2411
|
})(InferenceName || (InferenceName = {}));
|
|
2409
2412
|
var InferenceName$1 = InferenceName;
|
|
2410
2413
|
|
|
@@ -2766,7 +2769,7 @@ class GrokProvider {
|
|
|
2766
2769
|
/**
|
|
2767
2770
|
* Maximum number of retry attempts for outline completion.
|
|
2768
2771
|
*/
|
|
2769
|
-
const MAX_ATTEMPTS$
|
|
2772
|
+
const MAX_ATTEMPTS$6 = 5;
|
|
2770
2773
|
/**
|
|
2771
2774
|
* Custom ChatOpenAI implementation for HuggingFace with simplified token counting.
|
|
2772
2775
|
* Routes requests to HuggingFace Router endpoint.
|
|
@@ -3076,7 +3079,7 @@ class HfProvider {
|
|
|
3076
3079
|
content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
|
|
3077
3080
|
});
|
|
3078
3081
|
});
|
|
3079
|
-
while (attempt < MAX_ATTEMPTS$
|
|
3082
|
+
while (attempt < MAX_ATTEMPTS$6) {
|
|
3080
3083
|
const { choices: [{ message }], } = await functoolsKit.fetchApi("https://router.huggingface.co/v1/chat/completions", {
|
|
3081
3084
|
method: "POST",
|
|
3082
3085
|
headers: {
|
|
@@ -3285,7 +3288,7 @@ const getOllama = functoolsKit.singleshot(() => {
|
|
|
3285
3288
|
/**
|
|
3286
3289
|
* Maximum number of retry attempts for outline completion when model fails to use tools correctly.
|
|
3287
3290
|
*/
|
|
3288
|
-
const MAX_ATTEMPTS$
|
|
3291
|
+
const MAX_ATTEMPTS$5 = 3;
|
|
3289
3292
|
/**
|
|
3290
3293
|
* Provider for Ollama LLM completions.
|
|
3291
3294
|
*
|
|
@@ -3575,7 +3578,7 @@ class OllamaProvider {
|
|
|
3575
3578
|
content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
|
|
3576
3579
|
});
|
|
3577
3580
|
});
|
|
3578
|
-
while (attempt < MAX_ATTEMPTS$
|
|
3581
|
+
while (attempt < MAX_ATTEMPTS$5) {
|
|
3579
3582
|
const response = await ollama.chat({
|
|
3580
3583
|
model: this.contextService.context.model,
|
|
3581
3584
|
messages,
|
|
@@ -3675,7 +3678,7 @@ const getClaude = functoolsKit.singleshot(() => {
|
|
|
3675
3678
|
/**
|
|
3676
3679
|
* Maximum number of retry attempts for outline completion when model fails to use tools correctly.
|
|
3677
3680
|
*/
|
|
3678
|
-
const MAX_ATTEMPTS$
|
|
3681
|
+
const MAX_ATTEMPTS$4 = 5;
|
|
3679
3682
|
/**
|
|
3680
3683
|
* Provider for Anthropic Claude models via OpenAI-compatible API.
|
|
3681
3684
|
*
|
|
@@ -3911,7 +3914,7 @@ class ClaudeProvider {
|
|
|
3911
3914
|
content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
|
|
3912
3915
|
});
|
|
3913
3916
|
});
|
|
3914
|
-
while (attempt < MAX_ATTEMPTS$
|
|
3917
|
+
while (attempt < MAX_ATTEMPTS$4) {
|
|
3915
3918
|
// Prepare request options
|
|
3916
3919
|
const requestOptions = {
|
|
3917
3920
|
model: this.contextService.context.model,
|
|
@@ -4369,7 +4372,7 @@ const getDeepseek = functoolsKit.singleshot(() => {
|
|
|
4369
4372
|
/**
|
|
4370
4373
|
* Maximum number of retry attempts for outline completion.
|
|
4371
4374
|
*/
|
|
4372
|
-
const MAX_ATTEMPTS$
|
|
4375
|
+
const MAX_ATTEMPTS$3 = 3;
|
|
4373
4376
|
/**
|
|
4374
4377
|
* Provider for Deepseek AI models via OpenAI-compatible API.
|
|
4375
4378
|
*
|
|
@@ -4584,7 +4587,7 @@ class DeepseekProvider {
|
|
|
4584
4587
|
content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
|
|
4585
4588
|
});
|
|
4586
4589
|
});
|
|
4587
|
-
while (attempt < MAX_ATTEMPTS$
|
|
4590
|
+
while (attempt < MAX_ATTEMPTS$3) {
|
|
4588
4591
|
// Prepare request options
|
|
4589
4592
|
const requestOptions = {
|
|
4590
4593
|
model: this.contextService.context.model,
|
|
@@ -4691,7 +4694,7 @@ const getMistral = functoolsKit.singleshot(() => {
|
|
|
4691
4694
|
/**
|
|
4692
4695
|
* Maximum number of retry attempts for outline completion.
|
|
4693
4696
|
*/
|
|
4694
|
-
const MAX_ATTEMPTS$
|
|
4697
|
+
const MAX_ATTEMPTS$2 = 3;
|
|
4695
4698
|
/**
|
|
4696
4699
|
* Provider for Mistral AI models via OpenAI-compatible API.
|
|
4697
4700
|
*
|
|
@@ -4906,7 +4909,7 @@ class MistralProvider {
|
|
|
4906
4909
|
content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
|
|
4907
4910
|
});
|
|
4908
4911
|
});
|
|
4909
|
-
while (attempt < MAX_ATTEMPTS$
|
|
4912
|
+
while (attempt < MAX_ATTEMPTS$2) {
|
|
4910
4913
|
// Prepare request options
|
|
4911
4914
|
const requestOptions = {
|
|
4912
4915
|
model: this.contextService.context.model,
|
|
@@ -5582,7 +5585,7 @@ class CohereProvider {
|
|
|
5582
5585
|
/**
|
|
5583
5586
|
* Maximum number of retry attempts for outline completion.
|
|
5584
5587
|
*/
|
|
5585
|
-
const MAX_ATTEMPTS = 3;
|
|
5588
|
+
const MAX_ATTEMPTS$1 = 3;
|
|
5586
5589
|
/**
|
|
5587
5590
|
* Alibaba Cloud DashScope API base URL.
|
|
5588
5591
|
*/
|
|
@@ -5831,7 +5834,7 @@ class AlibabaProvider {
|
|
|
5831
5834
|
content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
|
|
5832
5835
|
});
|
|
5833
5836
|
});
|
|
5834
|
-
while (attempt < MAX_ATTEMPTS) {
|
|
5837
|
+
while (attempt < MAX_ATTEMPTS$1) {
|
|
5835
5838
|
// Prepare request body with enable_thinking parameter
|
|
5836
5839
|
const requestBody = {
|
|
5837
5840
|
model: this.contextService.context.model,
|
|
@@ -6303,6 +6306,316 @@ class GLM4Provider {
|
|
|
6303
6306
|
}
|
|
6304
6307
|
}
|
|
6305
6308
|
|
|
6309
|
+
/**
|
|
6310
|
+
* Creates and caches a Groq client for Groq API.
|
|
6311
|
+
*
|
|
6312
|
+
* Uses the official Groq SDK with default settings.
|
|
6313
|
+
* The client instance is cached using singleshot memoization for performance.
|
|
6314
|
+
* Token rotation is not supported - throws error if array of keys is provided.
|
|
6315
|
+
*
|
|
6316
|
+
* @returns Groq client configured for Groq API
|
|
6317
|
+
* @throws Error if API key array is provided (token rotation not supported)
|
|
6318
|
+
*
|
|
6319
|
+
* @example
|
|
6320
|
+
* ```typescript
|
|
6321
|
+
* import { getGroq } from "./config/groq";
|
|
6322
|
+
*
|
|
6323
|
+
* const client = getGroq();
|
|
6324
|
+
* const completion = await client.chat.completions.create({
|
|
6325
|
+
* model: "llama-3.3-70b-versatile",
|
|
6326
|
+
* messages: [{ role: "user", content: "Hello" }]
|
|
6327
|
+
* });
|
|
6328
|
+
* ```
|
|
6329
|
+
*/
|
|
6330
|
+
const getGroq = functoolsKit.singleshot(() => {
|
|
6331
|
+
const apiKey = engine$1.contextService.context.apiKey;
|
|
6332
|
+
if (Array.isArray(apiKey)) {
|
|
6333
|
+
getGroq.clear();
|
|
6334
|
+
throw new Error("Groq provider does not support token rotation");
|
|
6335
|
+
}
|
|
6336
|
+
return new Groq({
|
|
6337
|
+
apiKey: apiKey,
|
|
6338
|
+
});
|
|
6339
|
+
});
|
|
6340
|
+
|
|
6341
|
+
/**
|
|
6342
|
+
* Maximum number of retry attempts for outline completion.
|
|
6343
|
+
*/
|
|
6344
|
+
const MAX_ATTEMPTS = 3;
|
|
6345
|
+
/**
|
|
6346
|
+
* Provider for Groq AI models via Groq SDK.
|
|
6347
|
+
*
|
|
6348
|
+
* Supports Groq models through the official Groq SDK with tool calling.
|
|
6349
|
+
* Features simulated streaming and structured output via tool-based schema enforcement.
|
|
6350
|
+
*
|
|
6351
|
+
* Key features:
|
|
6352
|
+
* - Official Groq SDK
|
|
6353
|
+
* - Tool calling with conditional inclusion (only if tools present)
|
|
6354
|
+
* - Simulated streaming (returns complete response)
|
|
6355
|
+
* - Schema enforcement via tool calling with retry logic
|
|
6356
|
+
* - Debug logging support
|
|
6357
|
+
*
|
|
6358
|
+
* @example
|
|
6359
|
+
* ```typescript
|
|
6360
|
+
* const provider = new GroqProvider(contextService, logger);
|
|
6361
|
+
* const response = await provider.getCompletion({
|
|
6362
|
+
* agentName: "groq-assistant",
|
|
6363
|
+
* messages: [{ role: "user", content: "Explain transformers" }],
|
|
6364
|
+
* mode: "direct",
|
|
6365
|
+
* tools: [],
|
|
6366
|
+
* clientId: "client-001"
|
|
6367
|
+
* });
|
|
6368
|
+
* ```
|
|
6369
|
+
*/
|
|
6370
|
+
class GroqProvider {
|
|
6371
|
+
/**
|
|
6372
|
+
* Creates a new GroqProvider instance.
|
|
6373
|
+
*
|
|
6374
|
+
* @param contextService - Context service with model configuration
|
|
6375
|
+
* @param logger - Logger for operation tracking
|
|
6376
|
+
*/
|
|
6377
|
+
constructor(contextService, logger) {
|
|
6378
|
+
this.contextService = contextService;
|
|
6379
|
+
this.logger = logger;
|
|
6380
|
+
}
|
|
6381
|
+
/**
|
|
6382
|
+
* Performs standard completion request to Groq.
|
|
6383
|
+
*
|
|
6384
|
+
* @param params - Completion parameters
|
|
6385
|
+
* @returns Promise resolving to assistant's response
|
|
6386
|
+
*/
|
|
6387
|
+
async getCompletion(params) {
|
|
6388
|
+
const groq = getGroq();
|
|
6389
|
+
const { clientId, agentName, messages: rawMessages, mode, tools } = params;
|
|
6390
|
+
this.logger.log("groqProvider getCompletion", {
|
|
6391
|
+
agentName,
|
|
6392
|
+
mode,
|
|
6393
|
+
clientId,
|
|
6394
|
+
context: this.contextService.context,
|
|
6395
|
+
});
|
|
6396
|
+
// Map raw messages to Groq format
|
|
6397
|
+
const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
|
|
6398
|
+
role,
|
|
6399
|
+
tool_call_id,
|
|
6400
|
+
content,
|
|
6401
|
+
tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
|
|
6402
|
+
...rest,
|
|
6403
|
+
function: {
|
|
6404
|
+
name: f.name,
|
|
6405
|
+
arguments: JSON.stringify(f.arguments),
|
|
6406
|
+
},
|
|
6407
|
+
})),
|
|
6408
|
+
}));
|
|
6409
|
+
const formattedTools = tools?.map(({ type, function: f }) => ({
|
|
6410
|
+
type: type,
|
|
6411
|
+
function: {
|
|
6412
|
+
name: f.name,
|
|
6413
|
+
parameters: f.parameters,
|
|
6414
|
+
},
|
|
6415
|
+
}));
|
|
6416
|
+
const { choices: [{ message: { content, role, tool_calls }, },], } = await groq.chat.completions.create({
|
|
6417
|
+
model: this.contextService.context.model,
|
|
6418
|
+
messages: messages,
|
|
6419
|
+
tools: formattedTools?.length ? formattedTools : undefined,
|
|
6420
|
+
});
|
|
6421
|
+
const result = {
|
|
6422
|
+
content: content,
|
|
6423
|
+
mode,
|
|
6424
|
+
agentName,
|
|
6425
|
+
role,
|
|
6426
|
+
tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
|
|
6427
|
+
...rest,
|
|
6428
|
+
function: {
|
|
6429
|
+
name: f.name,
|
|
6430
|
+
arguments: JSON.parse(f.arguments),
|
|
6431
|
+
},
|
|
6432
|
+
})),
|
|
6433
|
+
};
|
|
6434
|
+
// Debug logging
|
|
6435
|
+
if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
|
|
6436
|
+
await fs.appendFile("./debug_groq_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
|
|
6437
|
+
}
|
|
6438
|
+
return result;
|
|
6439
|
+
}
|
|
6440
|
+
/**
|
|
6441
|
+
* Performs simulated streaming completion.
|
|
6442
|
+
*
|
|
6443
|
+
* @param params - Completion parameters
|
|
6444
|
+
* @returns Promise resolving to complete response
|
|
6445
|
+
*/
|
|
6446
|
+
async getStreamCompletion(params) {
|
|
6447
|
+
const groq = getGroq();
|
|
6448
|
+
const { clientId, agentName, messages: rawMessages, mode, tools } = params;
|
|
6449
|
+
this.logger.log("groqProvider getStreamCompletion", {
|
|
6450
|
+
agentName,
|
|
6451
|
+
mode,
|
|
6452
|
+
clientId,
|
|
6453
|
+
context: this.contextService.context,
|
|
6454
|
+
});
|
|
6455
|
+
// Map raw messages to Groq format
|
|
6456
|
+
const messages = rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
|
|
6457
|
+
role,
|
|
6458
|
+
tool_call_id,
|
|
6459
|
+
content,
|
|
6460
|
+
tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
|
|
6461
|
+
...rest,
|
|
6462
|
+
function: {
|
|
6463
|
+
name: f.name,
|
|
6464
|
+
arguments: JSON.stringify(f.arguments),
|
|
6465
|
+
},
|
|
6466
|
+
})),
|
|
6467
|
+
}));
|
|
6468
|
+
// Map tools to Groq format
|
|
6469
|
+
const formattedTools = tools?.map(({ type, function: f }) => ({
|
|
6470
|
+
type: type,
|
|
6471
|
+
function: {
|
|
6472
|
+
name: f.name,
|
|
6473
|
+
parameters: f.parameters,
|
|
6474
|
+
},
|
|
6475
|
+
}));
|
|
6476
|
+
const { choices: [{ message: { content, role, tool_calls }, },], } = await groq.chat.completions.create({
|
|
6477
|
+
model: this.contextService.context.model,
|
|
6478
|
+
messages: messages,
|
|
6479
|
+
tools: formattedTools?.length ? formattedTools : undefined,
|
|
6480
|
+
});
|
|
6481
|
+
// Emit events to mimic streaming behavior
|
|
6482
|
+
if (content) {
|
|
6483
|
+
await agentSwarmKit.event(clientId, "llm-completion", {
|
|
6484
|
+
content: content.trim(),
|
|
6485
|
+
agentName,
|
|
6486
|
+
});
|
|
6487
|
+
}
|
|
6488
|
+
const result = {
|
|
6489
|
+
content: content || "",
|
|
6490
|
+
mode,
|
|
6491
|
+
agentName,
|
|
6492
|
+
role,
|
|
6493
|
+
tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
|
|
6494
|
+
...rest,
|
|
6495
|
+
function: {
|
|
6496
|
+
name: f.name,
|
|
6497
|
+
arguments: JSON.parse(f.arguments),
|
|
6498
|
+
},
|
|
6499
|
+
})),
|
|
6500
|
+
};
|
|
6501
|
+
// Debug logging
|
|
6502
|
+
if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
|
|
6503
|
+
await fs.appendFile("./debug_groq_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
|
|
6504
|
+
}
|
|
6505
|
+
return result;
|
|
6506
|
+
}
|
|
6507
|
+
/**
|
|
6508
|
+
* Performs structured output completion with schema validation.
|
|
6509
|
+
*
|
|
6510
|
+
* @param params - Outline completion parameters
|
|
6511
|
+
* @returns Promise resolving to validated JSON string
|
|
6512
|
+
* @throws Error if model fails after MAX_ATTEMPTS
|
|
6513
|
+
*/
|
|
6514
|
+
async getOutlineCompletion(params) {
|
|
6515
|
+
const { messages: rawMessages, format } = params;
|
|
6516
|
+
const groq = getGroq();
|
|
6517
|
+
this.logger.log("groqProvider getOutlineCompletion", {
|
|
6518
|
+
context: this.contextService.context,
|
|
6519
|
+
});
|
|
6520
|
+
// Create tool definition based on format schema
|
|
6521
|
+
const schema = "json_schema" in format
|
|
6522
|
+
? lodashEs.get(format, "json_schema.schema", format)
|
|
6523
|
+
: format;
|
|
6524
|
+
const toolDefinition = {
|
|
6525
|
+
type: "function",
|
|
6526
|
+
function: {
|
|
6527
|
+
name: "provide_answer",
|
|
6528
|
+
description: "Предоставить ответ в требуемом формате",
|
|
6529
|
+
parameters: schema,
|
|
6530
|
+
},
|
|
6531
|
+
};
|
|
6532
|
+
// Add system instruction for tool usage
|
|
6533
|
+
const systemMessage = {
|
|
6534
|
+
role: "system",
|
|
6535
|
+
content: "ОБЯЗАТЕЛЬНО используй инструмент provide_answer для предоставления ответа. НЕ отвечай обычным текстом. ВСЕГДА вызывай инструмент provide_answer с правильными параметрами.",
|
|
6536
|
+
};
|
|
6537
|
+
const messages = [
|
|
6538
|
+
systemMessage,
|
|
6539
|
+
...rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
|
|
6540
|
+
role,
|
|
6541
|
+
tool_call_id,
|
|
6542
|
+
content,
|
|
6543
|
+
tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
|
|
6544
|
+
...rest,
|
|
6545
|
+
function: {
|
|
6546
|
+
name: f.name,
|
|
6547
|
+
arguments: JSON.stringify(f.arguments),
|
|
6548
|
+
},
|
|
6549
|
+
})),
|
|
6550
|
+
})),
|
|
6551
|
+
];
|
|
6552
|
+
let attempt = 0;
|
|
6553
|
+
const addToolRequestMessage = functoolsKit.singleshot(() => {
|
|
6554
|
+
messages.push({
|
|
6555
|
+
role: "user",
|
|
6556
|
+
content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
|
|
6557
|
+
});
|
|
6558
|
+
});
|
|
6559
|
+
while (attempt < MAX_ATTEMPTS) {
|
|
6560
|
+
// Prepare request options
|
|
6561
|
+
const requestOptions = {
|
|
6562
|
+
model: this.contextService.context.model,
|
|
6563
|
+
messages: messages,
|
|
6564
|
+
tools: [toolDefinition],
|
|
6565
|
+
tool_choice: {
|
|
6566
|
+
type: "function",
|
|
6567
|
+
function: { name: "provide_answer" },
|
|
6568
|
+
},
|
|
6569
|
+
};
|
|
6570
|
+
const { choices: [{ message }], } = await groq.chat.completions.create(requestOptions);
|
|
6571
|
+
const { tool_calls } = message;
|
|
6572
|
+
if (!tool_calls?.length) {
|
|
6573
|
+
console.error(`Attempt ${attempt + 1}: Model did not use tool, adding user message`);
|
|
6574
|
+
addToolRequestMessage();
|
|
6575
|
+
attempt++;
|
|
6576
|
+
continue;
|
|
6577
|
+
}
|
|
6578
|
+
if (tool_calls && tool_calls.length > 0) {
|
|
6579
|
+
const toolCall = tool_calls[0];
|
|
6580
|
+
if (toolCall.function?.name === "provide_answer") {
|
|
6581
|
+
// Parse JSON with repair
|
|
6582
|
+
let parsedArguments;
|
|
6583
|
+
try {
|
|
6584
|
+
const json = jsonrepair.jsonrepair(toolCall.function.arguments);
|
|
6585
|
+
parsedArguments = JSON.parse(json);
|
|
6586
|
+
}
|
|
6587
|
+
catch (error) {
|
|
6588
|
+
console.error(`Attempt ${attempt + 1}: Failed to parse tool arguments:`, error);
|
|
6589
|
+
addToolRequestMessage();
|
|
6590
|
+
attempt++;
|
|
6591
|
+
continue;
|
|
6592
|
+
}
|
|
6593
|
+
const validation = agentSwarmKit.validateToolArguments(parsedArguments, schema);
|
|
6594
|
+
if (!validation.success) {
|
|
6595
|
+
console.error(`Attempt ${attempt + 1}: ${validation.error}`);
|
|
6596
|
+
addToolRequestMessage();
|
|
6597
|
+
attempt++;
|
|
6598
|
+
continue;
|
|
6599
|
+
}
|
|
6600
|
+
lodashEs.set(validation.data, "_context", this.contextService.context);
|
|
6601
|
+
const result = {
|
|
6602
|
+
role: "assistant",
|
|
6603
|
+
content: JSON.stringify(validation.data),
|
|
6604
|
+
};
|
|
6605
|
+
// Debug logging
|
|
6606
|
+
if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
|
|
6607
|
+
await fs.appendFile("./debug_groq_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
|
|
6608
|
+
}
|
|
6609
|
+
return result;
|
|
6610
|
+
}
|
|
6611
|
+
}
|
|
6612
|
+
console.error(`Attempt ${attempt + 1}: Model send refusal`);
|
|
6613
|
+
attempt++;
|
|
6614
|
+
}
|
|
6615
|
+
throw new Error("Model failed to use tool after maximum attempts");
|
|
6616
|
+
}
|
|
6617
|
+
}
|
|
6618
|
+
|
|
6306
6619
|
/**
|
|
6307
6620
|
* Main library entry point for the Ollama package.
|
|
6308
6621
|
*
|
|
@@ -6418,6 +6731,7 @@ init();
|
|
|
6418
6731
|
engine.runnerPrivateService.registerRunner(InferenceName.CohereInference, CohereProvider);
|
|
6419
6732
|
engine.runnerPrivateService.registerRunner(InferenceName.AlibabaInference, AlibabaProvider);
|
|
6420
6733
|
engine.runnerPrivateService.registerRunner(InferenceName.GLM4Inference, GLM4Provider);
|
|
6734
|
+
engine.runnerPrivateService.registerRunner(InferenceName.GroqInference, GroqProvider);
|
|
6421
6735
|
}
|
|
6422
6736
|
// Make engine globally accessible for debugging
|
|
6423
6737
|
Object.assign(globalThis, { engine });
|
|
@@ -6890,6 +7204,38 @@ const alibaba = (fn, model, apiKey) => {
|
|
|
6890
7204
|
* const result = await wrappedFn(args);
|
|
6891
7205
|
* ```
|
|
6892
7206
|
*/
|
|
7207
|
+
/**
|
|
7208
|
+
* Wrap async function with Groq inference context.
|
|
7209
|
+
*
|
|
7210
|
+
* Creates a higher-order function that executes the provided async function
|
|
7211
|
+
* within a Groq inference context.
|
|
7212
|
+
*
|
|
7213
|
+
* @template T - Async function type
|
|
7214
|
+
* @param fn - Async function to wrap
|
|
7215
|
+
* @param model - Groq model name (e.g., "llama-3.3-70b-versatile")
|
|
7216
|
+
* @param apiKey - Single API key or array of keys
|
|
7217
|
+
* @returns Wrapped function with same signature as input
|
|
7218
|
+
*
|
|
7219
|
+
* @example
|
|
7220
|
+
* ```typescript
|
|
7221
|
+
* import { groq } from '@backtest-kit/ollama';
|
|
7222
|
+
*
|
|
7223
|
+
* const wrappedFn = groq(myAsyncFn, 'llama-3.3-70b-versatile', process.env.GROQ_API_KEY);
|
|
7224
|
+
* const result = await wrappedFn(args);
|
|
7225
|
+
* ```
|
|
7226
|
+
*/
|
|
7227
|
+
const groq = (fn, model, apiKey) => {
|
|
7228
|
+
const wrappedFn = async (args) => {
|
|
7229
|
+
return await ContextService.runInContext(async () => {
|
|
7230
|
+
return await fn(...args);
|
|
7231
|
+
}, {
|
|
7232
|
+
apiKey,
|
|
7233
|
+
inference: InferenceName$1.GroqInference,
|
|
7234
|
+
model,
|
|
7235
|
+
});
|
|
7236
|
+
};
|
|
7237
|
+
return wrappedFn;
|
|
7238
|
+
};
|
|
6893
7239
|
const glm4 = (fn, model, apiKey) => {
|
|
6894
7240
|
const wrappedFn = async (args) => {
|
|
6895
7241
|
return await ContextService.runInContext(async () => {
|
|
@@ -7483,6 +7829,7 @@ exports.getOptimizerSchema = getOptimizerSchema;
|
|
|
7483
7829
|
exports.glm4 = glm4;
|
|
7484
7830
|
exports.gpt5 = gpt5;
|
|
7485
7831
|
exports.grok = grok;
|
|
7832
|
+
exports.groq = groq;
|
|
7486
7833
|
exports.hf = hf;
|
|
7487
7834
|
exports.lib = engine;
|
|
7488
7835
|
exports.listOptimizerSchema = listOptimizerSchema;
|
package/build/index.mjs
CHANGED
|
@@ -18,6 +18,7 @@ import { InferenceClient } from '@huggingface/inference';
|
|
|
18
18
|
import { ChatOpenAI } from '@langchain/openai';
|
|
19
19
|
import { get, set } from 'lodash-es';
|
|
20
20
|
import { Ollama } from 'ollama';
|
|
21
|
+
import Groq from 'groq-sdk';
|
|
21
22
|
|
|
22
23
|
/**
|
|
23
24
|
* Enumeration of completion strategy types.
|
|
@@ -2402,6 +2403,8 @@ var InferenceName;
|
|
|
2402
2403
|
InferenceName["CohereInference"] = "cohere_inference";
|
|
2403
2404
|
/** Alibaba Cloud provider (dashscope-intl.aliyuncs.com) */
|
|
2404
2405
|
InferenceName["AlibabaInference"] = "alibaba_inference";
|
|
2406
|
+
/** Groq provider (api.groq.com) */
|
|
2407
|
+
InferenceName["GroqInference"] = "groq_inference";
|
|
2405
2408
|
})(InferenceName || (InferenceName = {}));
|
|
2406
2409
|
var InferenceName$1 = InferenceName;
|
|
2407
2410
|
|
|
@@ -2763,7 +2766,7 @@ class GrokProvider {
|
|
|
2763
2766
|
/**
|
|
2764
2767
|
* Maximum number of retry attempts for outline completion.
|
|
2765
2768
|
*/
|
|
2766
|
-
const MAX_ATTEMPTS$
|
|
2769
|
+
const MAX_ATTEMPTS$6 = 5;
|
|
2767
2770
|
/**
|
|
2768
2771
|
* Custom ChatOpenAI implementation for HuggingFace with simplified token counting.
|
|
2769
2772
|
* Routes requests to HuggingFace Router endpoint.
|
|
@@ -3073,7 +3076,7 @@ class HfProvider {
|
|
|
3073
3076
|
content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
|
|
3074
3077
|
});
|
|
3075
3078
|
});
|
|
3076
|
-
while (attempt < MAX_ATTEMPTS$
|
|
3079
|
+
while (attempt < MAX_ATTEMPTS$6) {
|
|
3077
3080
|
const { choices: [{ message }], } = await fetchApi("https://router.huggingface.co/v1/chat/completions", {
|
|
3078
3081
|
method: "POST",
|
|
3079
3082
|
headers: {
|
|
@@ -3282,7 +3285,7 @@ const getOllama = singleshot(() => {
|
|
|
3282
3285
|
/**
|
|
3283
3286
|
* Maximum number of retry attempts for outline completion when model fails to use tools correctly.
|
|
3284
3287
|
*/
|
|
3285
|
-
const MAX_ATTEMPTS$
|
|
3288
|
+
const MAX_ATTEMPTS$5 = 3;
|
|
3286
3289
|
/**
|
|
3287
3290
|
* Provider for Ollama LLM completions.
|
|
3288
3291
|
*
|
|
@@ -3572,7 +3575,7 @@ class OllamaProvider {
|
|
|
3572
3575
|
content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
|
|
3573
3576
|
});
|
|
3574
3577
|
});
|
|
3575
|
-
while (attempt < MAX_ATTEMPTS$
|
|
3578
|
+
while (attempt < MAX_ATTEMPTS$5) {
|
|
3576
3579
|
const response = await ollama.chat({
|
|
3577
3580
|
model: this.contextService.context.model,
|
|
3578
3581
|
messages,
|
|
@@ -3672,7 +3675,7 @@ const getClaude = singleshot(() => {
|
|
|
3672
3675
|
/**
|
|
3673
3676
|
* Maximum number of retry attempts for outline completion when model fails to use tools correctly.
|
|
3674
3677
|
*/
|
|
3675
|
-
const MAX_ATTEMPTS$
|
|
3678
|
+
const MAX_ATTEMPTS$4 = 5;
|
|
3676
3679
|
/**
|
|
3677
3680
|
* Provider for Anthropic Claude models via OpenAI-compatible API.
|
|
3678
3681
|
*
|
|
@@ -3908,7 +3911,7 @@ class ClaudeProvider {
|
|
|
3908
3911
|
content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
|
|
3909
3912
|
});
|
|
3910
3913
|
});
|
|
3911
|
-
while (attempt < MAX_ATTEMPTS$
|
|
3914
|
+
while (attempt < MAX_ATTEMPTS$4) {
|
|
3912
3915
|
// Prepare request options
|
|
3913
3916
|
const requestOptions = {
|
|
3914
3917
|
model: this.contextService.context.model,
|
|
@@ -4366,7 +4369,7 @@ const getDeepseek = singleshot(() => {
|
|
|
4366
4369
|
/**
|
|
4367
4370
|
* Maximum number of retry attempts for outline completion.
|
|
4368
4371
|
*/
|
|
4369
|
-
const MAX_ATTEMPTS$
|
|
4372
|
+
const MAX_ATTEMPTS$3 = 3;
|
|
4370
4373
|
/**
|
|
4371
4374
|
* Provider for Deepseek AI models via OpenAI-compatible API.
|
|
4372
4375
|
*
|
|
@@ -4581,7 +4584,7 @@ class DeepseekProvider {
|
|
|
4581
4584
|
content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
|
|
4582
4585
|
});
|
|
4583
4586
|
});
|
|
4584
|
-
while (attempt < MAX_ATTEMPTS$
|
|
4587
|
+
while (attempt < MAX_ATTEMPTS$3) {
|
|
4585
4588
|
// Prepare request options
|
|
4586
4589
|
const requestOptions = {
|
|
4587
4590
|
model: this.contextService.context.model,
|
|
@@ -4688,7 +4691,7 @@ const getMistral = singleshot(() => {
|
|
|
4688
4691
|
/**
|
|
4689
4692
|
* Maximum number of retry attempts for outline completion.
|
|
4690
4693
|
*/
|
|
4691
|
-
const MAX_ATTEMPTS$
|
|
4694
|
+
const MAX_ATTEMPTS$2 = 3;
|
|
4692
4695
|
/**
|
|
4693
4696
|
* Provider for Mistral AI models via OpenAI-compatible API.
|
|
4694
4697
|
*
|
|
@@ -4903,7 +4906,7 @@ class MistralProvider {
|
|
|
4903
4906
|
content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
|
|
4904
4907
|
});
|
|
4905
4908
|
});
|
|
4906
|
-
while (attempt < MAX_ATTEMPTS$
|
|
4909
|
+
while (attempt < MAX_ATTEMPTS$2) {
|
|
4907
4910
|
// Prepare request options
|
|
4908
4911
|
const requestOptions = {
|
|
4909
4912
|
model: this.contextService.context.model,
|
|
@@ -5579,7 +5582,7 @@ class CohereProvider {
|
|
|
5579
5582
|
/**
|
|
5580
5583
|
* Maximum number of retry attempts for outline completion.
|
|
5581
5584
|
*/
|
|
5582
|
-
const MAX_ATTEMPTS = 3;
|
|
5585
|
+
const MAX_ATTEMPTS$1 = 3;
|
|
5583
5586
|
/**
|
|
5584
5587
|
* Alibaba Cloud DashScope API base URL.
|
|
5585
5588
|
*/
|
|
@@ -5828,7 +5831,7 @@ class AlibabaProvider {
|
|
|
5828
5831
|
content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
|
|
5829
5832
|
});
|
|
5830
5833
|
});
|
|
5831
|
-
while (attempt < MAX_ATTEMPTS) {
|
|
5834
|
+
while (attempt < MAX_ATTEMPTS$1) {
|
|
5832
5835
|
// Prepare request body with enable_thinking parameter
|
|
5833
5836
|
const requestBody = {
|
|
5834
5837
|
model: this.contextService.context.model,
|
|
@@ -6300,6 +6303,316 @@ class GLM4Provider {
|
|
|
6300
6303
|
}
|
|
6301
6304
|
}
|
|
6302
6305
|
|
|
6306
|
+
/**
|
|
6307
|
+
* Creates and caches a Groq client for Groq API.
|
|
6308
|
+
*
|
|
6309
|
+
* Uses the official Groq SDK with default settings.
|
|
6310
|
+
* The client instance is cached using singleshot memoization for performance.
|
|
6311
|
+
* Token rotation is not supported - throws error if array of keys is provided.
|
|
6312
|
+
*
|
|
6313
|
+
* @returns Groq client configured for Groq API
|
|
6314
|
+
* @throws Error if API key array is provided (token rotation not supported)
|
|
6315
|
+
*
|
|
6316
|
+
* @example
|
|
6317
|
+
* ```typescript
|
|
6318
|
+
* import { getGroq } from "./config/groq";
|
|
6319
|
+
*
|
|
6320
|
+
* const client = getGroq();
|
|
6321
|
+
* const completion = await client.chat.completions.create({
|
|
6322
|
+
* model: "llama-3.3-70b-versatile",
|
|
6323
|
+
* messages: [{ role: "user", content: "Hello" }]
|
|
6324
|
+
* });
|
|
6325
|
+
* ```
|
|
6326
|
+
*/
|
|
6327
|
+
const getGroq = singleshot(() => {
|
|
6328
|
+
const apiKey = engine$1.contextService.context.apiKey;
|
|
6329
|
+
if (Array.isArray(apiKey)) {
|
|
6330
|
+
getGroq.clear();
|
|
6331
|
+
throw new Error("Groq provider does not support token rotation");
|
|
6332
|
+
}
|
|
6333
|
+
return new Groq({
|
|
6334
|
+
apiKey: apiKey,
|
|
6335
|
+
});
|
|
6336
|
+
});
|
|
6337
|
+
|
|
6338
|
+
/**
|
|
6339
|
+
* Maximum number of retry attempts for outline completion.
|
|
6340
|
+
*/
|
|
6341
|
+
const MAX_ATTEMPTS = 3;
|
|
6342
|
+
/**
|
|
6343
|
+
* Provider for Groq AI models via Groq SDK.
|
|
6344
|
+
*
|
|
6345
|
+
* Supports Groq models through the official Groq SDK with tool calling.
|
|
6346
|
+
* Features simulated streaming and structured output via tool-based schema enforcement.
|
|
6347
|
+
*
|
|
6348
|
+
* Key features:
|
|
6349
|
+
* - Official Groq SDK
|
|
6350
|
+
* - Tool calling with conditional inclusion (only if tools present)
|
|
6351
|
+
* - Simulated streaming (returns complete response)
|
|
6352
|
+
* - Schema enforcement via tool calling with retry logic
|
|
6353
|
+
* - Debug logging support
|
|
6354
|
+
*
|
|
6355
|
+
* @example
|
|
6356
|
+
* ```typescript
|
|
6357
|
+
* const provider = new GroqProvider(contextService, logger);
|
|
6358
|
+
* const response = await provider.getCompletion({
|
|
6359
|
+
* agentName: "groq-assistant",
|
|
6360
|
+
* messages: [{ role: "user", content: "Explain transformers" }],
|
|
6361
|
+
* mode: "direct",
|
|
6362
|
+
* tools: [],
|
|
6363
|
+
* clientId: "client-001"
|
|
6364
|
+
* });
|
|
6365
|
+
* ```
|
|
6366
|
+
*/
|
|
6367
|
+
/**
 * Maximum number of retry attempts for Groq structured-output (outline) completion.
 *
 * NOTE(review): the original bundle referenced a bare `MAX_ATTEMPTS` here, while
 * sibling providers use bundler-suffixed copies (e.g. `MAX_ATTEMPTS$6`), so the
 * bare binding may not exist at runtime. A locally scoped constant removes that
 * dependency — confirm against the complete bundle.
 */
const GROQ_MAX_ATTEMPTS = 5;
/**
 * Converts internal message objects to the Groq chat-completion wire format.
 * Tool-call arguments are serialized to JSON strings as the API expects.
 *
 * @param rawMessages - Internal messages ({ role, tool_call_id, tool_calls, content })
 * @returns Messages in Groq wire format
 */
const toGroqMessages = (rawMessages) => rawMessages.map(({ role, tool_call_id, tool_calls, content }) => ({
    role,
    tool_call_id,
    content,
    tool_calls: tool_calls?.map(({ function: f, ...rest }) => ({
        ...rest,
        function: {
            name: f.name,
            arguments: JSON.stringify(f.arguments),
        },
    })),
}));
/**
 * Converts internal tool definitions to the Groq tool format.
 *
 * @param tools - Internal tool definitions (may be undefined)
 * @returns Tools in Groq wire format, or undefined when no tools were given
 */
const toGroqTools = (tools) => tools?.map(({ type, function: f }) => ({
    type: type,
    function: {
        name: f.name,
        parameters: f.parameters,
    },
}));
/**
 * Converts Groq response tool calls (arguments as JSON strings) back into the
 * internal representation with parsed argument objects.
 *
 * @param tool_calls - Tool calls from a Groq completion response (may be undefined)
 * @returns Tool calls with parsed arguments, or undefined
 */
const fromGroqToolCalls = (tool_calls) => tool_calls?.map(({ function: f, ...rest }) => ({
    ...rest,
    function: {
        name: f.name,
        // assumes Groq always returns well-formed JSON here — TODO confirm;
        // getOutlineCompletion runs jsonrepair first, this path does not
        arguments: JSON.parse(f.arguments),
    },
}));
/**
 * Provider implementation for the Groq inference API.
 *
 * Supports plain completions, simulated streaming (the full response is fetched
 * and then emitted as a single "llm-completion" event), and structured-output
 * completions that force a `provide_answer` tool call validated against a schema.
 *
 * @example
 * ```javascript
 * const provider = new GroqProvider(contextService, logger);
 * const response = await provider.getCompletion({
 *   agentName: "groq-assistant",
 *   messages: [{ role: "user", content: "Explain transformers" }],
 *   mode: "direct",
 *   tools: [],
 *   clientId: "client-001",
 * });
 * ```
 */
class GroqProvider {
    /**
     * Creates a new GroqProvider instance.
     *
     * @param contextService - Context service exposing the active model configuration
     * @param logger - Logger used for operation tracking
     */
    constructor(contextService, logger) {
        this.contextService = contextService;
        this.logger = logger;
    }
    /**
     * Performs a standard (non-streaming) completion request against Groq.
     *
     * @param params - Completion parameters ({ clientId, agentName, messages, mode, tools })
     * @returns Promise resolving to the assistant's response
     *          ({ content, mode, agentName, role, tool_calls })
     */
    async getCompletion(params) {
        const groq = getGroq();
        const { clientId, agentName, messages: rawMessages, mode, tools } = params;
        this.logger.log("groqProvider getCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        const messages = toGroqMessages(rawMessages);
        const formattedTools = toGroqTools(tools);
        const { choices: [{ message: { content, role, tool_calls } }] } = await groq.chat.completions.create({
            model: this.contextService.context.model,
            messages: messages,
            // Groq rejects an empty tools array, so omit the field entirely
            tools: formattedTools?.length ? formattedTools : undefined,
        });
        const result = {
            content: content,
            mode,
            agentName,
            role,
            tool_calls: fromGroqToolCalls(tool_calls),
        };
        // Debug logging
        if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_groq_provider.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
        }
        return result;
    }
    /**
     * Performs a simulated streaming completion: the full response is fetched,
     * then announced via a single "llm-completion" event before being returned.
     *
     * @param params - Completion parameters ({ clientId, agentName, messages, mode, tools })
     * @returns Promise resolving to the complete response
     */
    async getStreamCompletion(params) {
        const groq = getGroq();
        const { clientId, agentName, messages: rawMessages, mode, tools } = params;
        this.logger.log("groqProvider getStreamCompletion", {
            agentName,
            mode,
            clientId,
            context: this.contextService.context,
        });
        const messages = toGroqMessages(rawMessages);
        const formattedTools = toGroqTools(tools);
        const { choices: [{ message: { content, role, tool_calls } }] } = await groq.chat.completions.create({
            model: this.contextService.context.model,
            messages: messages,
            tools: formattedTools?.length ? formattedTools : undefined,
        });
        // Emit a single event to mimic streaming behavior
        if (content) {
            await event(clientId, "llm-completion", {
                content: content.trim(),
                agentName,
            });
        }
        const result = {
            content: content || "",
            mode,
            agentName,
            role,
            tool_calls: fromGroqToolCalls(tool_calls),
        };
        // Debug logging
        if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
            await fs.appendFile("./debug_groq_provider_stream.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
        }
        return result;
    }
    /**
     * Performs a structured-output completion with schema validation.
     *
     * Forces the model to call a `provide_answer` tool whose parameters match the
     * requested format schema, repairing/parsing the returned JSON and validating
     * it. Retries (re-prompting the model once via a singleshot nudge message)
     * until success or the attempt budget is exhausted.
     *
     * @param params - Outline completion parameters ({ messages, format })
     * @returns Promise resolving to { role: "assistant", content: <validated JSON string> }
     * @throws Error if the model fails to produce valid output after GROQ_MAX_ATTEMPTS
     */
    async getOutlineCompletion(params) {
        const { messages: rawMessages, format } = params;
        const groq = getGroq();
        this.logger.log("groqProvider getOutlineCompletion", {
            context: this.contextService.context,
        });
        // Unwrap OpenAI-style { json_schema: { schema } } wrappers; otherwise use format as-is
        const schema = "json_schema" in format
            ? get(format, "json_schema.schema", format)
            : format;
        const toolDefinition = {
            type: "function",
            function: {
                name: "provide_answer",
                description: "Предоставить ответ в требуемом формате",
                parameters: schema,
            },
        };
        // System instruction demanding tool usage instead of free-form text
        const systemMessage = {
            role: "system",
            content: "ОБЯЗАТЕЛЬНО используй инструмент provide_answer для предоставления ответа. НЕ отвечай обычным текстом. ВСЕГДА вызывай инструмент provide_answer с правильными параметрами.",
        };
        const messages = [systemMessage, ...toGroqMessages(rawMessages)];
        let attempt = 0;
        // Appended at most once across all retries (singleshot)
        const addToolRequestMessage = singleshot(() => {
            messages.push({
                role: "user",
                content: "Пожалуйста, используй инструмент provide_answer для предоставления ответа. Не отвечай обычным текстом.",
            });
        });
        while (attempt < GROQ_MAX_ATTEMPTS) {
            const requestOptions = {
                model: this.contextService.context.model,
                messages: messages,
                tools: [toolDefinition],
                // Force the model to call provide_answer
                tool_choice: {
                    type: "function",
                    function: { name: "provide_answer" },
                },
            };
            const { choices: [{ message }] } = await groq.chat.completions.create(requestOptions);
            const { tool_calls } = message;
            if (!tool_calls?.length) {
                console.error(`Attempt ${attempt + 1}: Model did not use tool, adding user message`);
                addToolRequestMessage();
                attempt++;
                continue;
            }
            const toolCall = tool_calls[0];
            // A call to any other tool name is treated as a refusal (no nudge message)
            if (toolCall.function?.name !== "provide_answer") {
                console.error(`Attempt ${attempt + 1}: Model send refusal`);
                attempt++;
                continue;
            }
            // Parse JSON with repair for malformed model output
            let parsedArguments;
            try {
                const json = jsonrepair(toolCall.function.arguments);
                parsedArguments = JSON.parse(json);
            }
            catch (error) {
                console.error(`Attempt ${attempt + 1}: Failed to parse tool arguments:`, error);
                addToolRequestMessage();
                attempt++;
                continue;
            }
            const validation = validateToolArguments(parsedArguments, schema);
            if (!validation.success) {
                console.error(`Attempt ${attempt + 1}: ${validation.error}`);
                addToolRequestMessage();
                attempt++;
                continue;
            }
            // Attach the inference context for downstream consumers
            set(validation.data, "_context", this.contextService.context);
            const result = {
                role: "assistant",
                content: JSON.stringify(validation.data),
            };
            // Debug logging
            if (GLOBAL_CONFIG.CC_ENABLE_DEBUG) {
                await fs.appendFile("./debug_groq_provider_outline.txt", JSON.stringify({ params, answer: result }, null, 2) + "\n\n");
            }
            return result;
        }
        throw new Error("Model failed to use tool after maximum attempts");
    }
}
|
|
6615
|
+
|
|
6303
6616
|
/**
|
|
6304
6617
|
* Main library entry point for the Ollama package.
|
|
6305
6618
|
*
|
|
@@ -6415,6 +6728,7 @@ init();
|
|
|
6415
6728
|
engine.runnerPrivateService.registerRunner(InferenceName.CohereInference, CohereProvider);
|
|
6416
6729
|
engine.runnerPrivateService.registerRunner(InferenceName.AlibabaInference, AlibabaProvider);
|
|
6417
6730
|
engine.runnerPrivateService.registerRunner(InferenceName.GLM4Inference, GLM4Provider);
|
|
6731
|
+
engine.runnerPrivateService.registerRunner(InferenceName.GroqInference, GroqProvider);
|
|
6418
6732
|
}
|
|
6419
6733
|
// Make engine globally accessible for debugging
|
|
6420
6734
|
Object.assign(globalThis, { engine });
|
|
@@ -6887,6 +7201,38 @@ const alibaba = (fn, model, apiKey) => {
|
|
|
6887
7201
|
* const result = await wrappedFn(args);
|
|
6888
7202
|
* ```
|
|
6889
7203
|
*/
|
|
7204
|
+
/**
 * Wraps an async function so it runs inside a Groq inference context.
 *
 * The returned function has the same signature as the input: it receives a
 * single array of arguments, which is spread into the wrapped function inside
 * a context carrying the Groq inference name, model, and API key(s).
 *
 * @template T - Async function type
 * @param fn - Async function to execute within the Groq context
 * @param model - Groq model name (e.g., "llama-3.3-70b-versatile")
 * @param apiKey - Single API key or array of keys for rotation
 * @returns Wrapped function with the same signature as the input
 *
 * @example
 * ```typescript
 * import { groq } from '@backtest-kit/ollama';
 *
 * const wrappedFn = groq(myAsyncFn, 'llama-3.3-70b-versatile', process.env.GROQ_API_KEY);
 * const result = await wrappedFn(args);
 * ```
 */
const groq = (fn, model, apiKey) => {
    const contextParams = {
        apiKey,
        inference: InferenceName$1.GroqInference,
        model,
    };
    return async (args) => await ContextService.runInContext(
        async () => await fn(...args),
        contextParams,
    );
};
|
|
6890
7236
|
const glm4 = (fn, model, apiKey) => {
|
|
6891
7237
|
const wrappedFn = async (args) => {
|
|
6892
7238
|
return await ContextService.runInContext(async () => {
|
|
@@ -7466,4 +7812,4 @@ class OptimizerUtils {
|
|
|
7466
7812
|
*/
|
|
7467
7813
|
const Optimizer = new OptimizerUtils();
|
|
7468
7814
|
|
|
7469
|
-
export { CompletionName, Module, Optimizer, Prompt, addOptimizerSchema, alibaba, claude, cohere, commitPrompt, deepseek, dumpSignalData, getOptimizerSchema, glm4, gpt5, grok, hf, engine as lib, listOptimizerSchema, listenError, listenOptimizerProgress, mistral, ollama, perplexity, setLogger, validate };
|
|
7815
|
+
export { CompletionName, Module, Optimizer, Prompt, addOptimizerSchema, alibaba, claude, cohere, commitPrompt, deepseek, dumpSignalData, getOptimizerSchema, glm4, gpt5, grok, groq, hf, engine as lib, listOptimizerSchema, listenError, listenOptimizerProgress, mistral, ollama, perplexity, setLogger, validate };
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@backtest-kit/ollama",
|
|
3
|
-
"version": "3.0.6",
|
|
3
|
+
"version": "3.0.7",
|
|
4
4
|
"description": "Multi-provider LLM inference library for AI-powered trading strategies. Supports 10+ providers including OpenAI, Claude, DeepSeek, Grok, Mistral with unified API and automatic token rotation.",
|
|
5
5
|
"author": {
|
|
6
6
|
"name": "Petr Tripolsky",
|
|
@@ -65,9 +65,15 @@
|
|
|
65
65
|
"default": "./build/index.cjs"
|
|
66
66
|
},
|
|
67
67
|
"devDependencies": {
|
|
68
|
+
"@huggingface/inference": "4.7.1",
|
|
69
|
+
"@langchain/core": "0.3.57",
|
|
70
|
+
"@langchain/xai": "0.0.2",
|
|
68
71
|
"@rollup/plugin-typescript": "11.1.6",
|
|
69
72
|
"@types/node": "22.9.0",
|
|
70
73
|
"glob": "11.0.1",
|
|
74
|
+
"ollama": "0.6.0",
|
|
75
|
+
"openai": "4.97.0",
|
|
76
|
+
"groq-sdk": "0.37.0",
|
|
71
77
|
"rimraf": "6.0.1",
|
|
72
78
|
"rollup": "3.29.5",
|
|
73
79
|
"rollup-plugin-dts": "6.1.1",
|
|
@@ -75,32 +81,28 @@
|
|
|
75
81
|
"ts-morph": "27.0.2",
|
|
76
82
|
"tslib": "2.7.0",
|
|
77
83
|
"typedoc": "0.27.9",
|
|
78
|
-
"worker-testbed": "1.0.12"
|
|
79
|
-
"ollama": "0.6.0",
|
|
80
|
-
"openai": "4.97.0",
|
|
81
|
-
"@langchain/core": "0.3.57",
|
|
82
|
-
"@langchain/xai": "0.0.2",
|
|
83
|
-
"@huggingface/inference": "4.7.1"
|
|
84
|
+
"worker-testbed": "1.0.12"
|
|
84
85
|
},
|
|
85
86
|
"peerDependencies": {
|
|
86
|
-
"
|
|
87
|
-
"typescript": "^5.0.0",
|
|
88
|
-
"ollama": "^0.6.0",
|
|
89
|
-
"openai": "^4.97.0",
|
|
87
|
+
"@huggingface/inference": "^4.7.1",
|
|
90
88
|
"@langchain/core": "^0.3.57",
|
|
91
89
|
"@langchain/xai": "^0.0.2",
|
|
92
|
-
"
|
|
90
|
+
"backtest-kit": "^3.0.17",
|
|
91
|
+
"groq-sdk": "^0.37.0",
|
|
92
|
+
"ollama": "^0.6.0",
|
|
93
|
+
"openai": "^4.97.0",
|
|
94
|
+
"typescript": "^5.0.0"
|
|
93
95
|
},
|
|
94
96
|
"dependencies": {
|
|
97
|
+
"agent-swarm-kit": "^1.2.4",
|
|
95
98
|
"di-kit": "^1.0.18",
|
|
96
99
|
"di-scoped": "^1.0.21",
|
|
97
100
|
"functools-kit": "^1.0.95",
|
|
101
|
+
"get-moment-stamp": "^1.1.1",
|
|
98
102
|
"jsonrepair": "^3.12.0",
|
|
99
103
|
"markdown-it": "^14.1.0",
|
|
100
104
|
"markdownlint": "^0.38.0",
|
|
101
105
|
"sanitize-html": "^2.17.0",
|
|
102
|
-
"get-moment-stamp": "^1.1.1",
|
|
103
|
-
"agent-swarm-kit": "^1.2.4",
|
|
104
106
|
"zod": "^3.25.76"
|
|
105
107
|
},
|
|
106
108
|
"publishConfig": {
|
package/types.d.ts
CHANGED
|
@@ -233,6 +233,27 @@ declare const alibaba: <T extends (...args: any[]) => Promise<any>>(fn: T, model
|
|
|
233
233
|
* const result = await wrappedFn(args);
|
|
234
234
|
* ```
|
|
235
235
|
*/
|
|
236
|
+
/**
|
|
237
|
+
* Wrap async function with Groq inference context.
|
|
238
|
+
*
|
|
239
|
+
* Creates a higher-order function that executes the provided async function
|
|
240
|
+
* within a Groq inference context.
|
|
241
|
+
*
|
|
242
|
+
* @template T - Async function type
|
|
243
|
+
* @param fn - Async function to wrap
|
|
244
|
+
* @param model - Groq model name (e.g., "llama-3.3-70b-versatile")
|
|
245
|
+
* @param apiKey - Single API key or array of keys
|
|
246
|
+
* @returns Wrapped function with same signature as input
|
|
247
|
+
*
|
|
248
|
+
* @example
|
|
249
|
+
* ```typescript
|
|
250
|
+
* import { groq } from '@backtest-kit/ollama';
|
|
251
|
+
*
|
|
252
|
+
* const wrappedFn = groq(myAsyncFn, 'llama-3.3-70b-versatile', process.env.GROQ_API_KEY);
|
|
253
|
+
* const result = await wrappedFn(args);
|
|
254
|
+
* ```
|
|
255
|
+
*/
|
|
256
|
+
declare const groq: <T extends (...args: any[]) => Promise<any>>(fn: T, model: string, apiKey?: string | string[]) => T;
|
|
236
257
|
declare const glm4: <T extends (...args: any[]) => Promise<any>>(fn: T, model: string, apiKey?: string | string[]) => T;
|
|
237
258
|
|
|
238
259
|
/**
|
|
@@ -1367,7 +1388,9 @@ declare enum InferenceName {
|
|
|
1367
1388
|
/** Cohere provider (api.cohere.ai) */
|
|
1368
1389
|
CohereInference = "cohere_inference",
|
|
1369
1390
|
/** Alibaba Cloud provider (dashscope-intl.aliyuncs.com) */
|
|
1370
|
-
AlibabaInference = "alibaba_inference"
|
|
1391
|
+
AlibabaInference = "alibaba_inference",
|
|
1392
|
+
/** Groq provider (api.groq.com) */
|
|
1393
|
+
GroqInference = "groq_inference"
|
|
1371
1394
|
}
|
|
1372
1395
|
|
|
1373
1396
|
/**
|
|
@@ -2275,4 +2298,4 @@ declare const engine: {
|
|
|
2275
2298
|
loggerService: LoggerService;
|
|
2276
2299
|
};
|
|
2277
2300
|
|
|
2278
|
-
export { CompletionName, type IOptimizerCallbacks, type IOptimizerData, type IOptimizerFetchArgs, type IOptimizerFilterArgs, type IOptimizerRange, type IOptimizerSchema, type IOptimizerSource, type IOptimizerStrategy, type IOptimizerTemplate, type MessageModel, type MessageRole, Module, Optimizer, type ProgressOptimizerContract, Prompt, type PromptModel, addOptimizerSchema, alibaba, claude, cohere, commitPrompt, deepseek, dumpSignalData, getOptimizerSchema, glm4, gpt5, grok, hf, engine as lib, listOptimizerSchema, listenError, listenOptimizerProgress, mistral, ollama, perplexity, setLogger, validate };
|
|
2301
|
+
export { CompletionName, type IOptimizerCallbacks, type IOptimizerData, type IOptimizerFetchArgs, type IOptimizerFilterArgs, type IOptimizerRange, type IOptimizerSchema, type IOptimizerSource, type IOptimizerStrategy, type IOptimizerTemplate, type MessageModel, type MessageRole, Module, Optimizer, type ProgressOptimizerContract, Prompt, type PromptModel, addOptimizerSchema, alibaba, claude, cohere, commitPrompt, deepseek, dumpSignalData, getOptimizerSchema, glm4, gpt5, grok, groq, hf, engine as lib, listOptimizerSchema, listenError, listenOptimizerProgress, mistral, ollama, perplexity, setLogger, validate };
|