agentic-api 2.0.491 → 2.0.592
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +37 -34
- package/dist/src/agents/job.runner.d.ts +130 -0
- package/dist/src/agents/job.runner.js +339 -0
- package/dist/src/agents/reducer.core.d.ts +11 -1
- package/dist/src/agents/reducer.core.js +76 -86
- package/dist/src/agents/reducer.d.ts +1 -0
- package/dist/src/agents/reducer.factory.d.ts +46 -0
- package/dist/src/agents/reducer.factory.js +154 -0
- package/dist/src/agents/reducer.js +1 -0
- package/dist/src/agents/simulator.d.ts +26 -1
- package/dist/src/agents/simulator.dashboard.d.ts +140 -0
- package/dist/src/agents/simulator.dashboard.js +344 -0
- package/dist/src/agents/simulator.js +56 -0
- package/dist/src/agents/simulator.types.d.ts +38 -6
- package/dist/src/agents/simulator.utils.d.ts +22 -1
- package/dist/src/agents/simulator.utils.js +27 -0
- package/dist/src/execute/helpers.js +2 -2
- package/dist/src/execute/modelconfig.d.ts +21 -11
- package/dist/src/execute/modelconfig.js +29 -13
- package/dist/src/execute/responses.js +8 -7
- package/dist/src/index.d.ts +6 -1
- package/dist/src/index.js +21 -1
- package/dist/src/llm/config.d.ts +25 -0
- package/dist/src/llm/config.js +38 -0
- package/dist/src/llm/index.d.ts +48 -0
- package/dist/src/llm/index.js +115 -0
- package/dist/src/llm/openai.d.ts +6 -0
- package/dist/src/llm/openai.js +154 -0
- package/dist/src/llm/pricing.d.ts +26 -0
- package/dist/src/llm/pricing.js +129 -0
- package/dist/src/llm/xai.d.ts +17 -0
- package/dist/src/llm/xai.js +90 -0
- package/dist/src/pricing.llm.d.ts +3 -15
- package/dist/src/pricing.llm.js +10 -251
- package/dist/src/prompts.d.ts +0 -1
- package/dist/src/prompts.js +51 -118
- package/dist/src/rag/embeddings.d.ts +5 -1
- package/dist/src/rag/embeddings.js +15 -5
- package/dist/src/rag/parser.js +1 -1
- package/dist/src/rag/rag.manager.d.ts +44 -6
- package/dist/src/rag/rag.manager.js +138 -49
- package/dist/src/rag/types.d.ts +2 -0
- package/dist/src/rag/usecase.js +8 -11
- package/dist/src/rules/git/git.health.js +59 -4
- package/dist/src/rules/git/repo.d.ts +11 -4
- package/dist/src/rules/git/repo.js +64 -18
- package/dist/src/rules/git/repo.pr.d.ts +8 -0
- package/dist/src/rules/git/repo.pr.js +45 -1
- package/dist/src/rules/git/repo.tools.d.ts +5 -1
- package/dist/src/rules/git/repo.tools.js +54 -7
- package/dist/src/rules/types.d.ts +14 -0
- package/dist/src/rules/utils.matter.d.ts +0 -20
- package/dist/src/rules/utils.matter.js +42 -74
- package/dist/src/scrapper.js +2 -2
- package/dist/src/utils.d.ts +0 -8
- package/dist/src/utils.js +1 -28
- package/package.json +1 -1
package/dist/src/execute/responses.js
CHANGED

@@ -36,8 +36,9 @@ exports.executeAgent = executeAgent;
 exports.executeQuery = executeQuery;
 const types_1 = require("../types");
 const utils_1 = require("../utils");
+const llm_1 = require("../llm");
 const stategraph_1 = require("../stategraph");
-const
+const pricing_1 = require("../llm/pricing");
 //
 // Import shared utilities and optimized helpers
 const shared_1 = require("./shared");
@@ -95,7 +96,7 @@ async function createResponseStream(openai, options, stdout) {
  * - response.completed → finalization
  */
 async function readCompletionsStream(params) {
-    const openai = (0,
+    const openai = (0, llm_1.llmInstance)();
     const { stateGraph, discussion, agentConfig, agents, discussionRootAgent, stdout, final, context, verbose } = params;
     const model = (0, modelconfig_1.modelConfig)(agentConfig.model, {}, true).model; // forResponses=true
     const accumulatedFunctionCall = final.choices[0]?.message.tool_calls || [];
@@ -313,7 +314,7 @@ async function readCompletionsStream(params) {
  */
 async function executeAgentSet(agentSet, context, params) {
     const { query, verbose } = params;
-    const openai = (0,
+    const openai = (0, llm_1.llmInstance)();
     const agents = (0, utils_1.injectTransferTools)(agentSet);
     const discussionRootAgent = params.home || agents[0].name;
     const stateGraph = (0, stategraph_1.sessionStateGraphGet)(context);
@@ -434,7 +435,7 @@ async function executeAgentSet(agentSet, context, params) {
  */
 async function executeAgent(agentSet, params) {
     const { query, verbose, debug } = params;
-    const openai = (0,
+    const openai = (0, llm_1.llmInstance)();
     const agent = agentSet.find(a => a.name === params.home);
     if (!agent) {
         throw new Error(`Agent ${params.home} not found`);
@@ -477,7 +478,7 @@ async function executeAgent(agentSet, params) {
     // normalizeOutputFromResponses returns the Chat Completions format directly
     const final = await createResponseStream(openai, options, params.stdout);
     const model = (0, modelconfig_1.modelConfig)(agent.model, {}, true).model;
-    (0,
+    (0, pricing_1.accumulateCost)(usage, model, final.usage);
     state = final.id;
     const content = final.choices[0]?.message.content || '';
     const toolCalls = final.choices[0]?.message.tool_calls || [];
@@ -531,7 +532,7 @@ async function executeQuery(params) {
     if (!modelName) {
         throw new Error('executeQuery requires "model" parameter');
     }
-    const openai = (0,
+    const openai = (0, llm_1.llmInstance)();
     const model = (0, modelconfig_1.modelConfig)(modelName, {}, true); // forResponses=true
     // Responses API: response_format → text.format
     // Merge with text.verbosity, which may come from modelConfig
@@ -571,7 +572,7 @@ async function executeQuery(params) {
     // Responses API: use responses.stream() with .on() handlers and .finalResponse()
     // normalizeOutputFromResponses returns the Chat Completions format directly
     const final = await createResponseStream(openai, options, params.stdout);
-    (0,
+    (0, pricing_1.accumulateCost)(usage, model.model, final.usage);
     state = final.id || '';
     const content = final.choices[0]?.message.content || '';
     if (verbose) {
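Every call site above switches to `llm_1.llmInstance()` for the client and `pricing_1.accumulateCost()` for cost tracking. A minimal sketch of the resulting call-site pattern from consumer code, assuming the root re-exports added in this release; the `runQuery` helper is hypothetical, and a plain Chat Completions call stands in for the package's Responses-API streaming:

```ts
// Hypothetical call-site sketch (not the package's own code).
// The Usage shape { prompt, completion, total, cost } follows how
// accumulateCost mutates its first argument in llm/pricing.js below.
import { llmInstance, accumulateCost, modelConfig } from 'agentic-api';

const usage = { prompt: 0, completion: 0, total: 0, cost: 0 };

async function runQuery(prompt: string): Promise<string> {
  const openai = llmInstance();                       // provider resolved from LLM_PROVIDER
  const { model } = modelConfig('MEDIUM', {}, true);  // forResponses=true, as in the diff
  const completion = await openai.chat.completions.create({
    model,
    messages: [{ role: 'user', content: prompt }],
  });
  accumulateCost(usage, model, completion.usage);     // running total lands in usage.cost
  return completion.choices[0]?.message.content ?? '';
}
```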
package/dist/src/index.d.ts
CHANGED

@@ -6,11 +6,15 @@ export * from './utils';
 export * from './types';
 export * from './stategraph';
 export * from './execute';
-export
+export { llmInstance, resetInstances, openaiInstance, PROVIDER_MAP, detectProvider, getDefaultProvider } from './llm';
+export type { LLMProvider, LLMConfig, ProviderConfig } from './llm';
+export { modelPricing, calculateCost, accumulateCost, LLM, LLMxai, LLMopenai } from './llm/pricing';
+export { modelConfig } from './execute/modelconfig';
 export * from './scrapper';
 export * from './agents/reducer';
 export * from './agents/semantic';
 export * from './agents/system';
+export * from './agents/job.runner';
 export * from './rag';
 export * from './usecase';
 export * from './rules';
@@ -18,3 +22,4 @@ export * from './agents/simulator';
 export * from './agents/simulator.types';
 export * from './agents/simulator.prompts';
 export * from './agents/simulator.utils';
+export * from './agents/simulator.dashboard';
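The LLM surface is now reachable from the package root. A short consumer sketch, assuming the package is imported by its published name:

```ts
// Everything here is exported from the root per the .d.ts diff above.
import { llmInstance, detectProvider, PROVIDER_MAP } from 'agentic-api';
import type { LLMProvider, LLMConfig } from 'agentic-api';

const provider: LLMProvider = detectProvider('https://api.x.ai/v1'); // 'xai'
const config: LLMConfig = { provider, baseUrl: PROVIDER_MAP[provider].baseURL };
const client = llmInstance(config); // a configured OpenAI SDK instance
```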
package/dist/src/index.js
CHANGED

@@ -18,6 +18,7 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
+exports.modelConfig = exports.LLMopenai = exports.LLMxai = exports.LLM = exports.accumulateCost = exports.calculateCost = exports.modelPricing = exports.getDefaultProvider = exports.detectProvider = exports.PROVIDER_MAP = exports.openaiInstance = exports.resetInstances = exports.llmInstance = void 0;
 // Export utilities
 __exportStar(require("./utils"), exports);
 // Types
@@ -26,13 +27,31 @@ __exportStar(require("./types"), exports);
 __exportStar(require("./stategraph"), exports);
 // Execute (with the legacy/responses feature toggle)
 __exportStar(require("./execute"), exports);
-
+// LLM - providers and pricing
+var llm_1 = require("./llm");
+Object.defineProperty(exports, "llmInstance", { enumerable: true, get: function () { return llm_1.llmInstance; } });
+Object.defineProperty(exports, "resetInstances", { enumerable: true, get: function () { return llm_1.resetInstances; } });
+Object.defineProperty(exports, "openaiInstance", { enumerable: true, get: function () { return llm_1.openaiInstance; } });
+Object.defineProperty(exports, "PROVIDER_MAP", { enumerable: true, get: function () { return llm_1.PROVIDER_MAP; } });
+Object.defineProperty(exports, "detectProvider", { enumerable: true, get: function () { return llm_1.detectProvider; } });
+Object.defineProperty(exports, "getDefaultProvider", { enumerable: true, get: function () { return llm_1.getDefaultProvider; } });
+var pricing_1 = require("./llm/pricing");
+Object.defineProperty(exports, "modelPricing", { enumerable: true, get: function () { return pricing_1.modelPricing; } });
+Object.defineProperty(exports, "calculateCost", { enumerable: true, get: function () { return pricing_1.calculateCost; } });
+Object.defineProperty(exports, "accumulateCost", { enumerable: true, get: function () { return pricing_1.accumulateCost; } });
+Object.defineProperty(exports, "LLM", { enumerable: true, get: function () { return pricing_1.LLM; } });
+Object.defineProperty(exports, "LLMxai", { enumerable: true, get: function () { return pricing_1.LLMxai; } });
+Object.defineProperty(exports, "LLMopenai", { enumerable: true, get: function () { return pricing_1.LLMopenai; } });
+// Model config (central for all the mappings: responses, embeddings, vision, whisper)
+var modelconfig_1 = require("./execute/modelconfig");
+Object.defineProperty(exports, "modelConfig", { enumerable: true, get: function () { return modelconfig_1.modelConfig; } });
 // Scrapper
 __exportStar(require("./scrapper"), exports);
 // Agents
 __exportStar(require("./agents/reducer"), exports);
 __exportStar(require("./agents/semantic"), exports);
 __exportStar(require("./agents/system"), exports);
+__exportStar(require("./agents/job.runner"), exports);
 // RAG Library
 __exportStar(require("./rag"), exports);
 // Usecase
@@ -44,3 +63,4 @@ __exportStar(require("./agents/simulator"), exports);
 __exportStar(require("./agents/simulator.types"), exports);
 __exportStar(require("./agents/simulator.prompts"), exports);
 __exportStar(require("./agents/simulator.utils"), exports);
+__exportStar(require("./agents/simulator.dashboard"), exports);
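The block of `Object.defineProperty` getters is simply what tsc emits for named re-exports in CommonJS output, one getter per symbol. The presumed TypeScript source behind it (an assumption, reconstructed to match the `.d.ts` diff above) is just:

```ts
// Presumed src/index.ts additions behind the compiled output above
// (assumption: each named re-export compiles to one defineProperty getter).
export { llmInstance, resetInstances, openaiInstance, PROVIDER_MAP, detectProvider, getDefaultProvider } from './llm';
export type { LLMProvider, LLMConfig, ProviderConfig } from './llm';
export { modelPricing, calculateCost, accumulateCost, LLM, LLMxai, LLMopenai } from './llm/pricing';
export { modelConfig } from './execute/modelconfig';
```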
package/dist/src/llm/config.d.ts
ADDED

@@ -0,0 +1,25 @@
+/**
+ * LLM provider configuration
+ *
+ * Defines the base URLs, API keys, and per-provider configuration.
+ */
+export type LLMProvider = 'openai' | 'xai';
+export interface LLMConfig {
+    provider?: LLMProvider;
+    key?: string;
+    baseUrl?: string;
+}
+export interface ProviderConfig {
+    baseURL: string;
+    keyEnv: string;
+    fallbackKeyEnv?: string;
+}
+export declare const PROVIDER_MAP: Record<LLMProvider, ProviderConfig>;
+/**
+ * Detects the provider from a baseURL
+ */
+export declare function detectProvider(baseURL?: string): LLMProvider;
+/**
+ * Returns the default provider from the environment variables
+ */
+export declare function getDefaultProvider(): LLMProvider;
package/dist/src/llm/config.js
ADDED

@@ -0,0 +1,38 @@
+"use strict";
+/**
+ * LLM provider configuration
+ *
+ * Defines the base URLs, API keys, and per-provider configuration.
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.PROVIDER_MAP = void 0;
+exports.detectProvider = detectProvider;
+exports.getDefaultProvider = getDefaultProvider;
+exports.PROVIDER_MAP = {
+    openai: {
+        baseURL: 'https://api.openai.com/v1',
+        keyEnv: 'OPENAI_API_KEY',
+        fallbackKeyEnv: 'LLM_API_KEY'
+    },
+    xai: {
+        baseURL: 'https://api.x.ai/v1',
+        keyEnv: 'XAI_API_KEY',
+        fallbackKeyEnv: 'LLM_API_KEY'
+    }
+};
+/**
+ * Detects the provider from a baseURL
+ */
+function detectProvider(baseURL) {
+    if (!baseURL)
+        return process.env.LLM_PROVIDER || 'openai';
+    if (baseURL.includes('x.ai'))
+        return 'xai';
+    return 'openai';
+}
+/**
+ * Returns the default provider from the environment variables
+ */
+function getDefaultProvider() {
+    return process.env.LLM_PROVIDER || 'openai';
+}
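A quick sketch of the resolution rules these two files encode, grounded in the config above (the root import path assumes the re-exports added in index.d.ts):

```ts
import { detectProvider, getDefaultProvider, PROVIDER_MAP } from 'agentic-api';

detectProvider('https://api.x.ai/v1');       // 'xai'  (baseURL contains 'x.ai')
detectProvider('https://api.openai.com/v1'); // 'openai'
detectProvider();                            // LLM_PROVIDER from the env, else 'openai'

// Key resolution order used by llmInstance(): explicit key > keyEnv > fallbackKeyEnv
const { keyEnv, fallbackKeyEnv } = PROVIDER_MAP[getDefaultProvider()];
const apiKey = process.env[keyEnv] ?? (fallbackKeyEnv ? process.env[fallbackKeyEnv] : undefined);
```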
package/dist/src/llm/index.d.ts
ADDED

@@ -0,0 +1,48 @@
+/**
+ * LLM Provider Instances
+ *
+ * Provides a provider-configurable OpenAI instance:
+ * - llmInstance(): uses LLM_PROVIDER from .env
+ * - llmInstance({provider: 'openai'}): forces OpenAI
+ * - llmInstance({provider: 'xai'}): forces xAI
+ *
+ * Configuration via .env:
+ * - LLM_PROVIDER: openai | xai (default: openai)
+ * - OPENAI_API_KEY: OpenAI API key
+ * - XAI_API_KEY: xAI API key
+ * - LLM_API_KEY: fallback for both providers
+ */
+import OpenAI from 'openai';
+import { LLMConfig } from './config';
+declare global {
+    var _llmInstances_: Map<string, OpenAI> | undefined;
+}
+/**
+ * Provider-configurable LLM instance
+ *
+ * @param config - Optional configuration
+ * @param config.provider - Provider to use ('openai' | 'xai')
+ * @param config.key - API key (optional, defaults to the env)
+ * @param config.baseUrl - Base URL (optional, defaults to the provider config)
+ *
+ * @example
+ * // Uses the default provider (.env LLM_PROVIDER)
+ * const openai = llmInstance();
+ *
+ * // Forces OpenAI (for embeddings, whisper)
+ * const openai = llmInstance({provider: 'openai'});
+ *
+ * // Forces xAI
+ * const openai = llmInstance({provider: 'xai'});
+ */
+export declare function llmInstance(config?: LLMConfig): OpenAI;
+/**
+ * @deprecated Use llmInstance() instead
+ * Alias kept for backwards compatibility
+ */
+export declare function openaiInstance(envKey?: string, baseUrl?: string): OpenAI;
+/**
+ * Resets all instances (useful for tests)
+ */
+export declare function resetInstances(): void;
+export { PROVIDER_MAP, LLMProvider, LLMConfig, detectProvider, getDefaultProvider, ProviderConfig } from './config';
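The deprecated `openaiInstance(envKey, baseUrl)` signature is kept as a shim over the new API. A sketch of the mapping, per the implementation in llm/index.js below (the concrete key/URL values here are illustrative):

```ts
import { openaiInstance, llmInstance } from 'agentic-api';

// Legacy call: the key is read from process.env.XAI_API_KEY (falling back to
// the literal string), and the provider is inferred from the baseUrl.
const legacy = openaiInstance('XAI_API_KEY', 'https://api.x.ai/v1');

// Equivalent new-style call
const modern = llmInstance({
  provider: 'xai',
  key: process.env.XAI_API_KEY,
  baseUrl: 'https://api.x.ai/v1',
});
```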
package/dist/src/llm/index.js
ADDED

@@ -0,0 +1,115 @@
+"use strict";
+/**
+ * LLM Provider Instances
+ *
+ * Provides a provider-configurable OpenAI instance:
+ * - llmInstance(): uses LLM_PROVIDER from .env
+ * - llmInstance({provider: 'openai'}): forces OpenAI
+ * - llmInstance({provider: 'xai'}): forces xAI
+ *
+ * Configuration via .env:
+ * - LLM_PROVIDER: openai | xai (default: openai)
+ * - OPENAI_API_KEY: OpenAI API key
+ * - XAI_API_KEY: xAI API key
+ * - LLM_API_KEY: fallback for both providers
+ */
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.getDefaultProvider = exports.detectProvider = exports.PROVIDER_MAP = void 0;
+exports.llmInstance = llmInstance;
+exports.openaiInstance = openaiInstance;
+exports.resetInstances = resetInstances;
+const openai_1 = __importDefault(require("openai"));
+const config_1 = require("./config");
+function getInstancesMap() {
+    if (!globalThis._llmInstances_) {
+        globalThis._llmInstances_ = new Map();
+    }
+    return globalThis._llmInstances_;
+}
+/**
+ * Provider-configurable LLM instance
+ *
+ * @param config - Optional configuration
+ * @param config.provider - Provider to use ('openai' | 'xai')
+ * @param config.key - API key (optional, defaults to the env)
+ * @param config.baseUrl - Base URL (optional, defaults to the provider config)
+ *
+ * @example
+ * // Uses the default provider (.env LLM_PROVIDER)
+ * const openai = llmInstance();
+ *
+ * // Forces OpenAI (for embeddings, whisper)
+ * const openai = llmInstance({provider: 'openai'});
+ *
+ * // Forces xAI
+ * const openai = llmInstance({provider: 'xai'});
+ */
+function llmInstance(config) {
+    const provider = config?.provider || (0, config_1.getDefaultProvider)();
+    const providerConfig = config_1.PROVIDER_MAP[provider];
+    if (!providerConfig) {
+        throw new Error(`Unknown provider: ${provider}. Valid providers: ${Object.keys(config_1.PROVIDER_MAP).join(', ')}`);
+    }
+    //
+    // Cache key: provider plus any customizations
+    const cacheKey = config?.key || config?.baseUrl
+        ? `${provider}-${config?.key || ''}-${config?.baseUrl || ''}`
+        : provider;
+    const instances = getInstancesMap();
+    //
+    // Return the existing instance when available (cache)
+    if (instances.has(cacheKey)) {
+        return instances.get(cacheKey);
+    }
+    //
+    // Resolve the API key: custom > provider env > fallback env
+    const apiKey = config?.key
+        || process.env[providerConfig.keyEnv]
+        || (providerConfig.fallbackKeyEnv ? process.env[providerConfig.fallbackKeyEnv] : undefined);
+    if (!apiKey) {
+        throw new Error(`API key missing for provider "${provider}". ` +
+            `Set ${providerConfig.keyEnv} or ${providerConfig.fallbackKeyEnv || 'LLM_API_KEY'} in your environment.`);
+    }
+    //
+    // Resolve the base URL: custom > provider config
+    const baseURL = config?.baseUrl || providerConfig.baseURL;
+    const instance = new openai_1.default({
+        apiKey,
+        baseURL,
+        timeout: 60000 * 15,
+    });
+    instances.set(cacheKey, instance);
+    return instance;
+}
+/**
+ * @deprecated Use llmInstance() instead
+ * Alias kept for backwards compatibility
+ */
+function openaiInstance(envKey, baseUrl) {
+    //
+    // If legacy parameters are passed, use them
+    if (envKey || baseUrl) {
+        const provider = baseUrl?.includes('x.ai') ? 'xai' : 'openai';
+        return llmInstance({
+            provider,
+            key: envKey ? (process.env[envKey] || envKey) : undefined,
+            baseUrl
+        });
+    }
+    return llmInstance();
+}
+/**
+ * Resets all instances (useful for tests)
+ */
+function resetInstances() {
+    globalThis._llmInstances_ = undefined;
+}
+//
+// Re-export config
+var config_2 = require("./config");
+Object.defineProperty(exports, "PROVIDER_MAP", { enumerable: true, get: function () { return config_2.PROVIDER_MAP; } });
+Object.defineProperty(exports, "detectProvider", { enumerable: true, get: function () { return config_2.detectProvider; } });
+Object.defineProperty(exports, "getDefaultProvider", { enumerable: true, get: function () { return config_2.getDefaultProvider; } });
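A hedged sketch of the caching contract implemented above: instances are memoized per provider (plus any key/baseUrl customization) in a map on `globalThis`, and `resetInstances()` drops the whole map, which is handy in test setup/teardown:

```ts
import { llmInstance, resetInstances } from 'agentic-api';

const a = llmInstance({ provider: 'openai' });
const b = llmInstance({ provider: 'openai' });
console.assert(a === b, 'same cache key -> same client instance');

// A custom baseUrl (illustrative URL) gets its own cache entry
const custom = llmInstance({ provider: 'openai', baseUrl: 'http://localhost:8080/v1' });
console.assert(custom !== a, 'customized config -> separate instance');

resetInstances(); // the next llmInstance() call builds a fresh client
```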
package/dist/src/llm/openai.js
ADDED

@@ -0,0 +1,154 @@
+"use strict";
+/**
+ * OpenAI Model Mappings
+ *
+ * Model configuration for the OpenAI provider
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.LLMopenai = void 0;
+exports.LLMopenai = {
+    // Vision capable models
+    "VISION": {
+        model: "gpt-5-mini",
+        temperature: 0.2,
+        stream: false
+    },
+    "VISION-fast": {
+        model: "gpt-5-mini",
+        temperature: 0.2,
+        stream: false
+    },
+    // Embedding models
+    // Note: memories-lite uses this model via agentic-server/src/config/memories.ts
+    // Usage: modelConfig('EMBEDDING-small', { provider: 'openai' }).model
+    "EMBEDDING-small": {
+        model: "text-embedding-3-small",
+        dimensions: 1536
+    },
+    "EMBEDDING-large": {
+        model: "text-embedding-3-large",
+        dimensions: 3072
+    },
+    // Audio models
+    "WHISPER": {
+        model: "whisper-1"
+    },
+    "TRANSCRIBE": {
+        model: "gpt-4o-mini-transcribe"
+    },
+    // Chat/Responses models
+    "LOW-fast": {
+        temperature: 1,
+        frequency_penalty: 0.0,
+        presence_penalty: 0.0,
+        model: "gpt-5-nano",
+        reasoning_effort: "minimal",
+        verbosity: "low",
+        stream: true
+    },
+    "LOW": {
+        temperature: 1,
+        frequency_penalty: 0.0,
+        presence_penalty: 0.0,
+        model: "gpt-5-nano",
+        reasoning_effort: "low",
+        verbosity: "low",
+        stream: true
+    },
+    "LOW-medium": {
+        temperature: 1,
+        frequency_penalty: 0.0,
+        presence_penalty: 0.0,
+        model: "gpt-5-nano",
+        reasoning_effort: "medium",
+        verbosity: "low",
+        stream: true
+    },
+    "LOW-4fast": {
+        temperature: .2,
+        frequency_penalty: 0.0,
+        presence_penalty: 0.0,
+        model: "gpt-4.1-nano",
+        stream: true
+    },
+    "MEDIUM-4.1-mini": {
+        temperature: .2,
+        frequency_penalty: 0.0,
+        presence_penalty: 0.0,
+        model: "gpt-4.1-mini",
+        stream: true
+    },
+    "MEDIUM-4.1": {
+        temperature: .2,
+        frequency_penalty: 0.0,
+        presence_penalty: 0.0,
+        model: "gpt-4.1",
+        stream: true
+    },
+    "MEDIUM-fast": {
+        temperature: 1,
+        frequency_penalty: 0.0,
+        presence_penalty: 0.0,
+        model: "gpt-5-mini",
+        reasoning_effort: "minimal",
+        verbosity: "low",
+        stream: true
+    },
+    "MEDIUM": {
+        temperature: 1,
+        frequency_penalty: 0.0,
+        presence_penalty: 0.0,
+        model: "gpt-5-mini",
+        reasoning_effort: "low",
+        verbosity: "low",
+        stream: true
+    },
+    "HIGH-fast": {
+        model: "gpt-5.2",
+        reasoning_effort: "none",
+        verbosity: "low",
+        temperature: 1,
+        stream: true
+    },
+    "HIGH": {
+        model: "gpt-5.2",
+        reasoning_effort: "low",
+        verbosity: "low",
+        stream: true
+    },
+    "HIGH-medium": {
+        model: "gpt-5.2",
+        reasoning_effort: "medium",
+        verbosity: "low",
+        stream: true
+    },
+    "SEARCH-fast": {
+        model: "gpt-5-nano",
+        tools: [
+            {
+                type: "web_search_preview",
+                user_location: {
+                    type: "approximate",
+                    country: "CH",
+                    city: "Geneva",
+                    region: "Geneva",
+                },
+            }
+        ],
+    },
+    "SEARCH": {
+        model: "gpt-5-mini",
+        reasoning: { effort: "low" },
+        tools: [
+            {
+                type: "web_search_preview",
+                user_location: {
+                    type: "approximate",
+                    country: "CH",
+                    city: "Geneva",
+                    region: "Geneva",
+                },
+            }
+        ],
+    },
+};
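The mapping resolves logical tier names (LOW/MEDIUM/HIGH/SEARCH, plus vision, embedding, and audio entries) to concrete request options. A minimal lookup sketch, reusing the usage documented in the inline note above:

```ts
import { modelConfig, LLMopenai } from 'agentic-api';

// Documented lookup path from the note in openai.js:
const embeddingModel = modelConfig('EMBEDDING-small', { provider: 'openai' }).model;
// => 'text-embedding-3-small' (1536 dimensions in the mapping)

// The raw mapping is exported as well:
const medium = LLMopenai['MEDIUM'];
// { model: 'gpt-5-mini', reasoning_effort: 'low', verbosity: 'low', stream: true, ... }
```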
package/dist/src/llm/pricing.d.ts
ADDED

@@ -0,0 +1,26 @@
+/**
+ * LLM Pricing
+ *
+ * Handles per-model pricing.
+ * The model mappings live in openai.ts and xai.ts
+ */
+import { CompletionUsage } from "openai/resources";
+import { Usage } from "../types";
+import { LLMProvider } from "./config";
+type ModelPricing = {
+    input: number;
+    cachedInput?: number;
+    output: number;
+};
+export declare const modelPricing: Record<string, ModelPricing>;
+export declare function calculateCost(model: string, usage?: CompletionUsage): number;
+export declare function accumulateCost(currentUsage: Usage, model: string, usage?: CompletionUsage): number;
+/**
+ * Returns the model mapping for a given provider
+ *
+ * @param provider - Provider ('openai' | 'xai') or an OpenAI instance (legacy)
+ * @param forceThinking - Forces reasoning_effort to high
+ */
+export declare function LLM(provider?: LLMProvider | any, forceThinking?: boolean): Record<string, any>;
+export { LLMopenai } from './openai';
+export { LLMxai } from './xai';
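A sketch of the three ways `LLM()` resolves its provider argument, per the declaration above and the implementation in pricing.js below:

```ts
import { LLM, llmInstance } from 'agentic-api';

const byName = LLM('xai');    // explicit provider name
const byDefault = LLM();      // falls back to getDefaultProvider()
const legacy = LLM(llmInstance({ provider: 'xai' })); // legacy: inferred from the instance's baseURL

// forceThinking bumps reasoning_effort to "high" on entries that define one;
// entries without it are left untouched.
const thinking = LLM('openai', true);
```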
package/dist/src/llm/pricing.js
ADDED

@@ -0,0 +1,129 @@
+"use strict";
+/**
+ * LLM Pricing
+ *
+ * Handles per-model pricing.
+ * The model mappings live in openai.ts and xai.ts
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.LLMxai = exports.LLMopenai = exports.modelPricing = void 0;
+exports.calculateCost = calculateCost;
+exports.accumulateCost = accumulateCost;
+exports.LLM = LLM;
+const config_1 = require("./config");
+const openai_1 = require("./openai");
+const xai_1 = require("./xai");
+//
+// Per-model pricing
+// - https://platform.openai.com/docs/pricing#latest-models
+// - https://x.ai/api (for xAI)
+exports.modelPricing = {
+    // OpenAI GPT-4 family
+    "gpt-4.5-preview": { input: 0.000075, cachedInput: 0.0000325, output: 0.000125 },
+    "gpt-4.1": { input: 0.000002, cachedInput: 0.0000005, output: 0.000008 },
+    "gpt-4.1-mini": { input: 0.0000004, cachedInput: 0.0000001, output: 0.0000016 },
+    "gpt-4.1-nano": { input: 0.0000001, cachedInput: 0.000000025, output: 0.0000004 },
+    "gpt-4o": { input: 0.0000025, cachedInput: 0.00000125, output: 0.00001 },
+    "gpt-4o-audio-preview": { input: 0.0000025, output: 0.00001 },
+    "gpt-4o-realtime-preview": { input: 0.000005, cachedInput: 0.0000025, output: 0.00002 },
+    "gpt-4o-search-preview": { input: 0.000005, cachedInput: 0.0000025, output: 0.00002 },
+    "gpt-4o-mini": { input: 0.00000015, cachedInput: 0.000000075, output: 0.0000006 },
+    "gpt-4o-mini-audio-preview": { input: 0.00000015, output: 0.0000006 },
+    "gpt-4o-mini-realtime-preview": { input: 0.0000006, cachedInput: 0.0000003, output: 0.0000024 },
+    "gpt-4o-mini-search-preview": { input: 0.0000015, cachedInput: 0.00000075, output: 0.000006 },
+    "gpt-4o-mini-transcribe": { input: 0.0000015, output: 0.000006 },
+    // OpenAI GPT-5 family
+    "gpt-5": { input: 0.00000125, output: 0.00001 },
+    "gpt-5.1": { input: 0.00000125, output: 0.00001 },
+    "gpt-5.2": { input: 0.00000175, cachedInput: 0.000000175, output: 0.000014 },
+    "gpt-5-mini": { input: 0.00000025, output: 0.000002 },
+    "gpt-5-nano": { input: 0.00000005, output: 0.0000004 },
+    // OpenAI o-series
+    "o1": { input: 0.000015, cachedInput: 0.0000075, output: 0.00006 },
+    "o4-mini": { input: 0.0000011, cachedInput: 0.00000055, output: 0.0000044 },
+    "o3-mini": { input: 0.0000011, cachedInput: 0.00000055, output: 0.0000044 },
+    "o1-mini": { input: 0.0000011, cachedInput: 0.00000055, output: 0.0000044 },
+    // OpenAI Embeddings
+    "text-embedding-3-small": { input: 0.00000002, output: 0 },
+    "text-embedding-3-large": { input: 0.00000013, output: 0 },
+    // OpenAI Audio
+    "whisper-1": { input: 0.0001, output: 0 }, // per second
+    // xAI Grok 4 family (December 2024)
+    // https://x.ai/api#pricing - Price per token ($/1M tokens divided by 1M)
+    "grok-4": { input: 0.000003, cachedInput: 0.000003, output: 0.000015 },
+    "grok-4-1-fast-reasoning": { input: 0.0000002, cachedInput: 0.0000002, output: 0.0000005 },
+    "grok-4-1-fast-non-reasoning": { input: 0.0000002, cachedInput: 0.0000002, output: 0.0000005 },
+    "grok-4-fast-reasoning": { input: 0.0000002, cachedInput: 0.0000002, output: 0.0000005 },
+    "grok-4-fast-non-reasoning": { input: 0.0000002, cachedInput: 0.0000002, output: 0.0000005 },
+    "grok-code-fast-1": { input: 0.0000002, output: 0.0000015 },
+};
+function calculateCost(model, usage) {
+    if (!usage) {
+        return 0;
+    }
+    if (!exports.modelPricing[model]) {
+        console.warn(`⚠️ Unknown model for pricing: ${model}`);
+        return 0;
+    }
+    const pricing = exports.modelPricing[model];
+    const cost = usage.prompt_tokens * pricing.input +
+        usage.completion_tokens * pricing.output;
+    return cost;
+}
+function accumulateCost(currentUsage, model, usage) {
+    if (!usage) {
+        return 0;
+    }
+    currentUsage.prompt += usage.prompt_tokens || 0;
+    currentUsage.completion += usage.completion_tokens || 0;
+    currentUsage.total += usage.total_tokens || 0;
+    const cost = calculateCost(model, usage);
+    currentUsage.cost += cost;
+    return currentUsage.cost;
+}
+//
+// Provider → models mapping
+const LLMmapping = {
+    openai: openai_1.LLMopenai,
+    xai: xai_1.LLMxai
+};
+/**
+ * Returns the model mapping for a given provider
+ *
+ * @param provider - Provider ('openai' | 'xai') or an OpenAI instance (legacy)
+ * @param forceThinking - Forces reasoning_effort to high
+ */
+function LLM(provider, forceThinking) {
+    let resolvedProvider;
+    //
+    // Legacy support: an OpenAI instance passed directly
+    if (provider && typeof provider === 'object' && provider.baseURL) {
+        resolvedProvider = provider.baseURL.includes('x.ai') ? 'xai' : 'openai';
+    }
+    else if (typeof provider === 'string') {
+        resolvedProvider = provider;
+    }
+    else {
+        resolvedProvider = (0, config_1.getDefaultProvider)();
+    }
+    const mapping = LLMmapping[resolvedProvider] || LLMmapping.openai;
+    //
+    // Clone to avoid mutating the original
+    const result = { ...mapping };
+    //
+    // Force reasoning_effort to high if thinking is enabled
+    if (forceThinking) {
+        Object.keys(result).forEach(key => {
+            if (result[key]?.reasoning_effort) {
+                result[key] = { ...result[key], reasoning_effort: "high" };
+            }
+        });
+    }
+    return result;
+}
+//
+// Re-export the mappings for backwards compatibility
+var openai_2 = require("./openai");
+Object.defineProperty(exports, "LLMopenai", { enumerable: true, get: function () { return openai_2.LLMopenai; } });
+var xai_2 = require("./xai");
+Object.defineProperty(exports, "LLMxai", { enumerable: true, get: function () { return xai_2.LLMxai; } });
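A worked example of the cost arithmetic above, using the gpt-5-mini rates from `modelPricing` (input $0.00000025/token, output $0.000002/token). Note that `calculateCost` as implemented prices only prompt and completion tokens; `cachedInput` is declared in the type but not used in the computation:

```ts
import { calculateCost, accumulateCost } from 'agentic-api';

const usage = { prompt_tokens: 1000, completion_tokens: 500, total_tokens: 1500 };

// 1000 * 0.00000025 + 500 * 0.000002 = 0.00025 + 0.001 = $0.00125
const cost = calculateCost('gpt-5-mini', usage);

// accumulateCost also folds the tokens and cost into a running Usage record
const running = { prompt: 0, completion: 0, total: 0, cost: 0 };
accumulateCost(running, 'gpt-5-mini', usage);
// running => { prompt: 1000, completion: 500, total: 1500, cost: 0.00125 }
```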