agentic-api 2.0.314 → 2.0.585
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +37 -34
- package/dist/src/agents/prompts.d.ts +1 -1
- package/dist/src/agents/prompts.js +9 -7
- package/dist/src/agents/reducer.core.js +2 -2
- package/dist/src/agents/simulator.d.ts +33 -4
- package/dist/src/agents/simulator.dashboard.d.ts +140 -0
- package/dist/src/agents/simulator.dashboard.js +344 -0
- package/dist/src/agents/simulator.executor.d.ts +9 -3
- package/dist/src/agents/simulator.executor.js +43 -17
- package/dist/src/agents/simulator.js +103 -19
- package/dist/src/agents/simulator.prompts.d.ts +9 -8
- package/dist/src/agents/simulator.prompts.js +68 -62
- package/dist/src/agents/simulator.types.d.ts +39 -4
- package/dist/src/agents/simulator.utils.d.ts +22 -1
- package/dist/src/agents/simulator.utils.js +27 -2
- package/dist/src/execute/helpers.d.ts +75 -0
- package/dist/src/execute/helpers.js +139 -0
- package/dist/src/execute/index.d.ts +11 -0
- package/dist/src/execute/index.js +44 -0
- package/dist/src/execute/legacy.d.ts +46 -0
- package/dist/src/{execute.js → execute/legacy.js} +130 -232
- package/dist/src/execute/modelconfig.d.ts +29 -0
- package/dist/src/execute/modelconfig.js +72 -0
- package/dist/src/execute/responses.d.ts +55 -0
- package/dist/src/execute/responses.js +595 -0
- package/dist/src/execute/shared.d.ts +83 -0
- package/dist/src/execute/shared.js +188 -0
- package/dist/src/index.d.ts +5 -1
- package/dist/src/index.js +21 -2
- package/dist/src/llm/config.d.ts +25 -0
- package/dist/src/llm/config.js +38 -0
- package/dist/src/llm/index.d.ts +48 -0
- package/dist/src/llm/index.js +115 -0
- package/dist/src/llm/openai.d.ts +6 -0
- package/dist/src/llm/openai.js +154 -0
- package/dist/src/llm/pricing.d.ts +26 -0
- package/dist/src/llm/pricing.js +129 -0
- package/dist/src/llm/xai.d.ts +17 -0
- package/dist/src/llm/xai.js +90 -0
- package/dist/src/pricing.llm.d.ts +3 -15
- package/dist/src/pricing.llm.js +10 -230
- package/dist/src/prompts.d.ts +0 -1
- package/dist/src/prompts.js +51 -118
- package/dist/src/rag/embeddings.d.ts +5 -1
- package/dist/src/rag/embeddings.js +23 -7
- package/dist/src/rag/parser.js +1 -1
- package/dist/src/rag/rag.manager.d.ts +33 -2
- package/dist/src/rag/rag.manager.js +159 -61
- package/dist/src/rag/types.d.ts +2 -0
- package/dist/src/rag/usecase.js +8 -11
- package/dist/src/rules/git/git.e2e.helper.js +21 -2
- package/dist/src/rules/git/git.health.d.ts +4 -2
- package/dist/src/rules/git/git.health.js +113 -16
- package/dist/src/rules/git/index.d.ts +1 -1
- package/dist/src/rules/git/index.js +3 -2
- package/dist/src/rules/git/repo.d.ts +57 -7
- package/dist/src/rules/git/repo.js +326 -39
- package/dist/src/rules/git/repo.pr.d.ts +8 -0
- package/dist/src/rules/git/repo.pr.js +161 -13
- package/dist/src/rules/git/repo.tools.d.ts +5 -1
- package/dist/src/rules/git/repo.tools.js +54 -7
- package/dist/src/rules/types.d.ts +25 -0
- package/dist/src/rules/utils.matter.d.ts +0 -20
- package/dist/src/rules/utils.matter.js +58 -81
- package/dist/src/scrapper.js +3 -2
- package/dist/src/stategraph/stategraph.d.ts +26 -1
- package/dist/src/stategraph/stategraph.js +43 -2
- package/dist/src/stategraph/stategraph.storage.js +4 -0
- package/dist/src/stategraph/types.d.ts +5 -0
- package/dist/src/types.d.ts +42 -7
- package/dist/src/types.js +8 -7
- package/dist/src/usecase.js +1 -1
- package/dist/src/utils.d.ts +0 -8
- package/dist/src/utils.js +26 -29
- package/package.json +9 -7
- package/dist/src/execute.d.ts +0 -63
package/dist/src/execute/shared.js
ADDED

@@ -0,0 +1,188 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.DummyWritable = void 0;
+exports.sendFeedback = sendFeedback;
+exports.normalizeOptionsForResponses = normalizeOptionsForResponses;
+exports.normalizedFunctionCallFromResponse = normalizedFunctionCallFromResponse;
+exports.normalizeOutputFromResponses = normalizeOutputFromResponses;
+exports.convertMessagesToResponsesInput = convertMessagesToResponsesInput;
+//
+// Empty Writable for tests
+exports.DummyWritable = {
+    write: () => { }
+};
+//
+// Sends real-time feedback to the UX
+// Informs the user of the analysis state: active agent, description, usage, state
+function sendFeedback(params) {
+    const { agent, stdout, description, usage, state, verbose } = params;
+    const feedback = {
+        agent,
+        description,
+        usage,
+        state
+    };
+    // if(verbose) {
+    //     console.log('--- DBG sendFeedback:',agent, description || '--', state);
+    // }
+    //
+    // send agent state and description
+    stdout.write(`\n<step>${JSON.stringify(feedback)}</step>\n`);
+}
+/**
+ * Normalizes options for the Responses API
+ *
+ * Transforms parameters from the Chat Completions format to the Responses API:
+ * - Tools: { type: "function", function: {...} } → { type: "function", name, description, parameters, strict }
+ * - Other future transformations if needed
+ *
+ * @param options - Options to normalize
+ * @returns Options normalized for the Responses API
+ */
+function normalizeOptionsForResponses(options) {
+    const normalized = { ...options };
+    // Transform tools to the Responses API format
+    if (normalized.tools && normalized.tools.length > 0) {
+        normalized.tools = normalized.tools.map((tool) => {
+            if (tool.type === 'function' && tool.function) {
+                // Chat Completions format → Responses API
+                return {
+                    type: 'function',
+                    name: tool.function.name,
+                    description: tool.function.description,
+                    parameters: tool.function.parameters,
+                    strict: tool.function.strict ?? true
+                };
+            }
+            // Already in the right format, or a different type
+            return tool;
+        });
+    }
+    return normalized;
+}
+/**
+ * Normalizes a function call from the Responses format to Chat Completions
+ *
+ * Responses API: { type: "function_call", name, arguments, call_id }
+ * Chat Completions: { id, type: "function", function: { name, arguments } }
+ *
+ * If already in the Chat Completions format (with .function), returns it as-is
+ *
+ * @param functionCall - Function call in the Responses or Chat Completions format
+ * @returns Function call in the Chat Completions format
+ */
+function normalizedFunctionCallFromResponse(functionCall) {
+    // If already in the Chat Completions format, return as-is
+    if (functionCall.function) {
+        return functionCall;
+    }
+    // Transform from the Responses format to Chat Completions
+    return {
+        id: functionCall.call_id,
+        type: "function",
+        function: {
+            name: functionCall.name,
+            arguments: functionCall.arguments
+        }
+    };
+}
+/**
+ * Normalizes the Responses API output to the Chat Completions format
+ *
+ * Transforms the Responses API structure into the shape expected by the rest of the code
+ *
+ * Responses API format (result.output array):
+ * - { type: "reasoning", summary: [] }
+ * - { type: "message", content: [{ type: "output_text", text: "..." }] }
+ * - { type: "function_call", name, arguments, call_id }
+ *
+ * Compatible Chat Completions format:
+ * - { choices: [{ message: { content, tool_calls, reasoning_text } }], usage: {...}, id: "..." }
+ *
+ * @param result - Result of stream.finalResponse()
+ * @returns Result in the Chat Completions format, with reasoning_text when present
+ */
+function normalizeOutputFromResponses(result) {
+    let text = '';
+    let reasoningText = ''; // ✅ Capture reasoning_text for reasoning models (o-series, gpt-5)
+    const functionCalls = [];
+    // Walk the output items to extract text, reasoning and function_calls
+    if (result.output && Array.isArray(result.output)) {
+        for (const item of result.output) {
+            // Function calls are directly in output
+            if (item.type === 'function_call') {
+                functionCalls.push(item);
+            }
+            // ✅ Capture reasoning (v5.22.0)
+            else if (item.type === 'reasoning' && item.summary) {
+                reasoningText = Array.isArray(item.summary) ? item.summary.join('\n') : String(item.summary);
+            }
+            // Messages contain the text
+            else if (item.type === 'message' && item.content) {
+                for (const content of item.content) {
+                    if (content.type === 'output_text' && content.text) {
+                        text += content.text;
+                    }
+                }
+            }
+        }
+    }
+    // Transform every function_call to the Chat Completions format
+    const toolCalls = functionCalls.map(normalizedFunctionCallFromResponse);
+    // Return in the Chat Completions format with optional reasoning_text
+    return {
+        choices: [{
+                message: {
+                    tool_calls: toolCalls,
+                    content: text,
+                    reasoning_text: reasoningText || undefined // ✅ Optional, only when present
+                }
+            }],
+        usage: {
+            prompt_tokens: result.usage?.input_tokens || 0,
+            completion_tokens: result.usage?.output_tokens || 0,
+            total_tokens: (result.usage?.input_tokens || 0) + (result.usage?.output_tokens || 0)
+        },
+        id: result.id
+    };
+}
+/**
+ * Converts stateGraph messages to the Responses API input format
+ * Handles normal messages, tool_calls (Chat Completions), and function_call_output (Responses)
+ *
+ * Used by:
+ * - readCompletionsStream (responses.ts) for the follow-up after tool calls
+ * - executeAgentSet (responses.ts) to prepare the initial input
+ *
+ * @param messages stateGraph messages
+ * @returns Array of items in the Responses API input format
+ */
+function convertMessagesToResponsesInput(messages) {
+    return messages.flatMap((m) => {
+        // ✅ function_call_output messages have a different format (no role)
+        if (m.type === 'function_call_output') {
+            return [{ type: m.type, call_id: m.call_id, output: m.output }];
+        }
+        // ✅ Messages with tool_calls → convert to the Responses API format
+        // Documentation: "input_list += response.output" appends reasoning + function_calls
+        if (m.tool_calls && m.tool_calls.length > 0) {
+            const result = [];
+            // Add the assistant message when content is present
+            if (m.content) {
+                result.push({ role: m.role, content: m.content });
+            }
+            // Convert each tool_call (Chat Completions) into a function_call (Responses)
+            for (const tc of m.tool_calls) {
+                result.push({
+                    type: "function_call",
+                    call_id: tc.id,
+                    name: tc.function.name,
+                    arguments: tc.function.arguments
+                });
+            }
+            return result;
+        }
+        // Normal messages with a role
+        return [{ role: m.role, content: m.content }];
+    });
+}
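To make the mapping above concrete, here is a minimal TypeScript sketch with a hypothetical payload (not taken from the package's tests, and assuming `normalizeOutputFromResponses` is re-exported through `./execute`):

```ts
import { normalizeOutputFromResponses } from 'agentic-api';

// Hypothetical Responses API result, shaped as described in the JSDoc above.
const result = {
  id: 'resp_123',
  output: [
    { type: 'reasoning', summary: ['compared two options'] },
    { type: 'message', content: [{ type: 'output_text', text: 'Done.' }] },
    { type: 'function_call', call_id: 'call_1', name: 'lookup', arguments: '{"q":"x"}' },
  ],
  usage: { input_tokens: 120, output_tokens: 30 },
};

const chat = normalizeOutputFromResponses(result);
// chat.choices[0].message.content        → 'Done.'
// chat.choices[0].message.reasoning_text → 'compared two options'
// chat.choices[0].message.tool_calls[0]  → { id: 'call_1', type: 'function',
//                                            function: { name: 'lookup', arguments: '{"q":"x"}' } }
// chat.usage → { prompt_tokens: 120, completion_tokens: 30, total_tokens: 150 }
```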
package/dist/src/index.d.ts
CHANGED

@@ -6,7 +6,10 @@ export * from './utils';
 export * from './types';
 export * from './stategraph';
 export * from './execute';
-export
+export { llmInstance, resetInstances, openaiInstance, PROVIDER_MAP, detectProvider, getDefaultProvider } from './llm';
+export type { LLMProvider, LLMConfig, ProviderConfig } from './llm';
+export { modelPricing, calculateCost, accumulateCost, LLM, LLMxai, LLMopenai } from './llm/pricing';
+export { modelConfig } from './execute/modelconfig';
 export * from './scrapper';
 export * from './agents/reducer';
 export * from './agents/semantic';

@@ -18,3 +21,4 @@ export * from './agents/simulator';
 export * from './agents/simulator.types';
 export * from './agents/simulator.prompts';
 export * from './agents/simulator.utils';
+export * from './agents/simulator.dashboard';
package/dist/src/index.js
CHANGED

@@ -18,15 +18,33 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
+exports.modelConfig = exports.LLMopenai = exports.LLMxai = exports.LLM = exports.accumulateCost = exports.calculateCost = exports.modelPricing = exports.getDefaultProvider = exports.detectProvider = exports.PROVIDER_MAP = exports.openaiInstance = exports.resetInstances = exports.llmInstance = void 0;
 // Export utilities
 __exportStar(require("./utils"), exports);
 // Types
 __exportStar(require("./types"), exports);
 // StateGraph - new architecture for managing conversations
 __exportStar(require("./stategraph"), exports);
-// Execute
+// Execute (with legacy/responses feature toggle)
 __exportStar(require("./execute"), exports);
-
+// LLM - providers and pricing
+var llm_1 = require("./llm");
+Object.defineProperty(exports, "llmInstance", { enumerable: true, get: function () { return llm_1.llmInstance; } });
+Object.defineProperty(exports, "resetInstances", { enumerable: true, get: function () { return llm_1.resetInstances; } });
+Object.defineProperty(exports, "openaiInstance", { enumerable: true, get: function () { return llm_1.openaiInstance; } });
+Object.defineProperty(exports, "PROVIDER_MAP", { enumerable: true, get: function () { return llm_1.PROVIDER_MAP; } });
+Object.defineProperty(exports, "detectProvider", { enumerable: true, get: function () { return llm_1.detectProvider; } });
+Object.defineProperty(exports, "getDefaultProvider", { enumerable: true, get: function () { return llm_1.getDefaultProvider; } });
+var pricing_1 = require("./llm/pricing");
+Object.defineProperty(exports, "modelPricing", { enumerable: true, get: function () { return pricing_1.modelPricing; } });
+Object.defineProperty(exports, "calculateCost", { enumerable: true, get: function () { return pricing_1.calculateCost; } });
+Object.defineProperty(exports, "accumulateCost", { enumerable: true, get: function () { return pricing_1.accumulateCost; } });
+Object.defineProperty(exports, "LLM", { enumerable: true, get: function () { return pricing_1.LLM; } });
+Object.defineProperty(exports, "LLMxai", { enumerable: true, get: function () { return pricing_1.LLMxai; } });
+Object.defineProperty(exports, "LLMopenai", { enumerable: true, get: function () { return pricing_1.LLMopenai; } });
+// Model config (central place for all mappings: responses, embeddings, vision, whisper)
+var modelconfig_1 = require("./execute/modelconfig");
+Object.defineProperty(exports, "modelConfig", { enumerable: true, get: function () { return modelconfig_1.modelConfig; } });
 // Scrapper
 __exportStar(require("./scrapper"), exports);
 // Agents

@@ -44,3 +62,4 @@ __exportStar(require("./agents/simulator"), exports);
 __exportStar(require("./agents/simulator.types"), exports);
 __exportStar(require("./agents/simulator.prompts"), exports);
 __exportStar(require("./agents/simulator.utils"), exports);
+__exportStar(require("./agents/simulator.dashboard"), exports);
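For consumers, the new top-level surface resolves to something like this sketch (assuming the package entry point is imported as published; the env-based defaults come from llm/config.js below, and the modelConfig call follows the usage comment in openai.js further down):

```ts
import { llmInstance, modelConfig } from 'agentic-api';

// Provider picked from LLM_PROVIDER (defaults to 'openai'); instances are cached.
const defaultClient = llmInstance();

// Force a specific provider regardless of the environment default.
const xaiClient = llmInstance({ provider: 'xai' });

// Model tiers resolve through modelConfig, e.g. for embeddings:
const embeddingModel = modelConfig('EMBEDDING-small', { provider: 'openai' }).model;
```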
package/dist/src/llm/config.d.ts
ADDED

@@ -0,0 +1,25 @@
+/**
+ * LLM provider configuration
+ *
+ * Defines the baseURLs, API keys, and per-provider settings.
+ */
+export type LLMProvider = 'openai' | 'xai';
+export interface LLMConfig {
+    provider?: LLMProvider;
+    key?: string;
+    baseUrl?: string;
+}
+export interface ProviderConfig {
+    baseURL: string;
+    keyEnv: string;
+    fallbackKeyEnv?: string;
+}
+export declare const PROVIDER_MAP: Record<LLMProvider, ProviderConfig>;
+/**
+ * Detects the provider from a baseURL
+ */
+export declare function detectProvider(baseURL?: string): LLMProvider;
+/**
+ * Returns the default provider from environment variables
+ */
+export declare function getDefaultProvider(): LLMProvider;
package/dist/src/llm/config.js
ADDED

@@ -0,0 +1,38 @@
+"use strict";
+/**
+ * LLM provider configuration
+ *
+ * Defines the baseURLs, API keys, and per-provider settings.
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.PROVIDER_MAP = void 0;
+exports.detectProvider = detectProvider;
+exports.getDefaultProvider = getDefaultProvider;
+exports.PROVIDER_MAP = {
+    openai: {
+        baseURL: 'https://api.openai.com/v1',
+        keyEnv: 'OPENAI_API_KEY',
+        fallbackKeyEnv: 'LLM_API_KEY'
+    },
+    xai: {
+        baseURL: 'https://api.x.ai/v1',
+        keyEnv: 'XAI_API_KEY',
+        fallbackKeyEnv: 'LLM_API_KEY'
+    }
+};
+/**
+ * Detects the provider from a baseURL
+ */
+function detectProvider(baseURL) {
+    if (!baseURL)
+        return process.env.LLM_PROVIDER || 'openai';
+    if (baseURL.includes('x.ai'))
+        return 'xai';
+    return 'openai';
+}
+/**
+ * Returns the default provider from environment variables
+ */
+function getDefaultProvider() {
+    return process.env.LLM_PROVIDER || 'openai';
+}
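The resolution above is a simple substring check; a quick sketch of the expected results:

```ts
import { detectProvider, getDefaultProvider } from 'agentic-api';

detectProvider('https://api.x.ai/v1');       // 'xai'  (URL contains 'x.ai')
detectProvider('https://api.openai.com/v1'); // 'openai'
detectProvider();                            // falls back to LLM_PROVIDER, else 'openai'
getDefaultProvider();                        // LLM_PROVIDER, else 'openai'
```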
package/dist/src/llm/index.d.ts
ADDED

@@ -0,0 +1,48 @@
+/**
+ * LLM Provider Instances
+ *
+ * Provides an OpenAI instance configurable per provider:
+ * - llmInstance(): uses LLM_PROVIDER from .env
+ * - llmInstance({provider: 'openai'}): forces OpenAI
+ * - llmInstance({provider: 'xai'}): forces xAI
+ *
+ * Configuration via .env:
+ * - LLM_PROVIDER: openai | xai (default: openai)
+ * - OPENAI_API_KEY: OpenAI API key
+ * - XAI_API_KEY: xAI API key
+ * - LLM_API_KEY: fallback for both providers
+ */
+import OpenAI from 'openai';
+import { LLMConfig } from './config';
+declare global {
+    var _llmInstances_: Map<string, OpenAI> | undefined;
+}
+/**
+ * Provider-configurable LLM instance
+ *
+ * @param config - Optional configuration
+ * @param config.provider - Provider to use ('openai' | 'xai')
+ * @param config.key - API key (optional, defaults to env)
+ * @param config.baseUrl - Base URL (optional, defaults to the provider config)
+ *
+ * @example
+ * // Uses the default provider (.env LLM_PROVIDER)
+ * const openai = llmInstance();
+ *
+ * // Forces OpenAI (for embeddings, whisper)
+ * const openai = llmInstance({provider: 'openai'});
+ *
+ * // Forces xAI
+ * const openai = llmInstance({provider: 'xai'});
+ */
+export declare function llmInstance(config?: LLMConfig): OpenAI;
+/**
+ * @deprecated Use llmInstance() instead
+ * Alias kept for backward compatibility
+ */
+export declare function openaiInstance(envKey?: string, baseUrl?: string): OpenAI;
+/**
+ * Resets all instances (useful for tests)
+ */
+export declare function resetInstances(): void;
+export { PROVIDER_MAP, LLMProvider, LLMConfig, detectProvider, getDefaultProvider, ProviderConfig } from './config';
package/dist/src/llm/index.js
ADDED

@@ -0,0 +1,115 @@
+"use strict";
+/**
+ * LLM Provider Instances
+ *
+ * Provides an OpenAI instance configurable per provider:
+ * - llmInstance(): uses LLM_PROVIDER from .env
+ * - llmInstance({provider: 'openai'}): forces OpenAI
+ * - llmInstance({provider: 'xai'}): forces xAI
+ *
+ * Configuration via .env:
+ * - LLM_PROVIDER: openai | xai (default: openai)
+ * - OPENAI_API_KEY: OpenAI API key
+ * - XAI_API_KEY: xAI API key
+ * - LLM_API_KEY: fallback for both providers
+ */
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.getDefaultProvider = exports.detectProvider = exports.PROVIDER_MAP = void 0;
+exports.llmInstance = llmInstance;
+exports.openaiInstance = openaiInstance;
+exports.resetInstances = resetInstances;
+const openai_1 = __importDefault(require("openai"));
+const config_1 = require("./config");
+function getInstancesMap() {
+    if (!globalThis._llmInstances_) {
+        globalThis._llmInstances_ = new Map();
+    }
+    return globalThis._llmInstances_;
+}
+/**
+ * Provider-configurable LLM instance
+ *
+ * @param config - Optional configuration
+ * @param config.provider - Provider to use ('openai' | 'xai')
+ * @param config.key - API key (optional, defaults to env)
+ * @param config.baseUrl - Base URL (optional, defaults to the provider config)
+ *
+ * @example
+ * // Uses the default provider (.env LLM_PROVIDER)
+ * const openai = llmInstance();
+ *
+ * // Forces OpenAI (for embeddings, whisper)
+ * const openai = llmInstance({provider: 'openai'});
+ *
+ * // Forces xAI
+ * const openai = llmInstance({provider: 'xai'});
+ */
+function llmInstance(config) {
+    const provider = config?.provider || (0, config_1.getDefaultProvider)();
+    const providerConfig = config_1.PROVIDER_MAP[provider];
+    if (!providerConfig) {
+        throw new Error(`Unknown provider: ${provider}. Valid providers: ${Object.keys(config_1.PROVIDER_MAP).join(', ')}`);
+    }
+    //
+    // Cache key: provider + any customizations
+    const cacheKey = config?.key || config?.baseUrl
+        ? `${provider}-${config?.key || ''}-${config?.baseUrl || ''}`
+        : provider;
+    const instances = getInstancesMap();
+    //
+    // Return the existing instance when available (cache)
+    if (instances.has(cacheKey)) {
+        return instances.get(cacheKey);
+    }
+    //
+    // Resolve the API key: custom > provider env > fallback env
+    const apiKey = config?.key
+        || process.env[providerConfig.keyEnv]
+        || (providerConfig.fallbackKeyEnv ? process.env[providerConfig.fallbackKeyEnv] : undefined);
+    if (!apiKey) {
+        throw new Error(`API key missing for provider "${provider}". ` +
+            `Set ${providerConfig.keyEnv} or ${providerConfig.fallbackKeyEnv || 'LLM_API_KEY'} in your environment.`);
+    }
+    //
+    // Resolve the base URL: custom > provider config
+    const baseURL = config?.baseUrl || providerConfig.baseURL;
+    const instance = new openai_1.default({
+        apiKey,
+        baseURL,
+        timeout: 60000 * 15,
+    });
+    instances.set(cacheKey, instance);
+    return instance;
+}
+/**
+ * @deprecated Use llmInstance() instead
+ * Alias kept for backward compatibility
+ */
+function openaiInstance(envKey, baseUrl) {
+    //
+    // If legacy parameters are passed, use them
+    if (envKey || baseUrl) {
+        const provider = baseUrl?.includes('x.ai') ? 'xai' : 'openai';
+        return llmInstance({
+            provider,
+            key: envKey ? (process.env[envKey] || envKey) : undefined,
+            baseUrl
+        });
+    }
+    return llmInstance();
+}
+/**
+ * Resets all instances (useful for tests)
+ */
+function resetInstances() {
+    globalThis._llmInstances_ = undefined;
+}
+//
+// Re-export config
+var config_2 = require("./config");
+Object.defineProperty(exports, "PROVIDER_MAP", { enumerable: true, get: function () { return config_2.PROVIDER_MAP; } });
+Object.defineProperty(exports, "detectProvider", { enumerable: true, get: function () { return config_2.detectProvider; } });
+Object.defineProperty(exports, "getDefaultProvider", { enumerable: true, get: function () { return config_2.getDefaultProvider; } });
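A short sketch of the caching contract implemented above (assuming OPENAI_API_KEY is set; the localhost URL is purely illustrative):

```ts
import { llmInstance, resetInstances } from 'agentic-api';

// Same provider, no overrides → the same cached OpenAI client is returned.
const a = llmInstance({ provider: 'openai' });
const b = llmInstance({ provider: 'openai' });
console.assert(a === b);

// A custom key or baseUrl gets its own cache entry.
const local = llmInstance({ provider: 'openai', baseUrl: 'http://localhost:8080/v1' });
console.assert(local !== a);

// Tests can drop every cached client and start fresh.
resetInstances();
console.assert(llmInstance({ provider: 'openai' }) !== a);
```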
package/dist/src/llm/openai.js
ADDED

@@ -0,0 +1,154 @@
+"use strict";
+/**
+ * OpenAI Model Mappings
+ *
+ * Model configuration for the OpenAI provider
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.LLMopenai = void 0;
+exports.LLMopenai = {
+    // Vision capable models
+    "VISION": {
+        model: "gpt-5-mini",
+        temperature: 0.2,
+        stream: false
+    },
+    "VISION-fast": {
+        model: "gpt-5-mini",
+        temperature: 0.2,
+        stream: false
+    },
+    // Embedding models
+    // Note: memories-lite uses this model via agentic-server/src/config/memories.ts
+    // Usage: modelConfig('EMBEDDING-small', { provider: 'openai' }).model
+    "EMBEDDING-small": {
+        model: "text-embedding-3-small",
+        dimensions: 1536
+    },
+    "EMBEDDING-large": {
+        model: "text-embedding-3-large",
+        dimensions: 3072
+    },
+    // Audio models
+    "WHISPER": {
+        model: "whisper-1"
+    },
+    "TRANSCRIBE": {
+        model: "gpt-4o-mini-transcribe"
+    },
+    // Chat/Responses models
+    "LOW-fast": {
+        temperature: 1,
+        frequency_penalty: 0.0,
+        presence_penalty: 0.0,
+        model: "gpt-5-nano",
+        reasoning_effort: "minimal",
+        verbosity: "low",
+        stream: true
+    },
+    "LOW": {
+        temperature: 1,
+        frequency_penalty: 0.0,
+        presence_penalty: 0.0,
+        model: "gpt-5-nano",
+        reasoning_effort: "low",
+        verbosity: "low",
+        stream: true
+    },
+    "LOW-medium": {
+        temperature: 1,
+        frequency_penalty: 0.0,
+        presence_penalty: 0.0,
+        model: "gpt-5-nano",
+        reasoning_effort: "medium",
+        verbosity: "low",
+        stream: true
+    },
+    "LOW-4fast": {
+        temperature: .2,
+        frequency_penalty: 0.0,
+        presence_penalty: 0.0,
+        model: "gpt-4.1-nano",
+        stream: true
+    },
+    "MEDIUM-4.1-mini": {
+        temperature: .2,
+        frequency_penalty: 0.0,
+        presence_penalty: 0.0,
+        model: "gpt-4.1-mini",
+        stream: true
+    },
+    "MEDIUM-4.1": {
+        temperature: .2,
+        frequency_penalty: 0.0,
+        presence_penalty: 0.0,
+        model: "gpt-4.1",
+        stream: true
+    },
+    "MEDIUM-fast": {
+        temperature: 1,
+        frequency_penalty: 0.0,
+        presence_penalty: 0.0,
+        model: "gpt-5-mini",
+        reasoning_effort: "minimal",
+        verbosity: "low",
+        stream: true
+    },
+    "MEDIUM": {
+        temperature: 1,
+        frequency_penalty: 0.0,
+        presence_penalty: 0.0,
+        model: "gpt-5-mini",
+        reasoning_effort: "low",
+        verbosity: "low",
+        stream: true
+    },
+    "HIGH-fast": {
+        model: "gpt-5.2",
+        reasoning_effort: "none",
+        verbosity: "low",
+        temperature: 1,
+        stream: true
+    },
+    "HIGH": {
+        model: "gpt-5.2",
+        reasoning_effort: "low",
+        verbosity: "low",
+        stream: true
+    },
+    "HIGH-medium": {
+        model: "gpt-5.2",
+        reasoning_effort: "medium",
+        verbosity: "low",
+        stream: true
+    },
+    "SEARCH-fast": {
+        model: "gpt-5-nano",
+        tools: [
+            {
+                type: "web_search_preview",
+                user_location: {
+                    type: "approximate",
+                    country: "CH",
+                    city: "Geneva",
+                    region: "Geneva",
+                },
+            }
+        ],
+    },
+    "SEARCH": {
+        model: "gpt-5-mini",
+        reasoning: { effort: "low" },
+        tools: [
+            {
+                type: "web_search_preview",
+                user_location: {
+                    type: "approximate",
+                    country: "CH",
+                    city: "Geneva",
+                    region: "Geneva",
+                },
+            }
+        ],
+    },
+};
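Each entry is a ready-to-spread options object; a sketch of how a tier might be consumed (the destructuring here is illustrative, not the package's own helper):

```ts
import { LLMopenai } from 'agentic-api';

// Pick a tier and split the model name from the request options.
const { model, stream, ...opts } = LLMopenai['MEDIUM'];
// model → 'gpt-5-mini'; opts carries reasoning_effort: 'low', verbosity: 'low', etc.
```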
package/dist/src/llm/pricing.d.ts
ADDED

@@ -0,0 +1,26 @@
+/**
+ * LLM Pricing
+ *
+ * Handles model pricing.
+ * Model mappings live in openai.ts and xai.ts
+ */
+import { CompletionUsage } from "openai/resources";
+import { Usage } from "../types";
+import { LLMProvider } from "./config";
+type ModelPricing = {
+    input: number;
+    cachedInput?: number;
+    output: number;
+};
+export declare const modelPricing: Record<string, ModelPricing>;
+export declare function calculateCost(model: string, usage?: CompletionUsage): number;
+export declare function accumulateCost(currentUsage: Usage, model: string, usage?: CompletionUsage): number;
+/**
+ * Returns the model mapping for the given provider
+ *
+ * @param provider - Provider ('openai' | 'xai') or an OpenAI instance (legacy)
+ * @param forceThinking - Forces reasoning_effort to high
+ */
+export declare function LLM(provider?: LLMProvider | any, forceThinking?: boolean): Record<string, any>;
+export { LLMopenai } from './openai';
+export { LLMxai } from './xai';
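pricing.js itself is not part of this excerpt, so the following is only a sketch of the declared surface; the rate units and the Usage shape are assumptions based on the names:

```ts
import { calculateCost, accumulateCost } from 'agentic-api';
import type { Usage } from 'agentic-api';

// Usage object as returned by a Chat Completions call.
const usage = { prompt_tokens: 1200, completion_tokens: 300, total_tokens: 1500 };

// Cost of one call for the given model, per the modelPricing table.
const cost = calculateCost('gpt-5-mini', usage);

// accumulateCost folds the same computation into a running Usage record.
const session = {} as Usage; // shape defined in src/types, not shown in this diff
const total = accumulateCost(session, 'gpt-5-mini', usage);
```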