agentic-api 2.0.314 → 2.0.585

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/README.md +37 -34
  2. package/dist/src/agents/prompts.d.ts +1 -1
  3. package/dist/src/agents/prompts.js +9 -7
  4. package/dist/src/agents/reducer.core.js +2 -2
  5. package/dist/src/agents/simulator.d.ts +33 -4
  6. package/dist/src/agents/simulator.dashboard.d.ts +140 -0
  7. package/dist/src/agents/simulator.dashboard.js +344 -0
  8. package/dist/src/agents/simulator.executor.d.ts +9 -3
  9. package/dist/src/agents/simulator.executor.js +43 -17
  10. package/dist/src/agents/simulator.js +103 -19
  11. package/dist/src/agents/simulator.prompts.d.ts +9 -8
  12. package/dist/src/agents/simulator.prompts.js +68 -62
  13. package/dist/src/agents/simulator.types.d.ts +39 -4
  14. package/dist/src/agents/simulator.utils.d.ts +22 -1
  15. package/dist/src/agents/simulator.utils.js +27 -2
  16. package/dist/src/execute/helpers.d.ts +75 -0
  17. package/dist/src/execute/helpers.js +139 -0
  18. package/dist/src/execute/index.d.ts +11 -0
  19. package/dist/src/execute/index.js +44 -0
  20. package/dist/src/execute/legacy.d.ts +46 -0
  21. package/dist/src/{execute.js → execute/legacy.js} +130 -232
  22. package/dist/src/execute/modelconfig.d.ts +29 -0
  23. package/dist/src/execute/modelconfig.js +72 -0
  24. package/dist/src/execute/responses.d.ts +55 -0
  25. package/dist/src/execute/responses.js +595 -0
  26. package/dist/src/execute/shared.d.ts +83 -0
  27. package/dist/src/execute/shared.js +188 -0
  28. package/dist/src/index.d.ts +5 -1
  29. package/dist/src/index.js +21 -2
  30. package/dist/src/llm/config.d.ts +25 -0
  31. package/dist/src/llm/config.js +38 -0
  32. package/dist/src/llm/index.d.ts +48 -0
  33. package/dist/src/llm/index.js +115 -0
  34. package/dist/src/llm/openai.d.ts +6 -0
  35. package/dist/src/llm/openai.js +154 -0
  36. package/dist/src/llm/pricing.d.ts +26 -0
  37. package/dist/src/llm/pricing.js +129 -0
  38. package/dist/src/llm/xai.d.ts +17 -0
  39. package/dist/src/llm/xai.js +90 -0
  40. package/dist/src/pricing.llm.d.ts +3 -15
  41. package/dist/src/pricing.llm.js +10 -230
  42. package/dist/src/prompts.d.ts +0 -1
  43. package/dist/src/prompts.js +51 -118
  44. package/dist/src/rag/embeddings.d.ts +5 -1
  45. package/dist/src/rag/embeddings.js +23 -7
  46. package/dist/src/rag/parser.js +1 -1
  47. package/dist/src/rag/rag.manager.d.ts +33 -2
  48. package/dist/src/rag/rag.manager.js +159 -61
  49. package/dist/src/rag/types.d.ts +2 -0
  50. package/dist/src/rag/usecase.js +8 -11
  51. package/dist/src/rules/git/git.e2e.helper.js +21 -2
  52. package/dist/src/rules/git/git.health.d.ts +4 -2
  53. package/dist/src/rules/git/git.health.js +113 -16
  54. package/dist/src/rules/git/index.d.ts +1 -1
  55. package/dist/src/rules/git/index.js +3 -2
  56. package/dist/src/rules/git/repo.d.ts +57 -7
  57. package/dist/src/rules/git/repo.js +326 -39
  58. package/dist/src/rules/git/repo.pr.d.ts +8 -0
  59. package/dist/src/rules/git/repo.pr.js +161 -13
  60. package/dist/src/rules/git/repo.tools.d.ts +5 -1
  61. package/dist/src/rules/git/repo.tools.js +54 -7
  62. package/dist/src/rules/types.d.ts +25 -0
  63. package/dist/src/rules/utils.matter.d.ts +0 -20
  64. package/dist/src/rules/utils.matter.js +58 -81
  65. package/dist/src/scrapper.js +3 -2
  66. package/dist/src/stategraph/stategraph.d.ts +26 -1
  67. package/dist/src/stategraph/stategraph.js +43 -2
  68. package/dist/src/stategraph/stategraph.storage.js +4 -0
  69. package/dist/src/stategraph/types.d.ts +5 -0
  70. package/dist/src/types.d.ts +42 -7
  71. package/dist/src/types.js +8 -7
  72. package/dist/src/usecase.js +1 -1
  73. package/dist/src/utils.d.ts +0 -8
  74. package/dist/src/utils.js +26 -29
  75. package/package.json +9 -7
  76. package/dist/src/execute.d.ts +0 -63
@@ -0,0 +1,129 @@
1
+ "use strict";
2
+ /**
3
+ * LLM Pricing
4
+ *
5
+ * Gère le pricing des modèles.
6
+ * Les mappings de modèles sont dans openai.ts et xai.ts
7
+ */
8
+ Object.defineProperty(exports, "__esModule", { value: true });
9
+ exports.LLMxai = exports.LLMopenai = exports.modelPricing = void 0;
10
+ exports.calculateCost = calculateCost;
11
+ exports.accumulateCost = accumulateCost;
12
+ exports.LLM = LLM;
13
+ const config_1 = require("./config");
14
+ const openai_1 = require("./openai");
15
+ const xai_1 = require("./xai");
16
+ //
17
+ // Pricing par modèle
18
+ // - https://platform.openai.com/docs/pricing#latest-models
19
+ // - https://x.ai/api (pour xAI)
20
+ exports.modelPricing = {
21
+ // OpenAI GPT-4 family
22
+ "gpt-4.5-preview": { input: 0.000075, cachedInput: 0.0000325, output: 0.000125 },
23
+ "gpt-4.1": { input: 0.000002, cachedInput: 0.0000005, output: 0.000008 },
24
+ "gpt-4.1-mini": { input: 0.0000004, cachedInput: 0.0000001, output: 0.0000016 },
25
+ "gpt-4.1-nano": { input: 0.0000001, cachedInput: 0.000000025, output: 0.0000004 },
26
+ "gpt-4o": { input: 0.0000025, cachedInput: 0.00000125, output: 0.00001 },
27
+ "gpt-4o-audio-preview": { input: 0.0000025, output: 0.00001 },
28
+ "gpt-4o-realtime-preview": { input: 0.000005, cachedInput: 0.0000025, output: 0.00002 },
29
+ "gpt-4o-search-preview": { input: 0.000005, cachedInput: 0.0000025, output: 0.00002 },
30
+ "gpt-4o-mini": { input: 0.00000015, cachedInput: 0.000000075, output: 0.0000006 },
31
+ "gpt-4o-mini-audio-preview": { input: 0.00000015, output: 0.0000006 },
32
+ "gpt-4o-mini-realtime-preview": { input: 0.0000006, cachedInput: 0.0000003, output: 0.0000024 },
33
+ "gpt-4o-mini-search-preview": { input: 0.0000015, cachedInput: 0.00000075, output: 0.000006 },
34
+ "gpt-4o-mini-transcribe": { input: 0.0000015, output: 0.000006 },
35
+ // OpenAI GPT-5 family
36
+ "gpt-5": { input: 0.00000125, output: 0.00001 },
37
+ "gpt-5.1": { input: 0.00000125, output: 0.00001 },
38
+ "gpt-5.2": { input: 0.00000175, cachedInput: 0.000000175, output: 0.000014 },
39
+ "gpt-5-mini": { input: 0.00000025, output: 0.000002 },
40
+ "gpt-5-nano": { input: 0.00000005, output: 0.0000004 },
41
+ // OpenAI o-series
42
+ "o1": { input: 0.000015, cachedInput: 0.0000075, output: 0.00006 },
43
+ "o4-mini": { input: 0.0000011, cachedInput: 0.00000055, output: 0.0000044 },
44
+ "o3-mini": { input: 0.0000011, cachedInput: 0.00000055, output: 0.0000044 },
45
+ "o1-mini": { input: 0.0000011, cachedInput: 0.00000055, output: 0.0000044 },
46
+ // OpenAI Embeddings
47
+ "text-embedding-3-small": { input: 0.00000002, output: 0 },
48
+ "text-embedding-3-large": { input: 0.00000013, output: 0 },
49
+ // OpenAI Audio
50
+ "whisper-1": { input: 0.0001, output: 0 }, // per second
51
+ // xAI Grok 4 family (Décembre 2025)
52
+ // https://x.ai/api#pricing - Prix par token ($/1M tokens divisé par 1M)
53
+ "grok-4": { input: 0.000003, cachedInput: 0.000003, output: 0.000015 },
54
+ "grok-4-1-fast-reasoning": { input: 0.0000002, cachedInput: 0.0000002, output: 0.0000005 },
55
+ "grok-4-1-fast-non-reasoning": { input: 0.0000002, cachedInput: 0.0000002, output: 0.0000005 },
56
+ "grok-4-fast-reasoning": { input: 0.0000002, cachedInput: 0.0000002, output: 0.0000005 },
57
+ "grok-4-fast-non-reasoning": { input: 0.0000002, cachedInput: 0.0000002, output: 0.0000005 },
58
+ "grok-code-fast-1": { input: 0.0000002, output: 0.0000015 },
59
+ };
60
+ function calculateCost(model, usage) {
61
+ if (!usage) {
62
+ return 0;
63
+ }
64
+ if (!exports.modelPricing[model]) {
65
+ console.warn(`⚠️ Unknown model for pricing: ${model}`);
66
+ return 0;
67
+ }
68
+ const pricing = exports.modelPricing[model];
69
+ const cost = usage.prompt_tokens * pricing.input +
70
+ usage.completion_tokens * pricing.output;
71
+ return cost;
72
+ }
73
+ function accumulateCost(currentUsage, model, usage) {
74
+ if (!usage) {
75
+ return 0;
76
+ }
77
+ currentUsage.prompt += usage.prompt_tokens || 0;
78
+ currentUsage.completion += usage.completion_tokens || 0;
79
+ currentUsage.total += usage.total_tokens || 0;
80
+ const cost = calculateCost(model, usage);
81
+ currentUsage.cost += cost;
82
+ return currentUsage.cost;
83
+ }
84
+ //
85
+ // Mapping provider → modèles
86
+ const LLMmapping = {
87
+ openai: openai_1.LLMopenai,
88
+ xai: xai_1.LLMxai
89
+ };
90
+ /**
91
+ * Retourne le mapping des modèles selon le provider
92
+ *
93
+ * @param provider - Provider ('openai' | 'xai') ou instance OpenAI (legacy)
94
+ * @param forceThinking - Force reasoning_effort à high
95
+ */
96
+ function LLM(provider, forceThinking) {
97
+ let resolvedProvider;
98
+ //
99
+ // Support legacy: instance OpenAI passée directement
100
+ if (provider && typeof provider === 'object' && provider.baseURL) {
101
+ resolvedProvider = provider.baseURL.includes('x.ai') ? 'xai' : 'openai';
102
+ }
103
+ else if (typeof provider === 'string') {
104
+ resolvedProvider = provider;
105
+ }
106
+ else {
107
+ resolvedProvider = (0, config_1.getDefaultProvider)();
108
+ }
109
+ const mapping = LLMmapping[resolvedProvider] || LLMmapping.openai;
110
+ //
111
+ // Clone pour éviter de muter l'original
112
+ const result = { ...mapping };
113
+ //
114
+ // Force reasoning_effort to high if thinking is enabled
115
+ if (forceThinking) {
116
+ Object.keys(result).forEach(key => {
117
+ if (result[key]?.reasoning_effort) {
118
+ result[key] = { ...result[key], reasoning_effort: "high" };
119
+ }
120
+ });
121
+ }
122
+ return result;
123
+ }
124
+ //
125
+ // Re-export des mappings pour rétrocompatibilité
126
+ var openai_2 = require("./openai");
127
+ Object.defineProperty(exports, "LLMopenai", { enumerable: true, get: function () { return openai_2.LLMopenai; } });
128
+ var xai_2 = require("./xai");
129
+ Object.defineProperty(exports, "LLMxai", { enumerable: true, get: function () { return xai_2.LLMxai; } });
@@ -0,0 +1,17 @@
1
+ /**
2
+ * xAI (Grok) Model Mappings
3
+ *
4
+ * Configuration des modèles pour le provider xAI
5
+ * Note: xAI ne supporte pas les embeddings ni Whisper nativement
6
+ *
7
+ * Modèles disponibles (Décembre 2025):
8
+ * - grok-4: Flagship model ($3.00/$15.00 per 1M tokens)
9
+ * - grok-4-1-fast-reasoning: Fast reasoning ($0.20/$0.50 per 1M tokens)
10
+ * - grok-4-1-fast-non-reasoning: Fast non-reasoning ($0.20/$0.50 per 1M tokens)
11
+ * - grok-4-fast-reasoning: Fast reasoning ($0.20/$0.50 per 1M tokens)
12
+ * - grok-4-fast-non-reasoning: Fast non-reasoning ($0.20/$0.50 per 1M tokens)
13
+ * - grok-code-fast-1: Agentic coding ($0.20/$1.50 per 1M tokens)
14
+ *
15
+ * Pricing: https://x.ai/api#pricing
16
+ */
17
+ export declare const LLMxai: Record<string, any>;
@@ -0,0 +1,90 @@
1
+ "use strict";
2
+ /**
3
+ * xAI (Grok) Model Mappings
4
+ *
5
+ * Configuration des modèles pour le provider xAI
6
+ * Note: xAI ne supporte pas les embeddings ni Whisper nativement
7
+ *
8
+ * Modèles disponibles (Décembre 2025):
9
+ * - grok-4: Flagship model ($3.00/$15.00 per 1M tokens)
10
+ * - grok-4-1-fast-reasoning: Fast reasoning ($0.20/$0.50 per 1M tokens)
11
+ * - grok-4-1-fast-non-reasoning: Fast non-reasoning ($0.20/$0.50 per 1M tokens)
12
+ * - grok-4-fast-reasoning: Fast reasoning ($0.20/$0.50 per 1M tokens)
13
+ * - grok-4-fast-non-reasoning: Fast non-reasoning ($0.20/$0.50 per 1M tokens)
14
+ * - grok-code-fast-1: Agentic coding ($0.20/$1.50 per 1M tokens)
15
+ *
16
+ * Pricing: https://x.ai/api#pricing
17
+ */
18
+ Object.defineProperty(exports, "__esModule", { value: true });
19
+ exports.LLMxai = void 0;
20
+ exports.LLMxai = {
21
+ // Vision capable models
22
+ "VISION": {
23
+ model: "grok-4",
24
+ temperature: 0.2,
25
+ stream: false
26
+ },
27
+ "VISION-fast": {
28
+ model: "grok-4-fast-reasoning",
29
+ temperature: 0.2,
30
+ stream: false
31
+ },
32
+ // Embedding models - xAI ne supporte pas les embeddings
33
+ // Si utilisé, une erreur sera levée par modelConfig
34
+ // "EMBEDDING-small": undefined,
35
+ // "EMBEDDING-large": undefined,
36
+ // Audio models - xAI ne supporte pas Whisper
37
+ // "WHISPER": undefined,
38
+ // "TRANSCRIBE": undefined,
39
+ // Chat/Responses models
40
+ "LOW-fast": {
41
+ temperature: 0.2,
42
+ model: "grok-code-fast-1",
43
+ stream: true
44
+ },
45
+ "LOW": {
46
+ temperature: 0.2,
47
+ model: "grok-4-fast-non-reasoning",
48
+ stream: true
49
+ },
50
+ "LOW-medium": {
51
+ temperature: 0.2,
52
+ model: "grok-4-fast-non-reasoning",
53
+ stream: true
54
+ },
55
+ "MEDIUM-fast": {
56
+ temperature: 0.2,
57
+ model: "grok-4-1-fast-reasoning",
58
+ stream: true
59
+ },
60
+ "MEDIUM": {
61
+ temperature: 0.2,
62
+ model: "grok-4-fast-reasoning",
63
+ stream: true
64
+ },
65
+ "HIGH-fast": {
66
+ model: "grok-4-1-fast-reasoning",
67
+ temperature: 0.2,
68
+ stream: true
69
+ },
70
+ "HIGH": {
71
+ model: "grok-4",
72
+ temperature: 0.2,
73
+ stream: true
74
+ },
75
+ "HIGH-medium": {
76
+ model: "grok-4",
77
+ temperature: 0.2,
78
+ stream: true
79
+ },
80
+ "SEARCH-fast": {
81
+ temperature: 0.2,
82
+ model: "grok-code-fast-1",
83
+ tools: [{ type: "web_search" }],
84
+ },
85
+ "SEARCH": {
86
+ temperature: 0.2,
87
+ model: "grok-4-fast-reasoning",
88
+ tools: [{ type: "web_search" }],
89
+ },
90
+ };
@@ -1,17 +1,5 @@
1
- import { CompletionUsage } from "openai/resources";
2
- import { Usage } from "./types";
3
- type ModelPricing = {
4
- input: number;
5
- cachedInput?: number;
6
- output: number;
7
- };
8
- export declare const modelPricing: Record<string, ModelPricing>;
9
- export declare function calculateCost(model: string, usage?: CompletionUsage): number;
10
- export declare function accumulateCost(currentUsage: Usage, model: string, usage?: CompletionUsage): number;
11
- export declare function LLM(openai: any): any;
12
- export declare const LLMxai: any;
13
1
  /**
14
- * Get model mapping for OpenAI
2
+ * @deprecated Ce fichier est conservé pour rétrocompatibilité.
3
+ * Utiliser les imports depuis './llm/pricing' directement.
15
4
  */
16
- export declare const LLMopenai: any;
17
- export {};
5
+ export { modelPricing, calculateCost, accumulateCost, LLM, LLMxai, LLMopenai, } from './llm/pricing';
@@ -1,234 +1,14 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.LLMopenai = exports.LLMxai = exports.modelPricing = void 0;
4
- exports.calculateCost = calculateCost;
5
- exports.accumulateCost = accumulateCost;
6
- exports.LLM = LLM;
7
- //
8
- // get/update pricing from openai
9
- // - https://platform.openai.com/docs/pricing#latest-models
10
- exports.modelPricing = {
11
- "gpt-4.5-preview": { input: 0.000075, cachedInput: 0.0000325, output: 0.000125 },
12
- "gpt-4.1": { input: 0.000002, cachedInput: 0.0000005, output: 0.000008 },
13
- "gpt-4.1-mini": { input: 0.0000004, cachedInput: 0.0000001, output: 0.0000016 },
14
- "gpt-4.1-nano": { input: 0.0000001, cachedInput: 0.000000025, output: 0.0000004 },
15
- "gpt-4o": { input: 0.0000025, cachedInput: 0.00000125, output: 0.00001 },
16
- "gpt-4o-audio-preview": { input: 0.0000025, output: 0.00001 },
17
- "gpt-4o-realtime-preview": { input: 0.000005, cachedInput: 0.0000025, output: 0.00002 },
18
- "gpt-4o-search-preview": { input: 0.000005, cachedInput: 0.0000025, output: 0.00002 },
19
- "gpt-4o-mini": { input: 0.00000015, cachedInput: 0.000000075, output: 0.0000006 },
20
- "gpt-4o-mini-audio-preview": { input: 0.00000015, output: 0.0000006 },
21
- "gpt-4o-mini-realtime-preview": { input: 0.0000006, cachedInput: 0.0000003, output: 0.0000024 },
22
- "gpt-4o-mini-search-preview": { input: 0.0000015, cachedInput: 0.00000075, output: 0.000006 },
23
- // GPT-5 family
24
- "gpt-5": { input: 0.00000125, output: 0.00001 },
25
- "gpt-5-mini": { input: 0.00000025, output: 0.000002 },
26
- "gpt-5-nano": { input: 0.00000005, output: 0.0000004 },
27
- "o1": { input: 0.000015, cachedInput: 0.0000075, output: 0.00006 },
28
- "o4-mini": { input: 0.0000011, cachedInput: 0.00000055, output: 0.0000044 },
29
- "o3-mini": { input: 0.0000011, cachedInput: 0.00000055, output: 0.0000044 },
30
- "o1-mini": { input: 0.0000011, cachedInput: 0.00000055, output: 0.0000044 },
31
- // Grok 4 family - Official pricing from xAI docs (per million tokens)
32
- "grok-4-fast-reasoning": { input: 0.0002, cachedInput: 0.0001, output: 0.0005 },
33
- "grok-4-fast-non-reasoning": { input: 0.0002, cachedInput: 0.0001, output: 0.0005 },
34
- "grok-4-0709": { input: 0.003, cachedInput: 0.0015, output: 0.015 },
35
- "grok-code-fast-1": { input: 0.0002, cachedInput: 0.0001, output: 0.0015 },
36
- };
37
- function calculateCost(model, usage) {
38
- if (!usage) {
39
- return 0;
40
- }
41
- if (!exports.modelPricing[model]) {
42
- throw new Error("Unknown model");
43
- }
44
- const pricing = exports.modelPricing[model];
45
- const cost = usage.prompt_tokens * pricing.input +
46
- // usage.completion_tokens * (pricing.cachedInput || 0) +
47
- usage.completion_tokens * pricing.output;
48
- return cost;
49
- }
50
- function accumulateCost(currentUsage, model, usage) {
51
- if (!usage) {
52
- return 0;
53
- }
54
- currentUsage.prompt += usage.prompt_tokens || 0;
55
- currentUsage.completion += usage.completion_tokens || 0;
56
- currentUsage.total += usage.total_tokens || 0;
57
- const cost = calculateCost(model, usage);
58
- currentUsage.cost += cost;
59
- return currentUsage.cost;
60
- }
61
- //
62
- // depending on the API source, return the correct mapping between ALIAS and destination models and options.
63
- // - LOW-fast: openai => gpt-5-nano, xAI => grok-4-nano
64
- // - etc.
65
- function LLM(openai) {
66
- //
67
- // Detect provider based on baseURL
68
- const mapping = openai?.baseURL ? LLMmapping[openai?.baseURL] : LLMmapping["default"];
69
- return mapping;
70
- }
71
- exports.LLMxai = {
72
- "LOW-fast": {
73
- temperature: 0.2,
74
- model: "grok-code-fast-1", // Fast code model, good for low-complexity tasks
75
- stream: true
76
- },
77
- "LOW": {
78
- temperature: 0.2,
79
- model: "grok-4-fast-non-reasoning", // Fast non-reasoning model
80
- stream: true
81
- },
82
- "MEDIUM-fast": {
83
- temperature: 0.2,
84
- model: "grok-4-fast-reasoning", // Fast reasoning model
85
- stream: true
86
- },
87
- "MEDIUM": {
88
- temperature: 0.2,
89
- model: "grok-4-fast-reasoning", // Fast reasoning model
90
- stream: true
91
- },
92
- "HIGH-fast": {
93
- model: "grok-4-fast-reasoning",
94
- temperature: 0.2,
95
- stream: true
96
- },
97
- "HIGH-low": {
98
- model: "grok-4-0709", // Most capable Grok-4 model
99
- temperature: 0.2,
100
- stream: true
101
- },
102
- "HIGH-medium": {
103
- model: "grok-4-0709", // Most capable Grok-4 model
104
- temperature: 0.2,
105
- stream: true
106
- },
107
- "SEARCH-fast": {
108
- temperature: 0.2,
109
- model: "grok-code-fast-1",
110
- web_search_options: {
111
- user_location: {
112
- type: "approximate",
113
- approximate: {
114
- country: "CH",
115
- city: "Geneva",
116
- region: "Geneva",
117
- },
118
- },
119
- },
120
- },
121
- "SEARCH": {
122
- temperature: 0.2,
123
- model: "grok-4-fast-non-reasoning",
124
- web_search_options: {
125
- user_location: {
126
- type: "approximate",
127
- approximate: {
128
- country: "CH",
129
- city: "Geneva",
130
- region: "Geneva",
131
- },
132
- },
133
- },
134
- },
135
- };
3
+ exports.LLMopenai = exports.LLMxai = exports.LLM = exports.accumulateCost = exports.calculateCost = exports.modelPricing = void 0;
136
4
  /**
137
- * Get model mapping for OpenAI
5
+ * @deprecated Ce fichier est conservé pour rétrocompatibilité.
6
+ * Utiliser les imports depuis './llm/pricing' directement.
138
7
  */
139
- exports.LLMopenai = {
140
- "LOW-fast": {
141
- temperature: 1,
142
- frequency_penalty: 0.0,
143
- presence_penalty: 0.0,
144
- model: "gpt-5-nano",
145
- reasoning_effort: "minimal",
146
- verbosity: "low",
147
- stream: true
148
- },
149
- "LOW": {
150
- temperature: 1,
151
- frequency_penalty: 0.0,
152
- presence_penalty: 0.0,
153
- model: "gpt-5-nano",
154
- reasoning_effort: "medium",
155
- verbosity: "low",
156
- stream: true
157
- },
158
- "MEDIUM-fast": {
159
- temperature: 1,
160
- frequency_penalty: 0.0,
161
- presence_penalty: 0.0,
162
- model: "gpt-5-mini",
163
- reasoning_effort: "minimal",
164
- verbosity: "low",
165
- stream: true
166
- },
167
- "LOW-4.1": {
168
- temperature: .2,
169
- frequency_penalty: 0.0,
170
- presence_penalty: 0.0,
171
- model: "gpt-4.1-nano",
172
- stream: true
173
- },
174
- "MEDIUM-4.1-mini": {
175
- temperature: .2,
176
- frequency_penalty: 0.0,
177
- presence_penalty: 0.0,
178
- model: "gpt-4.1-mini",
179
- stream: true
180
- },
181
- "MEDIUM-4.1": {
182
- temperature: .2,
183
- frequency_penalty: 0.0,
184
- presence_penalty: 0.0,
185
- model: "gpt-4.1",
186
- stream: true
187
- },
188
- "MEDIUM": {
189
- temperature: 1,
190
- frequency_penalty: 0.0,
191
- presence_penalty: 0.0,
192
- model: "gpt-5-mini",
193
- reasoning_effort: "low",
194
- verbosity: "low",
195
- stream: true
196
- },
197
- "HIGH-fast": {
198
- model: "gpt-5",
199
- reasoning_effort: "minimal",
200
- verbosity: "low",
201
- temperature: 1,
202
- stream: true
203
- },
204
- "HIGH-low": {
205
- model: "gpt-5",
206
- reasoning_effort: "low",
207
- verbosity: "low",
208
- stream: true
209
- },
210
- "HIGH-medium": {
211
- model: "gpt-5",
212
- reasoning_effort: "medium",
213
- verbosity: "low",
214
- stream: true
215
- },
216
- "SEARCH": {
217
- model: "gpt-4o-mini-search-preview",
218
- web_search_options: {
219
- user_location: {
220
- type: "approximate",
221
- approximate: {
222
- country: "CH",
223
- city: "Geneva",
224
- region: "Geneva",
225
- },
226
- },
227
- },
228
- },
229
- };
230
- const LLMmapping = {
231
- "https://api.x.ai/v1": exports.LLMxai,
232
- "https://api.openai.com/v1": exports.LLMopenai,
233
- "default": exports.LLMopenai
234
- };
8
+ var pricing_1 = require("./llm/pricing");
9
+ Object.defineProperty(exports, "modelPricing", { enumerable: true, get: function () { return pricing_1.modelPricing; } });
10
+ Object.defineProperty(exports, "calculateCost", { enumerable: true, get: function () { return pricing_1.calculateCost; } });
11
+ Object.defineProperty(exports, "accumulateCost", { enumerable: true, get: function () { return pricing_1.accumulateCost; } });
12
+ Object.defineProperty(exports, "LLM", { enumerable: true, get: function () { return pricing_1.LLM; } });
13
+ Object.defineProperty(exports, "LLMxai", { enumerable: true, get: function () { return pricing_1.LLMxai; } });
14
+ Object.defineProperty(exports, "LLMopenai", { enumerable: true, get: function () { return pricing_1.LLMopenai; } });
@@ -11,5 +11,4 @@ export interface UsecaseExtractionOptions {
11
11
  examples?: string;
12
12
  extended?: string;
13
13
  }
14
- export declare const usecaseExtractionPrompt_OLD: (file: string) => string;
15
14
  export declare const usecaseExtractionPrompt: (file: string, options?: UsecaseExtractionOptions) => string;