agentic-api 2.0.314 → 2.0.491

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. package/dist/src/agents/prompts.d.ts +1 -1
  2. package/dist/src/agents/prompts.js +9 -7
  3. package/dist/src/agents/simulator.d.ts +7 -3
  4. package/dist/src/agents/simulator.executor.d.ts +9 -3
  5. package/dist/src/agents/simulator.executor.js +43 -17
  6. package/dist/src/agents/simulator.js +47 -19
  7. package/dist/src/agents/simulator.prompts.d.ts +9 -8
  8. package/dist/src/agents/simulator.prompts.js +68 -62
  9. package/dist/src/agents/simulator.types.d.ts +4 -1
  10. package/dist/src/agents/simulator.utils.js +0 -2
  11. package/dist/src/execute/helpers.d.ts +75 -0
  12. package/dist/src/execute/helpers.js +139 -0
  13. package/dist/src/execute/index.d.ts +11 -0
  14. package/dist/src/execute/index.js +44 -0
  15. package/dist/src/execute/legacy.d.ts +46 -0
  16. package/dist/src/{execute.js → execute/legacy.js} +130 -232
  17. package/dist/src/execute/modelconfig.d.ts +19 -0
  18. package/dist/src/execute/modelconfig.js +56 -0
  19. package/dist/src/execute/responses.d.ts +55 -0
  20. package/dist/src/execute/responses.js +594 -0
  21. package/dist/src/execute/shared.d.ts +83 -0
  22. package/dist/src/execute/shared.js +188 -0
  23. package/dist/src/index.js +1 -1
  24. package/dist/src/pricing.llm.d.ts +1 -1
  25. package/dist/src/pricing.llm.js +39 -18
  26. package/dist/src/rag/embeddings.js +8 -2
  27. package/dist/src/rag/rag.manager.js +27 -15
  28. package/dist/src/rules/git/git.e2e.helper.js +21 -2
  29. package/dist/src/rules/git/git.health.d.ts +4 -2
  30. package/dist/src/rules/git/git.health.js +58 -16
  31. package/dist/src/rules/git/index.d.ts +1 -1
  32. package/dist/src/rules/git/index.js +3 -2
  33. package/dist/src/rules/git/repo.d.ts +46 -3
  34. package/dist/src/rules/git/repo.js +264 -23
  35. package/dist/src/rules/git/repo.pr.js +117 -13
  36. package/dist/src/rules/types.d.ts +11 -0
  37. package/dist/src/rules/utils.matter.js +16 -7
  38. package/dist/src/scrapper.js +1 -0
  39. package/dist/src/stategraph/stategraph.d.ts +26 -1
  40. package/dist/src/stategraph/stategraph.js +43 -2
  41. package/dist/src/stategraph/stategraph.storage.js +4 -0
  42. package/dist/src/stategraph/types.d.ts +5 -0
  43. package/dist/src/types.d.ts +42 -7
  44. package/dist/src/types.js +8 -7
  45. package/dist/src/usecase.js +1 -1
  46. package/dist/src/utils.js +28 -4
  47. package/package.json +9 -7
  48. package/dist/src/execute.d.ts +0 -63
@@ -0,0 +1,56 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.modelConfig = modelConfig;
4
+ const utils_1 = require("../utils");
5
+ const pricing_llm_1 = require("../pricing.llm");
6
+ /**
7
+ * Configuration des modèles pour Chat Completions (legacy) et Responses API
8
+ *
9
+ * Gère la configuration des modèles avec migration automatique des paramètres
10
+ * selon le provider (OpenAI, xAI) et l'API utilisée (legacy ou Responses)
11
+ *
12
+ * @param model - Alias du modèle (LOW-fast, MEDIUM-fast, HIGH-fast, HIGH-medium, etc.)
13
+ * @param custom - Options custom
14
+ * @param custom.thinking - Si true, active reasoning_effort élevé (via LLM mapping)
15
+ * @param forResponses - Si true, retourne format Responses API avec mappings:
16
+ * - reasoning_effort → reasoning: { effort }
17
+ * - verbosity → text.verbosity
18
+ * @returns Configuration du modèle
19
+ *
20
+ * NOTE: Pour GPT-5, temperature est toujours fixée à 1
21
+ * NOTE: reasoning_effort est géré automatiquement par LLM() selon le provider
22
+ */
23
+ function modelConfig(model, custom, forResponses = false) {
24
+ const thinking = custom?.thinking || false;
25
+ delete custom?.thinking;
26
+ const defaultOptions = Object.assign({
27
+ stream_options: { "include_usage": true },
28
+ }, custom || {});
29
+ //
30
+ // Get mapping based on provider (OpenAI vs xAI)
31
+ // LLM() applique automatiquement reasoning_effort si thinking=true
32
+ const mapping = (0, pricing_llm_1.LLM)((0, utils_1.openaiInstance)(), thinking);
33
+ const options = Object.assign({}, mapping[model], defaultOptions);
34
+ //
35
+ // Pour Responses API : mapper vers la nouvelle structure et filtrer les paramètres incompatibles
36
+ // Chat Completions utilise reasoning_effort (string) et verbosity (string)
37
+ // Responses API utilise reasoning: { effort: string } et text.verbosity (string)
38
+ if (forResponses) {
39
+ // Mapper reasoning_effort → reasoning: { effort }
40
+ if (options.reasoning_effort) {
41
+ options.reasoning = { effort: options.reasoning_effort };
42
+ delete options.reasoning_effort;
43
+ }
44
+ // Mapper verbosity → text.verbosity
45
+ if (options.verbosity) {
46
+ options.text = { verbosity: options.verbosity };
47
+ delete options.verbosity;
48
+ }
49
+ // Supprimer les paramètres non supportés par Responses API
50
+ const unsupportedParams = ['frequency_penalty', 'presence_penalty', 'stream_options', 'web_search_options'];
51
+ unsupportedParams.forEach(param => {
52
+ delete options[param];
53
+ });
54
+ }
55
+ return options;
56
+ }
@@ -0,0 +1,55 @@
1
/**
 * Responses API Implementation
 *
 * Migration from Chat Completions to the Responses API for:
 * - Better support for reasoning models (o-series, gpt-5)
 * - Tool calls preserved during reasoning via reasoning: { effort }
 * - Typed SSE events (response.output_text.delta, response.function_call.*)
 * - No more moreThinkin loop (obsolete): reasoning is handled natively
 *
 * Key differences vs legacy:
 * - `input` replaces `messages` (same array format, only the parameter name changes)
 * - `reasoning: { effort: 'low' | 'medium' | 'high' }` replaces `reasoning_effort`
 * - Structured SSE events vs progressive deltas
 * - `response.usage` vs `final.usage`
 * - Tools/function calling: identical format, `parallel_tool_calls` supported
 *
 * Optimizations applied (vs the original code):
 * - OPTIM: centralized helpers (accumulateUsageTokens, stepsToActions)
 * - BUG FIX: executionResultMerge merges actions correctly (fixed in types.ts)
 * - BUG FIX: moreThinkin removed (obsolete)
 * - BUG FIX: reasoning_effort handled by modelConfig only
 *
 * TODO [Phase 2]: Migration to openai-agents-js
 * https://openai.github.io/openai-agents-js/
 * - Replace handleTransferCall with the SDK's native handoff
 * - Use swarm.run() for multi-agent orchestration
 * - Map downstreamAgents to router policies
 * - Optional integration with the Vercel AI SDK (multi-provider)
 * - stateGraph stays compatible (generic AgentMessage, API-agnostic)
 */
import { AgentConfig, AgenticContext, ExecuteAgentResult, ExecutionResult } from "../types";
import { ReadCompletionsStreamOptions, ExecuteAgentSetParams } from "./shared";
/**
 * RESPONSES API: Processes the responses stream, including tool calls.
 *
 * Uses the typed SSE events of the Responses API:
 * - response.output_text.delta → text
 * - response.function_call.* → tool calls
 * - response.completed → finalization
 */
export declare function readCompletionsStream(params: ReadCompletionsStreamOptions): Promise<ExecutionResult>;
/**
 * Executes a set of agents to process a user query
 *
 * RESPONSES API VERSION
 */
export declare function executeAgentSet(agentSet: AgentConfig[], context: AgenticContext, params: ExecuteAgentSetParams): Promise<ExecutionResult>;
/**
 * Execute agent - Responses API version
 */
export declare function executeAgent(agentSet: AgentConfig[], params: ExecuteAgentSetParams): Promise<ExecuteAgentResult>;
/**
 * Execute query - Responses API version
 */
export declare function executeQuery(params: ExecuteAgentSetParams): Promise<ExecuteAgentResult>;