@inkeep/agents-run-api 0.39.2 → 0.39.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. package/dist/SandboxExecutorFactory.cjs +895 -0
  2. package/dist/SandboxExecutorFactory.js +893 -0
  3. package/dist/SandboxExecutorFactory.js.map +1 -0
  4. package/dist/chunk-VBDAOXYI.cjs +927 -0
  5. package/dist/chunk-VBDAOXYI.js +832 -0
  6. package/dist/chunk-VBDAOXYI.js.map +1 -0
  7. package/dist/chunk.cjs +34 -0
  8. package/dist/conversations.cjs +7 -0
  9. package/dist/conversations.js +7 -0
  10. package/dist/conversations2.cjs +209 -0
  11. package/dist/conversations2.js +180 -0
  12. package/dist/conversations2.js.map +1 -0
  13. package/dist/dbClient.cjs +9676 -0
  14. package/dist/dbClient.js +9670 -0
  15. package/dist/dbClient.js.map +1 -0
  16. package/dist/dbClient2.cjs +5 -0
  17. package/dist/dbClient2.js +5 -0
  18. package/dist/env.cjs +59 -0
  19. package/dist/env.js +54 -0
  20. package/dist/env.js.map +1 -0
  21. package/dist/execution-limits.cjs +260 -0
  22. package/dist/execution-limits.js +63 -0
  23. package/dist/execution-limits.js.map +1 -0
  24. package/dist/index.cjs +10548 -20592
  25. package/dist/index.d.cts +26 -22
  26. package/dist/index.d.cts.map +1 -0
  27. package/dist/index.d.ts +27 -22
  28. package/dist/index.d.ts.map +1 -0
  29. package/dist/index.js +10548 -12917
  30. package/dist/index.js.map +1 -0
  31. package/dist/instrumentation.cjs +12 -121
  32. package/dist/instrumentation.d.cts +9 -7
  33. package/dist/instrumentation.d.cts.map +1 -0
  34. package/dist/instrumentation.d.ts +9 -7
  35. package/dist/instrumentation.d.ts.map +1 -0
  36. package/dist/instrumentation.js +5 -1
  37. package/dist/instrumentation2.cjs +116 -0
  38. package/dist/instrumentation2.js +69 -0
  39. package/dist/instrumentation2.js.map +1 -0
  40. package/dist/json-postprocessor.cjs +20 -0
  41. package/dist/json-postprocessor.js +20 -0
  42. package/dist/json-postprocessor.js.map +1 -0
  43. package/dist/logger.cjs +5 -0
  44. package/dist/logger.js +3 -0
  45. package/dist/logger2.cjs +1 -0
  46. package/dist/logger2.js +3 -0
  47. package/dist/nodefs.cjs +29 -0
  48. package/dist/nodefs.js +27 -0
  49. package/dist/nodefs.js.map +1 -0
  50. package/dist/opfs-ahp.cjs +367 -0
  51. package/dist/opfs-ahp.js +368 -0
  52. package/dist/opfs-ahp.js.map +1 -0
  53. package/package.json +3 -3
  54. package/dist/SandboxExecutorFactory-2N27SE3B.js +0 -943
  55. package/dist/chunk-A2S7GSHL.js +0 -1
  56. package/dist/chunk-EVOISBFH.js +0 -5070
  57. package/dist/chunk-JCVMVG3J.js +0 -592
  58. package/dist/chunk-KBZIYCPJ.js +0 -40
  59. package/dist/chunk-KCJWSIDZ.js +0 -246
  60. package/dist/chunk-THWNUGWP.js +0 -204
  61. package/dist/chunk-UC2EPLSW.js +0 -75
  62. package/dist/conversations-XPSTWUMK.js +0 -1
  63. package/dist/dbClient-MAHUR4TO.js +0 -1
  64. package/dist/json-postprocessor-IGYTSWFB.js +0 -12
  65. package/dist/logger-3EE6BUSU.js +0 -1
  66. package/dist/nodefs-RPE52Q4Z.js +0 -21
  67. package/dist/opfs-ahp-QL4REJJW.js +0 -318
package/dist/chunk-KCJWSIDZ.js
@@ -1,246 +0,0 @@
- import { dbClient_default } from './chunk-EVOISBFH.js';
- import { CONVERSATION_HISTORY_DEFAULT_LIMIT } from './chunk-THWNUGWP.js';
- import { CONVERSATION_HISTORY_MAX_OUTPUT_TOKENS_DEFAULT, createMessage, generateId, getConversationHistory } from '@inkeep/agents-core';
-
- function createDefaultConversationHistoryConfig(mode = "full") {
-   return {
-     mode,
-     limit: CONVERSATION_HISTORY_DEFAULT_LIMIT,
-     includeInternal: true,
-     messageTypes: ["chat", "tool-result"],
-     maxOutputTokens: CONVERSATION_HISTORY_MAX_OUTPUT_TOKENS_DEFAULT
-   };
- }
- function extractA2AMessageText(parts) {
-   return parts.filter((part) => part.kind === "text" && part.text).map((part) => part.text).join("");
- }
- async function saveA2AMessageResponse(response, params) {
-   if (response.error) {
-     throw new Error(response.error.message);
-   }
-   let messageText = "";
-   if (response.result.kind === "message") {
-     messageText = extractA2AMessageText(response.result.parts);
-   } else if (response.result.kind === "task") {
-     if (response.result.artifacts && response.result.artifacts.length > 0) {
-       const firstArtifact = response.result.artifacts[0];
-       if (firstArtifact.parts) {
-         messageText = extractA2AMessageText(firstArtifact.parts);
-       }
-     }
-   } else if (typeof response.result === "string") {
-     messageText = response.result;
-   }
-   if (!messageText || messageText.trim() === "") {
-     return null;
-   }
-   return await createMessage(dbClient_default)({
-     id: generateId(),
-     tenantId: params.tenantId,
-     projectId: params.projectId,
-     conversationId: params.conversationId,
-     role: "agent",
-     content: {
-       text: messageText
-     },
-     visibility: params.visibility,
-     messageType: params.messageType,
-     fromSubAgentId: params.fromSubAgentId,
-     toSubAgentId: params.toSubAgentId,
-     fromExternalAgentId: params.fromExternalAgentId,
-     toExternalAgentId: params.toExternalAgentId,
-     a2aTaskId: params.a2aTaskId,
-     a2aSessionId: params.a2aSessionId,
-     metadata: params.metadata
-   });
- }
- async function getScopedHistory({
-   tenantId,
-   projectId,
-   conversationId,
-   filters,
-   options
- }) {
-   try {
-     const messages = await getConversationHistory(dbClient_default)({
-       scopes: { tenantId, projectId },
-       conversationId,
-       options
-     });
-     if (!filters || !filters.subAgentId && !filters.taskId && !filters.delegationId && filters.isDelegated === void 0) {
-       return messages;
-     }
-     const relevantMessages = messages.filter((msg) => {
-       if (msg.role === "user") return true;
-       let matchesAgent = true;
-       let matchesTask = true;
-       let matchesDelegation = true;
-       if (filters.subAgentId) {
-         matchesAgent = msg.role === "agent" && msg.visibility === "user-facing" || msg.toSubAgentId === filters.subAgentId || msg.fromSubAgentId === filters.subAgentId;
-       }
-       if (filters.taskId) {
-         matchesTask = msg.taskId === filters.taskId || msg.a2aTaskId === filters.taskId;
-       }
-       if (filters.delegationId !== void 0 || filters.isDelegated !== void 0) {
-         if (msg.messageType === "tool-result") {
-           const messageDelegationId = msg.metadata?.a2a_metadata?.delegationId;
-           const messageIsDelegated = msg.metadata?.a2a_metadata?.isDelegated;
-           if (filters.delegationId) {
-             matchesDelegation = messageDelegationId === filters.delegationId || !messageDelegationId;
-           } else if (filters.isDelegated === false) {
-             matchesDelegation = !messageIsDelegated;
-           } else if (filters.isDelegated === true) {
-             matchesDelegation = messageIsDelegated === true;
-           }
-         }
-       }
-       const conditions = [];
-       if (filters.subAgentId) conditions.push(matchesAgent);
-       if (filters.taskId) conditions.push(matchesTask);
-       if (filters.delegationId !== void 0 || filters.isDelegated !== void 0)
-         conditions.push(matchesDelegation);
-       const finalResult = conditions.length === 0 || conditions.every(Boolean);
-       return finalResult;
-     });
-     return relevantMessages;
-   } catch (error) {
-     console.error("Failed to fetch scoped messages:", error);
-     return [];
-   }
- }
- async function getUserFacingHistory(tenantId, projectId, conversationId, limit = CONVERSATION_HISTORY_DEFAULT_LIMIT) {
-   return await getConversationHistory(dbClient_default)({
-     scopes: { tenantId, projectId },
-     conversationId,
-     options: {
-       limit,
-       includeInternal: false,
-       messageTypes: ["chat"]
-     }
-   });
- }
- async function getFullConversationContext(tenantId, projectId, conversationId, maxTokens) {
-   const defaultConfig = createDefaultConversationHistoryConfig();
-   return await getConversationHistory(dbClient_default)({
-     scopes: { tenantId, projectId },
-     conversationId,
-     options: {
-       ...defaultConfig,
-       limit: 100,
-       includeInternal: true,
-       maxOutputTokens: maxTokens
-     }
-   });
- }
- async function getFormattedConversationHistory({
-   tenantId,
-   projectId,
-   conversationId,
-   currentMessage,
-   options,
-   filters
- }) {
-   const historyOptions = options ?? createDefaultConversationHistoryConfig();
-   const conversationHistory = await getScopedHistory({
-     tenantId,
-     projectId,
-     conversationId,
-     filters,
-     options: historyOptions
-   });
-   let messagesToFormat = conversationHistory;
-   if (currentMessage && conversationHistory.length > 0) {
-     const lastMessage = conversationHistory[conversationHistory.length - 1];
-     if (lastMessage.content.text === currentMessage) {
-       messagesToFormat = conversationHistory.slice(0, -1);
-     }
-   }
-   if (!messagesToFormat.length) {
-     return "";
-   }
-   const formattedHistory = messagesToFormat.map((msg) => {
-     let roleLabel;
-     if (msg.role === "user") {
-       roleLabel = "user";
-     } else if (msg.role === "agent" && (msg.messageType === "a2a-request" || msg.messageType === "a2a-response")) {
-       const fromSubAgent = msg.fromSubAgentId || msg.fromExternalAgentId || "unknown";
-       const toSubAgent = msg.toSubAgentId || msg.toExternalAgentId || "unknown";
-       roleLabel = `${fromSubAgent} to ${toSubAgent}`;
-     } else if (msg.role === "agent" && msg.messageType === "chat") {
-       const fromSubAgent = msg.fromSubAgentId || "unknown";
-       roleLabel = `${fromSubAgent} to User`;
-     } else if (msg.role === "assistant" && msg.messageType === "tool-result") {
-       const fromSubAgent = msg.fromSubAgentId || "unknown";
-       const toolName = msg.metadata?.a2a_metadata?.toolName || "unknown";
-       roleLabel = `${fromSubAgent} tool: ${toolName}`;
-     } else {
-       roleLabel = msg.role || "system";
-     }
-     return `${roleLabel}: """${msg.content.text}"""`;
-   }).join("\n");
-   return `<conversation_history>
- ${formattedHistory}
- </conversation_history>
- `;
- }
- async function getConversationScopedArtifacts(params) {
-   const { tenantId, projectId, conversationId, historyConfig } = params;
-   if (!conversationId) {
-     return [];
-   }
-   try {
-     if (historyConfig.mode === "none") {
-       return [];
-     }
-     const visibleMessages = await getScopedHistory({
-       tenantId,
-       projectId,
-       conversationId,
-       options: historyConfig
-     });
-     if (visibleMessages.length === 0) {
-       return [];
-     }
-     const visibleMessageIds = visibleMessages.filter(
-       (msg) => !(msg.messageType === "system" && msg.content?.text?.includes("Previous conversation history truncated"))
-     ).map((msg) => msg.id);
-     if (visibleMessageIds.length === 0) {
-       return [];
-     }
-     const { getLedgerArtifacts } = await import('@inkeep/agents-core');
-     const dbClient = (await import('./dbClient-MAHUR4TO.js')).default;
-     const visibleTaskIds = visibleMessages.map((msg) => msg.taskId).filter((taskId) => Boolean(taskId));
-     const referenceArtifacts = [];
-     for (const taskId of visibleTaskIds) {
-       const artifacts = await getLedgerArtifacts(dbClient)({
-         scopes: { tenantId, projectId },
-         taskId
-       });
-       referenceArtifacts.push(...artifacts);
-     }
-     const logger = (await import('./logger-3EE6BUSU.js')).getLogger("conversations");
-     logger.debug(
-       {
-         conversationId,
-         visibleMessages: visibleMessages.length,
-         visibleTasks: visibleTaskIds.length,
-         artifacts: referenceArtifacts.length,
-         historyMode: historyConfig.mode
-       },
-       "Loaded conversation-scoped artifacts"
-     );
-     return referenceArtifacts;
-   } catch (error) {
-     const logger = (await import('./logger-3EE6BUSU.js')).getLogger("conversations");
-     logger.error(
-       {
-         error: error instanceof Error ? error.message : "Unknown error",
-         conversationId
-       },
-       "Failed to get conversation-scoped artifacts"
-     );
-     return [];
-   }
- }
-
- export { createDefaultConversationHistoryConfig, getConversationScopedArtifacts, getFormattedConversationHistory, getFullConversationContext, getScopedHistory, getUserFacingHistory, saveA2AMessageResponse };
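As a reading aid for the chunk above: getFormattedConversationHistory renders prior messages into a <conversation_history> block with role labels derived from the sender and message type. The sketch below is illustrative only — the import specifier and IDs are placeholders, not values taken from this diff — but the call shape and output format follow the code shown.

// Hypothetical usage sketch of the helper shown above (not code from the package).
import { getFormattedConversationHistory } from '@inkeep/agents-run-api'; // illustrative import path

const history = await getFormattedConversationHistory({
  tenantId: 'tenant-1',          // placeholder
  projectId: 'project-1',        // placeholder
  conversationId: 'conv-123',    // placeholder
  currentMessage: 'latest user message', // dropped from the output if it is already the last stored message
  // options falls back to createDefaultConversationHistoryConfig() when omitted
});
// Returns "" when nothing remains to format; otherwise a block shaped like:
// <conversation_history>
// user: """..."""
// my-sub-agent to User: """..."""
// </conversation_history>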
package/dist/chunk-THWNUGWP.js
@@ -1,204 +0,0 @@
- import { z } from '@hono/zod-openapi';
- import { loadEnvironmentFiles } from '@inkeep/agents-core';
-
- // src/constants/execution-limits/index.ts
-
- // src/constants/execution-limits/defaults.ts
- var executionLimitsDefaults = {
-   // Sub Agent Turn Execution
-   // During a Sub Agent's turn, it makes decisions by calling the LLM (language model). Each decision
-   // point is called a "generation step" - for example, deciding to call a tool, transfer to another
-   // Sub Agent, delegate a subtask, or send a response to the user.
-   // AGENT_EXECUTION_MAX_CONSECUTIVE_ERRORS: Maximum errors tolerated during a single Sub Agent's turn before stopping execution
-   // AGENT_EXECUTION_MAX_GENERATION_STEPS: Maximum LLM inference calls allowed within a single Sub Agent turn
-   AGENT_EXECUTION_MAX_CONSECUTIVE_ERRORS: 3,
-   AGENT_EXECUTION_MAX_GENERATION_STEPS: 5,
-   // Sub Agent Decision-Making Timeouts
-   // These control how long to wait for the LLM to make decisions during a Sub Agent's turn.
-   // "First call" = initial decision at start of turn (may include tool results from previous actions)
-   // "Subsequent call" = follow-up decisions after executing tools within the same turn
-   // Streaming mode has longer timeout because it waits for the full streamed response to the user
-   // LLM_GENERATION_FIRST_CALL_TIMEOUT_MS_STREAMING: Timeout for initial streaming response to user
-   // LLM_GENERATION_FIRST_CALL_TIMEOUT_MS_NON_STREAMING: Timeout for initial non-streaming (internal) decision
-   // LLM_GENERATION_SUBSEQUENT_CALL_TIMEOUT_MS: Timeout for follow-up decisions after tool execution
-   // LLM_GENERATION_MAX_ALLOWED_TIMEOUT_MS: Maximum timeout allowed regardless of configuration
-   LLM_GENERATION_FIRST_CALL_TIMEOUT_MS_STREAMING: 27e4,
-   // 4.5 minutes
-   LLM_GENERATION_FIRST_CALL_TIMEOUT_MS_NON_STREAMING: 9e4,
-   // 1.5 minutes
-   LLM_GENERATION_SUBSEQUENT_CALL_TIMEOUT_MS: 9e4,
-   // 1.5 minutes
-   LLM_GENERATION_MAX_ALLOWED_TIMEOUT_MS: 6e5,
-   // 10 minutes
-   // Function Tool Execution (Sandboxed Environments)
-   // Function Tools are custom JavaScript functions that Sub Agents can call. They run in secure
-   // isolated sandboxes (containerized environments) to prevent malicious code execution.
-   // For performance, sandboxes are cached and reused across multiple tool calls until they expire.
-   // FUNCTION_TOOL_EXECUTION_TIMEOUT_MS_DEFAULT: Maximum execution time for a Function Tool call
-   // FUNCTION_TOOL_SANDBOX_VCPUS_DEFAULT: Virtual CPUs allocated to each sandbox (affects compute capacity)
-   // FUNCTION_TOOL_SANDBOX_POOL_TTL_MS: Time-to-live for cached sandboxes (after this, sandbox is discarded)
-   // FUNCTION_TOOL_SANDBOX_MAX_USE_COUNT: Maximum reuses of a sandbox before it's refreshed (prevents resource leaks)
-   // FUNCTION_TOOL_SANDBOX_MAX_OUTPUT_SIZE_BYTES: Maximum size of Function Tool output (prevents memory exhaustion)
-   // FUNCTION_TOOL_SANDBOX_QUEUE_WAIT_TIMEOUT_MS: Maximum wait time for sandbox to become available when pool is full
-   // FUNCTION_TOOL_SANDBOX_CLEANUP_INTERVAL_MS: How often to check for and remove expired sandboxes from the pool
-   FUNCTION_TOOL_EXECUTION_TIMEOUT_MS_DEFAULT: 3e4,
-   // 30 seconds
-   FUNCTION_TOOL_SANDBOX_VCPUS_DEFAULT: 4,
-   FUNCTION_TOOL_SANDBOX_POOL_TTL_MS: 3e5,
-   // 5 minutes
-   FUNCTION_TOOL_SANDBOX_MAX_USE_COUNT: 50,
-   FUNCTION_TOOL_SANDBOX_MAX_OUTPUT_SIZE_BYTES: 1048576,
-   // 1 MB
-   FUNCTION_TOOL_SANDBOX_QUEUE_WAIT_TIMEOUT_MS: 3e4,
-   // 30 seconds
-   FUNCTION_TOOL_SANDBOX_CLEANUP_INTERVAL_MS: 6e4,
-   // 1 minute
-   // MCP Tool Execution
-   // MCP (Model Context Protocol) Servers are external services that provide tools to Sub Agents.
-   // When a Sub Agent calls an MCP Tool, the request is sent to the external MCP Server.
-   // Note: MCP connection/retry constants are defined in @inkeep/agents-core/constants/execution-limits-shared
-   // MCP_TOOL_REQUEST_TIMEOUT_MS_DEFAULT: Maximum wait time for an MCP tool call to complete
-   MCP_TOOL_REQUEST_TIMEOUT_MS_DEFAULT: 6e4,
-   // 60 seconds
-   // Sub Agent Delegation (Retry Strategy)
-   // When a Sub Agent delegates a subtask to another Sub Agent, it uses the A2A (Agent-to-Agent)
-   // protocol to communicate. If the delegation request fails, these constants control the
-   // exponential backoff retry strategy. Formula: delay = min(INITIAL * EXPONENT^attempt, MAX)
-   // DELEGATION_TOOL_BACKOFF_INITIAL_INTERVAL_MS: Starting delay before first retry
-   // DELEGATION_TOOL_BACKOFF_MAX_INTERVAL_MS: Maximum delay between retries (caps exponential growth)
-   // DELEGATION_TOOL_BACKOFF_EXPONENT: Multiplier applied to delay after each retry (2 = doubles each time)
-   // DELEGATION_TOOL_BACKOFF_MAX_ELAPSED_TIME_MS: Total time to keep retrying before giving up
-   DELEGATION_TOOL_BACKOFF_INITIAL_INTERVAL_MS: 100,
-   DELEGATION_TOOL_BACKOFF_MAX_INTERVAL_MS: 1e4,
-   // 10 seconds
-   DELEGATION_TOOL_BACKOFF_EXPONENT: 2,
-   DELEGATION_TOOL_BACKOFF_MAX_ELAPSED_TIME_MS: 2e4,
-   // 20 seconds
-   // General Agent-to-Agent (A2A) Communication (Retry Strategy)
-   // These control retries for broader A2A protocol operations beyond delegation (e.g., status checks,
-   // conversation updates). Uses more conservative retry parameters than delegation-specific retries.
-   // A2A_BACKOFF_INITIAL_INTERVAL_MS: Starting delay before first retry
-   // A2A_BACKOFF_MAX_INTERVAL_MS: Maximum delay between retries
-   // A2A_BACKOFF_EXPONENT: Multiplier for exponential backoff (1.5 = grows 50% each retry)
-   // A2A_BACKOFF_MAX_ELAPSED_TIME_MS: Total time to keep retrying before giving up
-   A2A_BACKOFF_INITIAL_INTERVAL_MS: 500,
-   A2A_BACKOFF_MAX_INTERVAL_MS: 6e4,
-   // 1 minute
-   A2A_BACKOFF_EXPONENT: 1.5,
-   A2A_BACKOFF_MAX_ELAPSED_TIME_MS: 3e4,
-   // 30 seconds
-   // Artifact Processing
-   // Artifacts are tool outputs saved for later reference by Sub Agents or users. When a tool generates
-   // an artifact, the system automatically generates a human-readable name and description using the LLM.
-   // These constants control artifact name/description generation and context window management.
-   // ARTIFACT_GENERATION_MAX_RETRIES: Retry attempts for LLM-based artifact name/description generation
-   // ARTIFACT_SESSION_MAX_PENDING: Maximum unprocessed artifacts in queue (prevents unbounded growth)
-   // ARTIFACT_SESSION_MAX_PREVIOUS_SUMMARIES: Historical artifact summaries kept in context for reference
-   // ARTIFACT_GENERATION_BACKOFF_INITIAL_MS: Starting delay for retry backoff when generation fails
-   // ARTIFACT_GENERATION_BACKOFF_MAX_MS: Maximum delay between retries (formula: min(INITIAL * 2^attempt, MAX))
-   ARTIFACT_GENERATION_MAX_RETRIES: 3,
-   ARTIFACT_SESSION_MAX_PENDING: 100,
-   ARTIFACT_SESSION_MAX_PREVIOUS_SUMMARIES: 3,
-   ARTIFACT_GENERATION_BACKOFF_INITIAL_MS: 1e3,
-   // 1 second
-   ARTIFACT_GENERATION_BACKOFF_MAX_MS: 1e4,
-   // 10 seconds
-   // Conversation Session & Cache Management
-   // A "session" represents the state of an ongoing conversation with an Agent. Tool results are cached
-   // within the session for performance - this is especially important for artifact processing where the
-   // same tool outputs may be referenced multiple times across Sub Agent turns.
-   // SESSION_TOOL_RESULT_CACHE_TIMEOUT_MS: How long tool results are kept in cache before expiring
-   // SESSION_CLEANUP_INTERVAL_MS: How often to check for and remove expired cached tool results
-   SESSION_TOOL_RESULT_CACHE_TIMEOUT_MS: 3e5,
-   // 5 minutes
-   SESSION_CLEANUP_INTERVAL_MS: 6e4,
-   // 1 minute
-   // Status Updates
-   // Status Updates are real-time progress messages sent to users during longer Sub Agent operations.
-   // The system automatically generates status updates based on activity thresholds - either after a
-   // certain number of significant events OR after a time interval (whichever comes first).
-   // Events include: tool calls, Sub Agent transfers, delegations, or other significant activities.
-   // STATUS_UPDATE_DEFAULT_NUM_EVENTS: Number of significant events before triggering a status update
-   // STATUS_UPDATE_DEFAULT_INTERVAL_SECONDS: Time interval (in seconds) before generating status update
-   STATUS_UPDATE_DEFAULT_NUM_EVENTS: 1,
-   STATUS_UPDATE_DEFAULT_INTERVAL_SECONDS: 2,
-   // Response Streaming (Internal Buffering Limits)
-   // These are internal infrastructure limits for streaming responses to users. Streaming enables
-   // real-time updates as Sub Agents generate responses, Data Components, and Status Updates.
-   // STREAM_PARSER_MAX_SNAPSHOT_SIZE: Maximum Data Component snapshots buffered before clearing old ones
-   // STREAM_PARSER_MAX_STREAMED_SIZE: Maximum streamed component IDs tracked simultaneously
-   // STREAM_PARSER_MAX_COLLECTED_PARTS: Maximum accumulated stream parts before forcing flush
-   // STREAM_BUFFER_MAX_SIZE_BYTES: Maximum total buffer size in bytes (prevents memory exhaustion)
-   // STREAM_TEXT_GAP_THRESHOLD_MS: Time gap that triggers bundling text with artifact data vs separate send
-   // STREAM_MAX_LIFETIME_MS: Maximum duration a stream can stay open before forced closure
-   STREAM_PARSER_MAX_SNAPSHOT_SIZE: 100,
-   STREAM_PARSER_MAX_STREAMED_SIZE: 1e3,
-   STREAM_PARSER_MAX_COLLECTED_PARTS: 1e4,
-   STREAM_BUFFER_MAX_SIZE_BYTES: 5242880,
-   // 5 MB
-   STREAM_TEXT_GAP_THRESHOLD_MS: 2e3,
-   // 2 seconds
-   STREAM_MAX_LIFETIME_MS: 6e5,
-   // 10 minutes
-   // Conversation History Message Retrieval
-   // CONVERSATION_HISTORY_DEFAULT_LIMIT: Default number of recent conversation messages to retrieve
-   CONVERSATION_HISTORY_DEFAULT_LIMIT: 50
- };
-
- // src/constants/execution-limits/index.ts
- loadEnvironmentFiles();
- var constantsSchema = z.object(
-   Object.fromEntries(
-     Object.keys(executionLimitsDefaults).map((key) => [
-       `AGENTS_${key}`,
-       z.coerce.number().optional()
-     ])
-   )
- );
- var parseConstants = () => {
-   const envOverrides = constantsSchema.parse(process.env);
-   return Object.fromEntries(
-     Object.entries(executionLimitsDefaults).map(([key, defaultValue]) => [
-       key,
-       envOverrides[`AGENTS_${key}`] ?? defaultValue
-     ])
-   );
- };
- var constants = parseConstants();
- var {
-   AGENT_EXECUTION_MAX_CONSECUTIVE_ERRORS,
-   AGENT_EXECUTION_MAX_GENERATION_STEPS,
-   LLM_GENERATION_FIRST_CALL_TIMEOUT_MS_STREAMING,
-   LLM_GENERATION_FIRST_CALL_TIMEOUT_MS_NON_STREAMING,
-   LLM_GENERATION_SUBSEQUENT_CALL_TIMEOUT_MS,
-   LLM_GENERATION_MAX_ALLOWED_TIMEOUT_MS,
-   FUNCTION_TOOL_EXECUTION_TIMEOUT_MS_DEFAULT,
-   FUNCTION_TOOL_SANDBOX_VCPUS_DEFAULT,
-   FUNCTION_TOOL_SANDBOX_POOL_TTL_MS,
-   FUNCTION_TOOL_SANDBOX_MAX_USE_COUNT,
-   FUNCTION_TOOL_SANDBOX_MAX_OUTPUT_SIZE_BYTES,
-   FUNCTION_TOOL_SANDBOX_QUEUE_WAIT_TIMEOUT_MS,
-   FUNCTION_TOOL_SANDBOX_CLEANUP_INTERVAL_MS,
-   DELEGATION_TOOL_BACKOFF_INITIAL_INTERVAL_MS,
-   DELEGATION_TOOL_BACKOFF_MAX_INTERVAL_MS,
-   DELEGATION_TOOL_BACKOFF_EXPONENT,
-   DELEGATION_TOOL_BACKOFF_MAX_ELAPSED_TIME_MS,
-   ARTIFACT_GENERATION_MAX_RETRIES,
-   ARTIFACT_SESSION_MAX_PENDING,
-   ARTIFACT_SESSION_MAX_PREVIOUS_SUMMARIES,
-   ARTIFACT_GENERATION_BACKOFF_INITIAL_MS,
-   ARTIFACT_GENERATION_BACKOFF_MAX_MS,
-   SESSION_TOOL_RESULT_CACHE_TIMEOUT_MS,
-   SESSION_CLEANUP_INTERVAL_MS,
-   STATUS_UPDATE_DEFAULT_NUM_EVENTS,
-   STATUS_UPDATE_DEFAULT_INTERVAL_SECONDS,
-   STREAM_PARSER_MAX_SNAPSHOT_SIZE,
-   STREAM_PARSER_MAX_STREAMED_SIZE,
-   STREAM_PARSER_MAX_COLLECTED_PARTS,
-   STREAM_BUFFER_MAX_SIZE_BYTES,
-   STREAM_TEXT_GAP_THRESHOLD_MS,
-   STREAM_MAX_LIFETIME_MS,
-   CONVERSATION_HISTORY_DEFAULT_LIMIT
- } = constants;
-
- export { AGENT_EXECUTION_MAX_CONSECUTIVE_ERRORS, AGENT_EXECUTION_MAX_GENERATION_STEPS, ARTIFACT_GENERATION_BACKOFF_INITIAL_MS, ARTIFACT_GENERATION_BACKOFF_MAX_MS, ARTIFACT_GENERATION_MAX_RETRIES, ARTIFACT_SESSION_MAX_PENDING, ARTIFACT_SESSION_MAX_PREVIOUS_SUMMARIES, CONVERSATION_HISTORY_DEFAULT_LIMIT, DELEGATION_TOOL_BACKOFF_EXPONENT, DELEGATION_TOOL_BACKOFF_INITIAL_INTERVAL_MS, DELEGATION_TOOL_BACKOFF_MAX_ELAPSED_TIME_MS, DELEGATION_TOOL_BACKOFF_MAX_INTERVAL_MS, FUNCTION_TOOL_EXECUTION_TIMEOUT_MS_DEFAULT, FUNCTION_TOOL_SANDBOX_CLEANUP_INTERVAL_MS, FUNCTION_TOOL_SANDBOX_MAX_OUTPUT_SIZE_BYTES, FUNCTION_TOOL_SANDBOX_MAX_USE_COUNT, FUNCTION_TOOL_SANDBOX_POOL_TTL_MS, FUNCTION_TOOL_SANDBOX_QUEUE_WAIT_TIMEOUT_MS, FUNCTION_TOOL_SANDBOX_VCPUS_DEFAULT, LLM_GENERATION_FIRST_CALL_TIMEOUT_MS_NON_STREAMING, LLM_GENERATION_FIRST_CALL_TIMEOUT_MS_STREAMING, LLM_GENERATION_MAX_ALLOWED_TIMEOUT_MS, LLM_GENERATION_SUBSEQUENT_CALL_TIMEOUT_MS, SESSION_CLEANUP_INTERVAL_MS, SESSION_TOOL_RESULT_CACHE_TIMEOUT_MS, STATUS_UPDATE_DEFAULT_INTERVAL_SECONDS, STATUS_UPDATE_DEFAULT_NUM_EVENTS, STREAM_BUFFER_MAX_SIZE_BYTES, STREAM_MAX_LIFETIME_MS, STREAM_PARSER_MAX_COLLECTED_PARTS, STREAM_PARSER_MAX_SNAPSHOT_SIZE, STREAM_PARSER_MAX_STREAMED_SIZE, STREAM_TEXT_GAP_THRESHOLD_MS };
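Two details in the removed constants file above are worth spelling out: each default can be overridden at runtime through an AGENTS_-prefixed environment variable (coerced to a number by the zod schema), and the delegation retry delay follows the documented formula delay = min(INITIAL * EXPONENT^attempt, MAX). A minimal sketch, using placeholder values rather than anything mandated by the package:

// Override one default via the AGENTS_ prefix read by parseConstants(); 120000 is an arbitrary example value.
process.env.AGENTS_MCP_TOOL_REQUEST_TIMEOUT_MS_DEFAULT = '120000'; // 2 minutes instead of the 60-second default

// Worked example of the delegation backoff formula with the defaults shown above
// (100 ms initial interval, exponent 2, 10-second cap):
const delegationDelayMs = (attempt) => Math.min(100 * 2 ** attempt, 1e4);
// attempt 0 -> 100 ms, 1 -> 200 ms, 2 -> 400 ms, ... capped at 10 000 ms;
// retries stop once roughly 20 s (DELEGATION_TOOL_BACKOFF_MAX_ELAPSED_TIME_MS) has elapsed.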
package/dist/chunk-UC2EPLSW.js
@@ -1,75 +0,0 @@
- import { env } from './chunk-KBZIYCPJ.js';
- import { getLogger } from './chunk-A2S7GSHL.js';
- import { getNodeAutoInstrumentations } from '@opentelemetry/auto-instrumentations-node';
- import { BaggageSpanProcessor, ALLOW_ALL_BAGGAGE_KEYS } from '@opentelemetry/baggage-span-processor';
- import { AsyncLocalStorageContextManager } from '@opentelemetry/context-async-hooks';
- import { CompositePropagator, W3CTraceContextPropagator, W3CBaggagePropagator } from '@opentelemetry/core';
- import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-http';
- import { resourceFromAttributes } from '@opentelemetry/resources';
- import { NodeSDK } from '@opentelemetry/sdk-node';
- import { BatchSpanProcessor, NoopSpanProcessor } from '@opentelemetry/sdk-trace-base';
- import { ATTR_SERVICE_NAME } from '@opentelemetry/semantic-conventions';
-
- var otlpExporter = new OTLPTraceExporter();
- var logger = getLogger("instrumentation");
- function createSafeBatchProcessor() {
-   try {
-     return new BatchSpanProcessor(otlpExporter, {
-       scheduledDelayMillis: env.OTEL_BSP_SCHEDULE_DELAY,
-       maxExportBatchSize: env.OTEL_BSP_MAX_EXPORT_BATCH_SIZE
-     });
-   } catch (error) {
-     logger.warn({ error }, "Failed to create batch processor");
-     return new NoopSpanProcessor();
-   }
- }
- var defaultBatchProcessor = createSafeBatchProcessor();
- var defaultResource = resourceFromAttributes({
-   [ATTR_SERVICE_NAME]: "inkeep-agents-run-api"
- });
- var defaultInstrumentations = [
-   getNodeAutoInstrumentations({
-     "@opentelemetry/instrumentation-http": {
-       enabled: true,
-       requestHook: (span, request) => {
-         const url = request?.url ?? request?.path;
-         if (!url) return;
-         const u = new URL(url, "http://localhost");
-         span.updateName(`${request?.method || "UNKNOWN"} ${u.pathname}`);
-       }
-     },
-     "@opentelemetry/instrumentation-undici": {
-       requestHook: (span) => {
-         const method = span.attributes?.["http.request.method"];
-         const host = span.attributes?.["server.address"];
-         const path = span.attributes?.["url.path"];
-         if (method && path)
-           span.updateName(host ? `${method} ${host}${path}` : `${method} ${path}`);
-       }
-     }
-   })
- ];
- var defaultSpanProcessors = [
-   new BaggageSpanProcessor(ALLOW_ALL_BAGGAGE_KEYS),
-   defaultBatchProcessor
- ];
- var defaultContextManager = new AsyncLocalStorageContextManager();
- var defaultTextMapPropagator = new CompositePropagator({
-   propagators: [new W3CTraceContextPropagator(), new W3CBaggagePropagator()]
- });
- var defaultSDK = new NodeSDK({
-   resource: defaultResource,
-   contextManager: defaultContextManager,
-   textMapPropagator: defaultTextMapPropagator,
-   spanProcessors: defaultSpanProcessors,
-   instrumentations: defaultInstrumentations
- });
- async function flushBatchProcessor() {
-   try {
-     await defaultBatchProcessor.forceFlush();
-   } catch (error) {
-     logger.warn({ error }, "Failed to flush batch processor");
-   }
- }
-
- export { defaultBatchProcessor, defaultContextManager, defaultInstrumentations, defaultResource, defaultSDK, defaultSpanProcessors, defaultTextMapPropagator, flushBatchProcessor };
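The chunk above only constructs defaultSDK; it does not start it. As a hedged sketch of how these exports are typically wired into an entrypoint with the standard NodeSDK lifecycle (the import path below is illustrative, not a documented entry point of this package):

// Sketch only: defaultSDK is a NodeSDK instance, so the usual start/shutdown calls apply.
import { defaultSDK, flushBatchProcessor } from './instrumentation.js'; // illustrative path

defaultSDK.start(); // registers the instrumentations, span processors, and propagators built above

process.on('SIGTERM', async () => {
  await flushBatchProcessor(); // force-flush buffered spans; logs a warning on failure
  await defaultSDK.shutdown(); // standard NodeSDK shutdown
  process.exit(0);
});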
package/dist/conversations-XPSTWUMK.js
@@ -1 +0,0 @@
- export { createDefaultConversationHistoryConfig, getConversationScopedArtifacts, getFormattedConversationHistory, getFullConversationContext, getScopedHistory, getUserFacingHistory, saveA2AMessageResponse } from './chunk-KCJWSIDZ.js';
package/dist/dbClient-MAHUR4TO.js
@@ -1 +0,0 @@
- export { dbClient_default as default } from './chunk-EVOISBFH.js';
package/dist/json-postprocessor-IGYTSWFB.js
@@ -1,12 +0,0 @@
- // src/utils/json-postprocessor.ts
- function stripJsonCodeBlocks(text) {
-   return text.trim().replace(/^```json\s*/is, "").replace(/^```\s*/s, "").replace(/\s*```$/s, "").replace(/^```json\s*([\s\S]*?)\s*```$/i, "$1").replace(/^```\s*([\s\S]*?)\s*```$/i, "$1").trim();
- }
- function withJsonPostProcessing(config) {
-   return {
-     ...config,
-     experimental_transform: (text) => stripJsonCodeBlocks(text)
-   };
- }
-
- export { stripJsonCodeBlocks, withJsonPostProcessing };
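stripJsonCodeBlocks above is a plain string transform, so its effect is easy to illustrate; the inputs below are made-up examples, not fixtures from the package:

// Sketch of what the regex chain in stripJsonCodeBlocks does to fenced LLM output.
stripJsonCodeBlocks('```json\n{"ok": true}\n```'); // -> '{"ok": true}'
stripJsonCodeBlocks('```\n[1, 2, 3]\n```');        // -> '[1, 2, 3]'
stripJsonCodeBlocks('{"already": "plain"}');       // -> unchanged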
package/dist/logger-3EE6BUSU.js
@@ -1 +0,0 @@
- export { getLogger } from './chunk-A2S7GSHL.js';
package/dist/nodefs-RPE52Q4Z.js
@@ -1,21 +0,0 @@
- import { u, ur, C } from './chunk-JCVMVG3J.js';
- import * as s from 'fs';
- import * as o from 'path';
-
- u();
- var m = class extends ur {
-   constructor(t) {
-     super(t), this.rootDir = o.resolve(t), s.existsSync(o.join(this.rootDir)) || s.mkdirSync(this.rootDir);
-   }
-   async init(t, e) {
-     return this.pg = t, { emscriptenOpts: { ...e, preRun: [...e.preRun || [], (r) => {
-       let c = r.FS.filesystems.NODEFS;
-       r.FS.mkdir(C), r.FS.mount(c, { root: this.rootDir }, C);
-     }] } };
-   }
-   async closeFs() {
-     this.pg.Module.FS.quit();
-   }
- };
-
- export { m as NodeFS };