deepagentsdk 0.13.0 → 0.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. package/dist/adapters/elements/index.cjs +322 -82
  2. package/dist/adapters/elements/index.cjs.map +1 -1
  3. package/dist/adapters/elements/index.d.cts +28 -3
  4. package/dist/adapters/elements/index.d.mts +28 -3
  5. package/dist/adapters/elements/index.mjs +321 -82
  6. package/dist/adapters/elements/index.mjs.map +1 -1
  7. package/dist/{agent-BDM-PIu8.d.mts → agent-DHUp_-Fx.d.mts} +114 -4
  8. package/dist/{agent-DToEVxs-.d.cts → agent-tfRthBvX.d.cts} +114 -4
  9. package/dist/{chunk-C5azi7Hr.cjs → chunk-DUZBydyJ.cjs} +7 -0
  10. package/dist/cli/index.cjs +2 -2
  11. package/dist/cli/index.mjs +1 -1
  12. package/dist/{file-saver-Hj5so3dV.mjs → file-saver-CQWTIr8z.mjs} +87 -4
  13. package/dist/file-saver-CQWTIr8z.mjs.map +1 -0
  14. package/dist/{file-saver-BYPKakT4.cjs → file-saver-ZDVH1zHI.cjs} +84 -4
  15. package/dist/file-saver-ZDVH1zHI.cjs.map +1 -0
  16. package/dist/index.cjs +27508 -5
  17. package/dist/index.cjs.map +1 -1
  18. package/dist/index.d.cts +299 -2
  19. package/dist/index.d.mts +301 -4
  20. package/dist/index.mjs +27499 -4
  21. package/dist/index.mjs.map +1 -1
  22. package/dist/{load-BrRAKlO6.cjs → load-BnaAQyCo.cjs} +4 -4
  23. package/dist/{load-BrRAKlO6.cjs.map → load-BnaAQyCo.cjs.map} +1 -1
  24. package/dist/load-CLVcFzo7.cjs +4 -0
  25. package/dist/{load-BDxe6Cet.mjs → load-DRzSpESX.mjs} +1 -1
  26. package/dist/{load-BBYEnMwz.mjs → load-FjxJSusX.mjs} +2 -2
  27. package/dist/{load-BBYEnMwz.mjs.map → load-FjxJSusX.mjs.map} +1 -1
  28. package/package.json +5 -1
  29. package/dist/file-saver-BYPKakT4.cjs.map +0 -1
  30. package/dist/file-saver-Hj5so3dV.mjs.map +0 -1
  31. package/dist/load-DqllBbDc.cjs +0 -4
package/dist/file-saver-CQWTIr8z.mjs.map
@@ -0,0 +1 @@
+ {"version":3,"file":"file-saver-CQWTIr8z.mjs","names":["DEFAULT_EVICTION_TOKEN_LIMIT","DEFAULT_SUMMARIZATION_THRESHOLD","DEFAULT_KEEP_MESSAGES","glob","glob","CENTRALIZED_EVICTION_LIMIT","getBackend","glob","tool","CENTRALIZED_THRESHOLD","CENTRALIZED_KEEP","glob"],"sources":["../src/constants/limits.ts","../src/utils/events.ts","../src/types/backend.ts","../src/prompts.ts","../src/tools/todos.ts","../src/constants/errors.ts","../src/backends/utils.ts","../src/backends/state.ts","../src/utils/eviction.ts","../src/tools/filesystem.ts","../src/utils/approval.ts","../src/tools/web.ts","../src/tools/execute.ts","../src/tools/subagent.ts","../src/utils/patch-tool-calls.ts","../src/utils/summarization.ts","../src/agent.ts","../src/backends/sandbox.ts","../src/backends/local-sandbox.ts","../src/utils/model-parser.ts","../src/checkpointer/file-saver.ts"],"sourcesContent":["/**\n * Centralized token, size, and timeout limits.\n *\n * These constants prevent magic number scattering across the codebase and provide\n * a single source of truth for configuration values. When updating these values,\n * consider the impact on performance, user experience, and API limits.\n *\n * @module constants/limits\n */\n\n// ============================================================================\n// Token Limits\n// ============================================================================\n\n/**\n * Default token limit for tool result eviction.\n *\n * When a tool result exceeds this limit, it is automatically evicted to a file\n * to prevent context overflow. The evicted content is stored in the backend and\n * a summary is kept in the conversation history.\n *\n * @default 20000\n * @see {@link ../utils/eviction | evictToolResult}\n */\nexport const DEFAULT_EVICTION_TOKEN_LIMIT = 20000;\n\n/**\n * Default threshold for message summarization.\n *\n * When the estimated token count of messages exceeds this threshold, the system\n * automatically summarizes older messages to stay within context limits. This\n * helps maintain conversation continuity while reducing token usage.\n *\n * @default 170000\n * @see {@link ../utils/summarization | summarizeIfNeeded}\n */\nexport const DEFAULT_SUMMARIZATION_THRESHOLD = 170000;\n\n/**\n * Maximum context window size for Claude models.\n *\n * This represents the maximum number of tokens that can be processed in a single\n * conversation. Used for calculating token usage percentages and determining when\n * summarization is needed.\n *\n * @default 200000\n * @see {@link ../utils/summarization | estimateMessagesTokens}\n */\nexport const CONTEXT_WINDOW = 200000;\n\n// ============================================================================\n// Message Limits\n// ============================================================================\n\n/**\n * Default number of recent messages to keep during summarization.\n *\n * When summarization is triggered, this many of the most recent messages are\n * preserved verbatim while older messages are summarized. This ensures recent\n * context is immediately available to the agent.\n *\n * @default 6\n */\nexport const DEFAULT_KEEP_MESSAGES = 6;\n\n/**\n * Default maximum number of reasoning steps for the main agent.\n *\n * The agent will stop after reaching this many steps to prevent infinite loops\n * or excessive token usage. 
Each step represents one tool invocation cycle.\n *\n * @default 100\n */\nexport const DEFAULT_MAX_STEPS = 100;\n\n/**\n * Default maximum number of reasoning steps for subagents.\n *\n * Subagents are given a lower step limit than the main agent to prevent them\n * from consuming too many resources. This ensures the parent agent maintains\n * control over the overall task.\n *\n * @default 50\n * @see {@link ../tools/subagent | createTaskTool}\n */\nexport const DEFAULT_SUBAGENT_MAX_STEPS = 50;\n\n/**\n * Default maximum number of messages to keep in CLI history.\n *\n * The CLI maintains a history of conversation messages for display purposes.\n * This limit prevents memory issues in long-running sessions.\n *\n * @default 100\n */\nexport const DEFAULT_MAX_HISTORY = 100;\n\n// ============================================================================\n// File Size Limits\n// ============================================================================\n\n/**\n * Default maximum number of lines to read from a file.\n *\n * The read_file tool defaults to reading this many lines to prevent loading\n * extremely large files into context. Can be overridden per-read operation.\n *\n * @default 2000\n * @see {@link ../tools/filesystem | createReadFileTool}\n */\nexport const DEFAULT_READ_LIMIT = 2000;\n\n/**\n * Maximum line length before content is considered invalid.\n *\n * Lines exceeding this length may indicate minified code, binary content, or\n * other data that should not be processed as text. Used for validation.\n *\n * @default 10000\n */\nexport const MAX_LINE_LENGTH = 10000;\n\n/**\n * Maximum file size in megabytes for file operations.\n *\n * Files larger than this size will be rejected to prevent memory issues and\n * excessive token usage. This is a soft limit that can be adjusted for specific\n * use cases.\n *\n * @default 10\n */\nexport const MAX_FILE_SIZE_MB = 10;\n\n/**\n * Maximum output size in bytes before truncation.\n *\n * Tool results larger than this size will be truncated or evicted to prevent\n * context overflow. This helps maintain stable performance even with large\n * outputs.\n *\n * @default 1048576 (1 MB)\n */\nexport const MAX_OUTPUT_SIZE_BYTES = 1048576; // 1MB\n\n// ============================================================================\n// Timeouts\n// ============================================================================\n\n/**\n * Default timeout for network requests in seconds.\n *\n * Used by web tools (http_request, fetch_url) to prevent hanging indefinitely\n * on slow or unresponsive servers. 
Can be overridden per-request.\n *\n * @default 30\n * @see {@link ../tools/web | createHttpRequestTool}\n */\nexport const DEFAULT_TIMEOUT_SECONDS = 30;\n\n/**\n * Default timeout in milliseconds (derived from DEFAULT_TIMEOUT_SECONDS).\n *\n * Provided for convenience when working with APIs that expect milliseconds\n * instead of seconds.\n *\n * @default 30000 (30 seconds)\n */\nexport const DEFAULT_TIMEOUT_MS = DEFAULT_TIMEOUT_SECONDS * 1000;\n\n/**\n * Timeout for filesystem operations in milliseconds.\n *\n * Used by sandboxed filesystem operations to prevent blocking indefinitely on\n * slow I/O operations.\n *\n * @default 30000 (30 seconds)\n * @see {@link ../backends/sandbox | SandboxBackend}\n */\nexport const FILESYSTEM_TIMEOUT_MS = 30000;\n\n// ============================================================================\n// Formatting\n// ============================================================================\n\n/**\n * Width for line number formatting in file read operations.\n *\n * When displaying file content with line numbers, this specifies the minimum\n * width for the line number column. Ensures consistent alignment across\n * different file sizes.\n *\n * @default 6\n * @see {@link ../backends/utils | formatFileContent}\n */\nexport const LINE_NUMBER_WIDTH = 6;\n","/**\n * Type-safe event creation helpers for Deep Agent.\n *\n * These factory functions provide type-safe ways to create DeepAgentEvent objects,\n * reducing duplication and ensuring event objects are correctly structured.\n *\n * @example\n * ```typescript\n * import { createFileReadEvent, createToolCallEvent } from './utils/events';\n *\n * // Create events with type inference\n * const fileEvent = createFileReadEvent('/path/to/file.ts', 100);\n * const toolEvent = createToolCallEvent('read_file', { path: '/file.txt' }, 'call-123');\n * ```\n */\n\nimport type {\n TextEvent,\n StepStartEvent,\n ToolCallEvent,\n ToolResultEvent,\n TodosChangedEvent,\n FileWriteStartEvent,\n FileWrittenEvent,\n FileEditedEvent,\n FileReadEvent,\n LsEvent,\n GlobEvent,\n GrepEvent,\n ExecuteStartEvent,\n ExecuteFinishEvent,\n WebSearchStartEvent,\n WebSearchFinishEvent,\n HttpRequestStartEvent,\n HttpRequestFinishEvent,\n FetchUrlStartEvent,\n FetchUrlFinishEvent,\n SubagentStartEvent,\n SubagentFinishEvent,\n SubagentStepEvent,\n TextSegmentEvent,\n UserMessageEvent,\n DoneEvent,\n ErrorEvent,\n ApprovalRequestedEvent,\n ApprovalResponseEvent,\n CheckpointSavedEvent,\n CheckpointLoadedEvent,\n DeepAgentEvent,\n DeepAgentState,\n} from \"../types\";\n\n// ============================================================================\n// Basic Event Factories\n// ============================================================================\n\n/**\n * Create a text streaming event.\n */\nexport function createTextEvent(text: string): TextEvent {\n return { type: \"text\", text };\n}\n\n/**\n * Create a step-start event.\n */\nexport function createStepStartEvent(stepNumber: number): StepStartEvent {\n return { type: \"step-start\", stepNumber };\n}\n\n/**\n * Create a tool-call event.\n */\nexport function createToolCallEvent(\n toolName: string,\n args: unknown,\n toolCallId: string\n): ToolCallEvent {\n return { type: \"tool-call\", toolName, toolCallId, args };\n}\n\n/**\n * Create a tool-result event.\n */\nexport function createToolResultEvent(\n toolName: string,\n toolCallId: string,\n result: unknown\n): ToolResultEvent {\n return { type: \"tool-result\", toolName, toolCallId, result };\n}\n\n/**\n * Create a 
text-segment event (for CLI display).\n */\nexport function createTextSegmentEvent(text: string): TextSegmentEvent {\n return { type: \"text-segment\", text };\n}\n\n/**\n * Create a user-message event (for CLI history).\n */\nexport function createUserMessageEvent(content: string): UserMessageEvent {\n return { type: \"user-message\", content };\n}\n\n/**\n * Create a done event.\n */\nexport function createDoneEvent(\n state: DeepAgentState,\n options?: { text?: string; messages?: DoneEvent[\"messages\"]; output?: DoneEvent[\"output\"] }\n): DoneEvent {\n const event: DoneEvent = { type: \"done\", state, ...options };\n return event;\n}\n\n/**\n * Create an error event.\n */\nexport function createErrorEvent(error: Error): ErrorEvent {\n return { type: \"error\", error };\n}\n\n// ============================================================================\n// Todo Event Factories\n// ============================================================================\n\n/**\n * Create a todos-changed event.\n */\nexport function createTodosChangedEvent(todos: TodosChangedEvent[\"todos\"]): TodosChangedEvent {\n return { type: \"todos-changed\", todos };\n}\n\n// ============================================================================\n// File Event Factories\n// ============================================================================\n\n/**\n * Create a file-write-start event (preview before write).\n */\nexport function createFileWriteStartEvent(\n path: string,\n content: string\n): FileWriteStartEvent {\n return { type: \"file-write-start\", path, content };\n}\n\n/**\n * Create a file-written event (after successful write).\n */\nexport function createFileWrittenEvent(\n path: string,\n content: string\n): FileWrittenEvent {\n return { type: \"file-written\", path, content };\n}\n\n/**\n * Create a file-edited event.\n */\nexport function createFileEditedEvent(\n path: string,\n occurrences: number\n): FileEditedEvent {\n return { type: \"file-edited\", path, occurrences };\n}\n\n/**\n * Create a file-read event.\n */\nexport function createFileReadEvent(path: string, lines: number): FileReadEvent {\n return { type: \"file-read\", path, lines };\n}\n\n// ============================================================================\n// Filesystem Operation Event Factories\n// ============================================================================\n\n/**\n * Create an ls (list) event.\n */\nexport function createLsEvent(path: string, count: number): LsEvent {\n return { type: \"ls\", path, count };\n}\n\n/**\n * Create a glob (pattern search) event.\n */\nexport function createGlobEvent(pattern: string, count: number): GlobEvent {\n return { type: \"glob\", pattern, count };\n}\n\n/**\n * Create a grep (content search) event.\n */\nexport function createGrepEvent(pattern: string, count: number): GrepEvent {\n return { type: \"grep\", pattern, count };\n}\n\n// ============================================================================\n// Execute Event Factories\n// ============================================================================\n\n/**\n * Create an execute-start event.\n */\nexport function createExecuteStartEvent(\n command: string,\n sandboxId: string\n): ExecuteStartEvent {\n return { type: \"execute-start\", command, sandboxId };\n}\n\n/**\n * Create an execute-finish event.\n */\nexport function createExecuteFinishEvent(\n command: string,\n sandboxId: string,\n exitCode: number | null,\n truncated: boolean\n): ExecuteFinishEvent {\n return { type: 
\"execute-finish\", command, sandboxId, exitCode, truncated };\n}\n\n// ============================================================================\n// Web Event Factories\n// ============================================================================\n\n/**\n * Create a web-search-start event.\n */\nexport function createWebSearchStartEvent(query: string): WebSearchStartEvent {\n return { type: \"web-search-start\", query };\n}\n\n/**\n * Create a web-search-finish event.\n */\nexport function createWebSearchFinishEvent(\n query: string,\n resultCount: number\n): WebSearchFinishEvent {\n return { type: \"web-search-finish\", query, resultCount };\n}\n\n/**\n * Create an http-request-start event.\n */\nexport function createHttpRequestStartEvent(\n url: string,\n method: string\n): HttpRequestStartEvent {\n return { type: \"http-request-start\", url, method };\n}\n\n/**\n * Create an http-request-finish event.\n */\nexport function createHttpRequestFinishEvent(\n url: string,\n statusCode: number\n): HttpRequestFinishEvent {\n return { type: \"http-request-finish\", url, statusCode };\n}\n\n/**\n * Create a fetch-url-start event.\n */\nexport function createFetchUrlStartEvent(url: string): FetchUrlStartEvent {\n return { type: \"fetch-url-start\", url };\n}\n\n/**\n * Create a fetch-url-finish event.\n */\nexport function createFetchUrlFinishEvent(\n url: string,\n success: boolean\n): FetchUrlFinishEvent {\n return { type: \"fetch-url-finish\", url, success };\n}\n\n// ============================================================================\n// Subagent Event Factories\n// ============================================================================\n\n/**\n * Create a subagent-start event.\n */\nexport function createSubagentStartEvent(\n name: string,\n task: string\n): SubagentStartEvent {\n return { type: \"subagent-start\", name, task };\n}\n\n/**\n * Create a subagent-finish event.\n */\nexport function createSubagentFinishEvent(\n name: string,\n result: string\n): SubagentFinishEvent {\n return { type: \"subagent-finish\", name, result };\n}\n\n/**\n * Create a subagent-step event.\n */\nexport function createSubagentStepEvent(\n stepIndex: number,\n toolCalls: SubagentStepEvent[\"toolCalls\"]\n): SubagentStepEvent {\n return { type: \"subagent-step\", stepIndex, toolCalls };\n}\n\n// ============================================================================\n// Approval Event Factories\n// ============================================================================\n\n/**\n * Create an approval-requested event.\n */\nexport function createApprovalRequestedEvent(\n approvalId: string,\n toolCallId: string,\n toolName: string,\n args: unknown\n): ApprovalRequestedEvent {\n return { type: \"approval-requested\", approvalId, toolCallId, toolName, args };\n}\n\n/**\n * Create an approval-response event.\n */\nexport function createApprovalResponseEvent(\n approvalId: string,\n approved: boolean\n): ApprovalResponseEvent {\n return { type: \"approval-response\", approvalId, approved };\n}\n\n// ============================================================================\n// Checkpoint Event Factories\n// ============================================================================\n\n/**\n * Create a checkpoint-saved event.\n */\nexport function createCheckpointSavedEvent(\n threadId: string,\n step: number\n): CheckpointSavedEvent {\n return { type: \"checkpoint-saved\", threadId, step };\n}\n\n/**\n * Create a checkpoint-loaded event.\n */\nexport function 
createCheckpointLoadedEvent(\n threadId: string,\n step: number,\n messagesCount: number\n): CheckpointLoadedEvent {\n return { type: \"checkpoint-loaded\", threadId, step, messagesCount };\n}\n\n// ============================================================================\n// Utility Functions\n// ============================================================================\n\n/**\n * Type guard to check if an event is a specific type.\n * Useful for filtering or discriminating union types.\n *\n * @example\n * ```typescript\n * if (isEventType(event, \"file-read\")) {\n * // TypeScript knows event is FileReadEvent here\n * console.log(event.lines);\n * }\n * ```\n */\nexport function isEventType<T extends DeepAgentEvent[\"type\"]>(\n event: DeepAgentEvent,\n type: T\n): event is Extract<DeepAgentEvent, { type: T }> {\n return event.type === type;\n}\n\n/**\n * Get the event type as a string.\n * Utility function for logging or debugging.\n */\nexport function getEventType(event: DeepAgentEvent): string {\n return event.type;\n}\n\n/**\n * Create a generic event object from a type and data.\n * This is a more flexible but less type-safe alternative to the specific factories.\n *\n * @example\n * ```typescript\n * const event = createEvent(\"file-read\", { path: \"/file.txt\", lines: 100 });\n * ```\n */\nexport function createEvent<T extends DeepAgentEvent>(\n type: T[\"type\"],\n data: Omit<T, \"type\">\n): T {\n return { type, ...data } as T;\n}\n","/**\n * Backend protocol and filesystem types.\n */\n\nimport type { TodoItem } from \"./core\";\n\n/**\n * File data structure used by backends.\n */\nexport interface FileData {\n /** Lines of text content */\n content: string[];\n /** ISO format timestamp of creation */\n created_at: string;\n /** ISO format timestamp of last modification */\n modified_at: string;\n}\n\n/**\n * Structured file listing info.\n */\nexport interface FileInfo {\n /** File path */\n path: string;\n /** Whether this is a directory */\n is_dir?: boolean;\n /** File size in bytes (approximate) */\n size?: number;\n /** ISO 8601 timestamp of last modification */\n modified_at?: string;\n}\n\n/**\n * Structured grep match entry.\n */\nexport interface GrepMatch {\n /** File path where match was found */\n path: string;\n /** Line number (1-indexed) */\n line: number;\n /** The matching line text */\n text: string;\n}\n\n/**\n * Result from backend write operations.\n */\nexport interface WriteResult {\n /** Whether the write operation succeeded */\n success: boolean;\n /** Error message on failure, undefined on success */\n error?: string;\n /** File path of written file, undefined on failure */\n path?: string;\n}\n\n/**\n * Result from backend edit operations.\n */\nexport interface EditResult {\n /** Whether the edit operation succeeded */\n success: boolean;\n /** Error message on failure, undefined on success */\n error?: string;\n /** File path of edited file, undefined on failure */\n path?: string;\n /** Number of replacements made, undefined on failure */\n occurrences?: number;\n}\n\n/**\n * Standardized error codes for file upload/download operations.\n *\n * These represent common, recoverable errors that an LLM can understand and potentially fix:\n * - \"file_not_found\": The requested file doesn't exist (download)\n * - \"permission_denied\": Access denied\n * - \"is_directory\": Attempted to download a directory as a file\n * - \"invalid_path\": Path syntax is malformed\n *\n * @example\n * ```typescript\n * type FileError = FileOperationError;\n * 
// Valid values: \"file_not_found\" | \"permission_denied\" | \"is_directory\" | \"invalid_path\"\n * ```\n */\nexport type FileOperationError =\n | \"file_not_found\"\n | \"permission_denied\"\n | \"is_directory\"\n | \"invalid_path\";\n\n/**\n * Result of a single file download operation.\n *\n * The response is designed to allow partial success in batch operations.\n * The errors are standardized using FileOperationError literals.\n *\n * @example Success\n * ```typescript\n * { path: \"/app/config.json\", content: new Uint8Array(...), error: null }\n * ```\n *\n * @example Failure\n * ```typescript\n * { path: \"/wrong/path.txt\", content: null, error: \"file_not_found\" }\n * ```\n */\nexport interface FileDownloadResponse {\n /** The file path that was requested */\n path: string;\n /** File contents as bytes on success, null on failure */\n content: Uint8Array | null;\n /** Standardized error code on failure, null on success */\n error: FileOperationError | null;\n}\n\n/**\n * Result of a single file upload operation.\n *\n * The response is designed to allow partial success in batch operations.\n * The errors are standardized using FileOperationError literals.\n *\n * @example Success\n * ```typescript\n * { path: \"/app/data.txt\", error: null }\n * ```\n *\n * @example Failure\n * ```typescript\n * { path: \"/readonly/file.txt\", error: \"permission_denied\" }\n * ```\n */\nexport interface FileUploadResponse {\n /** The file path that was requested */\n path: string;\n /** Standardized error code on failure, null on success */\n error: FileOperationError | null;\n}\n\n/**\n * Shared state for deep agent operations.\n * This is passed to tools and modified during execution.\n */\nexport interface DeepAgentState {\n /** Current todo list */\n todos: TodoItem[];\n /** Virtual filesystem (for StateBackend) */\n files: Record<string, FileData>;\n}\n\n/**\n * Protocol for pluggable memory backends.\n */\nexport interface BackendProtocol {\n /**\n * Structured listing with file metadata.\n */\n lsInfo(path: string): FileInfo[] | Promise<FileInfo[]>;\n\n /**\n * Read file content with line numbers or an error string.\n */\n read(\n filePath: string,\n offset?: number,\n limit?: number\n ): string | Promise<string>;\n\n /**\n * Read file content as raw FileData.\n */\n readRaw(filePath: string): FileData | Promise<FileData>;\n\n /**\n * Structured search results or error string for invalid input.\n */\n grepRaw(\n pattern: string,\n path?: string | null,\n glob?: string | null\n ): GrepMatch[] | string | Promise<GrepMatch[] | string>;\n\n /**\n * Structured glob matching returning FileInfo objects.\n */\n globInfo(pattern: string, path?: string): FileInfo[] | Promise<FileInfo[]>;\n\n /**\n * Create a new file or overwrite existing file.\n */\n write(filePath: string, content: string): WriteResult | Promise<WriteResult>;\n\n /**\n * Edit a file by replacing string occurrences.\n */\n edit(\n filePath: string,\n oldString: string,\n newString: string,\n replaceAll?: boolean\n ): EditResult | Promise<EditResult>;\n}\n\n/**\n * Factory function type for creating backend instances from agent state.\n */\nexport type BackendFactory = (state: DeepAgentState) => BackendProtocol;\n\n/**\n * Result of command execution in a sandbox.\n */\nexport interface ExecuteResponse {\n /** Combined stdout and stderr output of the executed command */\n output: string;\n /** Exit code (0 = success, non-zero = failure, null = unknown/timeout) */\n exitCode: number | null;\n /** Whether the output was truncated 
due to size limits */\n truncated: boolean;\n}\n\n/**\n * Protocol for sandbox backends with command execution capability.\n */\nexport interface SandboxBackendProtocol extends BackendProtocol {\n /**\n * Execute a shell command in the sandbox.\n */\n execute(command: string): Promise<ExecuteResponse>;\n\n /**\n * Unique identifier for this sandbox instance.\n */\n readonly id: string;\n\n /**\n * Upload multiple files to the sandbox.\n *\n * This API is designed to allow partial success - individual uploads may fail\n * without affecting others. Check the error field in each response.\n *\n * @param files - Array of [path, content] tuples to upload\n * @returns Array of FileUploadResponse objects, one per input file\n *\n * @example\n * ```typescript\n * const responses = await sandbox.uploadFiles([\n * [\"/app/config.json\", new Uint8Array(b\"...\")],\n * [\"/app/data.txt\", new Uint8Array(b\"content\")],\n * ]);\n * // Check for errors\n * responses.forEach(r => {\n * if (r.error) console.error(`Failed to upload ${r.path}: ${r.error}`);\n * });\n * ```\n */\n uploadFiles(files: Array<[string, Uint8Array]>): Promise<FileUploadResponse[]>;\n\n /**\n * Download multiple files from the sandbox.\n *\n * This API is designed to allow partial success - individual downloads may fail\n * without affecting others. Check the error field in each response.\n *\n * @param paths - Array of file paths to download\n * @returns Array of FileDownloadResponse objects, one per input path\n *\n * @example\n * ```typescript\n * const responses = await sandbox.downloadFiles([\n * \"/app/config.json\",\n * \"/app/data.txt\",\n * ]);\n * // Process successful downloads\n * for (const r of responses) {\n * if (r.content) {\n * console.log(`Downloaded ${r.path}: ${r.content.length} bytes`);\n * } else if (r.error) {\n * console.error(`Failed to download ${r.path}: ${r.error}`);\n * }\n * }\n * ```\n */\n downloadFiles(paths: string[]): Promise<FileDownloadResponse[]>;\n}\n\n/**\n * Type guard to check if a backend is a SandboxBackendProtocol.\n */\nexport function isSandboxBackend(\n backend: BackendProtocol\n): backend is SandboxBackendProtocol {\n return (\n typeof (backend as SandboxBackendProtocol).execute === \"function\" &&\n typeof (backend as SandboxBackendProtocol).uploadFiles === \"function\" &&\n typeof (backend as SandboxBackendProtocol).downloadFiles === \"function\" &&\n typeof (backend as SandboxBackendProtocol).id === \"string\"\n );\n}\n","/**\n * System prompts for Deep Agent.\n */\n\nexport const BASE_PROMPT = `In order to complete the objective that the user asks of you, you have access to a number of standard tools.`;\n\nexport const TODO_SYSTEM_PROMPT = `## \\`write_todos\\` (task planning)\n\nYou have access to a \\`write_todos\\` tool to help you manage and plan tasks. Use this tool whenever you are working on a complex task.\n\n### When to Use This Tool\n\nUse proactively for:\n1. Complex multi-step tasks (3+ distinct steps)\n2. Non-trivial tasks requiring careful planning\n3. After receiving new instructions - capture requirements as todos\n4. After completing tasks - mark complete and add follow-ups\n5. When starting new tasks - mark as in_progress (ideally only one at a time)\n\n### When NOT to Use\n\nSkip for:\n1. Single, straightforward tasks\n2. Trivial tasks with no organizational benefit\n3. Tasks completable in < 3 trivial steps\n4. Purely conversational/informational requests\n\n### Task States and Management\n\n1. 
**Task States:**\n - pending: Not yet started\n - in_progress: Currently working on\n - completed: Finished successfully\n - cancelled: No longer needed\n\n2. **Task Management:**\n - Update status in real-time\n - Mark complete IMMEDIATELY after finishing\n - Only ONE task in_progress at a time\n - Complete current tasks before starting new ones`;\n\nexport const FILESYSTEM_SYSTEM_PROMPT = `## Virtual Filesystem\n\nYou have access to a virtual filesystem. All file paths must start with a /.\n\n- ls: list files in a directory (requires absolute path)\n- read_file: read a file from the filesystem\n- write_file: write to a file in the filesystem\n- edit_file: edit a file in the filesystem\n- glob: find files matching a pattern (e.g., \"**/*.py\")\n- grep: search for text within files`;\n\nexport const TASK_SYSTEM_PROMPT = `## \\`task\\` (subagent spawner)\n\nYou have access to a \\`task\\` tool to launch short-lived subagents that handle isolated tasks. These agents are ephemeral — they live only for the duration of the task and return a single result.\n\nWhen to use the task tool:\n- When a task is complex and multi-step, and can be fully delegated in isolation\n- When a task is independent of other tasks and can run in parallel\n- When a task requires focused reasoning or heavy token/context usage that would bloat the orchestrator thread\n- When sandboxing improves reliability (e.g. code execution, structured searches, data formatting)\n- When you only care about the output of the subagent, and not the intermediate steps\n\nSubagent lifecycle:\n1. **Spawn** → Provide clear role, instructions, and expected output\n2. **Run** → The subagent completes the task autonomously\n3. **Return** → The subagent provides a single structured result\n4. **Reconcile** → Incorporate or synthesize the result into the main thread\n\nWhen NOT to use the task tool:\n- If you need to see the intermediate reasoning or steps after the subagent has completed (the task tool hides them)\n- If the task is trivial (a few tool calls or simple lookup)\n- If delegating does not reduce token usage, complexity, or context switching\n- If splitting would add latency without benefit\n\n## Important Task Tool Usage Notes\n- Whenever possible, parallelize the work that you do. Whenever you have independent steps to complete - kick off tasks (subagents) in parallel to accomplish them faster.\n- Remember to use the \\`task\\` tool to silo independent tasks within a multi-part objective.\n- You should use the \\`task\\` tool whenever you have a complex task that will take multiple steps, and is independent from other tasks that the agent needs to complete.`;\n\n/**\n * Get the task tool description with available subagent types.\n */\nexport function getTaskToolDescription(subagentDescriptions: string[]): string {\n return `\nLaunch an ephemeral subagent to handle complex, multi-step independent tasks with isolated context windows.\n\nAvailable agent types and the tools they have access to:\n${subagentDescriptions.join(\"\\n\")}\n\nWhen using the Task tool, you must specify a subagent_type parameter to select which agent type to use.\n\n## Usage notes:\n1. Launch multiple agents concurrently whenever possible, to maximize performance; to do that, use a single message with multiple tool uses\n2. When the agent is done, it will return a single message back to you. The result returned by the agent is not visible to the user. 
To show the user the result, you should send a text message back to the user with a concise summary of the result.\n3. Each agent invocation is stateless. You will not be able to send additional messages to the agent, nor will the agent be able to communicate with you outside of its final report. Therefore, your prompt should contain a highly detailed task description for the agent to perform autonomously and you should specify exactly what information the agent should return back to you in its final and only message to you.\n4. The agent's outputs should generally be trusted\n5. Clearly tell the agent whether you expect it to create content, perform analysis, or just do research (search, file reads, web fetches, etc.), since it is not aware of the user's intent\n6. If the agent description mentions that it should be used proactively, then you should try your best to use it without the user having to ask for it first. Use your judgement.\n7. When only the general-purpose agent is provided, you should use it for all tasks. It is great for isolating context and token usage, and completing specific, complex tasks, as it has all the same capabilities as the main agent.\n\n### Example usage of the general-purpose agent:\n\n<example_agent_descriptions>\n\"general-purpose\": use this agent for general purpose tasks, it has access to all tools as the main agent.\n</example_agent_descriptions>\n\n<example>\nUser: \"I want to conduct research on the accomplishments of Lebron James, Michael Jordan, and Kobe Bryant, and then compare them.\"\nAssistant: *Uses the task tool in parallel to conduct isolated research on each of the three players*\nAssistant: *Synthesizes the results of the three isolated research tasks and responds to the User*\n<commentary>\nResearch is a complex, multi-step task in it of itself.\nThe research of each individual player is not dependent on the research of the other players.\nThe assistant uses the task tool to break down the complex objective into three isolated tasks.\nEach research task only needs to worry about context and tokens about one player, then returns synthesized information about each player as the Tool Result.\nThis means each research task can dive deep and spend tokens and context deeply researching each player, but the final result is synthesized information, and saves us tokens in the long run when comparing the players to each other.\n</commentary>\n</example>\n\n<example>\nUser: \"Analyze a single large code repository for security vulnerabilities and generate a report.\"\nAssistant: *Launches a single \\`task\\` subagent for the repository analysis*\nAssistant: *Receives report and integrates results into final summary*\n<commentary>\nSubagent is used to isolate a large, context-heavy task, even though there is only one. This prevents the main thread from being overloaded with details.\nIf the user then asks followup questions, we have a concise report to reference instead of the entire history of analysis and tool calls, which is good and saves us time and money.\n</commentary>\n</example>\n `.trim();\n}\n\nexport const DEFAULT_GENERAL_PURPOSE_DESCRIPTION =\n \"General-purpose agent for researching complex questions, searching for files and content, and executing multi-step tasks. When you are searching for a keyword or file and are not confident that you will find the right match in the first few tries use this agent to perform the search for you. 
This agent has access to all tools as the main agent.\";\n\nexport const DEFAULT_SUBAGENT_PROMPT =\n \"In order to complete the objective that the user asks of you, you have access to a number of standard tools.\";\n\nexport const EXECUTE_SYSTEM_PROMPT = `## \\`execute\\` (shell command execution)\n\nYou have access to an \\`execute\\` tool to run shell commands in the sandbox environment.\n\n### When to Use This Tool\n\nUse for:\n- Running build commands (npm install, npm run build, bun install)\n- Running tests (npm test, bun test, pytest)\n- Executing scripts (node script.js, python script.py)\n- Installing dependencies\n- Checking system state (ls, cat, pwd, which)\n- Any shell command that helps accomplish the task\n\n### Important Notes\n\n1. **Exit Codes**: Always check the exit code to determine success\n - 0 = success\n - non-zero = failure\n - null = possibly timed out\n\n2. **Command Chaining**:\n - Use \\`&&\\` to chain commands that depend on each other\n - Use \\`;\\` to run commands sequentially regardless of success\n\n3. **Timeouts**: Long-running commands may timeout\n\n4. **Working Directory**: Commands run in the sandbox's working directory`;\n\n/**\n * Build skills section for system prompt with progressive disclosure.\n */\nexport function buildSkillsPrompt(skills: Array<{ name: string; description: string; path: string }>): string {\n if (skills.length === 0) {\n return '';\n }\n\n const skillsList = skills\n .map(skill => `- **${skill.name}**: ${skill.description}\\n → Read \\`${skill.path}\\` for full instructions`)\n .join('\\n');\n\n return `## Skills System\n\nYou have access to a skills library providing specialized domain knowledge and workflows.\n\n**Available Skills:**\n\n${skillsList}\n\n**How to Use Skills (Progressive Disclosure):**\n\n1. **Recognize when a skill applies**: Check if the user's task matches any skill's domain\n2. **Read the skill's full instructions**: Use read_file to load the SKILL.md content\n3. **Follow the skill's workflow**: Skills contain step-by-step instructions and examples\n4. **Access supporting files**: Skills may include helper scripts or configuration files in their directory\n\nSkills provide expert knowledge for specialized tasks. Always read the full skill before using it.`;\n}\n","/**\n * Todo list tool for task planning and tracking.\n */\n\nimport { tool } from \"ai\";\nimport { z } from \"zod\";\nimport type { DeepAgentState, TodoItem, EventCallback } from \"../types\";\nimport { createTodosChangedEvent } from \"../utils/events\";\n\nconst TodoItemSchema = z.object({\n id: z.string().describe(\"Unique identifier for the todo item\"),\n content: z\n .string()\n .max(100)\n .describe(\"The description/content of the todo item (max 100 chars)\"),\n status: z\n .enum([\"pending\", \"in_progress\", \"completed\", \"cancelled\"])\n .describe(\"The current status of the todo item\"),\n});\n\n/**\n * Create the write_todos tool for task planning.\n * @param state - The shared agent state\n * @param onEvent - Optional callback for emitting events\n */\nexport function createTodosTool(state: DeepAgentState, onEvent?: EventCallback) {\n return tool({\n description: `Manage and plan tasks using a structured todo list. 
Use this tool for:\n- Complex multi-step tasks (3+ steps)\n- After receiving new instructions - capture requirements\n- When starting tasks - mark as in_progress (only one at a time)\n- After completing tasks - mark complete immediately\n\nTask states: pending, in_progress, completed, cancelled\n\nWhen merge=true, updates are merged with existing todos by id.\nWhen merge=false, the new todos replace all existing todos.`,\n inputSchema: z.object({\n todos: z\n .array(TodoItemSchema)\n .min(1)\n .describe(\"Array of todo items to write\"),\n merge: z\n .boolean()\n .default(true)\n .describe(\n \"Whether to merge with existing todos (true) or replace all (false)\"\n ),\n }),\n execute: async ({ todos, merge }) => {\n if (merge) {\n // Merge by id\n const existingMap = new Map<string, TodoItem>();\n for (const todo of state.todos) {\n existingMap.set(todo.id, todo);\n }\n\n for (const newTodo of todos) {\n const existing = existingMap.get(newTodo.id);\n if (existing) {\n // Update existing todo\n existingMap.set(newTodo.id, {\n ...existing,\n ...newTodo,\n });\n } else {\n // Add new todo\n existingMap.set(newTodo.id, newTodo);\n }\n }\n\n state.todos = Array.from(existingMap.values());\n } else {\n // Replace all\n state.todos = todos;\n }\n\n // Emit event if callback provided\n if (onEvent) {\n onEvent(createTodosChangedEvent([...state.todos]));\n }\n\n // Format current todo list for response\n const todoList = state.todos\n .map((t) => `- [${t.status}] ${t.id}: ${t.content}`)\n .join(\"\\n\");\n\n return `Todo list updated successfully.\\n\\nCurrent todos:\\n${todoList}`;\n },\n });\n}\n\n// ============================================================================\n// Individual Tool Reference\n// ============================================================================\n\n/**\n * Individual builtin tool reference for selective subagent configuration.\n * This is a reference to the creator function, not an instance.\n */\nexport const write_todos = createTodosTool;\n","/**\n * Centralized error message constants for backend operations\n * Reduces duplication across 6 backend implementations\n */\n\nexport const FILE_NOT_FOUND = (path: string) =>\n `Error: File '${path}' not found`;\n\nexport const FILE_ALREADY_EXISTS = (path: string) =>\n `Cannot write to ${path} because it already exists. 
Read and then make an edit, or write to a new path.`;\n\nexport const STRING_NOT_FOUND = (path: string, string: string) =>\n `Error: String not found in file: '${path}'\\n\\n${string}`;\n\nexport const INVALID_REGEX = (message: string) =>\n `Invalid regex pattern: ${message}`;\n\nexport const WEB_SEARCH_ERROR = (message: string) =>\n `Web search error: ${message}`;\n\nexport const REQUEST_TIMEOUT = (timeout: number) =>\n `Request timed out after ${timeout} seconds`;\n\nexport const SYSTEM_REMINDER_FILE_EMPTY =\n 'System reminder: File exists but has empty contents';\n\n// Generic errors\nexport const OPERATION_ERROR = (operation: string, message: string) =>\n `Operation error: ${operation} - ${message}`;\n","/**\n * Shared utility functions for memory backend implementations.\n */\n\nimport micromatch from \"micromatch\";\nimport { basename } from \"path\";\nimport type { FileData, GrepMatch } from \"../types\";\nimport { SYSTEM_REMINDER_FILE_EMPTY, INVALID_REGEX } from \"../constants/errors\";\nimport {\n MAX_LINE_LENGTH,\n LINE_NUMBER_WIDTH,\n DEFAULT_EVICTION_TOKEN_LIMIT,\n} from \"../constants/limits\";\n\n// Constants\nexport const EMPTY_CONTENT_WARNING = SYSTEM_REMINDER_FILE_EMPTY;\n// Re-export from limits for backward compatibility\nexport { MAX_LINE_LENGTH, LINE_NUMBER_WIDTH };\nexport const TOOL_RESULT_TOKEN_LIMIT = DEFAULT_EVICTION_TOKEN_LIMIT;\nexport const TRUNCATION_GUIDANCE =\n \"... [results truncated, try being more specific with your parameters]\";\n\n/**\n * Format file content with line numbers (cat -n style).\n */\nexport function formatContentWithLineNumbers(\n content: string | string[],\n startLine: number = 1\n): string {\n let lines: string[];\n if (typeof content === \"string\") {\n lines = content.split(\"\\n\");\n if (lines.length > 0 && lines[lines.length - 1] === \"\") {\n lines = lines.slice(0, -1);\n }\n } else {\n lines = content;\n }\n\n const resultLines: string[] = [];\n for (let i = 0; i < lines.length; i++) {\n const line = lines[i];\n const lineNum = i + startLine;\n\n if (line && line.length <= MAX_LINE_LENGTH) {\n resultLines.push(\n `${lineNum.toString().padStart(LINE_NUMBER_WIDTH)}\\t${line}`\n );\n } else if (line) {\n // Split long line into chunks with continuation markers\n const numChunks = Math.ceil(line.length / MAX_LINE_LENGTH);\n for (let chunkIdx = 0; chunkIdx < numChunks; chunkIdx++) {\n const start = chunkIdx * MAX_LINE_LENGTH;\n const end = Math.min(start + MAX_LINE_LENGTH, line.length);\n const chunk = line.substring(start, end);\n if (chunkIdx === 0) {\n resultLines.push(\n `${lineNum.toString().padStart(LINE_NUMBER_WIDTH)}\\t${chunk}`\n );\n } else {\n const continuationMarker = `${lineNum}.${chunkIdx}`;\n resultLines.push(\n `${continuationMarker.padStart(LINE_NUMBER_WIDTH)}\\t${chunk}`\n );\n }\n }\n } else {\n resultLines.push(\n `${lineNum.toString().padStart(LINE_NUMBER_WIDTH)}\\t`\n );\n }\n }\n\n return resultLines.join(\"\\n\");\n}\n\n/**\n * Check if content is empty and return warning message.\n */\nexport function checkEmptyContent(content: string): string | null {\n if (!content || content.trim() === \"\") {\n return EMPTY_CONTENT_WARNING;\n }\n return null;\n}\n\n/**\n * Convert FileData to plain string content.\n */\nexport function fileDataToString(fileData: FileData): string {\n return fileData.content.join(\"\\n\");\n}\n\n/**\n * Create a FileData object with timestamps.\n */\nexport function createFileData(content: string, createdAt?: string): FileData {\n const lines = typeof content === \"string\" ? 
content.split(\"\\n\") : content;\n const now = new Date().toISOString();\n\n return {\n content: lines,\n created_at: createdAt || now,\n modified_at: now,\n };\n}\n\n/**\n * Update FileData with new content, preserving creation timestamp.\n */\nexport function updateFileData(fileData: FileData, content: string): FileData {\n const lines = typeof content === \"string\" ? content.split(\"\\n\") : content;\n const now = new Date().toISOString();\n\n return {\n content: lines,\n created_at: fileData.created_at,\n modified_at: now,\n };\n}\n\n/**\n * Format file data for read response with line numbers.\n */\nexport function formatReadResponse(\n fileData: FileData,\n offset: number,\n limit: number\n): string {\n const content = fileDataToString(fileData);\n const emptyMsg = checkEmptyContent(content);\n if (emptyMsg) {\n return emptyMsg;\n }\n\n const lines = content.split(\"\\n\");\n const startIdx = offset;\n const endIdx = Math.min(startIdx + limit, lines.length);\n\n if (startIdx >= lines.length) {\n return `Error: Line offset ${offset} exceeds file length (${lines.length} lines)`;\n }\n\n const selectedLines = lines.slice(startIdx, endIdx);\n return formatContentWithLineNumbers(selectedLines, startIdx + 1);\n}\n\n/**\n * Perform string replacement with occurrence validation.\n */\nexport function performStringReplacement(\n content: string,\n oldString: string,\n newString: string,\n replaceAll: boolean\n): [string, number] | string {\n const occurrences = content.split(oldString).length - 1;\n\n if (occurrences === 0) {\n return `Error: String not found in file: '${oldString}'`;\n }\n\n if (occurrences > 1 && !replaceAll) {\n return `Error: String '${oldString}' appears ${occurrences} times in file. Use replace_all=true to replace all instances, or provide a more specific string with surrounding context.`;\n }\n\n const newContent = content.split(oldString).join(newString);\n return [newContent, occurrences];\n}\n\n/**\n * Validate and normalize a path.\n */\nexport function validatePath(path: string | null | undefined): string {\n const pathStr = path || \"/\";\n if (!pathStr || pathStr.trim() === \"\") {\n throw new Error(\"Path cannot be empty\");\n }\n\n let normalized = pathStr.startsWith(\"/\") ? 
pathStr : \"/\" + pathStr;\n\n if (!normalized.endsWith(\"/\")) {\n normalized += \"/\";\n }\n\n return normalized;\n}\n\n/**\n * Search files dict for paths matching glob pattern.\n */\nexport function globSearchFiles(\n files: Record<string, FileData>,\n pattern: string,\n path: string = \"/\"\n): string {\n let normalizedPath: string;\n try {\n normalizedPath = validatePath(path);\n } catch {\n return \"No files found\";\n }\n\n const filtered = Object.fromEntries(\n Object.entries(files).filter(([fp]) => fp.startsWith(normalizedPath))\n );\n\n const matches: Array<[string, string]> = [];\n for (const [filePath, fileData] of Object.entries(filtered)) {\n let relative = filePath.substring(normalizedPath.length);\n if (relative.startsWith(\"/\")) {\n relative = relative.substring(1);\n }\n if (!relative) {\n const parts = filePath.split(\"/\");\n relative = parts[parts.length - 1] || \"\";\n }\n\n if (\n micromatch.isMatch(relative, pattern, {\n dot: true,\n nobrace: false,\n })\n ) {\n matches.push([filePath, fileData.modified_at]);\n }\n }\n\n matches.sort((a, b) => b[1].localeCompare(a[1]));\n\n if (matches.length === 0) {\n return \"No files found\";\n }\n\n return matches.map(([fp]) => fp).join(\"\\n\");\n}\n\n/**\n * Return structured grep matches from an in-memory files mapping.\n */\nexport function grepMatchesFromFiles(\n files: Record<string, FileData>,\n pattern: string,\n path: string | null = null,\n glob: string | null = null\n): GrepMatch[] | string {\n let regex: RegExp;\n try {\n regex = new RegExp(pattern);\n } catch (e: unknown) {\n const error = e as Error;\n return INVALID_REGEX(error.message);\n }\n\n let normalizedPath: string;\n try {\n normalizedPath = validatePath(path);\n } catch {\n return [];\n }\n\n let filtered = Object.fromEntries(\n Object.entries(files).filter(([fp]) => fp.startsWith(normalizedPath))\n );\n\n if (glob) {\n filtered = Object.fromEntries(\n Object.entries(filtered).filter(([fp]) =>\n micromatch.isMatch(basename(fp), glob, { dot: true, nobrace: false })\n )\n );\n }\n\n const matches: GrepMatch[] = [];\n for (const [filePath, fileData] of Object.entries(filtered)) {\n for (let i = 0; i < fileData.content.length; i++) {\n const line = fileData.content[i];\n const lineNum = i + 1;\n if (line && regex.test(line)) {\n matches.push({ path: filePath, line: lineNum, text: line });\n }\n }\n }\n\n return matches;\n}\n\n","/**\n * StateBackend: Store files in shared state (ephemeral, in-memory).\n */\n\nimport type {\n BackendProtocol,\n EditResult,\n FileData,\n FileInfo,\n GrepMatch,\n WriteResult,\n DeepAgentState,\n} from \"../types\";\nimport {\n createFileData,\n fileDataToString,\n formatReadResponse,\n globSearchFiles,\n grepMatchesFromFiles,\n performStringReplacement,\n updateFileData,\n} from \"./utils\";\nimport {\n FILE_NOT_FOUND,\n FILE_ALREADY_EXISTS,\n} from \"../constants/errors\";\n\n/**\n * Backend that stores files in shared state (ephemeral).\n *\n * Files persist within a single agent invocation but not across invocations.\n * This is the default backend for Deep Agent when no backend is specified.\n *\n * Files are stored in memory as part of the `DeepAgentState`, making this backend\n * fast but non-persistent. 
Use `FilesystemBackend` or `PersistentBackend` for\n * cross-session persistence.\n *\n * @example Default usage (no backend specified)\n * ```typescript\n * const agent = createDeepAgent({\n * model: anthropic('claude-sonnet-4-20250514'),\n * // StateBackend is used by default\n * });\n * ```\n *\n * @example Explicit usage\n * ```typescript\n * const state: DeepAgentState = { todos: [], files: {} };\n * const backend = new StateBackend(state);\n * const agent = createDeepAgent({\n * model: anthropic('claude-sonnet-4-20250514'),\n * backend,\n * });\n * ```\n */\nexport class StateBackend implements BackendProtocol {\n private state: DeepAgentState;\n\n /**\n * Create a new StateBackend instance.\n *\n * @param state - The DeepAgentState object that will store the files.\n * Files are stored in `state.files` as a Record<string, FileData>.\n */\n constructor(state: DeepAgentState) {\n this.state = state;\n }\n\n /**\n * Get files from current state.\n */\n private getFiles(): Record<string, FileData> {\n return this.state.files || {};\n }\n\n /**\n * List files and directories in the specified directory (non-recursive).\n */\n lsInfo(path: string): FileInfo[] {\n const files = this.getFiles();\n const infos: FileInfo[] = [];\n const subdirs = new Set<string>();\n\n const normalizedPath = path.endsWith(\"/\") ? path : path + \"/\";\n\n for (const [k, fd] of Object.entries(files)) {\n if (!k.startsWith(normalizedPath)) {\n continue;\n }\n\n const relative = k.substring(normalizedPath.length);\n\n if (relative.includes(\"/\")) {\n const subdirName = relative.split(\"/\")[0];\n subdirs.add(normalizedPath + subdirName + \"/\");\n continue;\n }\n\n const size = fd.content.join(\"\\n\").length;\n infos.push({\n path: k,\n is_dir: false,\n size: size,\n modified_at: fd.modified_at,\n });\n }\n\n for (const subdir of Array.from(subdirs).sort()) {\n infos.push({\n path: subdir,\n is_dir: true,\n size: 0,\n modified_at: \"\",\n });\n }\n\n infos.sort((a, b) => a.path.localeCompare(b.path));\n return infos;\n }\n\n /**\n * Read file content with line numbers.\n */\n read(filePath: string, offset: number = 0, limit: number = 2000): string {\n const files = this.getFiles();\n const fileData = files[filePath];\n\n if (!fileData) {\n return FILE_NOT_FOUND(filePath);\n }\n\n return formatReadResponse(fileData, offset, limit);\n }\n\n /**\n * Read file content as raw FileData.\n */\n readRaw(filePath: string): FileData {\n const files = this.getFiles();\n const fileData = files[filePath];\n\n if (!fileData) throw new Error(`File '${filePath}' not found`);\n return fileData;\n }\n\n /**\n * Create a new file with content.\n */\n write(filePath: string, content: string): WriteResult {\n const files = this.getFiles();\n\n // Validate file path\n if (!filePath || filePath.trim() === \"\") {\n return {\n success: false,\n error: \"File path cannot be empty\",\n };\n }\n\n if (filePath in files) {\n return {\n success: false,\n error: FILE_ALREADY_EXISTS(filePath),\n };\n }\n\n const newFileData = createFileData(content);\n this.state.files[filePath] = newFileData;\n return { success: true, path: filePath };\n }\n\n /**\n * Edit a file by replacing string occurrences.\n */\n edit(\n filePath: string,\n oldString: string,\n newString: string,\n replaceAll: boolean = false\n ): EditResult {\n const files = this.getFiles();\n const fileData = files[filePath];\n\n if (!fileData) {\n return { success: false, error: FILE_NOT_FOUND(filePath) };\n }\n\n const content = fileDataToString(fileData);\n const result = 
performStringReplacement(\n content,\n oldString,\n newString,\n replaceAll\n );\n\n if (typeof result === \"string\") {\n return { success: false, error: result };\n }\n\n const [newContent, occurrences] = result;\n const newFileData = updateFileData(fileData, newContent);\n this.state.files[filePath] = newFileData;\n return { success: true, path: filePath, occurrences };\n }\n\n /**\n * Structured search results or error string for invalid input.\n */\n grepRaw(\n pattern: string,\n path: string = \"/\",\n glob: string | null = null\n ): GrepMatch[] | string {\n const files = this.getFiles();\n return grepMatchesFromFiles(files, pattern, path, glob);\n }\n\n /**\n * Structured glob matching returning FileInfo objects.\n */\n globInfo(pattern: string, path: string = \"/\"): FileInfo[] {\n const files = this.getFiles();\n const result = globSearchFiles(files, pattern, path);\n\n if (result === \"No files found\") {\n return [];\n }\n\n const paths = result.split(\"\\n\");\n const infos: FileInfo[] = [];\n for (const p of paths) {\n const fd = files[p];\n const size = fd ? fd.content.join(\"\\n\").length : 0;\n infos.push({\n path: p,\n is_dir: false,\n size: size,\n modified_at: fd?.modified_at || \"\",\n });\n }\n return infos;\n }\n}\n\n","/**\n * Tool result eviction utility.\n *\n * When tool results exceed a certain size threshold, this utility\n * writes them to the filesystem and returns a reference instead.\n * This prevents context overflow from large tool outputs.\n */\n\nimport type { BackendProtocol, BackendFactory, DeepAgentState } from \"../types\";\nimport { DEFAULT_EVICTION_TOKEN_LIMIT as CENTRALIZED_EVICTION_LIMIT } from \"../constants/limits\";\n\n/**\n * Default token limit before evicting a tool result.\n * Approximately 20,000 tokens (~80KB of text).\n */\nexport const DEFAULT_EVICTION_TOKEN_LIMIT = CENTRALIZED_EVICTION_LIMIT;\n\n/**\n * Approximate characters per token (rough estimate).\n */\nconst CHARS_PER_TOKEN = 4;\n\n/**\n * Sanitize a tool call ID for use as a filename.\n * Removes or replaces characters that are invalid in file paths.\n */\nexport function sanitizeToolCallId(toolCallId: string): string {\n return toolCallId\n .replace(/[^a-zA-Z0-9_-]/g, \"_\")\n .substring(0, 100); // Limit length\n}\n\n/**\n * Estimate the number of tokens in a string.\n * Uses a simple character-based approximation.\n */\nexport function estimateTokens(text: string): number {\n return Math.ceil(text.length / CHARS_PER_TOKEN);\n}\n\n/**\n * Check if a tool result should be evicted based on size.\n */\nexport function shouldEvict(\n result: string,\n tokenLimit: number = DEFAULT_EVICTION_TOKEN_LIMIT\n): boolean {\n return estimateTokens(result) > tokenLimit;\n}\n\n/**\n * Options for evicting a tool result.\n */\nexport interface EvictOptions {\n /** The tool result content */\n result: string;\n /** The tool call ID (used for filename) */\n toolCallId: string;\n /** The tool name */\n toolName: string;\n /** Backend to write the evicted content to */\n backend: BackendProtocol;\n /** Token limit before eviction (default: 20000) */\n tokenLimit?: number;\n}\n\n/**\n * Result of an eviction operation.\n */\nexport interface EvictResult {\n /** Whether the result was evicted */\n evicted: boolean;\n /** The content to return (either original or truncated message) */\n content: string;\n /** Path where content was evicted to (if evicted) */\n evictedPath?: string;\n}\n\n/**\n * Evict a large tool result to the filesystem.\n *\n * If the result exceeds the token limit, writes it to 
a file and\n * returns a truncated message with the file path.\n *\n * @param options - Eviction options\n * @returns Eviction result with content and metadata\n *\n * @example\n * ```typescript\n * const result = await evictToolResult({\n * result: veryLongString,\n * toolCallId: \"call_123\",\n * toolName: \"grep\",\n * backend: filesystemBackend,\n * });\n *\n * if (result.evicted) {\n * console.log(`Content saved to ${result.evictedPath}`);\n * }\n * ```\n */\nexport async function evictToolResult(\n options: EvictOptions\n): Promise<EvictResult> {\n const {\n result,\n toolCallId,\n toolName,\n backend,\n tokenLimit = DEFAULT_EVICTION_TOKEN_LIMIT,\n } = options;\n\n // Check if eviction is needed\n if (!shouldEvict(result, tokenLimit)) {\n return {\n evicted: false,\n content: result,\n };\n }\n\n // Generate eviction path\n const sanitizedId = sanitizeToolCallId(toolCallId);\n const evictPath = `/large_tool_results/${toolName}_${sanitizedId}.txt`;\n\n // Write to backend\n const writeResult = await backend.write(evictPath, result);\n\n if (writeResult.error) {\n // If write fails, return original content (may cause context issues)\n console.warn(`Failed to evict tool result: ${writeResult.error}`);\n return {\n evicted: false,\n content: result,\n };\n }\n\n // Return truncated message\n const estimatedTokens = estimateTokens(result);\n const truncatedContent = `Tool result too large (~${estimatedTokens} tokens). Content saved to ${evictPath}. Use read_file to access the full content.`;\n\n return {\n evicted: true,\n content: truncatedContent,\n evictedPath: evictPath,\n };\n}\n\n/**\n * Create a tool result wrapper that automatically evicts large results.\n *\n * @param backend - Backend or factory for filesystem operations\n * @param state - Current agent state (for factory backends)\n * @param tokenLimit - Token limit before eviction\n * @returns Function that wraps tool results with eviction\n */\nexport function createToolResultWrapper(\n backend: BackendProtocol | BackendFactory,\n state: DeepAgentState,\n tokenLimit: number = DEFAULT_EVICTION_TOKEN_LIMIT\n): (result: string, toolCallId: string, toolName: string) => Promise<string> {\n // Resolve backend if factory\n const resolvedBackend =\n typeof backend === \"function\" ? backend(state) : backend;\n\n return async (\n result: string,\n toolCallId: string,\n toolName: string\n ): Promise<string> => {\n const evictResult = await evictToolResult({\n result,\n toolCallId,\n toolName,\n backend: resolvedBackend,\n tokenLimit,\n });\n\n return evictResult.content;\n };\n}\n\n","/**\n * Filesystem tools for virtual file operations.\n */\n\nimport { tool } from \"ai\";\nimport { z } from \"zod\";\nimport type {\n BackendProtocol,\n DeepAgentState,\n BackendFactory,\n EventCallback,\n} from \"../types\";\nimport { StateBackend } from \"../backends/state\";\nimport {\n evictToolResult,\n DEFAULT_EVICTION_TOKEN_LIMIT,\n} from \"../utils/eviction\";\nimport {\n createFileReadEvent,\n createFileWriteStartEvent,\n createFileWrittenEvent,\n createFileEditedEvent,\n} from \"../utils/events\";\n\n// Tool descriptions\nconst LS_TOOL_DESCRIPTION = \"List files and directories in a directory. Paths are relative to the working directory.\";\nconst READ_FILE_TOOL_DESCRIPTION = \"Read the contents of a file. Paths are relative to the working directory.\";\nconst WRITE_FILE_TOOL_DESCRIPTION =\n \"Write content to a new file. Returns an error if the file already exists. 
Paths are relative to the working directory.\";\nconst EDIT_FILE_TOOL_DESCRIPTION =\n \"Edit a file by replacing a specific string with a new string. Paths are relative to the working directory.\";\nconst GLOB_TOOL_DESCRIPTION =\n \"Find files matching a glob pattern (e.g., '**/*.py' for all Python files). Paths are relative to the working directory.\";\nconst GREP_TOOL_DESCRIPTION =\n \"Search for a regex pattern in files. Returns matching files and line numbers. Paths are relative to the working directory.\";\n\n/**\n * Resolve backend from factory or instance.\n */\nfunction getBackend(\n backend: BackendProtocol | BackendFactory,\n state: DeepAgentState\n): BackendProtocol {\n if (typeof backend === \"function\") {\n return backend(state);\n }\n return backend;\n}\n\n/**\n * Create the ls tool.\n */\nexport function createLsTool(\n state: DeepAgentState,\n backend: BackendProtocol | BackendFactory,\n onEvent?: EventCallback\n) {\n return tool({\n description: LS_TOOL_DESCRIPTION,\n inputSchema: z.object({\n path: z\n .string()\n .default(\".\")\n .describe(\"Directory path to list (default: current directory)\"),\n }),\n execute: async ({ path }) => {\n const resolvedBackend = getBackend(backend, state);\n const infos = await resolvedBackend.lsInfo(path || \".\");\n\n // Emit ls event\n if (onEvent) {\n onEvent({\n type: \"ls\",\n path: path || \".\",\n count: infos.length,\n });\n }\n\n if (infos.length === 0) {\n return `No files found in ${path}`;\n }\n\n const lines: string[] = [];\n for (const info of infos) {\n if (info.is_dir) {\n lines.push(`${info.path} (directory)`);\n } else {\n const size = info.size ? ` (${info.size} bytes)` : \"\";\n lines.push(`${info.path}${size}`);\n }\n }\n return lines.join(\"\\n\");\n },\n });\n}\n\n/**\n * Create the read_file tool.\n */\nexport function createReadFileTool(\n state: DeepAgentState,\n backend: BackendProtocol | BackendFactory,\n evictionLimit?: number,\n onEvent?: EventCallback\n) {\n return tool({\n description: READ_FILE_TOOL_DESCRIPTION,\n inputSchema: z.object({\n file_path: z.string().describe(\"Path to the file to read (e.g., 'src/main.ts' or './main.ts')\"),\n offset: z\n .number()\n .default(0)\n .describe(\"Line offset to start reading from (0-indexed)\"),\n limit: z\n .number()\n .default(2000)\n .describe(\"Maximum number of lines to read\"),\n }),\n execute: async ({ file_path, offset, limit }, { toolCallId }) => {\n const resolvedBackend = getBackend(backend, state);\n const content = await resolvedBackend.read(file_path, offset ?? 0, limit ?? 
2000);\n \n // Emit file-read event\n if (onEvent) {\n const lineCount = content.split(\"\\n\").length;\n onEvent(createFileReadEvent(file_path, lineCount));\n }\n \n // Evict large results if limit is set\n if (evictionLimit && evictionLimit > 0) {\n const evictResult = await evictToolResult({\n result: content,\n toolCallId: toolCallId || `read_${Date.now()}`,\n toolName: \"read_file\",\n backend: resolvedBackend,\n tokenLimit: evictionLimit,\n });\n return evictResult.content;\n }\n \n return content;\n },\n });\n}\n\n/**\n * Create the write_file tool.\n */\nexport function createWriteFileTool(\n state: DeepAgentState,\n backend: BackendProtocol | BackendFactory,\n onEvent?: EventCallback\n) {\n return tool({\n description: WRITE_FILE_TOOL_DESCRIPTION,\n inputSchema: z.object({\n file_path: z.string().describe(\"Path to the file to write (e.g., 'src/main.ts' or './main.ts')\"),\n content: z.string().describe(\"Content to write to the file\"),\n }),\n execute: async ({ file_path, content }) => {\n // Emit file-write-start event for preview\n if (onEvent) {\n onEvent(createFileWriteStartEvent(file_path, content));\n }\n\n const resolvedBackend = getBackend(backend, state);\n const result = await resolvedBackend.write(file_path, content);\n\n if (result.error) {\n return result.error;\n }\n\n // Emit file-written event with content\n if (onEvent) {\n onEvent(createFileWrittenEvent(file_path, content));\n }\n\n return `Successfully wrote to '${file_path}'`;\n },\n });\n}\n\n/**\n * Create the edit_file tool.\n */\nexport function createEditFileTool(\n state: DeepAgentState,\n backend: BackendProtocol | BackendFactory,\n onEvent?: EventCallback\n) {\n return tool({\n description: EDIT_FILE_TOOL_DESCRIPTION,\n inputSchema: z.object({\n file_path: z.string().describe(\"Path to the file to edit (e.g., 'src/main.ts' or './main.ts')\"),\n old_string: z\n .string()\n .describe(\"String to be replaced (must match exactly)\"),\n new_string: z.string().describe(\"String to replace with\"),\n replace_all: z\n .boolean()\n .default(false)\n .describe(\"Whether to replace all occurrences\"),\n }),\n execute: async ({ file_path, old_string, new_string, replace_all }) => {\n const resolvedBackend = getBackend(backend, state);\n const result = await resolvedBackend.edit(\n file_path,\n old_string,\n new_string,\n replace_all ?? false\n );\n\n if (result.error) {\n return result.error;\n }\n\n // Emit event if callback provided\n if (onEvent) {\n onEvent(createFileEditedEvent(file_path, result.occurrences ?? 
0));\n }\n\n return `Successfully replaced ${result.occurrences} occurrence(s) in '${file_path}'`;\n },\n });\n}\n\n/**\n * Create the glob tool.\n */\nexport function createGlobTool(\n state: DeepAgentState,\n backend: BackendProtocol | BackendFactory,\n onEvent?: EventCallback\n) {\n return tool({\n description: GLOB_TOOL_DESCRIPTION,\n inputSchema: z.object({\n pattern: z.string().describe(\"Glob pattern (e.g., '*.py', '**/*.ts')\"),\n path: z\n .string()\n .default(\".\")\n .describe(\"Base path to search from (default: current directory)\"),\n }),\n execute: async ({ pattern, path }) => {\n const resolvedBackend = getBackend(backend, state);\n const infos = await resolvedBackend.globInfo(pattern, path || \".\");\n\n // Emit glob event\n if (onEvent) {\n onEvent({\n type: \"glob\",\n pattern,\n count: infos.length,\n });\n }\n\n if (infos.length === 0) {\n return `No files found matching pattern '${pattern}'`;\n }\n\n return infos.map((info) => info.path).join(\"\\n\");\n },\n });\n}\n\n/**\n * Create the grep tool.\n */\nexport function createGrepTool(\n state: DeepAgentState,\n backend: BackendProtocol | BackendFactory,\n evictionLimit?: number,\n onEvent?: EventCallback\n) {\n return tool({\n description: GREP_TOOL_DESCRIPTION,\n inputSchema: z.object({\n pattern: z.string().describe(\"Regex pattern to search for\"),\n path: z\n .string()\n .default(\".\")\n .describe(\"Base path to search from (default: current directory)\"),\n glob: z\n .string()\n .optional()\n .nullable()\n .describe(\"Optional glob pattern to filter files (e.g., '*.py')\"),\n }),\n execute: async ({ pattern, path, glob }, { toolCallId }) => {\n const resolvedBackend = getBackend(backend, state);\n const result = await resolvedBackend.grepRaw(\n pattern,\n path || \".\",\n glob ?? 
null\n );\n\n if (typeof result === \"string\") {\n // Emit grep event even for string results (errors)\n if (onEvent) {\n onEvent({\n type: \"grep\",\n pattern,\n count: 0,\n });\n }\n return result;\n }\n\n // Emit grep event\n if (onEvent) {\n onEvent({\n type: \"grep\",\n pattern,\n count: result.length,\n });\n }\n\n if (result.length === 0) {\n return `No matches found for pattern '${pattern}'`;\n }\n\n // Format output: group by file\n const lines: string[] = [];\n let currentFile: string | null = null;\n for (const match of result) {\n if (match.path !== currentFile) {\n currentFile = match.path;\n lines.push(`\\n${currentFile}:`);\n }\n lines.push(` ${match.line}: ${match.text}`);\n }\n\n const content = lines.join(\"\\n\");\n \n // Evict large results if limit is set\n if (evictionLimit && evictionLimit > 0) {\n const evictResult = await evictToolResult({\n result: content,\n toolCallId: toolCallId || `grep_${Date.now()}`,\n toolName: \"grep\",\n backend: resolvedBackend,\n tokenLimit: evictionLimit,\n });\n return evictResult.content;\n }\n\n return content;\n },\n });\n}\n\n/**\n * Options for creating filesystem tools.\n */\nexport interface CreateFilesystemToolsOptions {\n /** Backend for filesystem operations */\n backend?: BackendProtocol | BackendFactory;\n /** Callback for emitting events */\n onEvent?: EventCallback;\n /** Token limit before evicting large tool results (default: disabled) */\n toolResultEvictionLimit?: number;\n}\n\n/**\n * Create all filesystem tools.\n * @param state - The shared agent state\n * @param backendOrOptions - Backend or options object\n * @param onEvent - Optional callback for emitting events (deprecated, use options)\n */\nexport function createFilesystemTools(\n state: DeepAgentState,\n backendOrOptions?: BackendProtocol | BackendFactory | CreateFilesystemToolsOptions,\n onEvent?: EventCallback\n) {\n // Handle both old and new API\n let backend: BackendProtocol | BackendFactory | undefined;\n let eventCallback: EventCallback | undefined = onEvent;\n let evictionLimit: number | undefined;\n\n if (backendOrOptions && typeof backendOrOptions === \"object\" && \"backend\" in backendOrOptions) {\n // New options API\n const options = backendOrOptions as CreateFilesystemToolsOptions;\n backend = options.backend;\n eventCallback = options.onEvent;\n evictionLimit = options.toolResultEvictionLimit;\n } else {\n // Old API (backend directly)\n backend = backendOrOptions as BackendProtocol | BackendFactory | undefined;\n }\n\n // Default to StateBackend if no backend provided\n const resolvedBackend =\n backend || ((s: DeepAgentState) => new StateBackend(s));\n\n return {\n ls: createLsTool(state, resolvedBackend, eventCallback),\n read_file: createReadFileTool(state, resolvedBackend, evictionLimit, eventCallback),\n write_file: createWriteFileTool(state, resolvedBackend, eventCallback),\n edit_file: createEditFileTool(state, resolvedBackend, eventCallback),\n glob: createGlobTool(state, resolvedBackend, eventCallback),\n grep: createGrepTool(state, resolvedBackend, evictionLimit, eventCallback),\n };\n}\n\n// ============================================================================\n// Individual Tool References\n// ============================================================================\n\n/**\n * Individual builtin tool references for selective subagent configuration.\n * These are references to the creator functions, not instances.\n */\nexport const ls = createLsTool;\nexport const read_file = createReadFileTool;\nexport const 
write_file = createWriteFileTool;\nexport const edit_file = createEditFileTool;\nexport const glob = createGlobTool;\nexport const grep = createGrepTool;\n","/**\n * Utilities for applying tool approval configuration.\n */\n\nimport { tool, type ToolSet } from \"ai\";\nimport type { InterruptOnConfig, DynamicApprovalConfig } from \"../types\";\n\n/**\n * Callback type for requesting approval from the user.\n */\nexport type ApprovalCallback = (request: {\n approvalId: string;\n toolCallId: string;\n toolName: string;\n args: unknown;\n}) => Promise<boolean>;\n\n/**\n * Check if approval is needed based on config.\n */\nasync function checkNeedsApproval(\n config: boolean | DynamicApprovalConfig,\n args: unknown\n): Promise<boolean> {\n if (typeof config === \"boolean\") {\n return config;\n }\n \n if (config.shouldApprove) {\n return config.shouldApprove(args);\n }\n \n return true;\n}\n\n/**\n * Convert interruptOn config to needsApproval function for a tool.\n */\nfunction configToNeedsApproval(\n config: boolean | DynamicApprovalConfig\n): boolean | ((args: unknown) => boolean | Promise<boolean>) {\n if (typeof config === \"boolean\") {\n return config;\n }\n \n if (config.shouldApprove) {\n return config.shouldApprove;\n }\n \n return true;\n}\n\nlet approvalCounter = 0;\nfunction generateApprovalId(): string {\n return `approval-${Date.now()}-${++approvalCounter}`;\n}\n\n/**\n * Apply interruptOn configuration to a toolset.\n * \n * This adds the `needsApproval` property to tools based on the config.\n * \n * @param tools - The original toolset\n * @param interruptOn - Configuration mapping tool names to approval settings\n * @returns New toolset with needsApproval applied\n * \n * @example\n * ```typescript\n * const approvedTools = applyInterruptConfig(tools, {\n * write_file: true,\n * execute: { shouldApprove: (args) => args.command.includes('rm') },\n * });\n * ```\n */\nexport function applyInterruptConfig(\n tools: ToolSet,\n interruptOn?: InterruptOnConfig\n): ToolSet {\n if (!interruptOn) {\n return tools;\n }\n\n const result: ToolSet = {};\n\n for (const [name, tool] of Object.entries(tools)) {\n const config = interruptOn[name];\n \n if (config === undefined || config === false) {\n // No approval needed - use tool as-is\n result[name] = tool;\n } else {\n // Apply needsApproval\n result[name] = {\n ...tool,\n needsApproval: configToNeedsApproval(config),\n };\n }\n }\n\n return result;\n}\n\n/**\n * Wrap tools with approval checking that intercepts execution.\n *\n * Unlike applyInterruptConfig which just sets needsApproval metadata,\n * this actually wraps the execute function to request approval before running.\n *\n * If no approval callback is provided, tools requiring approval will be auto-denied.\n *\n * @param tools - The original toolset\n * @param interruptOn - Configuration mapping tool names to approval settings\n * @param onApprovalRequest - Callback to request approval from user (optional)\n * @returns New toolset with wrapped execute functions\n */\nexport function wrapToolsWithApproval(\n tools: ToolSet,\n interruptOn: InterruptOnConfig | undefined,\n onApprovalRequest: ApprovalCallback | undefined\n): ToolSet {\n if (!interruptOn) {\n return tools;\n }\n\n const result: ToolSet = {};\n\n for (const [name, existingTool] of Object.entries(tools)) {\n const config = interruptOn[name];\n\n if (config === undefined || config === false) {\n // No approval needed - use tool as-is\n result[name] = existingTool;\n } else {\n // Wrap the execute function with 
approval check\n const originalExecute = existingTool.execute;\n if (!originalExecute) {\n // Tool has no execute function - skip wrapping\n result[name] = existingTool;\n continue;\n }\n\n // Create a completely new tool using the AI SDK tool() function\n // This ensures proper integration with AI SDK's execution mechanism\n result[name] = tool({\n description: existingTool.description,\n inputSchema: existingTool.inputSchema,\n execute: async (args, options) => {\n // Check if this specific call needs approval\n const needsApproval = await checkNeedsApproval(config, args);\n\n if (needsApproval) {\n // If no callback provided, auto-deny\n if (!onApprovalRequest) {\n return `Tool execution denied. No approval callback provided. The ${name} tool was not executed.`;\n }\n\n // Generate unique IDs for this approval request\n const approvalId = generateApprovalId();\n const toolCallId = options?.toolCallId || approvalId;\n\n // Request approval from user\n const approved = await onApprovalRequest({\n approvalId,\n toolCallId,\n toolName: name,\n args,\n });\n\n if (!approved) {\n // User denied - return an error message instead of executing\n return `Tool execution denied by user. The ${name} tool was not executed.`;\n }\n }\n\n // Approved or no approval needed - execute the tool\n return originalExecute(args, options);\n },\n });\n }\n }\n\n return result;\n}\n\n/**\n * Check if a toolset has any tools requiring approval.\n */\nexport function hasApprovalTools(interruptOn?: InterruptOnConfig): boolean {\n if (!interruptOn) return false;\n return Object.values(interruptOn).some((v) => v !== false);\n}\n\n/**\n * Create interrupt data for checkpoint when approval is requested.\n * \n * This is used to save checkpoint state when a tool requires approval,\n * allowing the agent to resume from the interrupt point later.\n */\nexport function createInterruptData(\n toolCallId: string,\n toolName: string,\n args: unknown,\n step: number\n): import(\"../checkpointer/types\").InterruptData {\n return {\n toolCall: {\n toolCallId,\n toolName,\n args,\n },\n step,\n };\n}\n","/**\n * Web tools for search and HTTP requests.\n * Based on LangChain DeepAgents implementation.\n */\n\nimport { tool } from \"ai\";\nimport { z } from \"zod\";\nimport { tavily } from \"@tavily/core\";\nimport TurndownService from \"turndown\";\nimport { Readability } from \"@mozilla/readability\";\nimport { JSDOM } from \"jsdom\";\nimport type {\n BackendProtocol,\n BackendFactory,\n DeepAgentState,\n EventCallback,\n} from \"../types\";\nimport { evictToolResult } from \"../utils/eviction\";\nimport {\n WEB_SEARCH_ERROR,\n REQUEST_TIMEOUT,\n} from \"../constants/errors\";\nimport { DEFAULT_TIMEOUT_SECONDS } from \"../constants/limits\";\nimport {\n createWebSearchStartEvent,\n createWebSearchFinishEvent,\n createHttpRequestStartEvent,\n createHttpRequestFinishEvent,\n createFetchUrlStartEvent,\n createFetchUrlFinishEvent,\n} from \"../utils/events\";\n\n// ============================================================================\n// Helper Functions\n// ============================================================================\n\n/**\n * Helper to resolve backend from factory or instance.\n */\nfunction getBackend(\n backend: BackendProtocol | BackendFactory | undefined,\n state: DeepAgentState\n): BackendProtocol | null {\n if (!backend) return null;\n if (typeof backend === \"function\") {\n return backend(state);\n }\n return backend;\n}\n\n// 
============================================================================\n// HTML to Markdown Utilities\n// ============================================================================\n\n/**\n * Convert HTML to Markdown with article extraction.\n * Uses Mozilla Readability to extract main content, then converts to Markdown.\n */\nfunction htmlToMarkdown(html: string, url: string): string {\n try {\n // Parse HTML with JSDOM\n const dom = new JSDOM(html, { url });\n\n // Extract article content with Readability\n const reader = new Readability(dom.window.document);\n const article = reader.parse();\n\n if (!article) {\n // If Readability fails, fall back to body content\n const bodyContent = dom.window.document.body?.textContent || \"\";\n return bodyContent.trim();\n }\n\n // Convert extracted HTML to Markdown\n const turndown = new TurndownService({\n headingStyle: \"atx\",\n codeBlockStyle: \"fenced\",\n });\n\n const markdown = turndown.turndown(article.content || \"\");\n\n // Prepend title if available\n if (article.title) {\n return `# ${article.title}\\n\\n${markdown}`;\n }\n\n return markdown;\n } catch (error) {\n // On error, return error message\n return `Error converting HTML to Markdown: ${error instanceof Error ? error.message : String(error)}`;\n }\n}\n\n// ============================================================================\n// Tool Implementations\n// ============================================================================\n\n/**\n * Tool description for web_search.\n */\nconst WEB_SEARCH_TOOL_DESCRIPTION = `Search the web using Tavily API for current information, news, and documentation.\n\nReturns an array of search results with titles, URLs, relevant excerpts, and relevance scores.\n\nIMPORTANT AGENT INSTRUCTIONS:\n- You MUST synthesize information from search results into a coherent answer\n- NEVER show raw JSON or result objects to the user\n- Cite sources by including URLs in your response\n- If search fails or returns no results, explain this clearly to the user`;\n\n/**\n * Create the web_search tool.\n */\nexport function createWebSearchTool(\n state: DeepAgentState,\n options: {\n backend?: BackendProtocol | BackendFactory;\n onEvent?: EventCallback;\n toolResultEvictionLimit?: number;\n tavilyApiKey: string;\n }\n) {\n const { backend, onEvent, toolResultEvictionLimit, tavilyApiKey } = options;\n\n return tool({\n description: WEB_SEARCH_TOOL_DESCRIPTION,\n inputSchema: z.object({\n query: z.string().describe(\n \"The search query (be specific and detailed for best results)\"\n ),\n max_results: z\n .number()\n .default(5)\n .describe(\"Number of results to return (1-20)\"),\n topic: z\n .enum([\"general\", \"news\", \"finance\"])\n .default(\"general\")\n .describe(\"Search topic category\"),\n include_raw_content: z\n .boolean()\n .default(false)\n .describe(\"Include full page content (warning: uses more tokens)\"),\n }),\n execute: async ({ query, max_results, topic, include_raw_content }, { toolCallId }) => {\n // Emit start event\n if (onEvent) {\n onEvent(createWebSearchStartEvent(query));\n }\n\n try {\n // Initialize Tavily client\n const tvly = tavily({ apiKey: tavilyApiKey });\n\n // Perform search\n const response = await tvly.search(query, {\n maxResults: max_results,\n topic,\n includeRawContent: include_raw_content ? 
\"text\" : false,\n });\n\n // Format results\n const results = response.results || [];\n const formattedResults = results\n .map(\n (r: any, i: number) =>\n `## Result ${i + 1}: ${r.title}\\n` +\n `URL: ${r.url}\\n` +\n `Score: ${r.score?.toFixed(2) || \"N/A\"}\\n` +\n `Content: ${r.content}\\n`\n )\n .join(\"\\n---\\n\\n\");\n\n const output = `Found ${results.length} results for query: \"${query}\"\\n\\n${formattedResults}`;\n\n // Emit finish event\n if (onEvent) {\n onEvent(createWebSearchFinishEvent(query, results.length));\n }\n\n // Evict if needed\n if (toolResultEvictionLimit && toolResultEvictionLimit > 0 && backend) {\n const resolvedBackend = getBackend(backend, state);\n if (resolvedBackend) {\n const evictResult = await evictToolResult({\n result: output,\n toolCallId: toolCallId || `web_search_${Date.now()}`,\n toolName: \"web_search\",\n backend: resolvedBackend,\n tokenLimit: toolResultEvictionLimit,\n });\n return evictResult.content;\n }\n }\n\n return output;\n } catch (error: unknown) {\n const err = error as Error;\n const errorMessage = WEB_SEARCH_ERROR(err.message);\n\n // Emit finish event with 0 results (error case)\n if (onEvent) {\n onEvent(createWebSearchFinishEvent(query, 0));\n }\n\n return errorMessage;\n }\n },\n });\n}\n\n/**\n * Tool description for http_request.\n */\nconst HTTP_REQUEST_TOOL_DESCRIPTION = `Make HTTP requests to APIs and web services.\n\nSupports GET, POST, PUT, DELETE, PATCH methods with custom headers, query parameters, and request bodies.\n\nReturns structured response with status code, headers, and parsed content (JSON or text).`;\n\n/**\n * Create the http_request tool.\n */\nexport function createHttpRequestTool(\n state: DeepAgentState,\n options: {\n backend?: BackendProtocol | BackendFactory;\n onEvent?: EventCallback;\n toolResultEvictionLimit?: number;\n defaultTimeout: number;\n }\n) {\n const { backend, onEvent, toolResultEvictionLimit, defaultTimeout } = options;\n\n return tool({\n description: HTTP_REQUEST_TOOL_DESCRIPTION,\n inputSchema: z.object({\n url: z.string().url().describe(\"Target URL (must be valid HTTP/HTTPS URL)\"),\n method: z\n .enum([\"GET\", \"POST\", \"PUT\", \"DELETE\", \"PATCH\"])\n .default(\"GET\")\n .describe(\"HTTP method\"),\n headers: z\n .record(z.string())\n .optional()\n .describe(\"HTTP headers as key-value pairs\"),\n body: z\n .union([z.string(), z.record(z.any())])\n .optional()\n .describe(\"Request body (string or JSON object)\"),\n params: z\n .record(z.string())\n .optional()\n .describe(\"URL query parameters as key-value pairs\"),\n timeout: z\n .number()\n .default(defaultTimeout)\n .describe(\"Request timeout in seconds\"),\n }),\n execute: async ({ url, method, headers, body, params, timeout }, { toolCallId }) => {\n // Emit start event\n if (onEvent) {\n onEvent(createHttpRequestStartEvent(url, method));\n }\n\n try {\n // Build URL with query params\n const urlObj = new URL(url);\n if (params) {\n Object.entries(params).forEach(([key, value]) => {\n urlObj.searchParams.append(key, value);\n });\n }\n\n // Build request options\n const requestOptions: RequestInit = {\n method,\n headers: headers || {},\n signal: AbortSignal.timeout(timeout * 1000),\n };\n\n // Add body if provided\n if (body) {\n if (typeof body === \"string\") {\n requestOptions.body = body;\n } else {\n requestOptions.body = JSON.stringify(body);\n (requestOptions.headers as Record<string, string>)[\"Content-Type\"] =\n \"application/json\";\n }\n }\n\n // Execute request\n const response = await 
fetch(urlObj.toString(), requestOptions);\n\n // Parse response\n const contentType = response.headers.get(\"content-type\") || \"\";\n let content: any;\n\n if (contentType.includes(\"application/json\")) {\n try {\n content = await response.json();\n } catch {\n content = await response.text();\n }\n } else {\n content = await response.text();\n }\n\n // Format response\n const formattedOutput =\n `HTTP ${method} ${url}\\n` +\n `Status: ${response.status}\\n` +\n `Success: ${response.ok}\\n` +\n `Content:\\n${typeof content === \"string\" ? content : JSON.stringify(content, null, 2)}`;\n\n // Emit finish event\n if (onEvent) {\n onEvent(createHttpRequestFinishEvent(response.url, response.status));\n }\n\n // Evict if needed\n if (toolResultEvictionLimit && toolResultEvictionLimit > 0 && backend) {\n const resolvedBackend = getBackend(backend, state);\n if (resolvedBackend) {\n const evictResult = await evictToolResult({\n result: formattedOutput,\n toolCallId: toolCallId || `http_request_${Date.now()}`,\n toolName: \"http_request\",\n backend: resolvedBackend,\n tokenLimit: toolResultEvictionLimit,\n });\n return evictResult.content;\n }\n }\n\n return formattedOutput;\n } catch (error: unknown) {\n const err = error as Error;\n let errorMessage: string;\n\n if (err.name === \"TimeoutError\" || err.name === \"AbortError\") {\n errorMessage = REQUEST_TIMEOUT(timeout);\n } else {\n errorMessage = `HTTP request error: ${err.message}`;\n }\n\n // Emit finish event with error status\n if (onEvent) {\n onEvent(createHttpRequestFinishEvent(url, 0));\n }\n\n return errorMessage;\n }\n },\n });\n}\n\n/**\n * Tool description for fetch_url.\n */\nconst FETCH_URL_TOOL_DESCRIPTION = `Fetch web page content and convert HTML to clean Markdown format.\n\nUses Mozilla Readability to extract main article content and Turndown to convert to Markdown.\n\nReturns the page content as formatted Markdown, suitable for analysis and summarization.\n\nIMPORTANT AGENT INSTRUCTIONS:\n- Use this tool to read documentation, articles, and web pages\n- The content is already cleaned and formatted as Markdown\n- Cite the URL when referencing fetched content`;\n\n/**\n * Create the fetch_url tool.\n */\nexport function createFetchUrlTool(\n state: DeepAgentState,\n options: {\n backend?: BackendProtocol | BackendFactory;\n onEvent?: EventCallback;\n toolResultEvictionLimit?: number;\n defaultTimeout: number;\n }\n) {\n const { backend, onEvent, toolResultEvictionLimit, defaultTimeout } = options;\n\n return tool({\n description: FETCH_URL_TOOL_DESCRIPTION,\n inputSchema: z.object({\n url: z.string().url().describe(\"The URL to fetch (must be valid HTTP/HTTPS URL)\"),\n timeout: z\n .number()\n .default(defaultTimeout)\n .describe(\"Request timeout in seconds\"),\n extract_article: z\n .boolean()\n .default(true)\n .describe(\n \"Extract main article content using Readability (disable for non-article pages)\"\n ),\n }),\n execute: async ({ url, timeout, extract_article }, { toolCallId }) => {\n // Emit start event\n if (onEvent) {\n onEvent(createFetchUrlStartEvent(url));\n }\n\n try {\n // Fetch HTML\n const response = await fetch(url, {\n signal: AbortSignal.timeout(timeout * 1000),\n headers: {\n \"User-Agent\": \"Mozilla/5.0 (compatible; DeepAgents/1.0)\",\n },\n });\n\n if (!response.ok) {\n const errorMsg = `HTTP error: ${response.status} ${response.statusText}`;\n\n if (onEvent) {\n onEvent(createFetchUrlFinishEvent(response.url, false));\n }\n\n return errorMsg;\n }\n\n const html = await response.text();\n\n // 
Parse DOM\n const dom = new JSDOM(html, { url });\n\n let contentToConvert = html;\n\n // Extract article content if requested\n if (extract_article) {\n try {\n const reader = new Readability(dom.window.document);\n const article = reader.parse();\n\n if (article && article.content) {\n contentToConvert = article.content;\n }\n } catch (readabilityError) {\n // If Readability fails, fall back to full HTML\n console.warn(\"Readability extraction failed, using full HTML\");\n }\n }\n\n // Convert to Markdown\n const turndownService = new TurndownService({\n headingStyle: \"atx\",\n codeBlockStyle: \"fenced\",\n });\n\n const markdown = turndownService.turndown(contentToConvert);\n\n // Emit finish event\n if (onEvent) {\n onEvent(createFetchUrlFinishEvent(response.url, true));\n }\n\n // Evict large content\n if (toolResultEvictionLimit && toolResultEvictionLimit > 0 && backend) {\n const resolvedBackend = getBackend(backend, state);\n if (resolvedBackend) {\n const evictResult = await evictToolResult({\n result: markdown,\n toolCallId: toolCallId || `fetch_url_${Date.now()}`,\n toolName: \"fetch_url\",\n backend: resolvedBackend,\n tokenLimit: toolResultEvictionLimit,\n });\n\n return evictResult.content;\n }\n }\n\n return markdown;\n } catch (error: unknown) {\n const err = error as Error;\n let errorMessage: string;\n\n if (err.name === \"TimeoutError\" || err.name === \"AbortError\") {\n errorMessage = REQUEST_TIMEOUT(timeout);\n } else {\n errorMessage = `Error fetching URL: ${err.message}`;\n }\n\n // Emit error finish event\n if (onEvent) {\n onEvent(createFetchUrlFinishEvent(url, false));\n }\n\n return errorMessage;\n }\n },\n });\n}\n\n// ============================================================================\n// Main Factory Function\n// ============================================================================\n\n/**\n * Options for creating web tools.\n */\nexport interface CreateWebToolsOptions {\n /** Backend for filesystem operations (for eviction) */\n backend?: BackendProtocol | BackendFactory;\n /** Callback for emitting events */\n onEvent?: EventCallback;\n /** Token limit before evicting large tool results (default: disabled) */\n toolResultEvictionLimit?: number;\n /** Tavily API key (defaults to process.env.TAVILY_API_KEY) */\n tavilyApiKey?: string;\n /** Default timeout for HTTP requests in seconds (default: 30) */\n defaultTimeout?: number;\n}\n\n/**\n * Create all web tools (web_search, http_request, fetch_url).\n * Tools are only created if TAVILY_API_KEY is available.\n */\nexport function createWebTools(\n state: DeepAgentState,\n options?: CreateWebToolsOptions\n): Record<string, any> {\n const {\n backend,\n onEvent,\n toolResultEvictionLimit,\n tavilyApiKey = process.env.TAVILY_API_KEY,\n defaultTimeout = DEFAULT_TIMEOUT_SECONDS,\n } = options || {};\n\n // Return empty object if no Tavily API key\n if (!tavilyApiKey) {\n console.warn(\n \"Tavily API key not found. Web tools (web_search, fetch_url, http_request) will not be available. 
\" +\n \"Set TAVILY_API_KEY environment variable to enable web tools.\"\n );\n return {};\n }\n\n return {\n web_search: createWebSearchTool(state, { backend, onEvent, toolResultEvictionLimit, tavilyApiKey }),\n http_request: createHttpRequestTool(state, { backend, onEvent, toolResultEvictionLimit, defaultTimeout }),\n fetch_url: createFetchUrlTool(state, { backend, onEvent, toolResultEvictionLimit, defaultTimeout }),\n };\n}\n\n// ============================================================================\n// Exports\n// ============================================================================\n\nexport { htmlToMarkdown };\n\n// ============================================================================\n// Individual Tool References\n// ============================================================================\n\n/**\n * Individual builtin tool references for selective subagent configuration.\n * These are references to the creator functions, not instances.\n */\nexport const web_search = createWebSearchTool;\nexport const http_request = createHttpRequestTool;\nexport const fetch_url = createFetchUrlTool;\n","/**\n * Execute tool for running shell commands in sandbox backends.\n *\n * This tool is only available when the backend implements SandboxBackendProtocol.\n */\n\nimport { tool } from \"ai\";\nimport { z } from \"zod\";\nimport type { SandboxBackendProtocol, EventCallback } from \"../types\";\n\n/**\n * Tool description for the execute tool.\n */\nconst EXECUTE_TOOL_DESCRIPTION = `Execute a shell command in the sandbox environment.\n\nUse this tool to:\n- Run build commands (npm install, npm run build, bun install)\n- Run tests (npm test, bun test, pytest)\n- Execute scripts (node script.js, python script.py)\n- Check system state (ls, cat, pwd, which)\n- Install dependencies\n- Run any shell command\n\nThe command runs in the sandbox's working directory. 
Commands have a timeout limit.\n\nIMPORTANT:\n- Always check the exit code to determine success (0 = success)\n- Long-running commands may timeout\n- Use && to chain commands that depend on each other\n- Use ; to run commands sequentially regardless of success`;\n\n/**\n * Options for creating the execute tool.\n */\nexport interface CreateExecuteToolOptions {\n /** The sandbox backend to execute commands in */\n backend: SandboxBackendProtocol;\n /** Optional callback for emitting events */\n onEvent?: EventCallback;\n /** Optional custom description for the tool */\n description?: string;\n}\n\n/**\n * Create an execute tool for running shell commands.\n *\n * @param options - Options including the sandbox backend and optional event callback\n * @returns An AI SDK tool that executes shell commands\n *\n * @example Basic usage\n * ```typescript\n * import { LocalSandbox, createExecuteTool } from 'deepagentsdk';\n *\n * const sandbox = new LocalSandbox({ cwd: './workspace' });\n * const executeTool = createExecuteTool({ backend: sandbox });\n *\n * // Use with agent\n * const agent = createDeepAgent({\n * model: anthropic('claude-sonnet-4-20250514'),\n * backend: sandbox,\n * tools: { execute: executeTool },\n * });\n * ```\n *\n * @example With event streaming\n * ```typescript\n * const executeTool = createExecuteTool({\n * backend: sandbox,\n * onEvent: (event) => {\n * if (event.type === 'execute-start') {\n * console.log(`Running: ${event.command}`);\n * } else if (event.type === 'execute-finish') {\n * console.log(`Exit code: ${event.exitCode}`);\n * }\n * },\n * });\n * ```\n */\nexport function createExecuteTool(options: CreateExecuteToolOptions) {\n const { backend, onEvent, description } = options;\n\n return tool({\n description: description || EXECUTE_TOOL_DESCRIPTION,\n inputSchema: z.object({\n command: z\n .string()\n .describe(\"The shell command to execute (e.g., 'npm install', 'ls -la', 'cat file.txt')\"),\n }),\n execute: async ({ command }) => {\n // Emit execute-start event\n if (onEvent) {\n onEvent({\n type: \"execute-start\",\n command,\n sandboxId: backend.id,\n });\n }\n\n // Execute the command\n const result = await backend.execute(command);\n\n // Emit execute-finish event\n if (onEvent) {\n onEvent({\n type: \"execute-finish\",\n command,\n exitCode: result.exitCode,\n truncated: result.truncated,\n sandboxId: backend.id,\n });\n }\n\n // Format the response\n const parts: string[] = [];\n\n if (result.output) {\n parts.push(result.output);\n }\n\n // Add exit code information\n if (result.exitCode === 0) {\n parts.push(`\\n[Exit code: 0 (success)]`);\n } else if (result.exitCode !== null) {\n parts.push(`\\n[Exit code: ${result.exitCode} (failure)]`);\n } else {\n parts.push(`\\n[Exit code: unknown (possibly timed out)]`);\n }\n\n // Note if output was truncated\n if (result.truncated) {\n parts.push(`[Output truncated due to size limit]`);\n }\n\n return parts.join(\"\");\n },\n });\n}\n\n/**\n * Convenience function to create execute tool from just a backend.\n * Useful for simple cases without event handling.\n *\n * @param backend - The sandbox backend\n * @returns An AI SDK tool that executes shell commands\n *\n * @example\n * ```typescript\n * const sandbox = new LocalSandbox({ cwd: './workspace' });\n * const tools = {\n * execute: createExecuteToolFromBackend(sandbox),\n * };\n * ```\n */\nexport function createExecuteToolFromBackend(backend: SandboxBackendProtocol) {\n return createExecuteTool({ backend });\n}\n\n// 
============================================================================\n// Individual Tool Reference\n// ============================================================================\n\n/**\n * Individual builtin tool reference for selective subagent configuration.\n * This is a reference to the creator function, not an instance.\n */\nexport const execute = createExecuteTool;\n\n","/**\n * Subagent tool for task delegation using AI SDK v6 ToolLoopAgent.\n */\n\nimport { tool, ToolLoopAgent, stepCountIs, Output, type ToolSet, type LanguageModel } from \"ai\";\nimport { z } from \"zod\";\nimport type {\n SubAgent,\n DeepAgentState,\n BackendProtocol,\n BackendFactory,\n EventCallback,\n InterruptOnConfig,\n CreateDeepAgentParams,\n BuiltinToolCreator,\n SubagentToolConfig,\n} from \"../types\";\nimport { applyInterruptConfig } from \"../utils/approval\";\nimport {\n DEFAULT_SUBAGENT_MAX_STEPS,\n DEFAULT_TIMEOUT_SECONDS,\n} from \"../constants/limits\";\nimport {\n getTaskToolDescription,\n DEFAULT_GENERAL_PURPOSE_DESCRIPTION,\n DEFAULT_SUBAGENT_PROMPT,\n TODO_SYSTEM_PROMPT,\n FILESYSTEM_SYSTEM_PROMPT,\n BASE_PROMPT,\n} from \"../prompts\";\nimport {\n createSubagentStartEvent,\n createSubagentStepEvent,\n createSubagentFinishEvent,\n} from \"../utils/events\";\nimport { createTodosTool } from \"./todos\";\nimport { createFilesystemTools } from \"./filesystem\";\nimport {\n createWebSearchTool,\n createHttpRequestTool,\n createFetchUrlTool,\n} from \"./web\";\nimport {\n createLsTool,\n createReadFileTool,\n createWriteFileTool,\n createEditFileTool,\n createGlobTool,\n createGrepTool,\n} from \"./filesystem\";\nimport { createExecuteTool } from \"./execute\";\n\n// ============================================================================\n// Helper Functions for Builtin Tool Instantiation\n// ============================================================================\n\n/**\n * Check if a value is a builtin tool creator function.\n */\nfunction isBuiltinToolCreator(value: any): value is BuiltinToolCreator {\n return typeof value === \"function\" && (\n value === createWebSearchTool ||\n value === createHttpRequestTool ||\n value === createFetchUrlTool ||\n value === createLsTool ||\n value === createReadFileTool ||\n value === createWriteFileTool ||\n value === createEditFileTool ||\n value === createGlobTool ||\n value === createGrepTool ||\n value === createTodosTool ||\n value === createExecuteTool\n );\n}\n\n/**\n * Instantiate a builtin tool creator with the given context.\n */\nfunction instantiateBuiltinTool(\n creator: BuiltinToolCreator,\n state: DeepAgentState,\n options: {\n backend?: BackendProtocol | BackendFactory;\n onEvent?: EventCallback;\n toolResultEvictionLimit?: number;\n }\n): ToolSet {\n const { backend, onEvent, toolResultEvictionLimit } = options;\n\n // Web tools - require API key and timeout defaults\n const tavilyApiKey = process.env.TAVILY_API_KEY || \"\";\n const defaultTimeout = DEFAULT_TIMEOUT_SECONDS;\n\n if (creator === createWebSearchTool) {\n if (!tavilyApiKey) {\n console.warn(\"web_search tool requested but TAVILY_API_KEY not set\");\n return {};\n }\n return {\n web_search: createWebSearchTool(state, { backend, onEvent, toolResultEvictionLimit, tavilyApiKey }),\n };\n }\n if (creator === createHttpRequestTool) {\n return {\n http_request: createHttpRequestTool(state, { backend, onEvent, toolResultEvictionLimit, defaultTimeout }),\n };\n }\n if (creator === createFetchUrlTool) {\n return {\n fetch_url: createFetchUrlTool(state, { backend, 
onEvent, toolResultEvictionLimit, defaultTimeout }),\n };\n }\n\n // Filesystem tools\n if (creator === createLsTool) {\n return {\n ls: createLsTool(state, backend!, onEvent),\n };\n }\n if (creator === createReadFileTool) {\n return {\n read_file: createReadFileTool(state, backend!, toolResultEvictionLimit, onEvent),\n };\n }\n if (creator === createWriteFileTool) {\n return {\n write_file: createWriteFileTool(state, backend!, onEvent),\n };\n }\n if (creator === createEditFileTool) {\n return {\n edit_file: createEditFileTool(state, backend!, onEvent),\n };\n }\n if (creator === createGlobTool) {\n return {\n glob: createGlobTool(state, backend!, onEvent),\n };\n }\n if (creator === createGrepTool) {\n return {\n grep: createGrepTool(state, backend!, toolResultEvictionLimit, onEvent),\n };\n }\n\n // Utility tools\n if (creator === createTodosTool) {\n return {\n write_todos: createTodosTool(state, onEvent),\n };\n }\n if (creator === createExecuteTool) {\n // Execute tool requires special handling - needs a sandbox backend\n throw new Error(\"execute tool cannot be used via selective tools - it requires a SandboxBackendProtocol\");\n }\n\n throw new Error(`Unknown builtin tool creator: ${creator}`);\n}\n\n/**\n * Process subagent tool configuration (array or ToolSet) into a ToolSet.\n */\nfunction processSubagentTools(\n toolConfig: ToolSet | SubagentToolConfig[] | undefined,\n state: DeepAgentState,\n options: {\n backend?: BackendProtocol | BackendFactory;\n onEvent?: EventCallback;\n toolResultEvictionLimit?: number;\n }\n): ToolSet {\n if (!toolConfig) {\n return {};\n }\n\n // If it's already a ToolSet object, return as-is\n if (!Array.isArray(toolConfig)) {\n return toolConfig;\n }\n\n // Process array of SubagentToolConfig items\n let result: ToolSet = {};\n for (const item of toolConfig) {\n if (isBuiltinToolCreator(item)) {\n // Instantiate builtin tool creator\n const instantiated = instantiateBuiltinTool(item, state, options);\n result = { ...result, ...instantiated };\n } else if (typeof item === \"object\" && item !== null) {\n // Assume it's a ToolSet object\n result = { ...result, ...item };\n }\n // Silently skip invalid items\n }\n\n return result;\n}\n\n/**\n * Options for creating the subagent tool.\n */\nexport interface CreateSubagentToolOptions {\n /** Default model for subagents (AI SDK LanguageModel instance) */\n defaultModel: LanguageModel;\n /** Default tools available to all subagents */\n defaultTools?: ToolSet;\n /** List of custom subagent specifications */\n subagents?: SubAgent[];\n /** Whether to include the general-purpose agent */\n includeGeneralPurposeAgent?: boolean;\n /** Backend for filesystem operations */\n backend?: BackendProtocol | BackendFactory;\n /** Custom description for the task tool */\n taskDescription?: string | null;\n /** Optional callback for emitting events */\n onEvent?: EventCallback;\n /** Interrupt config to pass to subagents */\n interruptOn?: InterruptOnConfig;\n /** Parent agent options to pass through to subagents */\n parentGenerationOptions?: CreateDeepAgentParams[\"generationOptions\"];\n parentAdvancedOptions?: CreateDeepAgentParams[\"advancedOptions\"];\n}\n\n/**\n * Build the system prompt for a subagent.\n */\nfunction buildSubagentSystemPrompt(customPrompt: string): string {\n return `${customPrompt}\n\n${BASE_PROMPT}\n\n${TODO_SYSTEM_PROMPT}\n\n${FILESYSTEM_SYSTEM_PROMPT}`;\n}\n\n/**\n * Create the task tool for spawning subagents using ToolLoopAgent.\n */\nexport function createSubagentTool(\n state: 
DeepAgentState,\n options: CreateSubagentToolOptions\n) {\n const {\n defaultModel,\n defaultTools = {},\n subagents = [],\n includeGeneralPurposeAgent = true,\n backend,\n taskDescription = null,\n onEvent,\n interruptOn,\n parentGenerationOptions,\n parentAdvancedOptions,\n } = options;\n\n // Build subagent registry (store raw tool config, process during execution)\n const subagentRegistry: Record<\n string,\n {\n systemPrompt: string;\n toolConfig: ToolSet | SubagentToolConfig[] | undefined;\n model: LanguageModel;\n output?: { schema: z.ZodType<any>; description?: string };\n }\n > = {};\n const subagentDescriptions: string[] = [];\n\n // Add general-purpose agent if enabled\n if (includeGeneralPurposeAgent) {\n subagentRegistry[\"general-purpose\"] = {\n systemPrompt: buildSubagentSystemPrompt(DEFAULT_SUBAGENT_PROMPT),\n toolConfig: defaultTools,\n model: defaultModel,\n };\n subagentDescriptions.push(\n `- general-purpose: ${DEFAULT_GENERAL_PURPOSE_DESCRIPTION}`\n );\n }\n\n // Add custom subagents (store raw tool config)\n for (const subagent of subagents) {\n subagentRegistry[subagent.name] = {\n systemPrompt: buildSubagentSystemPrompt(subagent.systemPrompt),\n toolConfig: subagent.tools || defaultTools,\n model: subagent.model || defaultModel,\n output: subagent.output,\n };\n subagentDescriptions.push(`- ${subagent.name}: ${subagent.description}`);\n }\n\n const finalTaskDescription =\n taskDescription || getTaskToolDescription(subagentDescriptions);\n\n return tool({\n description: finalTaskDescription,\n inputSchema: z.object({\n description: z\n .string()\n .describe(\"The task to execute with the selected agent\"),\n subagent_type: z\n .string()\n .describe(\n `Name of the agent to use. Available: ${Object.keys(subagentRegistry).join(\", \")}`\n ),\n }),\n execute: async ({ description, subagent_type }) => {\n // Validate subagent type\n if (!(subagent_type in subagentRegistry)) {\n const allowedTypes = Object.keys(subagentRegistry)\n .map((k) => `\\`${k}\\``)\n .join(\", \");\n return `Error: invoked agent of type ${subagent_type}, the only allowed types are ${allowedTypes}`;\n }\n\n const subagentConfig = subagentRegistry[subagent_type]!;\n\n // Find the subagent spec to get its specific options\n const subagentSpec = subagents.find((sa) => sa.name === subagent_type);\n const subagentInterruptOn = subagentSpec?.interruptOn ?? 
interruptOn;\n\n // Merge options: subagent-specific options override parent options\n const mergedGenerationOptions = {\n ...parentGenerationOptions,\n ...subagentSpec?.generationOptions,\n };\n\n const mergedAdvancedOptions = {\n ...parentAdvancedOptions,\n ...subagentSpec?.advancedOptions,\n };\n\n // Emit subagent start event\n if (onEvent) {\n onEvent(createSubagentStartEvent(subagent_type, description));\n }\n\n // Create a fresh state for the subagent (shares files but have own todos)\n const subagentState: DeepAgentState = {\n todos: [],\n files: state.files, // Share files with parent\n };\n\n // Process subagent tool configuration (handles both arrays and ToolSet objects)\n const customTools = processSubagentTools(\n subagentConfig.toolConfig,\n subagentState,\n { backend, onEvent }\n );\n\n // Build default tools (todos + filesystem) that all subagents get\n const todosTool = createTodosTool(subagentState, onEvent);\n const filesystemTools = createFilesystemTools(subagentState, backend, onEvent);\n\n // Combine default tools with custom tools\n // Custom tools come last so they can override defaults if needed\n let allTools: ToolSet = {\n write_todos: todosTool,\n ...filesystemTools,\n ...customTools,\n };\n\n // Apply interruptOn config - use subagent's own config if provided, otherwise parent's\n allTools = applyInterruptConfig(allTools, subagentInterruptOn);\n\n try {\n // Create and run a ToolLoopAgent for the subagent\n const subagentSettings: any = {\n model: subagentConfig.model,\n instructions: subagentConfig.systemPrompt,\n tools: allTools,\n stopWhen: stepCountIs(DEFAULT_SUBAGENT_MAX_STEPS), // Enforce max steps limit for subagents\n // Pass output configuration if subagent has one using AI SDK Output helper\n ...(subagentConfig.output ? 
{ output: Output.object(subagentConfig.output) } : {}),\n };\n\n // Add merged generation options\n if (Object.keys(mergedGenerationOptions).length > 0) {\n Object.assign(subagentSettings, mergedGenerationOptions);\n }\n\n // Add merged advanced options (excluding toolChoice and activeTools as per plan)\n if (mergedAdvancedOptions) {\n const { toolChoice, activeTools, ...safeAdvancedOptions } = mergedAdvancedOptions;\n Object.assign(subagentSettings, safeAdvancedOptions);\n }\n\n // Track subagent step count for events\n let subagentStepCount = 0;\n\n // Add onStepFinish callback to settings to capture steps\n subagentSettings.onStepFinish = async ({ toolCalls, toolResults }: { toolCalls: any[]; toolResults: any[] }) => {\n // Emit subagent step event with tool calls\n if (onEvent && toolCalls && toolCalls.length > 0) {\n // Map tool calls with their results\n const toolCallsWithResults = toolCalls.map((tc: any, index: number) => ({\n toolName: tc.toolName,\n args: tc.args,\n result: toolResults[index],\n }));\n\n onEvent(createSubagentStepEvent(subagentStepCount++, toolCallsWithResults));\n }\n };\n\n const subagentAgent = new ToolLoopAgent(subagentSettings);\n\n const result = await subagentAgent.generate({\n prompt: description,\n });\n\n // Merge any file changes back to parent state\n state.files = { ...state.files, ...subagentState.files };\n\n const resultText = result.text || \"Task completed successfully.\";\n\n // Format output for parent agent\n let formattedResult = resultText;\n\n // If subagent has structured output, include it in the response\n if (subagentConfig.output && 'output' in result && result.output) {\n formattedResult = `${resultText}\\n\\n[Structured Output]\\n${JSON.stringify(result.output, null, 2)}`;\n }\n\n // Emit subagent finish event\n if (onEvent) {\n onEvent(createSubagentFinishEvent(subagent_type, formattedResult));\n }\n\n return formattedResult;\n } catch (error: unknown) {\n const err = error as Error;\n const errorMessage = `Error executing subagent: ${err.message}`;\n\n // Emit subagent finish event with error\n if (onEvent) {\n onEvent(createSubagentFinishEvent(subagent_type, errorMessage));\n }\n\n return errorMessage;\n }\n },\n });\n}\n","/**\n * Utility to patch dangling tool calls in message history.\n *\n * When an AI message contains tool_calls but subsequent messages don't include\n * the corresponding tool result responses, this utility adds synthetic\n * tool result messages saying the tool call was cancelled.\n *\n * This prevents errors when sending the conversation history to the model,\n * as models expect every tool call to have a corresponding result.\n */\n\nimport type { ModelMessage } from \"ai\";\n\n/**\n * Check if a message is an assistant message with tool calls.\n */\nfunction hasToolCalls(message: ModelMessage): boolean {\n if (message.role !== \"assistant\") return false;\n\n // Check if content contains tool calls\n const content = message.content;\n if (Array.isArray(content)) {\n return content.some(\n (part) => typeof part === \"object\" && part !== null && \"type\" in part && part.type === \"tool-call\"\n );\n }\n\n return false;\n}\n\n/**\n * Extract tool call IDs from an assistant message.\n */\nfunction getToolCallIds(message: ModelMessage): string[] {\n if (message.role !== \"assistant\") return [];\n\n const content = message.content;\n if (!Array.isArray(content)) return [];\n\n const ids: string[] = [];\n for (const part of content) {\n if (\n typeof part === \"object\" &&\n part !== null &&\n \"type\" in part 
&&\n part.type === \"tool-call\" &&\n \"toolCallId\" in part\n ) {\n ids.push(part.toolCallId as string);\n }\n }\n\n return ids;\n}\n\n/**\n * Check if a message is a tool result for a specific tool call ID.\n */\nfunction isToolResultFor(message: ModelMessage, toolCallId: string): boolean {\n if (message.role !== \"tool\") return false;\n\n // Tool messages should have a toolCallId\n if (\"toolCallId\" in message && message.toolCallId === toolCallId) {\n return true;\n }\n\n // Also check content array for tool-result parts\n const content = message.content;\n if (Array.isArray(content)) {\n return content.some(\n (part) =>\n typeof part === \"object\" &&\n part !== null &&\n \"type\" in part &&\n part.type === \"tool-result\" &&\n \"toolCallId\" in part &&\n part.toolCallId === toolCallId\n );\n }\n\n return false;\n}\n\n/**\n * Create a synthetic tool result message for a cancelled tool call.\n */\nfunction createCancelledToolResult(\n toolCallId: string,\n toolName: string\n): ModelMessage {\n const message: ModelMessage = {\n role: \"tool\",\n content: [\n {\n type: \"tool-result\" as const,\n toolCallId,\n toolName,\n output: {\n type: \"text\" as const,\n value: `Tool call ${toolName} with id ${toolCallId} was cancelled - another message came in before it could be completed.`,\n },\n },\n ],\n };\n return message;\n}\n\n/**\n * Get tool name from a tool call part.\n */\nfunction getToolName(message: ModelMessage, toolCallId: string): string {\n if (message.role !== \"assistant\") return \"unknown\";\n\n const content = message.content;\n if (!Array.isArray(content)) return \"unknown\";\n\n for (const part of content) {\n if (\n typeof part === \"object\" &&\n part !== null &&\n \"type\" in part &&\n part.type === \"tool-call\" &&\n \"toolCallId\" in part &&\n part.toolCallId === toolCallId &&\n \"toolName\" in part\n ) {\n return part.toolName as string;\n }\n }\n\n return \"unknown\";\n}\n\n/**\n * Patch dangling tool calls in a message array.\n *\n * Scans for assistant messages with tool_calls that don't have corresponding\n * tool result messages, and adds synthetic \"cancelled\" responses.\n *\n * @param messages - Array of messages to patch\n * @returns New array with patched messages (original array is not modified)\n *\n * @example\n * ```typescript\n * const messages = [\n * { role: \"user\", content: \"Hello\" },\n * { role: \"assistant\", content: [{ type: \"tool-call\", toolCallId: \"1\", toolName: \"search\" }] },\n * // Missing tool result for tool call \"1\"\n * { role: \"user\", content: \"Never mind\" },\n * ];\n *\n * const patched = patchToolCalls(messages);\n * // patched now includes a synthetic tool result for the dangling call\n * ```\n */\nexport function patchToolCalls(messages: ModelMessage[]): ModelMessage[] {\n if (!messages || messages.length === 0) {\n return messages;\n }\n\n const result: ModelMessage[] = [];\n\n for (let i = 0; i < messages.length; i++) {\n const message = messages[i];\n if (!message) continue;\n \n result.push(message);\n\n // Check if this is an assistant message with tool calls\n if (hasToolCalls(message)) {\n const toolCallIds = getToolCallIds(message);\n\n for (const toolCallId of toolCallIds) {\n // Look for a corresponding tool result in subsequent messages\n let hasResult = false;\n for (let j = i + 1; j < messages.length; j++) {\n const subsequentMsg = messages[j];\n if (subsequentMsg && isToolResultFor(subsequentMsg, toolCallId)) {\n hasResult = true;\n break;\n }\n }\n\n // If no result found, add a synthetic cancelled 
result\n if (!hasResult) {\n const toolName = getToolName(message, toolCallId);\n result.push(createCancelledToolResult(toolCallId, toolName));\n }\n }\n }\n }\n\n return result;\n}\n\n/**\n * Check if messages have any dangling tool calls.\n *\n * @param messages - Array of messages to check\n * @returns True if there are dangling tool calls\n */\nexport function hasDanglingToolCalls(messages: ModelMessage[]): boolean {\n if (!messages || messages.length === 0) {\n return false;\n }\n\n for (let i = 0; i < messages.length; i++) {\n const message = messages[i];\n if (!message) continue;\n\n if (hasToolCalls(message)) {\n const toolCallIds = getToolCallIds(message);\n\n for (const toolCallId of toolCallIds) {\n let hasResult = false;\n for (let j = i + 1; j < messages.length; j++) {\n const subsequentMsg = messages[j];\n if (subsequentMsg && isToolResultFor(subsequentMsg, toolCallId)) {\n hasResult = true;\n break;\n }\n }\n\n if (!hasResult) {\n return true;\n }\n }\n }\n }\n\n return false;\n}\n\n","/**\n * Conversation summarization utility.\n *\n * Automatically summarizes older messages when approaching token limits\n * to prevent context overflow while preserving important context.\n */\n\nimport { generateText, type LanguageModel } from \"ai\";\nimport type { ModelMessage } from \"../types\";\nimport { estimateTokens } from \"./eviction\";\nimport {\n DEFAULT_SUMMARIZATION_THRESHOLD as CENTRALIZED_THRESHOLD,\n DEFAULT_KEEP_MESSAGES as CENTRALIZED_KEEP,\n} from \"../constants/limits\";\n\n/**\n * Default token threshold before triggering summarization.\n * 170k tokens is a safe threshold for most models.\n */\nexport const DEFAULT_SUMMARIZATION_THRESHOLD = CENTRALIZED_THRESHOLD;\n\n/**\n * Default number of recent messages to keep intact.\n */\nexport const DEFAULT_KEEP_MESSAGES = CENTRALIZED_KEEP;\n\n/**\n * Options for summarization.\n */\nexport interface SummarizationOptions {\n /** Model to use for summarization (AI SDK LanguageModel instance) */\n model: LanguageModel;\n /** Token threshold to trigger summarization (default: 170000) */\n tokenThreshold?: number;\n /** Number of recent messages to keep intact (default: 6) */\n keepMessages?: number;\n /** Generation options to pass through */\n generationOptions?: any;\n /** Advanced options to pass through */\n advancedOptions?: any;\n}\n\n/**\n * Result of summarization check.\n */\nexport interface SummarizationResult {\n /** Whether summarization was needed */\n summarized: boolean;\n /** The processed messages (either original or with summary) */\n messages: ModelMessage[];\n /** Token count before processing */\n tokensBefore?: number;\n /** Token count after processing */\n tokensAfter?: number;\n}\n\n/**\n * Estimate total tokens in a messages array.\n */\nexport function estimateMessagesTokens(messages: ModelMessage[]): number {\n let total = 0;\n\n for (const message of messages) {\n if (typeof message.content === \"string\") {\n total += estimateTokens(message.content);\n } else if (Array.isArray(message.content)) {\n for (const part of message.content) {\n if (typeof part === \"object\" && part !== null && \"text\" in part) {\n total += estimateTokens(String(part.text));\n }\n }\n }\n }\n\n return total;\n}\n\n/**\n * Extract text content from a message.\n */\nfunction getMessageText(message: ModelMessage): string {\n if (typeof message.content === \"string\") {\n return message.content;\n }\n\n if (Array.isArray(message.content)) {\n return message.content\n .map((part) => {\n if (typeof part === \"object\" && part 
!== null && \"text\" in part) {\n return String(part.text);\n }\n if (typeof part === \"object\" && part !== null && \"type\" in part) {\n if (part.type === \"tool-call\") {\n return `[Tool call: ${(part as { toolName?: string }).toolName || \"unknown\"}]`;\n }\n if (part.type === \"tool-result\") {\n return `[Tool result]`;\n }\n }\n return \"\";\n })\n .filter(Boolean)\n .join(\"\\n\");\n }\n\n return \"\";\n}\n\n/**\n * Format messages for summarization prompt.\n */\nfunction formatMessagesForSummary(messages: ModelMessage[]): string {\n return messages\n .map((msg) => {\n const role = msg.role === \"user\" ? \"User\" : msg.role === \"assistant\" ? \"Assistant\" : \"System\";\n const text = getMessageText(msg);\n return `${role}: ${text}`;\n })\n .join(\"\\n\\n\");\n}\n\n/**\n * Generate a summary of conversation messages.\n */\nasync function generateSummary(\n messages: ModelMessage[],\n model: LanguageModel,\n generationOptions?: any,\n advancedOptions?: any\n): Promise<string> {\n const conversationText = formatMessagesForSummary(messages);\n\n const generateTextOptions: any = {\n model,\n system: `You are a conversation summarizer. Your task is to create a concise but comprehensive summary of the conversation that preserves:\n1. Key decisions and conclusions\n2. Important context and background information\n3. Any tasks or todos mentioned\n4. Technical details that may be referenced later\n5. The overall flow and progression of the conversation\n\nKeep the summary focused and avoid redundancy. The summary should allow someone to understand the conversation context without reading the full history.`,\n prompt: `Please summarize the following conversation:\\n\\n${conversationText}`,\n };\n\n // Add passthrough options\n if (generationOptions) {\n Object.assign(generateTextOptions, generationOptions);\n }\n if (advancedOptions) {\n Object.assign(generateTextOptions, advancedOptions);\n }\n\n const result = await generateText(generateTextOptions);\n return result.text;\n}\n\n/**\n * Summarize older messages when approaching token limits.\n *\n * This function checks if the total tokens in the messages exceed the threshold.\n * If so, it summarizes older messages while keeping recent ones intact.\n *\n * @param messages - Array of conversation messages\n * @param options - Summarization options\n * @returns Processed messages with optional summary\n *\n * @example\n * ```typescript\n * import { anthropic } from '@ai-sdk/anthropic';\n *\n * const result = await summarizeIfNeeded(messages, {\n * model: anthropic('claude-haiku-4-5-20251001'),\n * tokenThreshold: 170000,\n * keepMessages: 6,\n * });\n *\n * if (result.summarized) {\n * console.log(`Reduced from ${result.tokensBefore} to ${result.tokensAfter} tokens`);\n * }\n * ```\n */\nexport async function summarizeIfNeeded(\n messages: ModelMessage[],\n options: SummarizationOptions\n): Promise<SummarizationResult> {\n const {\n model,\n tokenThreshold = DEFAULT_SUMMARIZATION_THRESHOLD,\n keepMessages = DEFAULT_KEEP_MESSAGES,\n } = options;\n\n // Estimate current token count\n const tokensBefore = estimateMessagesTokens(messages);\n\n // Check if summarization is needed\n if (tokensBefore < tokenThreshold) {\n return {\n summarized: false,\n messages,\n tokensBefore,\n };\n }\n\n // Not enough messages to summarize\n if (messages.length <= keepMessages) {\n return {\n summarized: false,\n messages,\n tokensBefore,\n };\n }\n\n // Split messages: older ones to summarize, recent ones to keep\n const messagesToSummarize = 
messages.slice(0, -keepMessages);\n const messagesToKeep = messages.slice(-keepMessages);\n\n // Generate summary\n const summary = await generateSummary(\n messagesToSummarize,\n model,\n options.generationOptions,\n options.advancedOptions\n );\n\n // Create summary message\n const summaryMessage: ModelMessage = {\n role: \"system\",\n content: `[Previous conversation summary]\\n${summary}\\n\\n[End of summary - recent messages follow]`,\n } as ModelMessage;\n\n // Combine summary with recent messages\n const newMessages = [summaryMessage, ...messagesToKeep];\n const tokensAfter = estimateMessagesTokens(newMessages);\n\n return {\n summarized: true,\n messages: newMessages,\n tokensBefore,\n tokensAfter,\n };\n}\n\n/**\n * Check if messages need summarization without performing it.\n */\nexport function needsSummarization(\n messages: ModelMessage[],\n tokenThreshold: number = DEFAULT_SUMMARIZATION_THRESHOLD\n): boolean {\n const tokens = estimateMessagesTokens(messages);\n return tokens >= tokenThreshold;\n}\n\n","/**\n * Deep Agent implementation using Vercel AI SDK v6 ToolLoopAgent.\n */\n\nimport {\n ToolLoopAgent,\n stepCountIs,\n generateText,\n streamText,\n wrapLanguageModel,\n Output,\n type ToolSet,\n type StopCondition,\n type LanguageModel,\n type LanguageModelMiddleware,\n type ToolLoopAgentSettings,\n} from \"ai\";\nimport type { LanguageModelV3 } from \"@ai-sdk/provider\";\nimport type { z } from \"zod\";\nimport { DEFAULT_MAX_STEPS } from \"./constants/limits\";\nimport {\n createCheckpointSavedEvent,\n createCheckpointLoadedEvent,\n} from \"./utils/events\";\nimport type {\n CreateDeepAgentParams,\n DeepAgentState,\n BackendProtocol,\n BackendFactory,\n DeepAgentEvent,\n ErrorEvent as DeepAgentErrorEvent,\n CheckpointLoadedEvent,\n EventCallback,\n StreamWithEventsOptions,\n ModelMessage,\n SandboxBackendProtocol,\n InterruptOnConfig,\n PrepareStepFunction,\n} from \"./types\";\nimport type { BaseCheckpointSaver, Checkpoint, InterruptData } from \"./checkpointer/types\";\nimport { isSandboxBackend } from \"./types\";\nimport {\n BASE_PROMPT,\n TODO_SYSTEM_PROMPT,\n FILESYSTEM_SYSTEM_PROMPT,\n TASK_SYSTEM_PROMPT,\n EXECUTE_SYSTEM_PROMPT,\n buildSkillsPrompt,\n} from \"./prompts\";\nimport { createTodosTool } from \"./tools/todos\";\nimport { createFilesystemTools } from \"./tools/filesystem\";\nimport { createSubagentTool } from \"./tools/subagent\";\nimport { createExecuteTool } from \"./tools/execute\";\nimport { StateBackend } from \"./backends/state\";\nimport { patchToolCalls } from \"./utils/patch-tool-calls\";\nimport { summarizeIfNeeded } from \"./utils/summarization\";\nimport { applyInterruptConfig, wrapToolsWithApproval, type ApprovalCallback } from \"./utils/approval\";\nimport type { SummarizationConfig } from \"./types\";\n\n/**\n * Build the full system prompt from components.\n */\nfunction buildSystemPrompt(\n customPrompt?: string,\n hasSubagents?: boolean,\n hasSandbox?: boolean,\n skills?: Array<{ name: string; description: string; path: string }>\n): string {\n const parts = [\n customPrompt || \"\",\n BASE_PROMPT,\n TODO_SYSTEM_PROMPT,\n FILESYSTEM_SYSTEM_PROMPT,\n ];\n\n if (hasSandbox) {\n parts.push(EXECUTE_SYSTEM_PROMPT);\n }\n\n if (hasSubagents) {\n parts.push(TASK_SYSTEM_PROMPT);\n }\n\n // Add skills prompt if skills loaded\n if (skills && skills.length > 0) {\n parts.push(buildSkillsPrompt(skills));\n }\n\n return parts.filter(Boolean).join(\"\\n\\n\");\n}\n\n/**\n * Deep Agent wrapper class that provides generate() and stream() methods.\n * 
Uses ToolLoopAgent from AI SDK v6 for the agent loop.\n */\nexport class DeepAgent {\n private model: LanguageModel;\n private systemPrompt: string;\n private userTools: ToolSet;\n private maxSteps: number;\n private backend: BackendProtocol | BackendFactory;\n private subagentOptions: {\n defaultModel: LanguageModel;\n defaultTools: ToolSet;\n subagents: CreateDeepAgentParams[\"subagents\"];\n includeGeneralPurposeAgent: boolean;\n };\n private toolResultEvictionLimit?: number;\n private enablePromptCaching: boolean;\n private summarizationConfig?: SummarizationConfig;\n private hasSandboxBackend: boolean;\n private interruptOn?: InterruptOnConfig;\n private checkpointer?: BaseCheckpointSaver;\n private skillsMetadata: Array<{ name: string; description: string; path: string }> = [];\n private outputConfig?: { schema: z.ZodType<any>; description?: string };\n\n // AI SDK ToolLoopAgent passthrough options\n private loopControl?: CreateDeepAgentParams[\"loopControl\"];\n private generationOptions?: CreateDeepAgentParams[\"generationOptions\"];\n private advancedOptions?: CreateDeepAgentParams[\"advancedOptions\"];\n\n constructor(params: CreateDeepAgentParams) {\n const {\n model,\n middleware,\n tools = {},\n systemPrompt,\n subagents = [],\n backend,\n maxSteps = DEFAULT_MAX_STEPS,\n includeGeneralPurposeAgent = true,\n toolResultEvictionLimit,\n enablePromptCaching = false,\n summarization,\n interruptOn,\n checkpointer,\n skillsDir,\n agentId,\n output,\n loopControl,\n generationOptions,\n advancedOptions,\n } = params;\n\n // Wrap model with middleware if provided\n if (middleware) {\n const middlewares = Array.isArray(middleware)\n ? middleware\n : [middleware];\n\n this.model = wrapLanguageModel({\n model: model as LanguageModelV3, // Cast required since DeepAgent accepts LanguageModel\n middleware: middlewares,\n }) as LanguageModel;\n } else {\n this.model = model;\n }\n this.maxSteps = maxSteps;\n this.backend =\n backend || ((state: DeepAgentState) => new StateBackend(state));\n this.toolResultEvictionLimit = toolResultEvictionLimit;\n this.enablePromptCaching = enablePromptCaching;\n this.summarizationConfig = summarization;\n this.interruptOn = interruptOn;\n this.checkpointer = checkpointer;\n this.outputConfig = output;\n\n // Store AI SDK passthrough options\n this.loopControl = loopControl;\n this.generationOptions = generationOptions;\n this.advancedOptions = advancedOptions;\n\n // Load skills - prefer agentId over legacy skillsDir\n if (agentId) {\n // Show deprecation warning if skillsDir is also provided\n if (skillsDir) {\n console.warn(\n '[DeepAgent] agentId parameter takes precedence over skillsDir. 
' +\n 'skillsDir is deprecated and will be ignored.'\n );\n }\n\n this.loadSkills({ agentId }).catch(error => {\n console.warn('[DeepAgent] Failed to load skills:', error);\n });\n } else if (skillsDir) {\n // Legacy mode: use skillsDir\n this.loadSkills({ skillsDir }).catch(error => {\n console.warn('[DeepAgent] Failed to load skills:', error);\n });\n }\n\n // Check if backend is a sandbox (supports execute)\n // For factory functions, we can't know until runtime, so we check if it's an instance\n this.hasSandboxBackend = typeof backend !== \"function\" && backend !== undefined && isSandboxBackend(backend);\n\n // Determine if we have subagents\n const hasSubagents =\n includeGeneralPurposeAgent || (subagents && subagents.length > 0);\n\n this.systemPrompt = buildSystemPrompt(systemPrompt, hasSubagents, this.hasSandboxBackend, this.skillsMetadata);\n\n // Store user-provided tools\n this.userTools = tools;\n\n // Store subagent options for later use\n this.subagentOptions = {\n defaultModel: model,\n defaultTools: tools,\n subagents,\n includeGeneralPurposeAgent,\n };\n }\n\n /**\n * Create core tools (todos and filesystem).\n * @private\n */\n private createCoreTools(state: DeepAgentState, onEvent?: EventCallback): ToolSet {\n const todosTool = createTodosTool(state, onEvent);\n const filesystemTools = createFilesystemTools(state, {\n backend: this.backend,\n onEvent,\n toolResultEvictionLimit: this.toolResultEvictionLimit,\n });\n\n return {\n write_todos: todosTool,\n ...filesystemTools,\n ...this.userTools,\n };\n }\n\n /**\n * Create web tools if TAVILY_API_KEY is available.\n * Uses dynamic import to avoid bundling Node.js dependencies in client builds.\n * @private\n */\n private createWebToolSet(state: DeepAgentState, onEvent?: EventCallback): ToolSet {\n // Check if TAVILY_API_KEY is present before attempting to load web tools\n if (!process.env.TAVILY_API_KEY) {\n return {};\n }\n\n try {\n // Dynamic import to avoid bundling Node.js-only dependencies\n // This will only load in Node.js environments (server-side)\n const webToolsModule = require(\"./tools/web\");\n const webTools = webToolsModule.createWebTools(state, {\n backend: this.backend,\n onEvent,\n toolResultEvictionLimit: this.toolResultEvictionLimit,\n });\n return webTools;\n } catch (error) {\n // If web tools fail to load (e.g., in browser), return empty tools\n console.warn(\"Web tools not available in this environment:\", error);\n return {};\n }\n }\n\n /**\n * Create execute tool if backend is a sandbox.\n * @private\n */\n private createExecuteToolSet(onEvent?: EventCallback): ToolSet {\n if (!this.hasSandboxBackend) {\n return {};\n }\n\n const sandboxBackend = this.backend as SandboxBackendProtocol;\n return {\n execute: createExecuteTool({\n backend: sandboxBackend,\n onEvent,\n }),\n };\n }\n\n /**\n * Create subagent tool if configured.\n * @private\n */\n private createSubagentToolSet(state: DeepAgentState, onEvent?: EventCallback): ToolSet {\n if (\n !this.subagentOptions.includeGeneralPurposeAgent &&\n (!this.subagentOptions.subagents || this.subagentOptions.subagents.length === 0)\n ) {\n return {};\n }\n\n const subagentTool = createSubagentTool(state, {\n defaultModel: this.subagentOptions.defaultModel,\n defaultTools: this.userTools,\n subagents: this.subagentOptions.subagents,\n includeGeneralPurposeAgent: this.subagentOptions.includeGeneralPurposeAgent,\n backend: this.backend,\n onEvent,\n interruptOn: this.interruptOn,\n parentGenerationOptions: this.generationOptions,\n parentAdvancedOptions: 
this.advancedOptions,\n });\n\n return { task: subagentTool };\n }\n\n /**\n * Create all tools for the agent, combining core, web, execute, and subagent tools.\n * @private\n */\n private createTools(state: DeepAgentState, onEvent?: EventCallback): ToolSet {\n // Start with core tools (todos, filesystem, user tools)\n let allTools = this.createCoreTools(state, onEvent);\n\n // Add web tools if available\n const webTools = this.createWebToolSet(state, onEvent);\n if (Object.keys(webTools).length > 0) {\n allTools = { ...allTools, ...webTools };\n }\n\n // Add execute tool if sandbox backend\n const executeTools = this.createExecuteToolSet(onEvent);\n if (Object.keys(executeTools).length > 0) {\n allTools = { ...allTools, ...executeTools };\n }\n\n // Add subagent tool if configured\n const subagentTools = this.createSubagentToolSet(state, onEvent);\n if (Object.keys(subagentTools).length > 0) {\n allTools = { ...allTools, ...subagentTools };\n }\n\n // Apply interruptOn configuration to tools\n allTools = applyInterruptConfig(allTools, this.interruptOn);\n\n return allTools;\n }\n\n /**\n * Build stop conditions with maxSteps safety limit.\n * Combines user-provided stop conditions with the maxSteps limit.\n */\n private buildStopConditions(maxSteps?: number): StopCondition<any>[] {\n const conditions: StopCondition<any>[] = [];\n\n // Always add maxSteps safety limit\n conditions.push(stepCountIs(maxSteps ?? this.maxSteps));\n\n // Add user-provided stop conditions\n if (this.loopControl?.stopWhen) {\n if (Array.isArray(this.loopControl.stopWhen)) {\n conditions.push(...this.loopControl.stopWhen);\n } else {\n conditions.push(this.loopControl.stopWhen);\n }\n }\n\n return conditions;\n }\n\n /**\n * Build agent settings by combining passthrough options with defaults.\n */\n private buildAgentSettings(onEvent?: EventCallback) {\n const settings: any = {\n model: this.model,\n instructions: this.systemPrompt,\n tools: undefined, // Will be set by caller\n };\n\n // Add generation options if provided\n if (this.generationOptions) {\n Object.assign(settings, this.generationOptions);\n }\n\n // Add advanced options if provided\n if (this.advancedOptions) {\n Object.assign(settings, this.advancedOptions);\n }\n\n // Add composed loop control callbacks if provided\n if (this.loopControl) {\n if (this.loopControl.prepareStep) {\n settings.prepareStep = this.composePrepareStep(this.loopControl.prepareStep);\n }\n if (this.loopControl.onStepFinish) {\n settings.onStepFinish = this.composeOnStepFinish(this.loopControl.onStepFinish);\n }\n if (this.loopControl.onFinish) {\n settings.onFinish = this.composeOnFinish(this.loopControl.onFinish);\n }\n }\n\n // Add output configuration if provided using AI SDK Output helper\n if (this.outputConfig) {\n settings.output = Output.object(this.outputConfig);\n }\n\n return settings;\n }\n\n /**\n * Create a ToolLoopAgent for a given state.\n * @param state - The shared agent state\n * @param maxSteps - Optional max steps override\n * @param onEvent - Optional callback for emitting events\n */\n private createAgent(state: DeepAgentState, maxSteps?: number, onEvent?: EventCallback) {\n const tools = this.createTools(state, onEvent);\n const settings = this.buildAgentSettings(onEvent);\n const stopConditions = this.buildStopConditions(maxSteps);\n\n return new ToolLoopAgent({\n ...settings,\n tools,\n stopWhen: stopConditions,\n });\n }\n\n /**\n * Load skills from directory asynchronously.\n * Supports both legacy skillsDir and new agentId modes.\n */\n private 
async loadSkills(options: { skillsDir?: string; agentId?: string }) {\n const { listSkills } = await import(\"./skills/load\");\n\n const skills = await listSkills(\n options.agentId\n ? { agentId: options.agentId }\n : { projectSkillsDir: options.skillsDir }\n );\n\n this.skillsMetadata = skills.map(s => ({\n name: s.name,\n description: s.description,\n path: s.path,\n }));\n }\n\n /**\n * Generate a response (non-streaming).\n */\n async generate(options: { prompt: string; maxSteps?: number }) {\n // Create fresh state for this invocation\n const state: DeepAgentState = {\n todos: [],\n files: {},\n };\n\n const agent = this.createAgent(state, options.maxSteps);\n const result = await agent.generate({ prompt: options.prompt });\n\n // Return result with state attached\n // Note: We attach state as a property to preserve getters on result\n Object.defineProperty(result, 'state', {\n value: state,\n enumerable: true,\n writable: false,\n });\n\n return result as typeof result & { state: DeepAgentState };\n }\n\n /**\n * Stream a response.\n */\n async stream(options: { prompt: string; maxSteps?: number }) {\n // Create fresh state for this invocation\n const state: DeepAgentState = {\n todos: [],\n files: {},\n };\n\n const agent = this.createAgent(state, options.maxSteps);\n const result = await agent.stream({ prompt: options.prompt });\n\n // Return result with state attached\n // Note: We attach state as a property to preserve getters on result\n Object.defineProperty(result, 'state', {\n value: state,\n enumerable: true,\n writable: false,\n });\n\n return result as typeof result & { state: DeepAgentState };\n }\n\n /**\n * Generate with an existing state (for continuing conversations).\n */\n async generateWithState(options: {\n prompt: string;\n state: DeepAgentState;\n maxSteps?: number;\n }) {\n const agent = this.createAgent(options.state, options.maxSteps);\n const result = await agent.generate({ prompt: options.prompt });\n\n // Return result with state attached\n // Note: We attach state as a property to preserve getters on result\n Object.defineProperty(result, 'state', {\n value: options.state,\n enumerable: true,\n writable: false,\n });\n\n return result as typeof result & { state: DeepAgentState };\n }\n\n /**\n * Get the underlying ToolLoopAgent for advanced usage.\n * This allows using AI SDK's createAgentUIStream and other utilities.\n */\n getAgent(state?: DeepAgentState) {\n const agentState = state || { todos: [], files: {} };\n return this.createAgent(agentState);\n }\n\n /**\n * Stream a response with real-time events.\n * This is an async generator that yields DeepAgentEvent objects.\n * \n * Supports conversation history via the `messages` option for multi-turn conversations.\n * \n * @example\n * ```typescript\n * // Single turn\n * for await (const event of agent.streamWithEvents({ prompt: \"...\" })) {\n * switch (event.type) {\n * case 'text':\n * process.stdout.write(event.text);\n * break;\n * case 'done':\n * // event.messages contains the updated conversation history\n * console.log('Messages:', event.messages);\n * break;\n * }\n * }\n * \n * // Multi-turn conversation\n * let messages = [];\n * for await (const event of agent.streamWithEvents({ prompt: \"Hello\", messages })) {\n * if (event.type === 'done') {\n * messages = event.messages; // Save for next turn\n * }\n * }\n * for await (const event of agent.streamWithEvents({ prompt: \"Follow up\", messages })) {\n * // Agent now has context from previous turn\n * }\n * ```\n */\n\n /**\n * Compose 
user's onStepFinish callback with DeepAgent's internal checkpointing logic.\n * User callback executes first, errors are caught to prevent breaking checkpointing.\n */\n private composeOnStepFinish(userOnStepFinish?: ToolLoopAgentSettings['onStepFinish']) {\n return async (params: any) => {\n // Execute user callback first if provided\n if (userOnStepFinish) {\n try {\n await userOnStepFinish(params);\n } catch (error) {\n // Log error but don't let it break DeepAgent's internal logic\n console.error(\"[DeepAgent] User onStepFinish callback failed:\", error);\n }\n }\n\n // TODO: Add DeepAgent's internal checkpointing logic here\n // This will be implemented when we migrate from streamText to ToolLoopAgent\n };\n }\n\n /**\n * Compose user's onFinish callback with DeepAgent's internal cleanup logic.\n */\n private composeOnFinish(userOnFinish?: ToolLoopAgentSettings['onFinish']) {\n return async (params: any) => {\n // Execute user callback first if provided\n if (userOnFinish) {\n try {\n await userOnFinish(params);\n } catch (error) {\n console.error(\"[DeepAgent] User onFinish callback failed:\", error);\n }\n }\n\n // TODO: Add DeepAgent's internal cleanup logic here\n };\n }\n\n /**\n * Compose user's prepareStep callback with DeepAgent's internal step preparation.\n * Returns a function typed as `any` to avoid AI SDK's strict toolName inference.\n */\n private composePrepareStep(userPrepareStep?: PrepareStepFunction): any {\n return async (params: any) => {\n // Execute user callback first if provided\n if (userPrepareStep) {\n try {\n const result = await userPrepareStep(params);\n // Merge user's prepareStep result with DeepAgent's requirements\n return {\n ...result,\n // TODO: Add DeepAgent's internal step preparation here\n };\n } catch (error) {\n console.error(\"[DeepAgent] User prepareStep callback failed:\", error);\n return params; // Return original params on error\n }\n }\n\n return params;\n };\n }\n\n /**\n * Build streamText options with callbacks for step tracking and checkpointing.\n *\n * @private\n */\n private buildStreamTextOptions(\n inputMessages: ModelMessage[],\n tools: ToolSet,\n options: StreamWithEventsOptions,\n state: DeepAgentState,\n baseStep: number,\n pendingInterrupt: InterruptData | undefined,\n eventQueue: DeepAgentEvent[],\n stepNumberRef: { value: number }\n ): Parameters<typeof streamText>[0] {\n const { threadId } = options;\n\n const streamOptions: Parameters<typeof streamText>[0] = {\n model: this.model,\n messages: inputMessages,\n tools,\n stopWhen: this.buildStopConditions(options.maxSteps),\n abortSignal: options.abortSignal,\n onStepFinish: async ({ toolCalls, toolResults }) => {\n // Call user's onStepFinish first if provided\n if (this.loopControl?.onStepFinish) {\n const composedOnStepFinish = this.composeOnStepFinish(this.loopControl.onStepFinish);\n await composedOnStepFinish({ toolCalls, toolResults });\n }\n\n // Then execute DeepAgent's checkpointing logic\n stepNumberRef.value++;\n const cumulativeStep = baseStep + stepNumberRef.value;\n\n // Emit step finish event (relative step number)\n const stepEvent: DeepAgentEvent = {\n type: \"step-finish\",\n stepNumber: stepNumberRef.value,\n toolCalls: toolCalls.map((tc, i) => ({\n toolName: tc.toolName,\n args: \"input\" in tc ? tc.input : undefined,\n result: toolResults[i] ? (\"output\" in toolResults[i] ? 
toolResults[i].output : undefined) : undefined,\n })),\n };\n eventQueue.push(stepEvent);\n\n // Save checkpoint if configured\n if (threadId && this.checkpointer) {\n // Get current messages state - we need to track messages as they're built\n // For now, we'll save with the input messages (will be updated after assistant response)\n const checkpoint: Checkpoint = {\n threadId,\n step: cumulativeStep, // Cumulative step number\n messages: inputMessages, // Current messages before assistant response\n state: { ...state },\n interrupt: pendingInterrupt,\n createdAt: new Date().toISOString(),\n updatedAt: new Date().toISOString(),\n };\n await this.checkpointer.save(checkpoint);\n\n eventQueue.push(createCheckpointSavedEvent(threadId, cumulativeStep));\n }\n },\n };\n\n // Add generation options if provided\n if (this.generationOptions) {\n Object.assign(streamOptions, this.generationOptions);\n }\n\n // Add advanced options if provided\n if (this.advancedOptions) {\n Object.assign(streamOptions, this.advancedOptions);\n }\n\n // Add output configuration if provided using AI SDK Output helper\n if (this.outputConfig) {\n streamOptions.output = Output.object(this.outputConfig);\n }\n\n // Add composed loop control callbacks if provided\n if (this.loopControl) {\n if (this.loopControl.prepareStep) {\n streamOptions.prepareStep = this.composePrepareStep(this.loopControl.prepareStep);\n }\n if (this.loopControl.onFinish) {\n streamOptions.onFinish = this.composeOnFinish(this.loopControl.onFinish);\n }\n }\n\n // Add system prompt with optional caching for Anthropic models\n if (this.enablePromptCaching) {\n // Use messages format with cache control for Anthropic\n streamOptions.messages = [\n {\n role: \"system\",\n content: this.systemPrompt,\n providerOptions: {\n anthropic: { cacheControl: { type: \"ephemeral\" } },\n },\n } as ModelMessage,\n ...inputMessages,\n ];\n } else {\n // Use standard system prompt\n streamOptions.system = this.systemPrompt;\n }\n\n return streamOptions;\n }\n\n /**\n * Build message array from options, handling validation and priority logic.\n * Priority: explicit messages > prompt > checkpoint history.\n *\n * @private\n */\n private async buildMessageArray(\n options: StreamWithEventsOptions,\n patchedHistory: ModelMessage[]\n ): Promise<{\n messages: ModelMessage[];\n patchedHistory: ModelMessage[];\n error?: DeepAgentErrorEvent;\n shouldReturnEmpty?: boolean;\n }> {\n const { resume } = options;\n\n // Validation: require either prompt, messages, resume, or threadId\n if (!options.prompt && !options.messages && !resume && !options.threadId) {\n return {\n messages: [],\n patchedHistory,\n error: {\n type: \"error\",\n error: new Error(\"Either 'prompt', 'messages', 'resume', or 'threadId' is required\"),\n },\n };\n }\n\n // Build messages with priority: explicit messages > prompt > checkpoint\n let userMessages: ModelMessage[] = [];\n let shouldUseCheckpointHistory = true;\n\n if (options.messages && options.messages.length > 0) {\n // Use explicit messages array (preferred)\n userMessages = options.messages;\n shouldUseCheckpointHistory = false; // Explicit messages replace checkpoint history\n\n // Emit deprecation warning for prompt if also provided\n if (options.prompt && process.env.NODE_ENV !== 'production') {\n console.warn('prompt parameter is deprecated when messages are provided, using messages instead');\n }\n } else if (options.messages) {\n // Empty messages array provided - clear checkpoint history and treat as reset\n shouldUseCheckpointHistory 
= false;\n patchedHistory = []; // Clear checkpoint history\n\n // According to priority logic, even empty messages take precedence over prompt\n // This means prompt is ignored even if messages is empty\n if (options.prompt && process.env.NODE_ENV !== 'production') {\n console.warn('prompt parameter is deprecated when empty messages are provided, prompt ignored');\n }\n // Empty messages case will be handled by validation below\n } else if (options.prompt) {\n // Convert prompt to message for backward compatibility\n userMessages = [{ role: \"user\", content: options.prompt } as ModelMessage];\n\n if (process.env.NODE_ENV !== 'production') {\n console.warn('prompt parameter is deprecated, use messages instead');\n }\n }\n // If neither messages nor prompt provided, use checkpoint history only\n\n // Load checkpoint messages if available and not replaced by explicit messages\n if (shouldUseCheckpointHistory && patchedHistory.length > 0) {\n // Patch any dangling tool calls in the history first\n patchedHistory = patchToolCalls(patchedHistory);\n\n // Apply summarization if enabled and needed\n if (this.summarizationConfig?.enabled && patchedHistory.length > 0) {\n const summarizationResult = await summarizeIfNeeded(patchedHistory, {\n model: this.summarizationConfig.model || this.model,\n tokenThreshold: this.summarizationConfig.tokenThreshold,\n keepMessages: this.summarizationConfig.keepMessages,\n generationOptions: this.generationOptions,\n advancedOptions: this.advancedOptions,\n });\n patchedHistory = summarizationResult.messages;\n }\n } else if (!shouldUseCheckpointHistory) {\n // Explicit messages replace checkpoint history - clear patchedHistory\n patchedHistory = [];\n }\n\n // Handle empty messages case\n const hasEmptyMessages = options.messages && options.messages.length === 0;\n const hasValidInput = userMessages.length > 0 || patchedHistory.length > 0;\n\n // Special case: empty messages with no checkpoint history\n if (hasEmptyMessages && !hasValidInput && !resume) {\n // This is a \"no-op\" case - return done immediately with empty messages\n return {\n messages: [],\n patchedHistory,\n shouldReturnEmpty: true,\n };\n }\n\n // Check if we have valid input: either user messages or checkpoint history\n if (!hasValidInput && !resume) {\n return {\n messages: [],\n patchedHistory,\n error: {\n type: \"error\",\n error: new Error(\"No valid input: provide either non-empty messages, prompt, or threadId with existing checkpoint\"),\n },\n };\n }\n\n const inputMessages: ModelMessage[] = [\n ...patchedHistory,\n ...userMessages,\n ];\n\n return { messages: inputMessages, patchedHistory };\n }\n\n /**\n * Load checkpoint context if threadId is provided.\n * Handles checkpoint restoration and resume from interrupt.\n *\n * @private\n */\n private async loadCheckpointContext(\n options: StreamWithEventsOptions\n ): Promise<{\n state: DeepAgentState;\n patchedHistory: ModelMessage[];\n currentStep: number;\n pendingInterrupt: InterruptData | undefined;\n checkpointEvent?: CheckpointLoadedEvent;\n }> {\n const { threadId, resume } = options;\n let state: DeepAgentState = options.state || { todos: [], files: {} };\n let patchedHistory: ModelMessage[] = [];\n let currentStep = 0;\n let pendingInterrupt: InterruptData | undefined;\n let checkpointEvent: CheckpointLoadedEvent | undefined;\n\n if (threadId && this.checkpointer) {\n const checkpoint = await this.checkpointer.load(threadId);\n if (checkpoint) {\n state = checkpoint.state;\n patchedHistory = checkpoint.messages;\n currentStep 
= checkpoint.step;\n pendingInterrupt = checkpoint.interrupt;\n\n checkpointEvent = createCheckpointLoadedEvent(\n threadId,\n checkpoint.step,\n checkpoint.messages.length\n );\n }\n }\n\n // Handle resume from interrupt\n if (resume && pendingInterrupt) {\n const decision = resume.decisions[0];\n if (decision?.type === 'approve') {\n pendingInterrupt = undefined;\n } else {\n pendingInterrupt = undefined;\n }\n }\n\n return { state, patchedHistory, currentStep, pendingInterrupt, checkpointEvent };\n }\n\n async *streamWithEvents(\n options: StreamWithEventsOptions\n ): AsyncGenerator<DeepAgentEvent, void, unknown> {\n const { threadId, resume } = options;\n\n // Load checkpoint context (state, history, step tracking)\n const context = await this.loadCheckpointContext(options);\n const { state, currentStep, pendingInterrupt, checkpointEvent } = context;\n let patchedHistory = context.patchedHistory; // Mutable - may be reassigned during message building\n\n // Yield checkpoint-loaded event if checkpoint was restored\n if (checkpointEvent) {\n yield checkpointEvent;\n }\n\n // Build message array with validation and priority logic\n const messageResult = await this.buildMessageArray(options, patchedHistory);\n\n // Handle error cases\n if (messageResult.error) {\n yield messageResult.error;\n return;\n }\n\n // Handle empty messages no-op case\n if (messageResult.shouldReturnEmpty) {\n yield {\n type: \"done\",\n text: \"\",\n messages: [],\n state,\n };\n return;\n }\n\n // Extract results\n const inputMessages = messageResult.messages;\n patchedHistory = messageResult.patchedHistory;\n\n // Event queue for collecting events from tool executions\n const eventQueue: DeepAgentEvent[] = [];\n const stepNumberRef = { value: 0 }; // Mutable reference for stepNumber\n const baseStep = currentStep; // Cumulative step from checkpoint\n\n // Event callback that tools will use to emit events\n const onEvent: EventCallback = (event) => {\n eventQueue.push(event);\n };\n\n // Create tools with event callback\n let tools = this.createTools(state, onEvent);\n\n // Wrap tools with approval checking if interruptOn is configured and callback provided\n // This intercepts tool execution and requests approval before running\n const hasInterruptOn = !!this.interruptOn;\n const hasApprovalCallback = !!options.onApprovalRequest;\n\n if (hasInterruptOn && hasApprovalCallback) {\n tools = wrapToolsWithApproval(tools, this.interruptOn, options.onApprovalRequest);\n }\n\n try {\n // Build streamText options with callbacks\n const streamOptions = this.buildStreamTextOptions(\n inputMessages,\n tools,\n options,\n state,\n baseStep,\n pendingInterrupt,\n eventQueue,\n stepNumberRef\n );\n\n // Use streamText with messages array for conversation history\n const result = streamText(streamOptions);\n\n // Yield step start event\n yield { type: \"step-start\", stepNumber: 1 };\n\n // Stream all chunks (text, tool calls, etc.)\n for await (const chunk of result.fullStream) {\n // First, yield any queued events from tool executions\n while (eventQueue.length > 0) {\n const event = eventQueue.shift()!;\n yield event;\n\n // If a step finished, yield the next step start\n if (event.type === \"step-finish\") {\n yield { type: \"step-start\", stepNumber: event.stepNumber + 1 };\n }\n }\n\n // Handle different chunk types from fullStream\n if (chunk.type === \"text-delta\") {\n yield { type: \"text\", text: chunk.text };\n } else if (chunk.type === \"tool-call\") {\n // Emit tool-call event for UI\n // Note: chunk has input 
property (AI SDK v6), but we use args for our event type\n yield {\n type: \"tool-call\",\n toolName: chunk.toolName,\n toolCallId: chunk.toolCallId,\n args: chunk.input,\n } as DeepAgentEvent;\n } else if (chunk.type === \"tool-result\") {\n // Emit tool-result event for UI\n // Note: chunk has output property (AI SDK v6), but we use result for our event type\n yield {\n type: \"tool-result\",\n toolName: chunk.toolName,\n toolCallId: chunk.toolCallId,\n result: chunk.output,\n isError: false,\n } as DeepAgentEvent;\n } else if (chunk.type === \"tool-error\") {\n // Emit tool-result event with error flag for UI\n yield {\n type: \"tool-result\",\n toolName: chunk.toolName,\n toolCallId: chunk.toolCallId,\n result: chunk.error,\n isError: true,\n } as DeepAgentEvent;\n }\n }\n\n // Yield any remaining queued events\n while (eventQueue.length > 0) {\n yield eventQueue.shift()!;\n }\n\n // Get the final text\n const finalText = await result.text;\n\n // Build updated messages array with assistant response\n // Only include assistant message if there's actual content (avoid empty text blocks)\n const updatedMessages: ModelMessage[] = [\n ...inputMessages,\n ...(finalText ? [{ role: \"assistant\", content: finalText } as ModelMessage] : []),\n ];\n\n // Extract output if present (from ToolLoopAgent's native output parsing)\n const output = 'output' in result ? (result as { output: unknown }).output : undefined;\n\n // Yield done event with updated messages\n yield {\n type: \"done\",\n state,\n text: finalText,\n messages: updatedMessages,\n ...(output !== undefined ? { output } : {}),\n };\n \n // Save final checkpoint after done event\n if (threadId && this.checkpointer) {\n const finalCheckpoint: Checkpoint = {\n threadId,\n step: baseStep + stepNumberRef.value, // Cumulative step number\n messages: updatedMessages,\n state,\n createdAt: new Date().toISOString(),\n updatedAt: new Date().toISOString(),\n };\n await this.checkpointer.save(finalCheckpoint);\n\n // Emit checkpoint-saved event for final checkpoint\n yield createCheckpointSavedEvent(threadId, baseStep + stepNumberRef.value);\n }\n } catch (error) {\n // Yield error event\n yield {\n type: \"error\",\n error: error instanceof Error ? 
error : new Error(String(error)),\n };\n }\n }\n\n /**\n * Stream with a simple callback interface.\n * This is a convenience wrapper around streamWithEvents.\n */\n async streamWithCallback(\n options: StreamWithEventsOptions,\n onEvent: EventCallback\n ): Promise<{ state: DeepAgentState; text?: string; messages?: ModelMessage[] }> {\n let finalState: DeepAgentState = options.state || { todos: [], files: {} };\n let finalText: string | undefined;\n let finalMessages: ModelMessage[] | undefined;\n\n for await (const event of this.streamWithEvents(options)) {\n onEvent(event);\n\n if (event.type === \"done\") {\n finalState = event.state;\n finalText = event.text;\n finalMessages = event.messages;\n }\n }\n\n return { state: finalState, text: finalText, messages: finalMessages };\n }\n}\n\n/**\n * Create a Deep Agent with planning, filesystem, and subagent capabilities.\n *\n * @param params - Configuration object for the Deep Agent\n * @param params.model - **Required.** AI SDK LanguageModel instance (e.g., `anthropic('claude-sonnet-4-20250514')`, `openai('gpt-4o')`)\n * @param params.systemPrompt - Optional custom system prompt for the agent\n * @param params.tools - Optional custom tools to add to the agent (AI SDK ToolSet)\n * @param params.subagents - Optional array of specialized subagent configurations for task delegation\n * @param params.backend - Optional backend for filesystem operations (default: StateBackend for in-memory storage)\n * @param params.maxSteps - Optional maximum number of steps for the agent loop (default: 100)\n * @param params.includeGeneralPurposeAgent - Optional flag to include general-purpose subagent (default: true)\n * @param params.toolResultEvictionLimit - Optional token limit before evicting large tool results to filesystem (default: disabled)\n * @param params.enablePromptCaching - Optional flag to enable prompt caching for improved performance (Anthropic only, default: false)\n * @param params.summarization - Optional summarization configuration for automatic conversation summarization\n * @returns A configured DeepAgent instance\n *\n * @see {@link CreateDeepAgentParams} for detailed parameter types\n *\n * @example Basic usage\n * ```typescript\n * import { createDeepAgent } from 'deepagentsdk';\n * import { anthropic } from '@ai-sdk/anthropic';\n *\n * const agent = createDeepAgent({\n * model: anthropic('claude-sonnet-4-20250514'),\n * systemPrompt: 'You are a research assistant...',\n * });\n *\n * const result = await agent.generate({\n * prompt: 'Research the topic and write a report',\n * });\n * ```\n *\n * @example With custom tools\n * ```typescript\n * import { tool } from 'ai';\n * import { z } from 'zod';\n *\n * const customTool = tool({\n * description: 'Get current time',\n * inputSchema: z.object({}),\n * execute: async () => new Date().toISOString(),\n * });\n *\n * const agent = createDeepAgent({\n * model: anthropic('claude-sonnet-4-20250514'),\n * tools: { get_time: customTool },\n * });\n * ```\n *\n * @example With subagents\n * ```typescript\n * const agent = createDeepAgent({\n * model: anthropic('claude-sonnet-4-20250514'),\n * subagents: [{\n * name: 'research-agent',\n * description: 'Specialized for research tasks',\n * systemPrompt: 'You are a research specialist...',\n * }],\n * });\n * ```\n *\n * @example With StateBackend (default, explicit)\n * ```typescript\n * import { StateBackend } from 'deepagentsdk';\n *\n * const state = { todos: [], files: {} };\n * const agent = createDeepAgent({\n * model: 
anthropic('claude-sonnet-4-20250514'),\n * backend: new StateBackend(state), // Ephemeral in-memory storage\n * });\n * ```\n *\n * @example With FilesystemBackend\n * ```typescript\n * import { FilesystemBackend } from 'deepagentsdk';\n *\n * const agent = createDeepAgent({\n * model: anthropic('claude-sonnet-4-20250514'),\n * backend: new FilesystemBackend({ rootDir: './workspace' }), // Persist to disk\n * });\n * ```\n *\n * @example With PersistentBackend\n * ```typescript\n * import { PersistentBackend, InMemoryStore } from 'deepagentsdk';\n *\n * const store = new InMemoryStore();\n * const agent = createDeepAgent({\n * model: anthropic('claude-sonnet-4-20250514'),\n * backend: new PersistentBackend({ store, namespace: 'project-1' }), // Cross-session persistence\n * });\n * ```\n *\n * @example With CompositeBackend\n * ```typescript\n * import { CompositeBackend, FilesystemBackend, StateBackend } from 'deepagentsdk';\n *\n * const state = { todos: [], files: {} };\n * const agent = createDeepAgent({\n * model: anthropic('claude-sonnet-4-20250514'),\n * backend: new CompositeBackend(\n * new StateBackend(state),\n * { '/persistent/': new FilesystemBackend({ rootDir: './persistent' }) }\n * ), // Route files by path prefix\n * });\n * ```\n *\n * @example With middleware for logging and caching\n * ```typescript\n * import { createDeepAgent } from 'deepagentsdk';\n * import { anthropic } from '@ai-sdk/anthropic';\n *\n * const loggingMiddleware = {\n * wrapGenerate: async ({ doGenerate, params }) => {\n * console.log('Model called with:', params.prompt);\n * const result = await doGenerate();\n * console.log('Model returned:', result.text);\n * return result;\n * },\n * };\n *\n * const agent = createDeepAgent({\n * model: anthropic('claude-sonnet-4-20250514'),\n * middleware: [loggingMiddleware],\n * });\n * ```\n *\n * @example With middleware factory for context access\n * ```typescript\n * import { FilesystemBackend } from 'deepagentsdk';\n *\n * function createContextMiddleware(backend: BackendProtocol) {\n * return {\n * wrapGenerate: async ({ doGenerate }) => {\n * const state = await backend.read('state');\n * const result = await doGenerate();\n * await backend.write('state', { ...state, lastCall: result });\n * return result;\n * },\n * };\n * }\n *\n * const backend = new FilesystemBackend({ rootDir: './workspace' });\n * const agent = createDeepAgent({\n * model: anthropic('claude-sonnet-4-20250514'),\n * backend,\n * middleware: createContextMiddleware(backend),\n * });\n * ```\n *\n * @example With performance optimizations\n * ```typescript\n * const agent = createDeepAgent({\n * model: anthropic('claude-sonnet-4-20250514'),\n * enablePromptCaching: true,\n * toolResultEvictionLimit: 20000,\n * summarization: {\n * enabled: true,\n * tokenThreshold: 170000,\n * keepMessages: 6,\n * },\n * });\n * ```\n */\nexport function createDeepAgent(params: CreateDeepAgentParams): DeepAgent {\n return new DeepAgent(params);\n}\n\n// Re-export useful AI SDK v6 primitives\nexport { ToolLoopAgent, stepCountIs, hasToolCall } from \"ai\";\n","/**\n * BaseSandbox: Abstract base class for sandbox backends.\n *\n * Implements all BackendProtocol methods using shell commands executed via execute().\n * Subclasses only need to implement execute() and id.\n *\n * This pattern allows creating sandbox backends for different environments\n * (local, Modal, Runloop, Daytona, etc.) 
by only implementing the command\n * execution layer.\n */\n\nimport type {\n EditResult,\n ExecuteResponse,\n FileData,\n FileInfo,\n FileDownloadResponse,\n FileOperationError,\n FileUploadResponse,\n GrepMatch,\n SandboxBackendProtocol,\n WriteResult,\n} from \"../types\";\nimport {\n FILE_NOT_FOUND,\n SYSTEM_REMINDER_FILE_EMPTY,\n STRING_NOT_FOUND,\n} from \"../constants/errors\";\nimport { DEFAULT_READ_LIMIT } from \"../constants/limits\";\n\n/**\n * Map error messages to FileOperationError literals.\n *\n * This provides structured error handling that LLMs can understand and potentially fix.\n */\nfunction mapErrorToOperationError(\n errorMessage: string,\n path: string\n): FileOperationError {\n const lowerError = errorMessage.toLowerCase();\n\n if (lowerError.includes(\"no such file\") ||\n lowerError.includes(\"not found\") ||\n lowerError.includes(\"cannot find\")) {\n return \"file_not_found\";\n }\n\n if (lowerError.includes(\"permission denied\") ||\n lowerError.includes(\"access denied\") ||\n lowerError.includes(\"read-only\")) {\n return \"permission_denied\";\n }\n\n if (lowerError.includes(\"is a directory\") ||\n lowerError.includes(\"directory not empty\")) {\n return \"is_directory\";\n }\n\n // Default to invalid_path for other errors\n return \"invalid_path\";\n}\n\n/**\n * Encode string to base64 for safe shell transmission.\n */\nfunction toBase64(str: string): string {\n return Buffer.from(str, \"utf-8\").toString(\"base64\");\n}\n\n/**\n * Build a Node.js script command with embedded base64 arguments.\n * This avoids shell argument parsing issues by embedding values directly in the script.\n */\nfunction buildNodeScript(script: string, args: Record<string, string>): string {\n // Replace placeholders with actual values\n let result = script;\n for (const [key, value] of Object.entries(args)) {\n result = result.replace(new RegExp(`__${key}__`, \"g\"), value);\n }\n return `node -e '${result}'`;\n}\n\n/**\n * Abstract base class for sandbox backends.\n *\n * Implements all file operations using shell commands via execute().\n * Subclasses only need to implement execute() and id.\n *\n * @example Creating a custom sandbox backend\n * ```typescript\n * class MyCloudSandbox extends BaseSandbox {\n * readonly id = 'my-cloud-123';\n *\n * async execute(command: string): Promise<ExecuteResponse> {\n * // Call your cloud provider's API\n * const result = await myCloudApi.runCommand(command);\n * return {\n * output: result.stdout + result.stderr,\n * exitCode: result.exitCode,\n * truncated: false,\n * };\n * }\n * }\n * ```\n */\nexport abstract class BaseSandbox implements SandboxBackendProtocol {\n /**\n * Execute a shell command in the sandbox.\n * Must be implemented by subclasses.\n */\n abstract execute(command: string): Promise<ExecuteResponse>;\n\n /**\n * Unique identifier for this sandbox instance.\n * Must be implemented by subclasses.\n */\n abstract readonly id: string;\n\n /**\n * Upload multiple files to the sandbox.\n *\n * Default implementation uses base64 encoding via shell commands.\n * Subclasses can override if they have a native file upload API.\n *\n * This API is designed to allow partial success - individual uploads may fail\n * without affecting others. 
Check the error field in each response.\n */\n async uploadFiles(files: Array<[string, Uint8Array]>): Promise<FileUploadResponse[]> {\n const responses: FileUploadResponse[] = [];\n\n for (const [path, content] of files) {\n try {\n // Use shell command to write file via base64 encoding\n const base64Content = Buffer.from(content).toString(\"base64\");\n // Escape single quotes in path for shell safety\n const escapedPath = path.replace(/'/g, \"'\\\\''\");\n const result = await this.execute(`echo '${base64Content}' | base64 -d > '${escapedPath}'`);\n\n if (result.exitCode !== 0) {\n responses.push({\n path,\n error: mapErrorToOperationError(result.output, path),\n });\n } else {\n responses.push({ path, error: null });\n }\n } catch (error) {\n responses.push({\n path,\n error: \"permission_denied\",\n });\n }\n }\n\n return responses;\n }\n\n /**\n * Download multiple files from the sandbox.\n *\n * Default implementation uses base64 encoding via shell commands.\n * Subclasses can override if they have a native file download API.\n *\n * This API is designed to allow partial success - individual downloads may fail\n * without affecting others. Check the error field in each response.\n */\n async downloadFiles(paths: string[]): Promise<FileDownloadResponse[]> {\n const responses: FileDownloadResponse[] = [];\n\n for (const path of paths) {\n try {\n // Escape single quotes in path for shell safety\n const escapedPath = path.replace(/'/g, \"'\\\\''\");\n const result = await this.execute(`base64 '${escapedPath}'`);\n\n if (result.exitCode !== 0) {\n responses.push({\n path,\n content: null,\n error: mapErrorToOperationError(result.output, path),\n });\n } else {\n const base64Content = result.output.trim();\n const content = Buffer.from(base64Content, \"base64\");\n responses.push({ path, content, error: null });\n }\n } catch (error) {\n responses.push({\n path,\n content: null,\n error: \"permission_denied\",\n });\n }\n }\n\n return responses;\n }\n\n /**\n * List files and directories in a path.\n */\n async lsInfo(path: string): Promise<FileInfo[]> {\n const pathB64 = toBase64(path);\n const script = `\nconst fs = require(\"fs\");\nconst path = require(\"path\");\n\nconst dirPath = Buffer.from(\"__PATH__\", \"base64\").toString(\"utf-8\");\n\ntry {\n const entries = fs.readdirSync(dirPath, { withFileTypes: true });\n for (const entry of entries) {\n const fullPath = path.join(dirPath, entry.name);\n try {\n const stat = fs.statSync(fullPath);\n console.log(JSON.stringify({\n path: entry.name,\n is_dir: entry.isDirectory(),\n size: stat.size,\n modified_at: stat.mtime.toISOString()\n }));\n } catch (e) {}\n }\n} catch (e) {}\n`;\n const result = await this.execute(buildNodeScript(script, { PATH: pathB64 }));\n\n const infos: FileInfo[] = [];\n for (const line of result.output.trim().split(\"\\n\")) {\n if (!line) continue;\n try {\n const data = JSON.parse(line);\n infos.push({\n path: data.path,\n is_dir: data.is_dir,\n size: data.size,\n modified_at: data.modified_at,\n });\n } catch {\n // Skip malformed lines\n }\n }\n return infos;\n }\n\n /**\n * Read file content with line numbers.\n */\n async read(\n filePath: string,\n offset: number = 0,\n limit: number = DEFAULT_READ_LIMIT\n ): Promise<string> {\n const pathB64 = toBase64(filePath);\n const emptyReminder = SYSTEM_REMINDER_FILE_EMPTY;\n const script = `\nconst fs = require(\"fs\");\nconst filePath = Buffer.from(\"__PATH__\", \"base64\").toString(\"utf-8\");\nconst offset = __OFFSET__;\nconst limit = __LIMIT__;\n\nif 
(!fs.existsSync(filePath)) {\n console.error(\"Error: File not found\");\n process.exit(1);\n}\n\nconst stat = fs.statSync(filePath);\nif (stat.size === 0) {\n console.log(\"${emptyReminder}\");\n process.exit(0);\n}\n\nconst content = fs.readFileSync(filePath, \"utf-8\");\nconst lines = content.split(\"\\\\n\");\nconst selected = lines.slice(offset, offset + limit);\n\nfor (let i = 0; i < selected.length; i++) {\n const lineNum = (offset + i + 1).toString().padStart(6, \" \");\n console.log(lineNum + \"\\\\t\" + selected[i]);\n}\n`;\n const result = await this.execute(\n buildNodeScript(script, {\n PATH: pathB64,\n OFFSET: String(offset),\n LIMIT: String(limit),\n })\n );\n\n if (result.exitCode !== 0) {\n if (result.output.includes(\"Error: File not found\")) {\n return FILE_NOT_FOUND(filePath);\n }\n return result.output.trim();\n }\n\n return result.output.trimEnd();\n }\n\n /**\n * Read raw file data.\n */\n async readRaw(filePath: string): Promise<FileData> {\n const pathB64 = toBase64(filePath);\n const script = `\nconst fs = require(\"fs\");\nconst filePath = Buffer.from(\"__PATH__\", \"base64\").toString(\"utf-8\");\n\nif (!fs.existsSync(filePath)) {\n console.error(\"Error: File not found\");\n process.exit(1);\n}\n\nconst stat = fs.statSync(filePath);\nconst content = fs.readFileSync(filePath, \"utf-8\");\n\nconsole.log(JSON.stringify({\n content: content.split(\"\\\\n\"),\n created_at: stat.birthtime.toISOString(),\n modified_at: stat.mtime.toISOString()\n}));\n`;\n const result = await this.execute(buildNodeScript(script, { PATH: pathB64 }));\n\n if (result.exitCode !== 0) {\n throw new Error(`File '${filePath}' not found`);\n }\n\n try {\n const data = JSON.parse(result.output.trim());\n return {\n content: data.content,\n created_at: data.created_at,\n modified_at: data.modified_at,\n };\n } catch {\n throw new Error(`Failed to parse file data for '${filePath}'`);\n }\n }\n\n /**\n * Write content to a new file.\n */\n async write(filePath: string, content: string): Promise<WriteResult> {\n const pathB64 = toBase64(filePath);\n const contentB64 = toBase64(content);\n const script = `\nconst fs = require(\"fs\");\nconst path = require(\"path\");\n\nconst filePath = Buffer.from(\"__PATH__\", \"base64\").toString(\"utf-8\");\nconst content = Buffer.from(\"__CONTENT__\", \"base64\").toString(\"utf-8\");\n\nif (fs.existsSync(filePath)) {\n console.error(\"Error: File already exists\");\n process.exit(1);\n}\n\nconst dir = path.dirname(filePath);\nif (dir && dir !== \".\") {\n fs.mkdirSync(dir, { recursive: true });\n}\n\nfs.writeFileSync(filePath, content, \"utf-8\");\n`;\n const result = await this.execute(\n buildNodeScript(script, { PATH: pathB64, CONTENT: contentB64 })\n );\n\n if (result.exitCode !== 0) {\n if (result.output.includes(\"already exists\")) {\n return {\n success: false,\n error: `Cannot write to ${filePath} because it already exists. 
Read and then make an edit, or write to a new path.`,\n };\n }\n return { success: false, error: result.output.trim() || `Failed to write '${filePath}'` };\n }\n\n return { success: true, path: filePath };\n }\n\n /**\n * Edit a file by replacing string occurrences.\n */\n async edit(\n filePath: string,\n oldString: string,\n newString: string,\n replaceAll: boolean = false\n ): Promise<EditResult> {\n const pathB64 = toBase64(filePath);\n const oldB64 = toBase64(oldString);\n const newB64 = toBase64(newString);\n const script = `\nconst fs = require(\"fs\");\n\nconst filePath = Buffer.from(\"__PATH__\", \"base64\").toString(\"utf-8\");\nconst oldStr = Buffer.from(\"__OLD__\", \"base64\").toString(\"utf-8\");\nconst newStr = Buffer.from(\"__NEW__\", \"base64\").toString(\"utf-8\");\nconst replaceAll = __REPLACE_ALL__;\n\nif (!fs.existsSync(filePath)) {\n console.error(\"Error: File not found\");\n process.exit(1);\n}\n\nlet content = fs.readFileSync(filePath, \"utf-8\");\nconst count = content.split(oldStr).length - 1;\n\nif (count === 0) {\n process.exit(2);\n}\nif (count > 1 && !replaceAll) {\n process.exit(3);\n}\n\nif (replaceAll) {\n content = content.split(oldStr).join(newStr);\n} else {\n content = content.replace(oldStr, newStr);\n}\n\nfs.writeFileSync(filePath, content, \"utf-8\");\nconsole.log(count);\n`;\n const result = await this.execute(\n buildNodeScript(script, {\n PATH: pathB64,\n OLD: oldB64,\n NEW: newB64,\n REPLACE_ALL: String(replaceAll),\n })\n );\n\n if (result.exitCode === 1) {\n return { success: false, error: FILE_NOT_FOUND(filePath) };\n }\n if (result.exitCode === 2) {\n return { success: false, error: STRING_NOT_FOUND(filePath, oldString) };\n }\n if (result.exitCode === 3) {\n return {\n success: false,\n error: `Error: String '${oldString}' appears multiple times. Use replaceAll=true to replace all occurrences.`,\n };\n }\n\n const count = parseInt(result.output.trim(), 10) || 1;\n return { success: true, path: filePath, occurrences: count };\n }\n\n /**\n * Search for pattern in files.\n */\n async grepRaw(\n pattern: string,\n path: string = \"/\",\n glob: string | null = null\n ): Promise<GrepMatch[] | string> {\n const patternB64 = toBase64(pattern);\n const pathB64 = toBase64(path);\n const globB64 = glob ? 
toBase64(glob) : toBase64(\"**/*\");\n const script = `\nconst fs = require(\"fs\");\nconst path = require(\"path\");\n\nconst pattern = Buffer.from(\"__PATTERN__\", \"base64\").toString(\"utf-8\");\nconst basePath = Buffer.from(\"__PATH__\", \"base64\").toString(\"utf-8\");\nconst fileGlob = Buffer.from(\"__GLOB__\", \"base64\").toString(\"utf-8\");\n\nfunction walkDir(dir, baseDir) {\n const results = [];\n try {\n const entries = fs.readdirSync(dir, { withFileTypes: true });\n for (const entry of entries) {\n const fullPath = path.join(dir, entry.name);\n const relativePath = path.relative(baseDir, fullPath);\n \n if (entry.isDirectory()) {\n results.push(...walkDir(fullPath, baseDir));\n } else {\n results.push(relativePath);\n }\n }\n } catch (e) {}\n return results;\n}\n\nfunction matchGlob(filepath, pattern) {\n if (!pattern || pattern === \"**/*\") return true;\n const regex = pattern\n .replace(/\\\\./g, \"\\\\\\\\.\")\n .replace(/\\\\*\\\\*/g, \"<<<GLOBSTAR>>>\")\n .replace(/\\\\*/g, \"[^/]*\")\n .replace(/<<<GLOBSTAR>>>/g, \".*\")\n .replace(/\\\\?/g, \".\");\n return new RegExp(\"^\" + regex + \"$\").test(filepath);\n}\n\nconst allFiles = walkDir(basePath, basePath);\nconst files = allFiles.filter(f => matchGlob(f, fileGlob)).sort();\n\nfor (const file of files) {\n try {\n const fullPath = path.join(basePath, file);\n const content = fs.readFileSync(fullPath, \"utf-8\");\n const lines = content.split(\"\\\\n\");\n \n for (let i = 0; i < lines.length; i++) {\n if (lines[i].includes(pattern)) {\n console.log(JSON.stringify({\n path: file,\n line: i + 1,\n text: lines[i]\n }));\n }\n }\n } catch (e) {}\n}\n`;\n const result = await this.execute(\n buildNodeScript(script, {\n PATTERN: patternB64,\n PATH: pathB64,\n GLOB: globB64,\n })\n );\n\n const matches: GrepMatch[] = [];\n for (const line of result.output.trim().split(\"\\n\")) {\n if (!line) continue;\n try {\n const data = JSON.parse(line);\n matches.push({\n path: data.path,\n line: data.line,\n text: data.text,\n });\n } catch {\n // Skip malformed lines\n }\n }\n return matches;\n }\n\n /**\n * Find files matching glob pattern.\n */\n async globInfo(pattern: string, path: string = \"/\"): Promise<FileInfo[]> {\n const pathB64 = toBase64(path);\n const patternB64 = toBase64(pattern);\n const script = `\nconst fs = require(\"fs\");\nconst path = require(\"path\");\n\nconst basePath = Buffer.from(\"__PATH__\", \"base64\").toString(\"utf-8\");\nconst pattern = Buffer.from(\"__PATTERN__\", \"base64\").toString(\"utf-8\");\n\nfunction walkDir(dir, baseDir) {\n const results = [];\n try {\n const entries = fs.readdirSync(dir, { withFileTypes: true });\n for (const entry of entries) {\n const fullPath = path.join(dir, entry.name);\n const relativePath = path.relative(baseDir, fullPath);\n \n if (entry.isDirectory()) {\n results.push(...walkDir(fullPath, baseDir));\n } else {\n results.push(relativePath);\n }\n }\n } catch (e) {}\n return results;\n}\n\nfunction matchGlob(filepath, pattern) {\n const regex = pattern\n .replace(/\\\\./g, \"\\\\\\\\.\")\n .replace(/\\\\*\\\\*/g, \"<<<GLOBSTAR>>>\")\n .replace(/\\\\*/g, \"[^/]*\")\n .replace(/<<<GLOBSTAR>>>/g, \".*\")\n .replace(/\\\\?/g, \".\");\n return new RegExp(\"^\" + regex + \"$\").test(filepath);\n}\n\nconst allFiles = walkDir(basePath, basePath);\nconst matches = allFiles.filter(f => matchGlob(f, pattern)).sort();\n\nfor (const m of matches) {\n try {\n const fullPath = path.join(basePath, m);\n const stat = fs.statSync(fullPath);\n console.log(JSON.stringify({\n path: m,\n 
is_dir: stat.isDirectory(),\n size: stat.size,\n modified_at: stat.mtime.toISOString()\n }));\n } catch (e) {}\n}\n`;\n const result = await this.execute(\n buildNodeScript(script, { PATH: pathB64, PATTERN: patternB64 })\n );\n\n const infos: FileInfo[] = [];\n for (const line of result.output.trim().split(\"\\n\")) {\n if (!line) continue;\n try {\n const data = JSON.parse(line);\n infos.push({\n path: data.path,\n is_dir: data.is_dir,\n size: data.size,\n modified_at: data.modified_at,\n });\n } catch {\n // Skip malformed lines\n }\n }\n return infos;\n }\n}\n","/**\n * LocalSandbox: Execute commands locally using child_process.\n *\n * Useful for local development and testing without cloud sandboxes.\n * All file operations are inherited from BaseSandbox and executed\n * via shell commands in the local filesystem.\n */\n\nimport { spawn } from \"child_process\";\nimport type { ExecuteResponse } from \"../types\";\nimport { BaseSandbox } from \"./sandbox\";\n\n/**\n * Options for LocalSandbox.\n */\nexport interface LocalSandboxOptions {\n /**\n * Working directory for command execution.\n * All file paths in sandbox operations are relative to this directory.\n * @default process.cwd()\n */\n cwd?: string;\n\n /**\n * Timeout in milliseconds for command execution.\n * Commands that exceed this timeout will be terminated.\n * @default 30000 (30 seconds)\n */\n timeout?: number;\n\n /**\n * Additional environment variables to set for command execution.\n * These are merged with the current process environment.\n */\n env?: Record<string, string>;\n\n /**\n * Maximum output size in bytes before truncation.\n * @default 1048576 (1MB)\n */\n maxOutputSize?: number;\n}\n\n/**\n * Local sandbox that executes commands using Node.js child_process.\n *\n * All commands are executed in a bash shell with the specified working directory.\n * Inherits all file operations (read, write, edit, ls, grep, glob) from BaseSandbox.\n *\n * @example Basic usage\n * ```typescript\n * import { LocalSandbox } from 'deepagentsdk';\n *\n * const sandbox = new LocalSandbox({ cwd: './workspace' });\n *\n * // Execute commands\n * const result = await sandbox.execute('ls -la');\n * console.log(result.output);\n *\n * // File operations\n * await sandbox.write('./src/index.ts', 'console.log(\"hello\")');\n * const content = await sandbox.read('./src/index.ts');\n * ```\n *\n * @example With timeout and environment\n * ```typescript\n * const sandbox = new LocalSandbox({\n * cwd: './workspace',\n * timeout: 60000, // 60 seconds\n * env: {\n * NODE_ENV: 'development',\n * DEBUG: '*',\n * },\n * });\n * ```\n *\n * @example Error handling\n * ```typescript\n * const result = await sandbox.execute('npm test');\n * if (result.exitCode !== 0) {\n * console.error('Tests failed:', result.output);\n * }\n * ```\n */\nexport class LocalSandbox extends BaseSandbox {\n private readonly cwd: string;\n private readonly timeout: number;\n private readonly env: Record<string, string>;\n private readonly maxOutputSize: number;\n private readonly _id: string;\n\n /**\n * Create a new LocalSandbox instance.\n *\n * @param options - Configuration options for the sandbox\n */\n constructor(options: LocalSandboxOptions = {}) {\n super();\n this.cwd = options.cwd || process.cwd();\n this.timeout = options.timeout || 30000;\n this.env = options.env || {};\n this.maxOutputSize = options.maxOutputSize || 1024 * 1024; // 1MB\n this._id = `local-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;\n }\n\n /**\n * Unique identifier for this 
sandbox instance.\n * Format: `local-{timestamp}-{random}`\n */\n get id(): string {\n return this._id;\n }\n\n /**\n * Execute a shell command in the local filesystem.\n *\n * Commands are executed using bash with the configured working directory\n * and environment variables. Output is captured from both stdout and stderr.\n *\n * @param command - Shell command to execute\n * @returns ExecuteResponse with output, exit code, and truncation status\n *\n * @example\n * ```typescript\n * const result = await sandbox.execute('echo \"Hello\" && ls -la');\n * console.log(result.output);\n * console.log('Exit code:', result.exitCode);\n * ```\n */\n async execute(command: string): Promise<ExecuteResponse> {\n return new Promise((resolve) => {\n const child = spawn(\"bash\", [\"-c\", command], {\n cwd: this.cwd,\n env: { ...process.env, ...this.env },\n timeout: this.timeout,\n });\n\n let output = \"\";\n let truncated = false;\n\n child.stdout.on(\"data\", (data: Buffer) => {\n if (output.length < this.maxOutputSize) {\n output += data.toString();\n } else {\n truncated = true;\n }\n });\n\n child.stderr.on(\"data\", (data: Buffer) => {\n if (output.length < this.maxOutputSize) {\n output += data.toString();\n } else {\n truncated = true;\n }\n });\n\n child.on(\"close\", (code) => {\n resolve({\n output,\n exitCode: code,\n truncated,\n });\n });\n\n child.on(\"error\", (err) => {\n resolve({\n output: `Error: ${err.message}`,\n exitCode: 1,\n truncated: false,\n });\n });\n });\n }\n}\n\n","/**\n * Utility to parse model strings into LanguageModel instances.\n * Provides backward compatibility for CLI and other string-based model specifications.\n */\n\nimport { anthropic } from \"@ai-sdk/anthropic\";\nimport { openai } from \"@ai-sdk/openai\";\nimport type { LanguageModel } from \"ai\";\n\n/**\n * Parse a model string into a LanguageModel instance.\n *\n * Supports formats like:\n * - \"anthropic/claude-sonnet-4-20250514\"\n * - \"openai/gpt-4o\"\n * - \"claude-sonnet-4-20250514\" (defaults to Anthropic)\n *\n * @param modelString - The model string to parse\n * @returns A LanguageModel instance\n *\n * @example\n * ```typescript\n * const model = parseModelString(\"anthropic/claude-sonnet-4-20250514\");\n * const agent = createDeepAgent({ model });\n * ```\n */\nexport function parseModelString(modelString: string): LanguageModel {\n const [provider, modelName] = modelString.split(\"/\");\n\n if (provider === \"anthropic\") {\n return anthropic(modelName || \"claude-sonnet-4-20250514\");\n } else if (provider === \"openai\") {\n return openai(modelName || \"gpt-5-mini\") as any;\n }\n\n // Default to anthropic if no provider specified\n return anthropic(modelString);\n}\n","/**\n * File-based checkpoint saver for local development.\n */\n\nimport { existsSync, mkdirSync, readFileSync, writeFileSync, unlinkSync, readdirSync } from \"node:fs\";\nimport { join } from \"node:path\";\nimport type { Checkpoint, BaseCheckpointSaver } from \"./types\";\n\n/**\n * Options for FileSaver.\n */\nexport interface FileSaverOptions {\n /** Directory to store checkpoint files */\n dir: string;\n}\n\n/**\n * File-based checkpoint saver.\n * \n * Stores checkpoints as JSON files in a directory. 
Each thread gets\n * its own file named `{threadId}.json`.\n * \n * @example\n * ```typescript\n * const saver = new FileSaver({ dir: './.checkpoints' });\n * const agent = createDeepAgent({\n * model: anthropic('claude-sonnet-4-20250514'),\n * checkpointer: saver,\n * });\n * ```\n */\nexport class FileSaver implements BaseCheckpointSaver {\n private dir: string;\n\n constructor(options: FileSaverOptions) {\n this.dir = options.dir;\n \n // Ensure directory exists\n if (!existsSync(this.dir)) {\n mkdirSync(this.dir, { recursive: true });\n }\n }\n\n private getFilePath(threadId: string): string {\n // Sanitize threadId to be safe for filenames\n const safeId = threadId.replace(/[^a-zA-Z0-9_-]/g, '_');\n return join(this.dir, `${safeId}.json`);\n }\n\n async save(checkpoint: Checkpoint): Promise<void> {\n const filePath = this.getFilePath(checkpoint.threadId);\n const data = {\n ...checkpoint,\n updatedAt: new Date().toISOString(),\n };\n writeFileSync(filePath, JSON.stringify(data, null, 2), 'utf-8');\n }\n\n async load(threadId: string): Promise<Checkpoint | undefined> {\n const filePath = this.getFilePath(threadId);\n \n if (!existsSync(filePath)) {\n return undefined;\n }\n \n try {\n const content = readFileSync(filePath, 'utf-8');\n return JSON.parse(content) as Checkpoint;\n } catch {\n return undefined;\n }\n }\n\n async list(): Promise<string[]> {\n if (!existsSync(this.dir)) {\n return [];\n }\n \n const files = readdirSync(this.dir);\n return files\n .filter(f => f.endsWith('.json'))\n .map(f => f.replace('.json', ''));\n }\n\n async delete(threadId: string): Promise<void> {\n const filePath = this.getFilePath(threadId);\n \n if (existsSync(filePath)) {\n unlinkSync(filePath);\n }\n }\n\n async exists(threadId: string): Promise<boolean> {\n const filePath = this.getFilePath(threadId);\n return existsSync(filePath);\n 
}\n}\n\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAwBaA,iCAA+B;CAY/BC,oCAAkC;CAYlC,iBAAiB;CAejBC,0BAAwB;CAUxB,oBAAoB;CAYpB,6BAA6B;CAyB7B,qBAAqB;CAUrB,kBAAkB;CAWlB,mBAAmB;CA0BnB,0BAA0B;CAU1B,qBAAqB,0BAA0B;CA2B/C,oBAAoB;;;;;;;;AC/DjC,SAAgB,wBAAwB,OAAsD;AAC5F,QAAO;EAAE,MAAM;EAAiB;EAAO;;;;;AAUzC,SAAgB,0BACd,MACA,SACqB;AACrB,QAAO;EAAE,MAAM;EAAoB;EAAM;EAAS;;;;;AAMpD,SAAgB,uBACd,MACA,SACkB;AAClB,QAAO;EAAE,MAAM;EAAgB;EAAM;EAAS;;;;;AAMhD,SAAgB,sBACd,MACA,aACiB;AACjB,QAAO;EAAE,MAAM;EAAe;EAAM;EAAa;;;;;AAMnD,SAAgB,oBAAoB,MAAc,OAA8B;AAC9E,QAAO;EAAE,MAAM;EAAa;EAAM;EAAO;;;;;AA6D3C,SAAgB,0BAA0B,OAAoC;AAC5E,QAAO;EAAE,MAAM;EAAoB;EAAO;;;;;AAM5C,SAAgB,2BACd,OACA,aACsB;AACtB,QAAO;EAAE,MAAM;EAAqB;EAAO;EAAa;;;;;AAM1D,SAAgB,4BACd,KACA,QACuB;AACvB,QAAO;EAAE,MAAM;EAAsB;EAAK;EAAQ;;;;;AAMpD,SAAgB,6BACd,KACA,YACwB;AACxB,QAAO;EAAE,MAAM;EAAuB;EAAK;EAAY;;;;;AAMzD,SAAgB,yBAAyB,KAAiC;AACxE,QAAO;EAAE,MAAM;EAAmB;EAAK;;;;;AAMzC,SAAgB,0BACd,KACA,SACqB;AACrB,QAAO;EAAE,MAAM;EAAoB;EAAK;EAAS;;;;;AAUnD,SAAgB,yBACd,MACA,MACoB;AACpB,QAAO;EAAE,MAAM;EAAkB;EAAM;EAAM;;;;;AAM/C,SAAgB,0BACd,MACA,QACqB;AACrB,QAAO;EAAE,MAAM;EAAmB;EAAM;EAAQ;;;;;AAMlD,SAAgB,wBACd,WACA,WACmB;AACnB,QAAO;EAAE,MAAM;EAAiB;EAAW;EAAW;;;;;AAoCxD,SAAgB,2BACd,UACA,MACsB;AACtB,QAAO;EAAE,MAAM;EAAoB;EAAU;EAAM;;;;;AAMrD,SAAgB,4BACd,UACA,MACA,eACuB;AACvB,QAAO;EAAE,MAAM;EAAqB;EAAU;EAAM;EAAe;;;;;;;;;AC/ErE,SAAgB,iBACd,SACmC;AACnC,QACE,OAAQ,QAAmC,YAAY,cACvD,OAAQ,QAAmC,gBAAgB,cAC3D,OAAQ,QAAmC,kBAAkB,cAC7D,OAAQ,QAAmC,OAAO;;;;;;;;ACnStD,MAAa,cAAc;AAE3B,MAAa,qBAAqB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAmClC,MAAa,2BAA2B;;;;;;;;;;AAWxC,MAAa,qBAAqB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA+BlC,SAAgB,uBAAuB,sBAAwC;AAC7E,QAAO;;;;EAIP,qBAAqB,KAAK,KAAK,CAAC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;IAyC9B,MAAM;;AAGV,MAAa,sCACX;AAEF,MAAa,0BACX;AAEF,MAAa,wBAAwB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAgCrC,SAAgB,kBAAkB,QAA4E;AAC5G,KAAI,OAAO,WAAW,EACpB,QAAO;AAOT,QAAO;;;;;;EAJY,OAChB,KAAI,UAAS,OAAO,MAAM,KAAK,MAAM,MAAM,YAAY,eAAe,MAAM,KAAK,0BAA0B,CAC3G,KAAK,KAAK,CAQF;;;;;;;;;;;;;;;;;aClL6C;AAE1D,MAAM,iBAAiB,EAAE,OAAO;CAC9B,IAAI,EAAE,QAAQ,CAAC,SAAS,sCAAsC;CAC9D,SAAS,EACN,QAAQ,CACR,IAAI,IAAI,CACR,SAAS,2DAA2D;CACvE,QAAQ,EACL,KAAK;EAAC;EAAW;EAAe;EAAa;EAAY,CAAC,CAC1D,SAAS,sCAAsC;CACnD,CAAC;;;;;;AAOF,SAAgB,gBAAgB,OAAuB,SAAyB;AAC9E,QAAO,KAAK;EACV,aAAa;;;;;;;;;;EAUb,aAAa,EAAE,OAAO;GACpB,OAAO,EACJ,MAAM,eAAe,CACrB,IAAI,EAAE,CACN,SAAS,+BAA+B;GAC3C,OAAO,EACJ,SAAS,CACT,QAAQ,KAAK,CACb,SACC,qEACD;GACJ,CAAC;EACF,SAAS,OAAO,EAAE,OAAO,YAAY;AACnC,OAAI,OAAO;IAET,MAAM,8BAAc,IAAI,KAAuB;AAC/C,SAAK,MAAM,QAAQ,MAAM,MACvB,aAAY,IAAI,KAAK,IAAI,KAAK;AAGhC,SAAK,MAAM,WAAW,OAAO;KAC3B,MAAM,WAAW,YAAY,IAAI,QAAQ,GAAG;AAC5C,SAAI,SAEF,aAAY,IAAI,QAAQ,IAAI;MAC1B,GAAG;MACH,GAAG;MACJ,CAAC;SAGF,aAAY,IAAI,QAAQ,IAAI,QAAQ;;AAIxC,UAAM,QAAQ,MAAM,KAAK,YAAY,QAAQ,CAAC;SAG9C,OAAM,QAAQ;AAIhB,OAAI,QACF,SAAQ,wBAAwB,CAAC,GAAG,MAAM,MAAM,CAAC,CAAC;AAQpD,UAAO,sDAJU,MAAM,MACpB,KAAK,MAAM,MAAM,EAAE,OAAO,IAAI,EAAE,GAAG,IAAI,EAAE,UAAU,CACnD,KAAK,KAAK;;EAIhB,CAAC;;;;;;AAWJ,MAAa,cAAc;;;;;;CC/Fd,kBAAkB,SAC7B,gBAAgB,KAAK;CAEV,uBAAuB,SAClC,mBAAmB,KAAK;CAEb,oBAAoB,MAAc,WAC7C,qCAAqC,KAAK,OAAO;CAEtC,iBAAiB,YAC5B,0BAA0B;CAEf,oBAAoB,YAC/B,qBAAqB;CAEV,mBAAmB,YAC9B,2BAA2B,QAAQ;CAExB,6BACX;;;;;;;;aCjB8E;aAKnD;AAG7B,MAAa,wBAAwB;;;;AAUrC,SAAgB,6BACd,SACA,YAAoB,GACZ;CACR,IAAI;AACJ,KAAI,OAAO,YAAY,UAAU;AAC/B,UAAQ,QAAQ,MAAM,KAAK;AAC3B,MAAI,MAAM,SAAS,KAAK,MAAM,MAAM,SAAS,OAAO,GAClD,SAAQ,MAAM,MAAM,GAAG,GAAG;OAG5B,SAAQ;CAGV,MAAM,cAAwB,EAAE;AAChC,MAAK,IAAI,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK;EACrC,MAAM,OAAO,MAAM;EACnB,MAAM,UAAU,IAAI;AAEpB,MAAI,QAAQ,KAAK,UAAU,gBACzB,aAAY,KACV,GAAG,QAA
Q,UAAU,CAAC,SAAS,kBAAkB,CAAC,IAAI,OACvD;WACQ,MAAM;GAEf,MAAM,YAAY,KAAK,KAAK,KAAK,SAAS,gBAAgB;AAC1D,QAAK,IAAI,WAAW,GAAG,WAAW,WAAW,YAAY;IACvD,MAAM,QAAQ,WAAW;IACzB,MAAM,MAAM,KAAK,IAAI,QAAQ,iBAAiB,KAAK,OAAO;IAC1D,MAAM,QAAQ,KAAK,UAAU,OAAO,IAAI;AACxC,QAAI,aAAa,EACf,aAAY,KACV,GAAG,QAAQ,UAAU,CAAC,SAAS,kBAAkB,CAAC,IAAI,QACvD;SACI;KACL,MAAM,qBAAqB,GAAG,QAAQ,GAAG;AACzC,iBAAY,KACV,GAAG,mBAAmB,SAAS,kBAAkB,CAAC,IAAI,QACvD;;;QAIL,aAAY,KACV,GAAG,QAAQ,UAAU,CAAC,SAAS,kBAAkB,CAAC,IACnD;;AAIL,QAAO,YAAY,KAAK,KAAK;;;;;AAM/B,SAAgB,kBAAkB,SAAgC;AAChE,KAAI,CAAC,WAAW,QAAQ,MAAM,KAAK,GACjC,QAAO;AAET,QAAO;;;;;AAMT,SAAgB,iBAAiB,UAA4B;AAC3D,QAAO,SAAS,QAAQ,KAAK,KAAK;;;;;AAMpC,SAAgB,eAAe,SAAiB,WAA8B;CAC5E,MAAM,QAAQ,OAAO,YAAY,WAAW,QAAQ,MAAM,KAAK,GAAG;CAClE,MAAM,uBAAM,IAAI,MAAM,EAAC,aAAa;AAEpC,QAAO;EACL,SAAS;EACT,YAAY,aAAa;EACzB,aAAa;EACd;;;;;AAMH,SAAgB,eAAe,UAAoB,SAA2B;CAC5E,MAAM,QAAQ,OAAO,YAAY,WAAW,QAAQ,MAAM,KAAK,GAAG;CAClE,MAAM,uBAAM,IAAI,MAAM,EAAC,aAAa;AAEpC,QAAO;EACL,SAAS;EACT,YAAY,SAAS;EACrB,aAAa;EACd;;;;;AAMH,SAAgB,mBACd,UACA,QACA,OACQ;CACR,MAAM,UAAU,iBAAiB,SAAS;CAC1C,MAAM,WAAW,kBAAkB,QAAQ;AAC3C,KAAI,SACF,QAAO;CAGT,MAAM,QAAQ,QAAQ,MAAM,KAAK;CACjC,MAAM,WAAW;CACjB,MAAM,SAAS,KAAK,IAAI,WAAW,OAAO,MAAM,OAAO;AAEvD,KAAI,YAAY,MAAM,OACpB,QAAO,sBAAsB,OAAO,wBAAwB,MAAM,OAAO;AAI3E,QAAO,6BADe,MAAM,MAAM,UAAU,OAAO,EACA,WAAW,EAAE;;;;;AAMlE,SAAgB,yBACd,SACA,WACA,WACA,YAC2B;CAC3B,MAAM,cAAc,QAAQ,MAAM,UAAU,CAAC,SAAS;AAEtD,KAAI,gBAAgB,EAClB,QAAO,qCAAqC,UAAU;AAGxD,KAAI,cAAc,KAAK,CAAC,WACtB,QAAO,kBAAkB,UAAU,YAAY,YAAY;AAI7D,QAAO,CADY,QAAQ,MAAM,UAAU,CAAC,KAAK,UAAU,EACvC,YAAY;;;;;AAMlC,SAAgB,aAAa,MAAyC;CACpE,MAAM,UAAU,QAAQ;AACxB,KAAI,CAAC,WAAW,QAAQ,MAAM,KAAK,GACjC,OAAM,IAAI,MAAM,uBAAuB;CAGzC,IAAI,aAAa,QAAQ,WAAW,IAAI,GAAG,UAAU,MAAM;AAE3D,KAAI,CAAC,WAAW,SAAS,IAAI,CAC3B,eAAc;AAGhB,QAAO;;;;;AAMT,SAAgB,gBACd,OACA,SACA,OAAe,KACP;CACR,IAAI;AACJ,KAAI;AACF,mBAAiB,aAAa,KAAK;SAC7B;AACN,SAAO;;CAGT,MAAM,WAAW,OAAO,YACtB,OAAO,QAAQ,MAAM,CAAC,QAAQ,CAAC,QAAQ,GAAG,WAAW,eAAe,CAAC,CACtE;CAED,MAAM,UAAmC,EAAE;AAC3C,MAAK,MAAM,CAAC,UAAU,aAAa,OAAO,QAAQ,SAAS,EAAE;EAC3D,IAAI,WAAW,SAAS,UAAU,eAAe,OAAO;AACxD,MAAI,SAAS,WAAW,IAAI,CAC1B,YAAW,SAAS,UAAU,EAAE;AAElC,MAAI,CAAC,UAAU;GACb,MAAM,QAAQ,SAAS,MAAM,IAAI;AACjC,cAAW,MAAM,MAAM,SAAS,MAAM;;AAGxC,MACE,WAAW,QAAQ,UAAU,SAAS;GACpC,KAAK;GACL,SAAS;GACV,CAAC,CAEF,SAAQ,KAAK,CAAC,UAAU,SAAS,YAAY,CAAC;;AAIlD,SAAQ,MAAM,GAAG,MAAM,EAAE,GAAG,cAAc,EAAE,GAAG,CAAC;AAEhD,KAAI,QAAQ,WAAW,EACrB,QAAO;AAGT,QAAO,QAAQ,KAAK,CAAC,QAAQ,GAAG,CAAC,KAAK,KAAK;;;;;AAM7C,SAAgB,qBACd,OACA,SACA,OAAsB,MACtB,SAAsB,MACA;CACtB,IAAI;AACJ,KAAI;AACF,UAAQ,IAAI,OAAO,QAAQ;UACpB,GAAY;AAEnB,SAAO,cADO,EACa,QAAQ;;CAGrC,IAAI;AACJ,KAAI;AACF,mBAAiB,aAAa,KAAK;SAC7B;AACN,SAAO,EAAE;;CAGX,IAAI,WAAW,OAAO,YACpB,OAAO,QAAQ,MAAM,CAAC,QAAQ,CAAC,QAAQ,GAAG,WAAW,eAAe,CAAC,CACtE;AAED,KAAIC,OACF,YAAW,OAAO,YAChB,OAAO,QAAQ,SAAS,CAAC,QAAQ,CAAC,QAChC,WAAW,QAAQ,SAAS,GAAG,EAAEA,QAAM;EAAE,KAAK;EAAM,SAAS;EAAO,CAAC,CACtE,CACF;CAGH,MAAM,UAAuB,EAAE;AAC/B,MAAK,MAAM,CAAC,UAAU,aAAa,OAAO,QAAQ,SAAS,CACzD,MAAK,IAAI,IAAI,GAAG,IAAI,SAAS,QAAQ,QAAQ,KAAK;EAChD,MAAM,OAAO,SAAS,QAAQ;EAC9B,MAAM,UAAU,IAAI;AACpB,MAAI,QAAQ,MAAM,KAAK,KAAK,CAC1B,SAAQ,KAAK;GAAE,MAAM;GAAU,MAAM;GAAS,MAAM;GAAM,CAAC;;AAKjE,QAAO;;;;;aCnQoB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA8B7B,IAAa,eAAb,MAAqD;CACnD,AAAQ;;;;;;;CAQR,YAAY,OAAuB;AACjC,OAAK,QAAQ;;;;;CAMf,AAAQ,WAAqC;AAC3C,SAAO,KAAK,MAAM,SAAS,EAAE;;;;;CAM/B,OAAO,MAA0B;EAC/B,MAAM,QAAQ,KAAK,UAAU;EAC7B,MAAM,QAAoB,EAAE;EAC5B,MAAM,0BAAU,IAAI,KAAa;EAEjC,MAAM,iBAAiB,KAAK,SAAS,IAAI,GAAG,OAAO,OAAO;AAE1D,OAAK,MAAM,CAAC,GAAG,OAAO,OAAO,QAAQ,MAAM,EAAE;AAC3C,OAAI,CAAC,EAAE,WAAW,eAAe,CAC/B;GAGF,MAAM,WAAW,EAAE,UAAU,eAAe,OAAO;AAEn
D,OAAI,SAAS,SAAS,IAAI,EAAE;IAC1B,MAAM,aAAa,SAAS,MAAM,IAAI,CAAC;AACvC,YAAQ,IAAI,iBAAiB,aAAa,IAAI;AAC9C;;GAGF,MAAM,OAAO,GAAG,QAAQ,KAAK,KAAK,CAAC;AACnC,SAAM,KAAK;IACT,MAAM;IACN,QAAQ;IACF;IACN,aAAa,GAAG;IACjB,CAAC;;AAGJ,OAAK,MAAM,UAAU,MAAM,KAAK,QAAQ,CAAC,MAAM,CAC7C,OAAM,KAAK;GACT,MAAM;GACN,QAAQ;GACR,MAAM;GACN,aAAa;GACd,CAAC;AAGJ,QAAM,MAAM,GAAG,MAAM,EAAE,KAAK,cAAc,EAAE,KAAK,CAAC;AAClD,SAAO;;;;;CAMT,KAAK,UAAkB,SAAiB,GAAG,QAAgB,KAAc;EAEvE,MAAM,WADQ,KAAK,UAAU,CACN;AAEvB,MAAI,CAAC,SACH,QAAO,eAAe,SAAS;AAGjC,SAAO,mBAAmB,UAAU,QAAQ,MAAM;;;;;CAMpD,QAAQ,UAA4B;EAElC,MAAM,WADQ,KAAK,UAAU,CACN;AAEvB,MAAI,CAAC,SAAU,OAAM,IAAI,MAAM,SAAS,SAAS,aAAa;AAC9D,SAAO;;;;;CAMT,MAAM,UAAkB,SAA8B;EACpD,MAAM,QAAQ,KAAK,UAAU;AAG7B,MAAI,CAAC,YAAY,SAAS,MAAM,KAAK,GACnC,QAAO;GACL,SAAS;GACT,OAAO;GACR;AAGH,MAAI,YAAY,MACd,QAAO;GACL,SAAS;GACT,OAAO,oBAAoB,SAAS;GACrC;EAGH,MAAM,cAAc,eAAe,QAAQ;AAC3C,OAAK,MAAM,MAAM,YAAY;AAC7B,SAAO;GAAE,SAAS;GAAM,MAAM;GAAU;;;;;CAM1C,KACE,UACA,WACA,WACA,aAAsB,OACV;EAEZ,MAAM,WADQ,KAAK,UAAU,CACN;AAEvB,MAAI,CAAC,SACH,QAAO;GAAE,SAAS;GAAO,OAAO,eAAe,SAAS;GAAE;EAI5D,MAAM,SAAS,yBADC,iBAAiB,SAAS,EAGxC,WACA,WACA,WACD;AAED,MAAI,OAAO,WAAW,SACpB,QAAO;GAAE,SAAS;GAAO,OAAO;GAAQ;EAG1C,MAAM,CAAC,YAAY,eAAe;EAClC,MAAM,cAAc,eAAe,UAAU,WAAW;AACxD,OAAK,MAAM,MAAM,YAAY;AAC7B,SAAO;GAAE,SAAS;GAAM,MAAM;GAAU;GAAa;;;;;CAMvD,QACE,SACA,OAAe,KACf,SAAsB,MACA;AAEtB,SAAO,qBADO,KAAK,UAAU,EACM,SAAS,MAAMC,OAAK;;;;;CAMzD,SAAS,SAAiB,OAAe,KAAiB;EACxD,MAAM,QAAQ,KAAK,UAAU;EAC7B,MAAM,SAAS,gBAAgB,OAAO,SAAS,KAAK;AAEpD,MAAI,WAAW,iBACb,QAAO,EAAE;EAGX,MAAM,QAAQ,OAAO,MAAM,KAAK;EAChC,MAAM,QAAoB,EAAE;AAC5B,OAAK,MAAM,KAAK,OAAO;GACrB,MAAM,KAAK,MAAM;GACjB,MAAM,OAAO,KAAK,GAAG,QAAQ,KAAK,KAAK,CAAC,SAAS;AACjD,SAAM,KAAK;IACT,MAAM;IACN,QAAQ;IACF;IACN,aAAa,IAAI,eAAe;IACjC,CAAC;;AAEJ,SAAO;;;;;;;;;;ACtNX,SAAgB,mBAAmB,YAA4B;AAC7D,QAAO,WACJ,QAAQ,mBAAmB,IAAI,CAC/B,UAAU,GAAG,IAAI;;;;;;AAOtB,SAAgB,eAAe,MAAsB;AACnD,QAAO,KAAK,KAAK,KAAK,SAAS,gBAAgB;;;;;AAMjD,SAAgB,YACd,QACA,aAAqB,8BACZ;AACT,QAAO,eAAe,OAAO,GAAG;;;;;;;;;;;;;;;;;;;;;;;;;AAsDlC,eAAsB,gBACpB,SACsB;CACtB,MAAM,EACJ,QACA,YACA,UACA,SACA,aAAa,iCACX;AAGJ,KAAI,CAAC,YAAY,QAAQ,WAAW,CAClC,QAAO;EACL,SAAS;EACT,SAAS;EACV;CAKH,MAAM,YAAY,uBAAuB,SAAS,GAD9B,mBAAmB,WAAW,CACe;CAGjE,MAAM,cAAc,MAAM,QAAQ,MAAM,WAAW,OAAO;AAE1D,KAAI,YAAY,OAAO;AAErB,UAAQ,KAAK,gCAAgC,YAAY,QAAQ;AACjE,SAAO;GACL,SAAS;GACT,SAAS;GACV;;AAOH,QAAO;EACL,SAAS;EACT,SAJuB,2BADD,eAAe,OAAO,CACsB,6BAA6B,UAAU;EAKzG,aAAa;EACd;;;;;;;;;;AAWH,SAAgB,wBACd,SACA,OACA,aAAqB,8BACsD;CAE3E,MAAM,kBACJ,OAAO,YAAY,aAAa,QAAQ,MAAM,GAAG;AAEnD,QAAO,OACL,QACA,YACA,aACoB;AASpB,UARoB,MAAM,gBAAgB;GACxC;GACA;GACA;GACA,SAAS;GACT;GACD,CAAC,EAEiB;;;;;cAxK0E;CAMpF,+BAA+BC;CAKtC,kBAAkB;;;;;;;;eCJG;aAMF;AAGzB,MAAM,sBAAsB;AAC5B,MAAM,6BAA6B;AACnC,MAAM,8BACJ;AACF,MAAM,6BACJ;AACF,MAAM,wBACJ;AACF,MAAM,wBACJ;;;;AAKF,SAASC,aACP,SACA,OACiB;AACjB,KAAI,OAAO,YAAY,WACrB,QAAO,QAAQ,MAAM;AAEvB,QAAO;;;;;AAMT,SAAgB,aACd,OACA,SACA,SACA;AACA,QAAO,KAAK;EACV,aAAa;EACb,aAAa,EAAE,OAAO,EACpB,MAAM,EACH,QAAQ,CACR,QAAQ,IAAI,CACZ,SAAS,sDAAsD,EACnE,CAAC;EACF,SAAS,OAAO,EAAE,WAAW;GAE3B,MAAM,QAAQ,MADUA,aAAW,SAAS,MAAM,CACd,OAAO,QAAQ,IAAI;AAGvD,OAAI,QACF,SAAQ;IACN,MAAM;IACN,MAAM,QAAQ;IACd,OAAO,MAAM;IACd,CAAC;AAGJ,OAAI,MAAM,WAAW,EACnB,QAAO,qBAAqB;GAG9B,MAAM,QAAkB,EAAE;AAC1B,QAAK,MAAM,QAAQ,MACjB,KAAI,KAAK,OACP,OAAM,KAAK,GAAG,KAAK,KAAK,cAAc;QACjC;IACL,MAAM,OAAO,KAAK,OAAO,KAAK,KAAK,KAAK,WAAW;AACnD,UAAM,KAAK,GAAG,KAAK,OAAO,OAAO;;AAGrC,UAAO,MAAM,KAAK,KAAK;;EAE1B,CAAC;;;;;AAMJ,SAAgB,mBACd,OACA,SACA,eACA,SACA;AACA,QAAO,KAAK;EACV,aAAa;EACb,aAAa,EAAE,OAAO;GACpB,WAAW,EAAE,QAAQ,CAAC,SAAS,gEAAgE;GAC/F,QAAQ,EACL,QAAQ,CACR,QAAQ,EAAE,CACV,SAAS,gDAAgD;GAC5D,O
AAO,EACJ,QAAQ,CACR,QAAQ,IAAK,CACb,SAAS,kCAAkC;GAC/C,CAAC;EACF,SAAS,OAAO,EAAE,WAAW,QAAQ,SAAS,EAAE,iBAAiB;GAC/D,MAAM,kBAAkBA,aAAW,SAAS,MAAM;GAClD,MAAM,UAAU,MAAM,gBAAgB,KAAK,WAAW,UAAU,GAAG,SAAS,IAAK;AAGjF,OAAI,SAAS;IACX,MAAM,YAAY,QAAQ,MAAM,KAAK,CAAC;AACtC,YAAQ,oBAAoB,WAAW,UAAU,CAAC;;AAIpD,OAAI,iBAAiB,gBAAgB,EAQnC,SAPoB,MAAM,gBAAgB;IACxC,QAAQ;IACR,YAAY,cAAc,QAAQ,KAAK,KAAK;IAC5C,UAAU;IACV,SAAS;IACT,YAAY;IACb,CAAC,EACiB;AAGrB,UAAO;;EAEV,CAAC;;;;;AAMJ,SAAgB,oBACd,OACA,SACA,SACA;AACA,QAAO,KAAK;EACV,aAAa;EACb,aAAa,EAAE,OAAO;GACpB,WAAW,EAAE,QAAQ,CAAC,SAAS,iEAAiE;GAChG,SAAS,EAAE,QAAQ,CAAC,SAAS,+BAA+B;GAC7D,CAAC;EACF,SAAS,OAAO,EAAE,WAAW,cAAc;AAEzC,OAAI,QACF,SAAQ,0BAA0B,WAAW,QAAQ,CAAC;GAIxD,MAAM,SAAS,MADSA,aAAW,SAAS,MAAM,CACb,MAAM,WAAW,QAAQ;AAE9D,OAAI,OAAO,MACT,QAAO,OAAO;AAIhB,OAAI,QACF,SAAQ,uBAAuB,WAAW,QAAQ,CAAC;AAGrD,UAAO,0BAA0B,UAAU;;EAE9C,CAAC;;;;;AAMJ,SAAgB,mBACd,OACA,SACA,SACA;AACA,QAAO,KAAK;EACV,aAAa;EACb,aAAa,EAAE,OAAO;GACpB,WAAW,EAAE,QAAQ,CAAC,SAAS,gEAAgE;GAC/F,YAAY,EACT,QAAQ,CACR,SAAS,6CAA6C;GACzD,YAAY,EAAE,QAAQ,CAAC,SAAS,yBAAyB;GACzD,aAAa,EACV,SAAS,CACT,QAAQ,MAAM,CACd,SAAS,qCAAqC;GAClD,CAAC;EACF,SAAS,OAAO,EAAE,WAAW,YAAY,YAAY,kBAAkB;GAErE,MAAM,SAAS,MADSA,aAAW,SAAS,MAAM,CACb,KACnC,WACA,YACA,YACA,eAAe,MAChB;AAED,OAAI,OAAO,MACT,QAAO,OAAO;AAIhB,OAAI,QACF,SAAQ,sBAAsB,WAAW,OAAO,eAAe,EAAE,CAAC;AAGpE,UAAO,yBAAyB,OAAO,YAAY,qBAAqB,UAAU;;EAErF,CAAC;;;;;AAMJ,SAAgB,eACd,OACA,SACA,SACA;AACA,QAAO,KAAK;EACV,aAAa;EACb,aAAa,EAAE,OAAO;GACpB,SAAS,EAAE,QAAQ,CAAC,SAAS,yCAAyC;GACtE,MAAM,EACH,QAAQ,CACR,QAAQ,IAAI,CACZ,SAAS,wDAAwD;GACrE,CAAC;EACF,SAAS,OAAO,EAAE,SAAS,WAAW;GAEpC,MAAM,QAAQ,MADUA,aAAW,SAAS,MAAM,CACd,SAAS,SAAS,QAAQ,IAAI;AAGlE,OAAI,QACF,SAAQ;IACN,MAAM;IACN;IACA,OAAO,MAAM;IACd,CAAC;AAGJ,OAAI,MAAM,WAAW,EACnB,QAAO,oCAAoC,QAAQ;AAGrD,UAAO,MAAM,KAAK,SAAS,KAAK,KAAK,CAAC,KAAK,KAAK;;EAEnD,CAAC;;;;;AAMJ,SAAgB,eACd,OACA,SACA,eACA,SACA;AACA,QAAO,KAAK;EACV,aAAa;EACb,aAAa,EAAE,OAAO;GACpB,SAAS,EAAE,QAAQ,CAAC,SAAS,8BAA8B;GAC3D,MAAM,EACH,QAAQ,CACR,QAAQ,IAAI,CACZ,SAAS,wDAAwD;GACpE,MAAM,EACH,QAAQ,CACR,UAAU,CACV,UAAU,CACV,SAAS,uDAAuD;GACpE,CAAC;EACF,SAAS,OAAO,EAAE,SAAS,MAAM,gBAAQ,EAAE,iBAAiB;GAC1D,MAAM,kBAAkBA,aAAW,SAAS,MAAM;GAClD,MAAM,SAAS,MAAM,gBAAgB,QACnC,SACA,QAAQ,KACRC,UAAQ,KACT;AAED,OAAI,OAAO,WAAW,UAAU;AAE9B,QAAI,QACF,SAAQ;KACN,MAAM;KACN;KACA,OAAO;KACR,CAAC;AAEJ,WAAO;;AAIT,OAAI,QACF,SAAQ;IACN,MAAM;IACN;IACA,OAAO,OAAO;IACf,CAAC;AAGJ,OAAI,OAAO,WAAW,EACpB,QAAO,iCAAiC,QAAQ;GAIlD,MAAM,QAAkB,EAAE;GAC1B,IAAI,cAA6B;AACjC,QAAK,MAAM,SAAS,QAAQ;AAC1B,QAAI,MAAM,SAAS,aAAa;AAC9B,mBAAc,MAAM;AACpB,WAAM,KAAK,KAAK,YAAY,GAAG;;AAEjC,UAAM,KAAK,KAAK,MAAM,KAAK,IAAI,MAAM,OAAO;;GAG9C,MAAM,UAAU,MAAM,KAAK,KAAK;AAGhC,OAAI,iBAAiB,gBAAgB,EAQnC,SAPoB,MAAM,gBAAgB;IACxC,QAAQ;IACR,YAAY,cAAc,QAAQ,KAAK,KAAK;IAC5C,UAAU;IACV,SAAS;IACT,YAAY;IACb,CAAC,EACiB;AAGrB,UAAO;;EAEV,CAAC;;;;;;;;AAqBJ,SAAgB,sBACd,OACA,kBACA,SACA;CAEA,IAAI;CACJ,IAAI,gBAA2C;CAC/C,IAAI;AAEJ,KAAI,oBAAoB,OAAO,qBAAqB,YAAY,aAAa,kBAAkB;EAE7F,MAAM,UAAU;AAChB,YAAU,QAAQ;AAClB,kBAAgB,QAAQ;AACxB,kBAAgB,QAAQ;OAGxB,WAAU;CAIZ,MAAM,kBACJ,aAAa,MAAsB,IAAI,aAAa,EAAE;AAExD,QAAO;EACL,IAAI,aAAa,OAAO,iBAAiB,cAAc;EACvD,WAAW,mBAAmB,OAAO,iBAAiB,eAAe,cAAc;EACnF,YAAY,oBAAoB,OAAO,iBAAiB,cAAc;EACtE,WAAW,mBAAmB,OAAO,iBAAiB,cAAc;EACpE,MAAM,eAAe,OAAO,iBAAiB,cAAc;EAC3D,MAAM,eAAe,OAAO,iBAAiB,eAAe,cAAc;EAC3E;;;;;;AAWH,MAAa,KAAK;AAClB,MAAa,YAAY;AACzB,MAAa,aAAa;AAC1B,MAAa,YAAY;AACzB,MAAa,OAAO;AACpB,MAAa,OAAO;;;;;;;;;;AC7YpB,eAAe,mBACb,QACA,MACkB;AAClB,KAAI,OAAO,WAAW,UACpB,QAAO;AAGT,KAAI,OAAO,cACT,QAAO,OAAO,cAAc,KAAK;AAGnC,QAAO;;;;;AAMT,SAAS,sBACP,QAC2D;AAC3D,KAAI,OAAO,WAAW,UACpB,QAAO;AAGT,KAAI,OAAO,cACT,QAAO,OAAO;AAGhB,QAAO;;A
AGT,IAAI,kBAAkB;AACtB,SAAS,qBAA6B;AACpC,QAAO,YAAY,KAAK,KAAK,CAAC,GAAG,EAAE;;;;;;;;;;;;;;;;;;;AAoBrC,SAAgB,qBACd,OACA,aACS;AACT,KAAI,CAAC,YACH,QAAO;CAGT,MAAM,SAAkB,EAAE;AAE1B,MAAK,MAAM,CAAC,MAAMC,WAAS,OAAO,QAAQ,MAAM,EAAE;EAChD,MAAM,SAAS,YAAY;AAE3B,MAAI,WAAW,UAAa,WAAW,MAErC,QAAO,QAAQA;MAGf,QAAO,QAAQ;GACb,GAAGA;GACH,eAAe,sBAAsB,OAAO;GAC7C;;AAIL,QAAO;;;;;;;;;;;;;;;AAgBT,SAAgB,sBACd,OACA,aACA,mBACS;AACT,KAAI,CAAC,YACH,QAAO;CAGT,MAAM,SAAkB,EAAE;AAE1B,MAAK,MAAM,CAAC,MAAM,iBAAiB,OAAO,QAAQ,MAAM,EAAE;EACxD,MAAM,SAAS,YAAY;AAE3B,MAAI,WAAW,UAAa,WAAW,MAErC,QAAO,QAAQ;OACV;GAEL,MAAM,kBAAkB,aAAa;AACrC,OAAI,CAAC,iBAAiB;AAEpB,WAAO,QAAQ;AACf;;AAKF,UAAO,QAAQ,KAAK;IAClB,aAAa,aAAa;IAC1B,aAAa,aAAa;IAC1B,SAAS,OAAO,MAAM,YAAY;AAIhC,SAFsB,MAAM,mBAAmB,QAAQ,KAAK,EAEzC;AAEjB,UAAI,CAAC,kBACH,QAAO,6DAA6D,KAAK;MAI3E,MAAM,aAAa,oBAAoB;AAWvC,UAAI,CAPa,MAAM,kBAAkB;OACvC;OACA,YALiB,SAAS,cAAc;OAMxC,UAAU;OACV;OACD,CAAC,CAIA,QAAO,sCAAsC,KAAK;;AAKtD,YAAO,gBAAgB,MAAM,QAAQ;;IAExC,CAAC;;;AAIN,QAAO;;;;;;;;;;;;;;;;;;;;;;AC9IT,SAAS,WACP,SACA,OACwB;AACxB,KAAI,CAAC,QAAS,QAAO;AACrB,KAAI,OAAO,YAAY,WACrB,QAAO,QAAQ,MAAM;AAEvB,QAAO;;;;;;AAWT,SAAS,eAAe,MAAc,KAAqB;AACzD,KAAI;EAEF,MAAM,MAAM,IAAI,MAAM,MAAM,EAAE,KAAK,CAAC;EAIpC,MAAM,UADS,IAAI,YAAY,IAAI,OAAO,SAAS,CAC5B,OAAO;AAE9B,MAAI,CAAC,QAGH,SADoB,IAAI,OAAO,SAAS,MAAM,eAAe,IAC1C,MAAM;EAS3B,MAAM,WALW,IAAI,gBAAgB;GACnC,cAAc;GACd,gBAAgB;GACjB,CAAC,CAEwB,SAAS,QAAQ,WAAW,GAAG;AAGzD,MAAI,QAAQ,MACV,QAAO,KAAK,QAAQ,MAAM,MAAM;AAGlC,SAAO;UACA,OAAO;AAEd,SAAO,sCAAsC,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM;;;;;;AAwBvG,SAAgB,oBACd,OACA,SAMA;CACA,MAAM,EAAE,SAAS,SAAS,yBAAyB,iBAAiB;AAEpE,QAAO,KAAK;EACV,aAAa;EACb,aAAa,EAAE,OAAO;GACpB,OAAO,EAAE,QAAQ,CAAC,SAChB,+DACD;GACD,aAAa,EACV,QAAQ,CACR,QAAQ,EAAE,CACV,SAAS,qCAAqC;GACjD,OAAO,EACJ,KAAK;IAAC;IAAW;IAAQ;IAAU,CAAC,CACpC,QAAQ,UAAU,CAClB,SAAS,wBAAwB;GACpC,qBAAqB,EAClB,SAAS,CACT,QAAQ,MAAM,CACd,SAAS,wDAAwD;GACrE,CAAC;EACF,SAAS,OAAO,EAAE,OAAO,aAAa,OAAO,uBAAuB,EAAE,iBAAiB;AAErF,OAAI,QACF,SAAQ,0BAA0B,MAAM,CAAC;AAG3C,OAAI;IAYF,MAAM,WAPW,MAHJ,OAAO,EAAE,QAAQ,cAAc,CAAC,CAGjB,OAAO,OAAO;KACxC,YAAY;KACZ;KACA,mBAAmB,sBAAsB,SAAS;KACnD,CAAC,EAGuB,WAAW,EAAE;IACtC,MAAM,mBAAmB,QACtB,KACE,GAAQ,MACP,aAAa,IAAI,EAAE,IAAI,EAAE,MAAM,SACvB,EAAE,IAAI,WACJ,EAAE,OAAO,QAAQ,EAAE,IAAI,MAAM,aAC3B,EAAE,QAAQ,IACzB,CACA,KAAK,YAAY;IAEpB,MAAM,SAAS,SAAS,QAAQ,OAAO,uBAAuB,MAAM,OAAO;AAG3E,QAAI,QACF,SAAQ,2BAA2B,OAAO,QAAQ,OAAO,CAAC;AAI5D,QAAI,2BAA2B,0BAA0B,KAAK,SAAS;KACrE,MAAM,kBAAkB,WAAW,SAAS,MAAM;AAClD,SAAI,gBAQF,SAPoB,MAAM,gBAAgB;MACxC,QAAQ;MACR,YAAY,cAAc,cAAc,KAAK,KAAK;MAClD,UAAU;MACV,SAAS;MACT,YAAY;MACb,CAAC,EACiB;;AAIvB,WAAO;YACA,OAAgB;IAEvB,MAAM,eAAe,iBADT,MAC8B,QAAQ;AAGlD,QAAI,QACF,SAAQ,2BAA2B,OAAO,EAAE,CAAC;AAG/C,WAAO;;;EAGZ,CAAC;;;;;AAeJ,SAAgB,sBACd,OACA,SAMA;CACA,MAAM,EAAE,SAAS,SAAS,yBAAyB,mBAAmB;AAEtE,QAAO,KAAK;EACV,aAAa;EACb,aAAa,EAAE,OAAO;GACpB,KAAK,EAAE,QAAQ,CAAC,KAAK,CAAC,SAAS,4CAA4C;GAC3E,QAAQ,EACL,KAAK;IAAC;IAAO;IAAQ;IAAO;IAAU;IAAQ,CAAC,CAC/C,QAAQ,MAAM,CACd,SAAS,cAAc;GAC1B,SAAS,EACN,OAAO,EAAE,QAAQ,CAAC,CAClB,UAAU,CACV,SAAS,kCAAkC;GAC9C,MAAM,EACH,MAAM,CAAC,EAAE,QAAQ,EAAE,EAAE,OAAO,EAAE,KAAK,CAAC,CAAC,CAAC,CACtC,UAAU,CACV,SAAS,uCAAuC;GACnD,QAAQ,EACL,OAAO,EAAE,QAAQ,CAAC,CAClB,UAAU,CACV,SAAS,0CAA0C;GACtD,SAAS,EACN,QAAQ,CACR,QAAQ,eAAe,CACvB,SAAS,6BAA6B;GAC1C,CAAC;EACF,SAAS,OAAO,EAAE,KAAK,QAAQ,SAAS,MAAM,QAAQ,WAAW,EAAE,iBAAiB;AAElF,OAAI,QACF,SAAQ,4BAA4B,KAAK,OAAO,CAAC;AAGnD,OAAI;IAEF,MAAM,SAAS,IAAI,IAAI,IAAI;AAC3B,QAAI,OACF,QAAO,QAAQ,OAAO,CAAC,SAAS,CAAC,KAAK,WAAW;AAC/C,YAAO,aAAa,OAAO,KAAK,MAAM;MACtC;IAIJ,MAAM,iBAA8B;KAClC;KACA,SAAS,WAAW,EAAE;KACtB,QAAQ,YAAY,QAAQ,UAAU,IAAK;KAC5C;AAGD,QAAI,KACF,KAAI,OAAO,SAAS,SAClB
,gBAAe,OAAO;SACjB;AACL,oBAAe,OAAO,KAAK,UAAU,KAAK;AAC1C,KAAC,eAAe,QAAmC,kBACjD;;IAKN,MAAM,WAAW,MAAM,MAAM,OAAO,UAAU,EAAE,eAAe;IAG/D,MAAM,cAAc,SAAS,QAAQ,IAAI,eAAe,IAAI;IAC5D,IAAI;AAEJ,QAAI,YAAY,SAAS,mBAAmB,CAC1C,KAAI;AACF,eAAU,MAAM,SAAS,MAAM;YACzB;AACN,eAAU,MAAM,SAAS,MAAM;;QAGjC,WAAU,MAAM,SAAS,MAAM;IAIjC,MAAM,kBACJ,QAAQ,OAAO,GAAG,IAAI,YACX,SAAS,OAAO,aACf,SAAS,GAAG,cACX,OAAO,YAAY,WAAW,UAAU,KAAK,UAAU,SAAS,MAAM,EAAE;AAGvF,QAAI,QACF,SAAQ,6BAA6B,SAAS,KAAK,SAAS,OAAO,CAAC;AAItE,QAAI,2BAA2B,0BAA0B,KAAK,SAAS;KACrE,MAAM,kBAAkB,WAAW,SAAS,MAAM;AAClD,SAAI,gBAQF,SAPoB,MAAM,gBAAgB;MACxC,QAAQ;MACR,YAAY,cAAc,gBAAgB,KAAK,KAAK;MACpD,UAAU;MACV,SAAS;MACT,YAAY;MACb,CAAC,EACiB;;AAIvB,WAAO;YACA,OAAgB;IACvB,MAAM,MAAM;IACZ,IAAI;AAEJ,QAAI,IAAI,SAAS,kBAAkB,IAAI,SAAS,aAC9C,gBAAe,gBAAgB,QAAQ;QAEvC,gBAAe,uBAAuB,IAAI;AAI5C,QAAI,QACF,SAAQ,6BAA6B,KAAK,EAAE,CAAC;AAG/C,WAAO;;;EAGZ,CAAC;;;;;AAoBJ,SAAgB,mBACd,OACA,SAMA;CACA,MAAM,EAAE,SAAS,SAAS,yBAAyB,mBAAmB;AAEtE,QAAO,KAAK;EACV,aAAa;EACb,aAAa,EAAE,OAAO;GACpB,KAAK,EAAE,QAAQ,CAAC,KAAK,CAAC,SAAS,kDAAkD;GACjF,SAAS,EACN,QAAQ,CACR,QAAQ,eAAe,CACvB,SAAS,6BAA6B;GACzC,iBAAiB,EACd,SAAS,CACT,QAAQ,KAAK,CACb,SACC,iFACD;GACJ,CAAC;EACF,SAAS,OAAO,EAAE,KAAK,SAAS,mBAAmB,EAAE,iBAAiB;AAEpE,OAAI,QACF,SAAQ,yBAAyB,IAAI,CAAC;AAGxC,OAAI;IAEF,MAAM,WAAW,MAAM,MAAM,KAAK;KAChC,QAAQ,YAAY,QAAQ,UAAU,IAAK;KAC3C,SAAS,EACP,cAAc,4CACf;KACF,CAAC;AAEF,QAAI,CAAC,SAAS,IAAI;KAChB,MAAM,WAAW,eAAe,SAAS,OAAO,GAAG,SAAS;AAE5D,SAAI,QACF,SAAQ,0BAA0B,SAAS,KAAK,MAAM,CAAC;AAGzD,YAAO;;IAGT,MAAM,OAAO,MAAM,SAAS,MAAM;IAGlC,MAAM,MAAM,IAAI,MAAM,MAAM,EAAE,KAAK,CAAC;IAEpC,IAAI,mBAAmB;AAGvB,QAAI,gBACF,KAAI;KAEF,MAAM,UADS,IAAI,YAAY,IAAI,OAAO,SAAS,CAC5B,OAAO;AAE9B,SAAI,WAAW,QAAQ,QACrB,oBAAmB,QAAQ;aAEtB,kBAAkB;AAEzB,aAAQ,KAAK,iDAAiD;;IAUlE,MAAM,WALkB,IAAI,gBAAgB;KAC1C,cAAc;KACd,gBAAgB;KACjB,CAAC,CAE+B,SAAS,iBAAiB;AAG3D,QAAI,QACF,SAAQ,0BAA0B,SAAS,KAAK,KAAK,CAAC;AAIxD,QAAI,2BAA2B,0BAA0B,KAAK,SAAS;KACrE,MAAM,kBAAkB,WAAW,SAAS,MAAM;AAClD,SAAI,gBASF,SARoB,MAAM,gBAAgB;MACxC,QAAQ;MACR,YAAY,cAAc,aAAa,KAAK,KAAK;MACjD,UAAU;MACV,SAAS;MACT,YAAY;MACb,CAAC,EAEiB;;AAIvB,WAAO;YACA,OAAgB;IACvB,MAAM,MAAM;IACZ,IAAI;AAEJ,QAAI,IAAI,SAAS,kBAAkB,IAAI,SAAS,aAC9C,gBAAe,gBAAgB,QAAQ;QAEvC,gBAAe,uBAAuB,IAAI;AAI5C,QAAI,QACF,SAAQ,0BAA0B,KAAK,MAAM,CAAC;AAGhD,WAAO;;;EAGZ,CAAC;;;;;;AA2BJ,SAAgB,eACd,OACA,SACqB;CACrB,MAAM,EACJ,SACA,SACA,yBACA,eAAe,QAAQ,IAAI,gBAC3B,iBAAiB,4BACf,WAAW,EAAE;AAGjB,KAAI,CAAC,cAAc;AACjB,UAAQ,KACN,gKAED;AACD,SAAO,EAAE;;AAGX,QAAO;EACL,YAAY,oBAAoB,OAAO;GAAE;GAAS;GAAS;GAAyB;GAAc,CAAC;EACnG,cAAc,sBAAsB,OAAO;GAAE;GAAS;GAAS;GAAyB;GAAgB,CAAC;EACzG,WAAW,mBAAmB,OAAO;GAAE;GAAS;GAAS;GAAyB;GAAgB,CAAC;EACpG;;;;gBAlhBiD;cAIvB;cACiC;cAQrC;CAsEnB,8BAA8B;;;;;;;;;CAiH9B,gCAAgC;;;;;CAmJhC,6BAA6B;;;;;;;;;;CA4MtB,aAAa;CACb,eAAe;CACf,YAAY;;;;;;;;;;;;;ACziBzB,MAAM,2BAA2B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAiEjC,SAAgB,kBAAkB,SAAmC;CACnE,MAAM,EAAE,SAAS,SAAS,gBAAgB;AAE1C,QAAO,KAAK;EACV,aAAa,eAAe;EAC5B,aAAa,EAAE,OAAO,EACpB,SAAS,EACN,QAAQ,CACR,SAAS,+EAA+E,EAC5F,CAAC;EACF,SAAS,OAAO,EAAE,cAAc;AAE9B,OAAI,QACF,SAAQ;IACN,MAAM;IACN;IACA,WAAW,QAAQ;IACpB,CAAC;GAIJ,MAAM,SAAS,MAAM,QAAQ,QAAQ,QAAQ;AAG7C,OAAI,QACF,SAAQ;IACN,MAAM;IACN;IACA,UAAU,OAAO;IACjB,WAAW,OAAO;IAClB,WAAW,QAAQ;IACpB,CAAC;GAIJ,MAAM,QAAkB,EAAE;AAE1B,OAAI,OAAO,OACT,OAAM,KAAK,OAAO,OAAO;AAI3B,OAAI,OAAO,aAAa,EACtB,OAAM,KAAK,6BAA6B;YAC/B,OAAO,aAAa,KAC7B,OAAM,KAAK,iBAAiB,OAAO,SAAS,aAAa;OAEzD,OAAM,KAAK,8CAA8C;AAI3D,OAAI,OAAO,UACT,OAAM,KAAK,uCAAuC;AAGpD,UAAO,MAAM,KAAK,GAAG;;EAExB,CAAC;;;;;;;;;;;;;;;;;AAkBJ,SAAgB,6BAA6B,SAAiC;AAC5E,QAAO,kBAAkB,EAAE,SAAS,CAAC;;;;;;AAWvC,MAAa,UAAU;;;;;;;aChJM;aAaJ;UAOV;;;;AAkBf,SAAS,qBAAq
B,OAAyC;AACrE,QAAO,OAAO,UAAU,eACtB,UAAU,uBACV,UAAU,yBACV,UAAU,sBACV,UAAU,gBACV,UAAU,sBACV,UAAU,uBACV,UAAU,sBACV,UAAU,kBACV,UAAU,kBACV,UAAU,mBACV,UAAU;;;;;AAOd,SAAS,uBACP,SACA,OACA,SAKS;CACT,MAAM,EAAE,SAAS,SAAS,4BAA4B;CAGtD,MAAM,eAAe,QAAQ,IAAI,kBAAkB;CACnD,MAAM,iBAAiB;AAEvB,KAAI,YAAY,qBAAqB;AACnC,MAAI,CAAC,cAAc;AACjB,WAAQ,KAAK,uDAAuD;AACpE,UAAO,EAAE;;AAEX,SAAO,EACL,YAAY,oBAAoB,OAAO;GAAE;GAAS;GAAS;GAAyB;GAAc,CAAC,EACpG;;AAEH,KAAI,YAAY,sBACd,QAAO,EACL,cAAc,sBAAsB,OAAO;EAAE;EAAS;EAAS;EAAyB;EAAgB,CAAC,EAC1G;AAEH,KAAI,YAAY,mBACd,QAAO,EACL,WAAW,mBAAmB,OAAO;EAAE;EAAS;EAAS;EAAyB;EAAgB,CAAC,EACpG;AAIH,KAAI,YAAY,aACd,QAAO,EACL,IAAI,aAAa,OAAO,SAAU,QAAQ,EAC3C;AAEH,KAAI,YAAY,mBACd,QAAO,EACL,WAAW,mBAAmB,OAAO,SAAU,yBAAyB,QAAQ,EACjF;AAEH,KAAI,YAAY,oBACd,QAAO,EACL,YAAY,oBAAoB,OAAO,SAAU,QAAQ,EAC1D;AAEH,KAAI,YAAY,mBACd,QAAO,EACL,WAAW,mBAAmB,OAAO,SAAU,QAAQ,EACxD;AAEH,KAAI,YAAY,eACd,QAAO,EACL,MAAM,eAAe,OAAO,SAAU,QAAQ,EAC/C;AAEH,KAAI,YAAY,eACd,QAAO,EACL,MAAM,eAAe,OAAO,SAAU,yBAAyB,QAAQ,EACxE;AAIH,KAAI,YAAY,gBACd,QAAO,EACL,aAAa,gBAAgB,OAAO,QAAQ,EAC7C;AAEH,KAAI,YAAY,kBAEd,OAAM,IAAI,MAAM,yFAAyF;AAG3G,OAAM,IAAI,MAAM,iCAAiC,UAAU;;;;;AAM7D,SAAS,qBACP,YACA,OACA,SAKS;AACT,KAAI,CAAC,WACH,QAAO,EAAE;AAIX,KAAI,CAAC,MAAM,QAAQ,WAAW,CAC5B,QAAO;CAIT,IAAI,SAAkB,EAAE;AACxB,MAAK,MAAM,QAAQ,WACjB,KAAI,qBAAqB,KAAK,EAAE;EAE9B,MAAM,eAAe,uBAAuB,MAAM,OAAO,QAAQ;AACjE,WAAS;GAAE,GAAG;GAAQ,GAAG;GAAc;YAC9B,OAAO,SAAS,YAAY,SAAS,KAE9C,UAAS;EAAE,GAAG;EAAQ,GAAG;EAAM;AAKnC,QAAO;;;;;AA+BT,SAAS,0BAA0B,cAA8B;AAC/D,QAAO,GAAG,aAAa;;EAEvB,YAAY;;EAEZ,mBAAmB;;EAEnB;;;;;AAMF,SAAgB,mBACd,OACA,SACA;CACA,MAAM,EACJ,cACA,eAAe,EAAE,EACjB,YAAY,EAAE,EACd,6BAA6B,MAC7B,SACA,kBAAkB,MAClB,SACA,aACA,yBACA,0BACE;CAGJ,MAAM,mBAQF,EAAE;CACN,MAAM,uBAAiC,EAAE;AAGzC,KAAI,4BAA4B;AAC9B,mBAAiB,qBAAqB;GACpC,cAAc,0BAA0B,wBAAwB;GAChE,YAAY;GACZ,OAAO;GACR;AACD,uBAAqB,KACnB,sBAAsB,sCACvB;;AAIH,MAAK,MAAM,YAAY,WAAW;AAChC,mBAAiB,SAAS,QAAQ;GAChC,cAAc,0BAA0B,SAAS,aAAa;GAC9D,YAAY,SAAS,SAAS;GAC9B,OAAO,SAAS,SAAS;GACzB,QAAQ,SAAS;GAClB;AACD,uBAAqB,KAAK,KAAK,SAAS,KAAK,IAAI,SAAS,cAAc;;AAM1E,QAAO,KAAK;EACV,aAHA,mBAAmB,uBAAuB,qBAAqB;EAI/D,aAAa,EAAE,OAAO;GACpB,aAAa,EACV,QAAQ,CACR,SAAS,8CAA8C;GAC1D,eAAe,EACZ,QAAQ,CACR,SACC,wCAAwC,OAAO,KAAK,iBAAiB,CAAC,KAAK,KAAK,GACjF;GACJ,CAAC;EACF,SAAS,OAAO,EAAE,aAAa,oBAAoB;AAEjD,OAAI,EAAE,iBAAiB,kBAIrB,QAAO,gCAAgC,cAAc,+BAHhC,OAAO,KAAK,iBAAiB,CAC/C,KAAK,MAAM,KAAK,EAAE,IAAI,CACtB,KAAK,KAAK;GAIf,MAAM,iBAAiB,iBAAiB;GAGxC,MAAM,eAAe,UAAU,MAAM,OAAO,GAAG,SAAS,cAAc;GACtE,MAAM,sBAAsB,cAAc,eAAe;GAGzD,MAAM,0BAA0B;IAC9B,GAAG;IACH,GAAG,cAAc;IAClB;GAED,MAAM,wBAAwB;IAC5B,GAAG;IACH,GAAG,cAAc;IAClB;AAGD,OAAI,QACF,SAAQ,yBAAyB,eAAe,YAAY,CAAC;GAI/D,MAAM,gBAAgC;IACpC,OAAO,EAAE;IACT,OAAO,MAAM;IACd;GAGD,MAAM,cAAc,qBAClB,eAAe,YACf,eACA;IAAE;IAAS;IAAS,CACrB;GAQD,IAAI,WAAoB;IACtB,aANgB,gBAAgB,eAAe,QAAQ;IAOvD,GANsB,sBAAsB,eAAe,SAAS,QAAQ;IAO5E,GAAG;IACJ;AAGD,cAAW,qBAAqB,UAAU,oBAAoB;AAE9D,OAAI;IAEF,MAAM,mBAAwB;KAC5B,OAAO,eAAe;KACtB,cAAc,eAAe;KAC7B,OAAO;KACP,UAAU,YAAY,2BAA2B;KAEjD,GAAI,eAAe,SAAS,EAAE,QAAQ,OAAO,OAAO,eAAe,OAAO,EAAE,GAAG,EAAE;KAClF;AAGD,QAAI,OAAO,KAAK,wBAAwB,CAAC,SAAS,EAChD,QAAO,OAAO,kBAAkB,wBAAwB;AAI1D,QAAI,uBAAuB;KACzB,MAAM,EAAE,YAAY,aAAa,GAAG,wBAAwB;AAC5D,YAAO,OAAO,kBAAkB,oBAAoB;;IAItD,IAAI,oBAAoB;AAGxB,qBAAiB,eAAe,OAAO,EAAE,WAAW,kBAA4D;AAE9G,SAAI,WAAW,aAAa,UAAU,SAAS,GAAG;MAEhD,MAAM,uBAAuB,UAAU,KAAK,IAAS,WAAmB;OACtE,UAAU,GAAG;OACb,MAAM,GAAG;OACT,QAAQ,YAAY;OACrB,EAAE;AAEH,cAAQ,wBAAwB,qBAAqB,qBAAqB,CAAC;;;IAM/E,MAAM,SAAS,MAFO,IAAI,cAAc,iBAAiB,CAEtB,SAAS,EAC1C,QAAQ,aACT,CAAC;AAGF,UAAM,QAAQ;KAAE,GAAG,MAAM;KAAO,GAAG,cAAc;KAAO;IAExD,MAAM,aAAa,OAAO,QAAQ;IAGlC,IAAI,kBAAkB;AAGtB,QAAI,eAAe,
UAAU,YAAY,UAAU,OAAO,OACxD,mBAAkB,GAAG,WAAW,2BAA2B,KAAK,UAAU,OAAO,QAAQ,MAAM,EAAE;AAInG,QAAI,QACF,SAAQ,0BAA0B,eAAe,gBAAgB,CAAC;AAGpE,WAAO;YACA,OAAgB;IAEvB,MAAM,eAAe,6BADT,MAC0C;AAGtD,QAAI,QACF,SAAQ,0BAA0B,eAAe,aAAa,CAAC;AAGjE,WAAO;;;EAGZ,CAAC;;;;;;;;ACzaJ,SAAS,aAAa,SAAgC;AACpD,KAAI,QAAQ,SAAS,YAAa,QAAO;CAGzC,MAAM,UAAU,QAAQ;AACxB,KAAI,MAAM,QAAQ,QAAQ,CACxB,QAAO,QAAQ,MACZ,SAAS,OAAO,SAAS,YAAY,SAAS,QAAQ,UAAU,QAAQ,KAAK,SAAS,YACxF;AAGH,QAAO;;;;;AAMT,SAAS,eAAe,SAAiC;AACvD,KAAI,QAAQ,SAAS,YAAa,QAAO,EAAE;CAE3C,MAAM,UAAU,QAAQ;AACxB,KAAI,CAAC,MAAM,QAAQ,QAAQ,CAAE,QAAO,EAAE;CAEtC,MAAM,MAAgB,EAAE;AACxB,MAAK,MAAM,QAAQ,QACjB,KACE,OAAO,SAAS,YAChB,SAAS,QACT,UAAU,QACV,KAAK,SAAS,eACd,gBAAgB,KAEhB,KAAI,KAAK,KAAK,WAAqB;AAIvC,QAAO;;;;;AAMT,SAAS,gBAAgB,SAAuB,YAA6B;AAC3E,KAAI,QAAQ,SAAS,OAAQ,QAAO;AAGpC,KAAI,gBAAgB,WAAW,QAAQ,eAAe,WACpD,QAAO;CAIT,MAAM,UAAU,QAAQ;AACxB,KAAI,MAAM,QAAQ,QAAQ,CACxB,QAAO,QAAQ,MACZ,SACC,OAAO,SAAS,YAChB,SAAS,QACT,UAAU,QACV,KAAK,SAAS,iBACd,gBAAgB,QAChB,KAAK,eAAe,WACvB;AAGH,QAAO;;;;;AAMT,SAAS,0BACP,YACA,UACc;AAed,QAd8B;EAC5B,MAAM;EACN,SAAS,CACP;GACE,MAAM;GACN;GACA;GACA,QAAQ;IACN,MAAM;IACN,OAAO,aAAa,SAAS,WAAW,WAAW;IACpD;GACF,CACF;EACF;;;;;AAOH,SAAS,YAAY,SAAuB,YAA4B;AACtE,KAAI,QAAQ,SAAS,YAAa,QAAO;CAEzC,MAAM,UAAU,QAAQ;AACxB,KAAI,CAAC,MAAM,QAAQ,QAAQ,CAAE,QAAO;AAEpC,MAAK,MAAM,QAAQ,QACjB,KACE,OAAO,SAAS,YAChB,SAAS,QACT,UAAU,QACV,KAAK,SAAS,eACd,gBAAgB,QAChB,KAAK,eAAe,cACpB,cAAc,KAEd,QAAO,KAAK;AAIhB,QAAO;;;;;;;;;;;;;;;;;;;;;;;;AAyBT,SAAgB,eAAe,UAA0C;AACvE,KAAI,CAAC,YAAY,SAAS,WAAW,EACnC,QAAO;CAGT,MAAM,SAAyB,EAAE;AAEjC,MAAK,IAAI,IAAI,GAAG,IAAI,SAAS,QAAQ,KAAK;EACxC,MAAM,UAAU,SAAS;AACzB,MAAI,CAAC,QAAS;AAEd,SAAO,KAAK,QAAQ;AAGpB,MAAI,aAAa,QAAQ,EAAE;GACzB,MAAM,cAAc,eAAe,QAAQ;AAE3C,QAAK,MAAM,cAAc,aAAa;IAEpC,IAAI,YAAY;AAChB,SAAK,IAAI,IAAI,IAAI,GAAG,IAAI,SAAS,QAAQ,KAAK;KAC5C,MAAM,gBAAgB,SAAS;AAC/B,SAAI,iBAAiB,gBAAgB,eAAe,WAAW,EAAE;AAC/D,kBAAY;AACZ;;;AAKJ,QAAI,CAAC,WAAW;KACd,MAAM,WAAW,YAAY,SAAS,WAAW;AACjD,YAAO,KAAK,0BAA0B,YAAY,SAAS,CAAC;;;;;AAMpE,QAAO;;;;;;;;AAST,SAAgB,qBAAqB,UAAmC;AACtE,KAAI,CAAC,YAAY,SAAS,WAAW,EACnC,QAAO;AAGT,MAAK,IAAI,IAAI,GAAG,IAAI,SAAS,QAAQ,KAAK;EACxC,MAAM,UAAU,SAAS;AACzB,MAAI,CAAC,QAAS;AAEd,MAAI,aAAa,QAAQ,EAAE;GACzB,MAAM,cAAc,eAAe,QAAQ;AAE3C,QAAK,MAAM,cAAc,aAAa;IACpC,IAAI,YAAY;AAChB,SAAK,IAAI,IAAI,IAAI,GAAG,IAAI,SAAS,QAAQ,KAAK;KAC5C,MAAM,gBAAgB,SAAS;AAC/B,SAAI,iBAAiB,gBAAgB,eAAe,WAAW,EAAE;AAC/D,kBAAY;AACZ;;;AAIJ,QAAI,CAAC,UACH,QAAO;;;;AAMf,QAAO;;;;;;;;;;;eC7NmC;aAIf;;;;;AAM7B,MAAa,kCAAkCC;;;;AAK/C,MAAa,wBAAwBC;;;;AAmCrC,SAAgB,uBAAuB,UAAkC;CACvE,IAAI,QAAQ;AAEZ,MAAK,MAAM,WAAW,SACpB,KAAI,OAAO,QAAQ,YAAY,SAC7B,UAAS,eAAe,QAAQ,QAAQ;UAC/B,MAAM,QAAQ,QAAQ,QAAQ,EACvC;OAAK,MAAM,QAAQ,QAAQ,QACzB,KAAI,OAAO,SAAS,YAAY,SAAS,QAAQ,UAAU,KACzD,UAAS,eAAe,OAAO,KAAK,KAAK,CAAC;;AAMlD,QAAO;;;;;AAMT,SAAS,eAAe,SAA+B;AACrD,KAAI,OAAO,QAAQ,YAAY,SAC7B,QAAO,QAAQ;AAGjB,KAAI,MAAM,QAAQ,QAAQ,QAAQ,CAChC,QAAO,QAAQ,QACZ,KAAK,SAAS;AACb,MAAI,OAAO,SAAS,YAAY,SAAS,QAAQ,UAAU,KACzD,QAAO,OAAO,KAAK,KAAK;AAE1B,MAAI,OAAO,SAAS,YAAY,SAAS,QAAQ,UAAU,MAAM;AAC/D,OAAI,KAAK,SAAS,YAChB,QAAO,eAAgB,KAA+B,YAAY,UAAU;AAE9E,OAAI,KAAK,SAAS,cAChB,QAAO;;AAGX,SAAO;GACP,CACD,OAAO,QAAQ,CACf,KAAK,KAAK;AAGf,QAAO;;;;;AAMT,SAAS,yBAAyB,UAAkC;AAClE,QAAO,SACJ,KAAK,QAAQ;AAGZ,SAAO,GAFM,IAAI,SAAS,SAAS,SAAS,IAAI,SAAS,cAAc,cAAc,SAEtE,IADF,eAAe,IAAI;GAEhC,CACD,KAAK,OAAO;;;;;AAMjB,eAAe,gBACb,UACA,OACA,mBACA,iBACiB;CAGjB,MAAM,sBAA2B;EAC/B;EACA,QAAQ;;;;;;;;EAQR,QAAQ,mDAZe,yBAAyB,SAAS;EAa1D;AAGD,KAAI,kBACF,QAAO,OAAO,qBAAqB,kBAAkB;AAEvD,KAAI,gBACF,QAAO,OAAO,qBAAqB,gBAAgB;AAIrD,SADe,MAAM,aAAa,oBAAoB,EACxC;;;;;;;;;;;;;;;;;;;;;;;;;;;AA4BhB,eAAsB,kBACpB,UACA,SAC8B;CAC9B,MAAM,EACJ,OACA,iBAAi
B,iCACjB,eAAe,0BACb;CAGJ,MAAM,eAAe,uBAAuB,SAAS;AAGrD,KAAI,eAAe,eACjB,QAAO;EACL,YAAY;EACZ;EACA;EACD;AAIH,KAAI,SAAS,UAAU,aACrB,QAAO;EACL,YAAY;EACZ;EACA;EACD;CAIH,MAAM,sBAAsB,SAAS,MAAM,GAAG,CAAC,aAAa;CAC5D,MAAM,iBAAiB,SAAS,MAAM,CAAC,aAAa;CAiBpD,MAAM,cAAc,CANiB;EACnC,MAAM;EACN,SAAS,oCAVK,MAAM,gBACpB,qBACA,OACA,QAAQ,mBACR,QAAQ,gBACT,CAKsD;EACtD,EAGoC,GAAG,eAAe;AAGvD,QAAO;EACL,YAAY;EACZ,UAAU;EACV;EACA,aANkB,uBAAuB,YAAY;EAOtD;;;;;AAMH,SAAgB,mBACd,UACA,iBAAyB,iCAChB;AAET,QADe,uBAAuB,SAAS,IAC9B;;;;;;;;aCxOoC;aAI/B;;;;AAuCxB,SAAS,kBACP,cACA,cACA,YACA,QACQ;CACR,MAAM,QAAQ;EACZ,gBAAgB;EAChB;EACA;EACA;EACD;AAED,KAAI,WACF,OAAM,KAAK,sBAAsB;AAGnC,KAAI,aACF,OAAM,KAAK,mBAAmB;AAIhC,KAAI,UAAU,OAAO,SAAS,EAC5B,OAAM,KAAK,kBAAkB,OAAO,CAAC;AAGvC,QAAO,MAAM,OAAO,QAAQ,CAAC,KAAK,OAAO;;;;;;AAO3C,IAAa,YAAb,MAAuB;CACrB,AAAQ;CACR,AAAQ;CACR,AAAQ;CACR,AAAQ;CACR,AAAQ;CACR,AAAQ;CAMR,AAAQ;CACR,AAAQ;CACR,AAAQ;CACR,AAAQ;CACR,AAAQ;CACR,AAAQ;CACR,AAAQ,iBAA6E,EAAE;CACvF,AAAQ;CAGR,AAAQ;CACR,AAAQ;CACR,AAAQ;CAER,YAAY,QAA+B;EACzC,MAAM,EACJ,OACA,YACA,QAAQ,EAAE,EACV,cACA,YAAY,EAAE,EACd,SACA,WAAW,mBACX,6BAA6B,MAC7B,yBACA,sBAAsB,OACtB,eACA,aACA,cACA,WACA,SACA,QACA,aACA,mBACA,oBACE;AAGJ,MAAI,WAKF,MAAK,QAAQ,kBAAkB;GACtB;GACP,YANkB,MAAM,QAAQ,WAAW,GACzC,aACA,CAAC,WAAW;GAKf,CAAC;MAEF,MAAK,QAAQ;AAEf,OAAK,WAAW;AAChB,OAAK,UACH,aAAa,UAA0B,IAAI,aAAa,MAAM;AAChE,OAAK,0BAA0B;AAC/B,OAAK,sBAAsB;AAC3B,OAAK,sBAAsB;AAC3B,OAAK,cAAc;AACnB,OAAK,eAAe;AACpB,OAAK,eAAe;AAGpB,OAAK,cAAc;AACnB,OAAK,oBAAoB;AACzB,OAAK,kBAAkB;AAGvB,MAAI,SAAS;AAEX,OAAI,UACF,SAAQ,KACN,8GAED;AAGH,QAAK,WAAW,EAAE,SAAS,CAAC,CAAC,OAAM,UAAS;AAC1C,YAAQ,KAAK,sCAAsC,MAAM;KACzD;aACO,UAET,MAAK,WAAW,EAAE,WAAW,CAAC,CAAC,OAAM,UAAS;AAC5C,WAAQ,KAAK,sCAAsC,MAAM;IACzD;AAKJ,OAAK,oBAAoB,OAAO,YAAY,cAAc,YAAY,UAAa,iBAAiB,QAAQ;AAM5G,OAAK,eAAe,kBAAkB,cAFpC,8BAA+B,aAAa,UAAU,SAAS,GAEC,KAAK,mBAAmB,KAAK,eAAe;AAG9G,OAAK,YAAY;AAGjB,OAAK,kBAAkB;GACrB,cAAc;GACd,cAAc;GACd;GACA;GACD;;;;;;CAOH,AAAQ,gBAAgB,OAAuB,SAAkC;AAQ/E,SAAO;GACL,aARgB,gBAAgB,OAAO,QAAQ;GAS/C,GARsB,sBAAsB,OAAO;IACnD,SAAS,KAAK;IACd;IACA,yBAAyB,KAAK;IAC/B,CAAC;GAKA,GAAG,KAAK;GACT;;;;;;;CAQH,AAAQ,iBAAiB,OAAuB,SAAkC;AAEhF,MAAI,CAAC,QAAQ,IAAI,eACf,QAAO,EAAE;AAGX,MAAI;AASF,kDALgC,eAAe,OAAO;IACpD,SAAS,KAAK;IACd;IACA,yBAAyB,KAAK;IAC/B,CAAC;WAEK,OAAO;AAEd,WAAQ,KAAK,gDAAgD,MAAM;AACnE,UAAO,EAAE;;;;;;;CAQb,AAAQ,qBAAqB,SAAkC;AAC7D,MAAI,CAAC,KAAK,kBACR,QAAO,EAAE;EAGX,MAAM,iBAAiB,KAAK;AAC5B,SAAO,EACL,SAAS,kBAAkB;GACzB,SAAS;GACT;GACD,CAAC,EACH;;;;;;CAOH,AAAQ,sBAAsB,OAAuB,SAAkC;AACrF,MACE,CAAC,KAAK,gBAAgB,+BACrB,CAAC,KAAK,gBAAgB,aAAa,KAAK,gBAAgB,UAAU,WAAW,GAE9E,QAAO,EAAE;AAeX,SAAO,EAAE,MAZY,mBAAmB,OAAO;GAC7C,cAAc,KAAK,gBAAgB;GACnC,cAAc,KAAK;GACnB,WAAW,KAAK,gBAAgB;GAChC,4BAA4B,KAAK,gBAAgB;GACjD,SAAS,KAAK;GACd;GACA,aAAa,KAAK;GAClB,yBAAyB,KAAK;GAC9B,uBAAuB,KAAK;GAC7B,CAAC,EAE2B;;;;;;CAO/B,AAAQ,YAAY,OAAuB,SAAkC;EAE3E,IAAI,WAAW,KAAK,gBAAgB,OAAO,QAAQ;EAGnD,MAAM,WAAW,KAAK,iBAAiB,OAAO,QAAQ;AACtD,MAAI,OAAO,KAAK,SAAS,CAAC,SAAS,EACjC,YAAW;GAAE,GAAG;GAAU,GAAG;GAAU;EAIzC,MAAM,eAAe,KAAK,qBAAqB,QAAQ;AACvD,MAAI,OAAO,KAAK,aAAa,CAAC,SAAS,EACrC,YAAW;GAAE,GAAG;GAAU,GAAG;GAAc;EAI7C,MAAM,gBAAgB,KAAK,sBAAsB,OAAO,QAAQ;AAChE,MAAI,OAAO,KAAK,cAAc,CAAC,SAAS,EACtC,YAAW;GAAE,GAAG;GAAU,GAAG;GAAe;AAI9C,aAAW,qBAAqB,UAAU,KAAK,YAAY;AAE3D,SAAO;;;;;;CAOT,AAAQ,oBAAoB,UAAyC;EACnE,MAAM,aAAmC,EAAE;AAG3C,aAAW,KAAK,YAAY,YAAY,KAAK,SAAS,CAAC;AAGvD,MAAI,KAAK,aAAa,SACpB,KAAI,MAAM,QAAQ,KAAK,YAAY,SAAS,CAC1C,YAAW,KAAK,GAAG,KAAK,YAAY,SAAS;MAE7C,YAAW,KAAK,KAAK,YAAY,SAAS;AAI9C,SAAO;;;;;CAMT,AAAQ,mBAAmB,SAAyB;EAClD,MAAM,WAAgB;GACpB,OAAO,KAAK;GACZ,cAAc,KAAK;GACnB,OAAO;GACR;AAGD,MAAI,KAAK,kBACP,QAAO,OAAO,UAAU,KAAK,kBAAkB;AAIjD,M
AAI,KAAK,gBACP,QAAO,OAAO,UAAU,KAAK,gBAAgB;AAI/C,MAAI,KAAK,aAAa;AACpB,OAAI,KAAK,YAAY,YACnB,UAAS,cAAc,KAAK,mBAAmB,KAAK,YAAY,YAAY;AAE9E,OAAI,KAAK,YAAY,aACnB,UAAS,eAAe,KAAK,oBAAoB,KAAK,YAAY,aAAa;AAEjF,OAAI,KAAK,YAAY,SACnB,UAAS,WAAW,KAAK,gBAAgB,KAAK,YAAY,SAAS;;AAKvE,MAAI,KAAK,aACP,UAAS,SAAS,OAAO,OAAO,KAAK,aAAa;AAGpD,SAAO;;;;;;;;CAST,AAAQ,YAAY,OAAuB,UAAmB,SAAyB;EACrF,MAAM,QAAQ,KAAK,YAAY,OAAO,QAAQ;EAC9C,MAAM,WAAW,KAAK,mBAAmB,QAAQ;EACjD,MAAM,iBAAiB,KAAK,oBAAoB,SAAS;AAEzD,SAAO,IAAI,cAAc;GACvB,GAAG;GACH;GACA,UAAU;GACX,CAAC;;;;;;CAOJ,MAAc,WAAW,SAAmD;EAC1E,MAAM,EAAE,eAAe,MAAM,OAAO;AAQpC,OAAK,kBANU,MAAM,WACnB,QAAQ,UACJ,EAAE,SAAS,QAAQ,SAAS,GAC5B,EAAE,kBAAkB,QAAQ,WAAW,CAC5C,EAE4B,KAAI,OAAM;GACrC,MAAM,EAAE;GACR,aAAa,EAAE;GACf,MAAM,EAAE;GACT,EAAE;;;;;CAML,MAAM,SAAS,SAAgD;EAE7D,MAAM,QAAwB;GAC5B,OAAO,EAAE;GACT,OAAO,EAAE;GACV;EAGD,MAAM,SAAS,MADD,KAAK,YAAY,OAAO,QAAQ,SAAS,CAC5B,SAAS,EAAE,QAAQ,QAAQ,QAAQ,CAAC;AAI/D,SAAO,eAAe,QAAQ,SAAS;GACrC,OAAO;GACP,YAAY;GACZ,UAAU;GACX,CAAC;AAEF,SAAO;;;;;CAMT,MAAM,OAAO,SAAgD;EAE3D,MAAM,QAAwB;GAC5B,OAAO,EAAE;GACT,OAAO,EAAE;GACV;EAGD,MAAM,SAAS,MADD,KAAK,YAAY,OAAO,QAAQ,SAAS,CAC5B,OAAO,EAAE,QAAQ,QAAQ,QAAQ,CAAC;AAI7D,SAAO,eAAe,QAAQ,SAAS;GACrC,OAAO;GACP,YAAY;GACZ,UAAU;GACX,CAAC;AAEF,SAAO;;;;;CAMT,MAAM,kBAAkB,SAIrB;EAED,MAAM,SAAS,MADD,KAAK,YAAY,QAAQ,OAAO,QAAQ,SAAS,CACpC,SAAS,EAAE,QAAQ,QAAQ,QAAQ,CAAC;AAI/D,SAAO,eAAe,QAAQ,SAAS;GACrC,OAAO,QAAQ;GACf,YAAY;GACZ,UAAU;GACX,CAAC;AAEF,SAAO;;;;;;CAOT,SAAS,OAAwB;EAC/B,MAAM,aAAa,SAAS;GAAE,OAAO,EAAE;GAAE,OAAO,EAAE;GAAE;AACpD,SAAO,KAAK,YAAY,WAAW;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAyCrC,AAAQ,oBAAoB,kBAA0D;AACpF,SAAO,OAAO,WAAgB;AAE5B,OAAI,iBACF,KAAI;AACF,UAAM,iBAAiB,OAAO;YACvB,OAAO;AAEd,YAAQ,MAAM,kDAAkD,MAAM;;;;;;;CAY9E,AAAQ,gBAAgB,cAAkD;AACxE,SAAO,OAAO,WAAgB;AAE5B,OAAI,aACF,KAAI;AACF,UAAM,aAAa,OAAO;YACnB,OAAO;AACd,YAAQ,MAAM,8CAA8C,MAAM;;;;;;;;CAY1E,AAAQ,mBAAmB,iBAA4C;AACrE,SAAO,OAAO,WAAgB;AAE5B,OAAI,gBACF,KAAI;AAGF,WAAO,EACL,GAHa,MAAM,gBAAgB,OAAO,EAK3C;YACM,OAAO;AACd,YAAQ,MAAM,iDAAiD,MAAM;AACrE,WAAO;;AAIX,UAAO;;;;;;;;CASX,AAAQ,uBACN,eACA,OACA,SACA,OACA,UACA,kBACA,YACA,eACkC;EAClC,MAAM,EAAE,aAAa;EAErB,MAAM,gBAAkD;GACtD,OAAO,KAAK;GACZ,UAAU;GACV;GACA,UAAU,KAAK,oBAAoB,QAAQ,SAAS;GACpD,aAAa,QAAQ;GACrB,cAAc,OAAO,EAAE,WAAW,kBAAkB;AAElD,QAAI,KAAK,aAAa,aAEpB,OAD6B,KAAK,oBAAoB,KAAK,YAAY,aAAa,CACzD;KAAE;KAAW;KAAa,CAAC;AAIxD,kBAAc;IACd,MAAM,iBAAiB,WAAW,cAAc;IAGhD,MAAM,YAA4B;KAChC,MAAM;KACN,YAAY,cAAc;KAC1B,WAAW,UAAU,KAAK,IAAI,OAAO;MACnC,UAAU,GAAG;MACb,MAAM,WAAW,KAAK,GAAG,QAAQ;MACjC,QAAQ,YAAY,KAAM,YAAY,YAAY,KAAK,YAAY,GAAG,SAAS,SAAa;MAC7F,EAAE;KACJ;AACD,eAAW,KAAK,UAAU;AAG1B,QAAI,YAAY,KAAK,cAAc;KAGjC,MAAM,aAAyB;MAC7B;MACA,MAAM;MACN,UAAU;MACV,OAAO,EAAE,GAAG,OAAO;MACnB,WAAW;MACX,4BAAW,IAAI,MAAM,EAAC,aAAa;MACnC,4BAAW,IAAI,MAAM,EAAC,aAAa;MACpC;AACD,WAAM,KAAK,aAAa,KAAK,WAAW;AAExC,gBAAW,KAAK,2BAA2B,UAAU,eAAe,CAAC;;;GAG1E;AAGD,MAAI,KAAK,kBACP,QAAO,OAAO,eAAe,KAAK,kBAAkB;AAItD,MAAI,KAAK,gBACP,QAAO,OAAO,eAAe,KAAK,gBAAgB;AAIpD,MAAI,KAAK,aACP,eAAc,SAAS,OAAO,OAAO,KAAK,aAAa;AAIzD,MAAI,KAAK,aAAa;AACpB,OAAI,KAAK,YAAY,YACnB,eAAc,cAAc,KAAK,mBAAmB,KAAK,YAAY,YAAY;AAEnF,OAAI,KAAK,YAAY,SACnB,eAAc,WAAW,KAAK,gBAAgB,KAAK,YAAY,SAAS;;AAK5E,MAAI,KAAK,oBAEP,eAAc,WAAW,CACvB;GACE,MAAM;GACN,SAAS,KAAK;GACd,iBAAiB,EACf,WAAW,EAAE,cAAc,EAAE,MAAM,aAAa,EAAE,EACnD;GACF,EACD,GAAG,cACJ;MAGD,eAAc,SAAS,KAAK;AAG9B,SAAO;;;;;;;;CAST,MAAc,kBACZ,SACA,gBAMC;EACD,MAAM,EAAE,WAAW;AAGnB,MAAI,CAAC,QAAQ,UAAU,CAAC,QAAQ,YAAY,CAAC,UAAU,CAAC,QAAQ,SAC9D,QAAO;GACL,UAAU,EAAE;GACZ;GACA,OAAO;IACL,MAAM;IACN,uBAAO,IAAI,MAAM,mEAAmE;IACrF;GACF;EAIH,IAAI,eAA+B,EAAE;EACrC,IAAI,6BAA6B;AAEjC,MAAI,QAAQ,YAAY,QAAQ,SAAS,SAAS,GAAG;AAEnD,kBA
Ae,QAAQ;AACvB,gCAA6B;AAG7B,OAAI,QAAQ,UAAU,QAAQ,IAAI,aAAa,aAC7C,SAAQ,KAAK,oFAAoF;aAE1F,QAAQ,UAAU;AAE3B,gCAA6B;AAC7B,oBAAiB,EAAE;AAInB,OAAI,QAAQ,UAAU,QAAQ,IAAI,aAAa,aAC7C,SAAQ,KAAK,kFAAkF;aAGxF,QAAQ,QAAQ;AAEzB,kBAAe,CAAC;IAAE,MAAM;IAAQ,SAAS,QAAQ;IAAQ,CAAiB;AAE1E,OAAI,QAAQ,IAAI,aAAa,aAC3B,SAAQ,KAAK,uDAAuD;;AAMxE,MAAI,8BAA8B,eAAe,SAAS,GAAG;AAE3D,oBAAiB,eAAe,eAAe;AAG/C,OAAI,KAAK,qBAAqB,WAAW,eAAe,SAAS,EAQ/D,mBAP4B,MAAM,kBAAkB,gBAAgB;IAClE,OAAO,KAAK,oBAAoB,SAAS,KAAK;IAC9C,gBAAgB,KAAK,oBAAoB;IACzC,cAAc,KAAK,oBAAoB;IACvC,mBAAmB,KAAK;IACxB,iBAAiB,KAAK;IACvB,CAAC,EACmC;aAE9B,CAAC,2BAEV,kBAAiB,EAAE;EAIrB,MAAM,mBAAmB,QAAQ,YAAY,QAAQ,SAAS,WAAW;EACzE,MAAM,gBAAgB,aAAa,SAAS,KAAK,eAAe,SAAS;AAGzE,MAAI,oBAAoB,CAAC,iBAAiB,CAAC,OAEzC,QAAO;GACL,UAAU,EAAE;GACZ;GACA,mBAAmB;GACpB;AAIH,MAAI,CAAC,iBAAiB,CAAC,OACrB,QAAO;GACL,UAAU,EAAE;GACZ;GACA,OAAO;IACL,MAAM;IACN,uBAAO,IAAI,MAAM,kGAAkG;IACpH;GACF;AAQH,SAAO;GAAE,UAL6B,CACpC,GAAG,gBACH,GAAG,aACJ;GAEiC;GAAgB;;;;;;;;CASpD,MAAc,sBACZ,SAOC;EACD,MAAM,EAAE,UAAU,WAAW;EAC7B,IAAI,QAAwB,QAAQ,SAAS;GAAE,OAAO,EAAE;GAAE,OAAO,EAAE;GAAE;EACrE,IAAI,iBAAiC,EAAE;EACvC,IAAI,cAAc;EAClB,IAAI;EACJ,IAAI;AAEJ,MAAI,YAAY,KAAK,cAAc;GACjC,MAAM,aAAa,MAAM,KAAK,aAAa,KAAK,SAAS;AACzD,OAAI,YAAY;AACd,YAAQ,WAAW;AACnB,qBAAiB,WAAW;AAC5B,kBAAc,WAAW;AACzB,uBAAmB,WAAW;AAE9B,sBAAkB,4BAChB,UACA,WAAW,MACX,WAAW,SAAS,OACrB;;;AAKL,MAAI,UAAU,iBAEZ,KADiB,OAAO,UAAU,IACpB,SAAS,UACrB,oBAAmB;MAEnB,oBAAmB;AAIvB,SAAO;GAAE;GAAO;GAAgB;GAAa;GAAkB;GAAiB;;CAGlF,OAAO,iBACL,SAC+C;EAC/C,MAAM,EAAE,UAAU,WAAW;EAG7B,MAAM,UAAU,MAAM,KAAK,sBAAsB,QAAQ;EACzD,MAAM,EAAE,OAAO,aAAa,kBAAkB,oBAAoB;EAClE,IAAI,iBAAiB,QAAQ;AAG7B,MAAI,gBACF,OAAM;EAIR,MAAM,gBAAgB,MAAM,KAAK,kBAAkB,SAAS,eAAe;AAG3E,MAAI,cAAc,OAAO;AACvB,SAAM,cAAc;AACpB;;AAIF,MAAI,cAAc,mBAAmB;AACnC,SAAM;IACJ,MAAM;IACN,MAAM;IACN,UAAU,EAAE;IACZ;IACD;AACD;;EAIF,MAAM,gBAAgB,cAAc;AACpC,mBAAiB,cAAc;EAG/B,MAAM,aAA+B,EAAE;EACvC,MAAM,gBAAgB,EAAE,OAAO,GAAG;EAClC,MAAM,WAAW;EAGjB,MAAM,WAA0B,UAAU;AACxC,cAAW,KAAK,MAAM;;EAIxB,IAAI,QAAQ,KAAK,YAAY,OAAO,QAAQ;EAI5C,MAAM,iBAAiB,CAAC,CAAC,KAAK;EAC9B,MAAM,sBAAsB,CAAC,CAAC,QAAQ;AAEtC,MAAI,kBAAkB,oBACpB,SAAQ,sBAAsB,OAAO,KAAK,aAAa,QAAQ,kBAAkB;AAGnF,MAAI;GAcF,MAAM,SAAS,WAZO,KAAK,uBACzB,eACA,OACA,SACA,OACA,UACA,kBACA,YACA,cACD,CAGuC;AAGxC,SAAM;IAAE,MAAM;IAAc,YAAY;IAAG;AAG3C,cAAW,MAAM,SAAS,OAAO,YAAY;AAE3C,WAAO,WAAW,SAAS,GAAG;KAC5B,MAAM,QAAQ,WAAW,OAAO;AAChC,WAAM;AAGN,SAAI,MAAM,SAAS,cACjB,OAAM;MAAE,MAAM;MAAc,YAAY,MAAM,aAAa;MAAG;;AAKlE,QAAI,MAAM,SAAS,aACjB,OAAM;KAAE,MAAM;KAAQ,MAAM,MAAM;KAAM;aAC/B,MAAM,SAAS,YAGxB,OAAM;KACJ,MAAM;KACN,UAAU,MAAM;KAChB,YAAY,MAAM;KAClB,MAAM,MAAM;KACb;aACQ,MAAM,SAAS,cAGxB,OAAM;KACJ,MAAM;KACN,UAAU,MAAM;KAChB,YAAY,MAAM;KAClB,QAAQ,MAAM;KACd,SAAS;KACV;aACQ,MAAM,SAAS,aAExB,OAAM;KACJ,MAAM;KACN,UAAU,MAAM;KAChB,YAAY,MAAM;KAClB,QAAQ,MAAM;KACd,SAAS;KACV;;AAKL,UAAO,WAAW,SAAS,EACzB,OAAM,WAAW,OAAO;GAI1B,MAAM,YAAY,MAAM,OAAO;GAI/B,MAAM,kBAAkC,CACtC,GAAG,eACH,GAAI,YAAY,CAAC;IAAE,MAAM;IAAa,SAAS;IAAW,CAAiB,GAAG,EAAE,CACjF;GAGD,MAAM,SAAS,YAAY,SAAU,OAA+B,SAAS;AAG7E,SAAM;IACJ,MAAM;IACN;IACA,MAAM;IACN,UAAU;IACV,GAAI,WAAW,SAAY,EAAE,QAAQ,GAAG,EAAE;IAC3C;AAGD,OAAI,YAAY,KAAK,cAAc;IACjC,MAAM,kBAA8B;KAClC;KACA,MAAM,WAAW,cAAc;KAC/B,UAAU;KACV;KACA,4BAAW,IAAI,MAAM,EAAC,aAAa;KACnC,4BAAW,IAAI,MAAM,EAAC,aAAa;KACpC;AACD,UAAM,KAAK,aAAa,KAAK,gBAAgB;AAG7C,UAAM,2BAA2B,UAAU,WAAW,cAAc,MAAM;;WAErE,OAAO;AAEd,SAAM;IACJ,MAAM;IACN,OAAO,iBAAiB,QAAQ,QAAQ,IAAI,MAAM,OAAO,MAAM,CAAC;IACjE;;;;;;;CAQL,MAAM,mBACJ,SACA,SAC8E;EAC9E,IAAI,aAA6B,QAAQ,SAAS;GAAE,OAAO,EAAE;GAAE,OAAO,EAAE;GAAE;EAC1E,IAAI;EACJ,IAAI;AAEJ,aAAW,MAAM,SAAS,KAAK,iBAAiB,QAAQ,EAAE;AACxD,WAAQ,MAAM;AAEd,OAAI,MAAM,SAAS,QAAQ;AACzB,iBAAa,
MAAM;AACnB,gBAAY,MAAM;AAClB,oBAAgB,MAAM;;;AAI1B,SAAO;GAAE,OAAO;GAAY,MAAM;GAAW,UAAU;GAAe;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAyK1E,SAAgB,gBAAgB,QAA0C;AACxE,QAAO,IAAI,UAAU,OAAO;;;;;aCxtCD;aAC4B;;;;;;AAOzD,SAAS,yBACP,cACA,MACoB;CACpB,MAAM,aAAa,aAAa,aAAa;AAE7C,KAAI,WAAW,SAAS,eAAe,IACnC,WAAW,SAAS,YAAY,IAChC,WAAW,SAAS,cAAc,CACpC,QAAO;AAGT,KAAI,WAAW,SAAS,oBAAoB,IACxC,WAAW,SAAS,gBAAgB,IACpC,WAAW,SAAS,YAAY,CAClC,QAAO;AAGT,KAAI,WAAW,SAAS,iBAAiB,IACrC,WAAW,SAAS,sBAAsB,CAC5C,QAAO;AAIT,QAAO;;;;;AAMT,SAAS,SAAS,KAAqB;AACrC,QAAO,OAAO,KAAK,KAAK,QAAQ,CAAC,SAAS,SAAS;;;;;;AAOrD,SAAS,gBAAgB,QAAgB,MAAsC;CAE7E,IAAI,SAAS;AACb,MAAK,MAAM,CAAC,KAAK,UAAU,OAAO,QAAQ,KAAK,CAC7C,UAAS,OAAO,QAAQ,IAAI,OAAO,KAAK,IAAI,KAAK,IAAI,EAAE,MAAM;AAE/D,QAAO,YAAY,OAAO;;;;;;;;;;;;;;;;;;;;;;;;;AA0B5B,IAAsB,cAAtB,MAAoE;;;;;;;;;;CAsBlE,MAAM,YAAY,OAAmE;EACnF,MAAM,YAAkC,EAAE;AAE1C,OAAK,MAAM,CAAC,MAAM,YAAY,MAC5B,KAAI;GAEF,MAAM,gBAAgB,OAAO,KAAK,QAAQ,CAAC,SAAS,SAAS;GAE7D,MAAM,cAAc,KAAK,QAAQ,MAAM,QAAQ;GAC/C,MAAM,SAAS,MAAM,KAAK,QAAQ,SAAS,cAAc,mBAAmB,YAAY,GAAG;AAE3F,OAAI,OAAO,aAAa,EACtB,WAAU,KAAK;IACb;IACA,OAAO,yBAAyB,OAAO,QAAQ,KAAK;IACrD,CAAC;OAEF,WAAU,KAAK;IAAE;IAAM,OAAO;IAAM,CAAC;WAEhC,OAAO;AACd,aAAU,KAAK;IACb;IACA,OAAO;IACR,CAAC;;AAIN,SAAO;;;;;;;;;;;CAYT,MAAM,cAAc,OAAkD;EACpE,MAAM,YAAoC,EAAE;AAE5C,OAAK,MAAM,QAAQ,MACjB,KAAI;GAEF,MAAM,cAAc,KAAK,QAAQ,MAAM,QAAQ;GAC/C,MAAM,SAAS,MAAM,KAAK,QAAQ,WAAW,YAAY,GAAG;AAE5D,OAAI,OAAO,aAAa,EACtB,WAAU,KAAK;IACb;IACA,SAAS;IACT,OAAO,yBAAyB,OAAO,QAAQ,KAAK;IACrD,CAAC;QACG;IACL,MAAM,gBAAgB,OAAO,OAAO,MAAM;IAC1C,MAAM,UAAU,OAAO,KAAK,eAAe,SAAS;AACpD,cAAU,KAAK;KAAE;KAAM;KAAS,OAAO;KAAM,CAAC;;WAEzC,OAAO;AACd,aAAU,KAAK;IACb;IACA,SAAS;IACT,OAAO;IACR,CAAC;;AAIN,SAAO;;;;;CAMT,MAAM,OAAO,MAAmC;EAC9C,MAAM,UAAU,SAAS,KAAK;EAuB9B,MAAM,SAAS,MAAM,KAAK,QAAQ,gBAtBnB;;;;;;;;;;;;;;;;;;;;;GAsB2C,EAAE,MAAM,SAAS,CAAC,CAAC;EAE7E,MAAM,QAAoB,EAAE;AAC5B,OAAK,MAAM,QAAQ,OAAO,OAAO,MAAM,CAAC,MAAM,KAAK,EAAE;AACnD,OAAI,CAAC,KAAM;AACX,OAAI;IACF,MAAM,OAAO,KAAK,MAAM,KAAK;AAC7B,UAAM,KAAK;KACT,MAAM,KAAK;KACX,QAAQ,KAAK;KACb,MAAM,KAAK;KACX,aAAa,KAAK;KACnB,CAAC;WACI;;AAIV,SAAO;;;;;CAMT,MAAM,KACJ,UACA,SAAiB,GACjB,QAAgB,oBACC;EACjB,MAAM,UAAU,SAAS,SAAS;EAElC,MAAM,SAAS;;;;;;;;;;;;;iBADO,2BAcK;;;;;;;;;;;;;EAa3B,MAAM,SAAS,MAAM,KAAK,QACxB,gBAAgB,QAAQ;GACtB,MAAM;GACN,QAAQ,OAAO,OAAO;GACtB,OAAO,OAAO,MAAM;GACrB,CAAC,CACH;AAED,MAAI,OAAO,aAAa,GAAG;AACzB,OAAI,OAAO,OAAO,SAAS,wBAAwB,CACjD,QAAO,eAAe,SAAS;AAEjC,UAAO,OAAO,OAAO,MAAM;;AAG7B,SAAO,OAAO,OAAO,SAAS;;;;;CAMhC,MAAM,QAAQ,UAAqC;EACjD,MAAM,UAAU,SAAS,SAAS;EAmBlC,MAAM,SAAS,MAAM,KAAK,QAAQ,gBAlBnB;;;;;;;;;;;;;;;;;GAkB2C,EAAE,MAAM,SAAS,CAAC,CAAC;AAE7E,MAAI,OAAO,aAAa,EACtB,OAAM,IAAI,MAAM,SAAS,SAAS,aAAa;AAGjD,MAAI;GACF,MAAM,OAAO,KAAK,MAAM,OAAO,OAAO,MAAM,CAAC;AAC7C,UAAO;IACL,SAAS,KAAK;IACd,YAAY,KAAK;IACjB,aAAa,KAAK;IACnB;UACK;AACN,SAAM,IAAI,MAAM,kCAAkC,SAAS,GAAG;;;;;;CAOlE,MAAM,MAAM,UAAkB,SAAuC;EACnE,MAAM,UAAU,SAAS,SAAS;EAClC,MAAM,aAAa,SAAS,QAAQ;EAoBpC,MAAM,SAAS,MAAM,KAAK,QACxB,gBApBa;;;;;;;;;;;;;;;;;;GAoBW;GAAE,MAAM;GAAS,SAAS;GAAY,CAAC,CAChE;AAED,MAAI,OAAO,aAAa,GAAG;AACzB,OAAI,OAAO,OAAO,SAAS,iBAAiB,CAC1C,QAAO;IACL,SAAS;IACT,OAAO,mBAAmB,SAAS;IACpC;AAEH,UAAO;IAAE,SAAS;IAAO,OAAO,OAAO,OAAO,MAAM,IAAI,oBAAoB,SAAS;IAAI;;AAG3F,SAAO;GAAE,SAAS;GAAM,MAAM;GAAU;;;;;CAM1C,MAAM,KACJ,UACA,WACA,WACA,aAAsB,OACD;EACrB,MAAM,UAAU,SAAS,SAAS;EAClC,MAAM,SAAS,SAAS,UAAU;EAClC,MAAM,SAAS,SAAS,UAAU;EAiClC,MAAM,SAAS,MAAM,KAAK,QACxB,gBAjCa;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAiCW;GACtB,MAAM;GACN,KAAK;GACL,KAAK;GACL,aAAa,OAAO,WAAW;GAChC,
CAAC,CACH;AAED,MAAI,OAAO,aAAa,EACtB,QAAO;GAAE,SAAS;GAAO,OAAO,eAAe,SAAS;GAAE;AAE5D,MAAI,OAAO,aAAa,EACtB,QAAO;GAAE,SAAS;GAAO,OAAO,iBAAiB,UAAU,UAAU;GAAE;AAEzE,MAAI,OAAO,aAAa,EACtB,QAAO;GACL,SAAS;GACT,OAAO,kBAAkB,UAAU;GACpC;AAIH,SAAO;GAAE,SAAS;GAAM,MAAM;GAAU,aAD1B,SAAS,OAAO,OAAO,MAAM,EAAE,GAAG,IAAI;GACQ;;;;;CAM9D,MAAM,QACJ,SACA,OAAe,KACf,SAAsB,MACS;EAC/B,MAAM,aAAa,SAAS,QAAQ;EACpC,MAAM,UAAU,SAAS,KAAK;EAC9B,MAAM,UAAUC,SAAO,SAASA,OAAK,GAAG,SAAS,OAAO;EA2DxD,MAAM,SAAS,MAAM,KAAK,QACxB,gBA3Da;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA2DW;GACtB,SAAS;GACT,MAAM;GACN,MAAM;GACP,CAAC,CACH;EAED,MAAM,UAAuB,EAAE;AAC/B,OAAK,MAAM,QAAQ,OAAO,OAAO,MAAM,CAAC,MAAM,KAAK,EAAE;AACnD,OAAI,CAAC,KAAM;AACX,OAAI;IACF,MAAM,OAAO,KAAK,MAAM,KAAK;AAC7B,YAAQ,KAAK;KACX,MAAM,KAAK;KACX,MAAM,KAAK;KACX,MAAM,KAAK;KACZ,CAAC;WACI;;AAIV,SAAO;;;;;CAMT,MAAM,SAAS,SAAiB,OAAe,KAA0B;EACvE,MAAM,UAAU,SAAS,KAAK;EAC9B,MAAM,aAAa,SAAS,QAAQ;EAoDpC,MAAM,SAAS,MAAM,KAAK,QACxB,gBApDa;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAoDW;GAAE,MAAM;GAAS,SAAS;GAAY,CAAC,CAChE;EAED,MAAM,QAAoB,EAAE;AAC5B,OAAK,MAAM,QAAQ,OAAO,OAAO,MAAM,CAAC,MAAM,KAAK,EAAE;AACnD,OAAI,CAAC,KAAM;AACX,OAAI;IACF,MAAM,OAAO,KAAK,MAAM,KAAK;AAC7B,UAAM,KAAK;KACT,MAAM,KAAK;KACX,QAAQ,KAAK;KACb,MAAM,KAAK;KACX,aAAa,KAAK;KACnB,CAAC;WACI;;AAIV,SAAO;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AC1hBX,IAAa,eAAb,cAAkC,YAAY;CAC5C,AAAiB;CACjB,AAAiB;CACjB,AAAiB;CACjB,AAAiB;CACjB,AAAiB;;;;;;CAOjB,YAAY,UAA+B,EAAE,EAAE;AAC7C,SAAO;AACP,OAAK,MAAM,QAAQ,OAAO,QAAQ,KAAK;AACvC,OAAK,UAAU,QAAQ,WAAW;AAClC,OAAK,MAAM,QAAQ,OAAO,EAAE;AAC5B,OAAK,gBAAgB,QAAQ,iBAAiB,OAAO;AACrD,OAAK,MAAM,SAAS,KAAK,KAAK,CAAC,GAAG,KAAK,QAAQ,CAAC,SAAS,GAAG,CAAC,MAAM,GAAG,EAAE;;;;;;CAO1E,IAAI,KAAa;AACf,SAAO,KAAK;;;;;;;;;;;;;;;;;;CAmBd,MAAM,QAAQ,SAA2C;AACvD,SAAO,IAAI,SAAS,YAAY;GAC9B,MAAM,QAAQ,MAAM,QAAQ,CAAC,MAAM,QAAQ,EAAE;IAC3C,KAAK,KAAK;IACV,KAAK;KAAE,GAAG,QAAQ;KAAK,GAAG,KAAK;KAAK;IACpC,SAAS,KAAK;IACf,CAAC;GAEF,IAAI,SAAS;GACb,IAAI,YAAY;AAEhB,SAAM,OAAO,GAAG,SAAS,SAAiB;AACxC,QAAI,OAAO,SAAS,KAAK,cACvB,WAAU,KAAK,UAAU;QAEzB,aAAY;KAEd;AAEF,SAAM,OAAO,GAAG,SAAS,SAAiB;AACxC,QAAI,OAAO,SAAS,KAAK,cACvB,WAAU,KAAK,UAAU;QAEzB,aAAY;KAEd;AAEF,SAAM,GAAG,UAAU,SAAS;AAC1B,YAAQ;KACN;KACA,UAAU;KACV;KACD,CAAC;KACF;AAEF,SAAM,GAAG,UAAU,QAAQ;AACzB,YAAQ;KACN,QAAQ,UAAU,IAAI;KACtB,UAAU;KACV,WAAW;KACZ,CAAC;KACF;IACF;;;;;;;;;;;;;;;;;;;;;;;;;;;ACjJN,SAAgB,iBAAiB,aAAoC;CACnE,MAAM,CAAC,UAAU,aAAa,YAAY,MAAM,IAAI;AAEpD,KAAI,aAAa,YACf,QAAO,UAAU,aAAa,2BAA2B;UAChD,aAAa,SACtB,QAAO,OAAO,aAAa,aAAa;AAI1C,QAAO,UAAU,YAAY;;;;;;;;;;;;;;;;;;;;;;;ACL/B,IAAa,YAAb,MAAsD;CACpD,AAAQ;CAER,YAAY,SAA2B;AACrC,OAAK,MAAM,QAAQ;AAGnB,MAAI,CAAC,WAAW,KAAK,IAAI,CACvB,WAAU,KAAK,KAAK,EAAE,WAAW,MAAM,CAAC;;CAI5C,AAAQ,YAAY,UAA0B;EAE5C,MAAM,SAAS,SAAS,QAAQ,mBAAmB,IAAI;AACvD,SAAO,KAAK,KAAK,KAAK,GAAG,OAAO,OAAO;;CAGzC,MAAM,KAAK,YAAuC;EAChD,MAAM,WAAW,KAAK,YAAY,WAAW,SAAS;EACtD,MAAM,OAAO;GACX,GAAG;GACH,4BAAW,IAAI,MAAM,EAAC,aAAa;GACpC;AACD,gBAAc,UAAU,KAAK,UAAU,MAAM,MAAM,EAAE,EAAE,QAAQ;;CAGjE,MAAM,KAAK,UAAmD;EAC5D,MAAM,WAAW,KAAK,YAAY,SAAS;AAE3C,MAAI,CAAC,WAAW,SAAS,CACvB;AAGF,MAAI;GACF,MAAM,UAAU,aAAa,UAAU,QAAQ;AAC/C,UAAO,KAAK,MAAM,QAAQ;UACpB;AACN;;;CAIJ,MAAM,OAA0B;AAC9B,MAAI,CAAC,WAAW,KAAK,IAAI,CACvB,QAAO,EAAE;AAIX,SADc,YAAY,KAAK,IAAI,CAEhC,QAAO,MAAK,EAAE,SAAS,QAAQ,CAAC,CAChC,KAAI,MAAK,EAAE,QAAQ,SAAS,GAAG,CAAC;;CAGrC,MAAM,OAAO,UAAiC;EAC5C,MAAM,WAAW,KAAK,YAAY,SAAS;AAE3C,MAAI,WAAW,SAAS,CACtB,YAAW,SAAS;;CAIxB,MAAM,OAAO,UAAoC;AAE/C,SAAO,WADU,KAAK,YAAY,SAAS,CAChB"}