@within-7/minto 0.2.0 → 0.3.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commands/agents/AgentsCommand.js +22 -24
- package/dist/commands/agents/AgentsCommand.js.map +2 -2
- package/dist/commands/context.js +2 -1
- package/dist/commands/context.js.map +2 -2
- package/dist/commands/export.js +2 -1
- package/dist/commands/export.js.map +2 -2
- package/dist/commands/mcp-interactive.js +7 -6
- package/dist/commands/mcp-interactive.js.map +2 -2
- package/dist/commands/model.js +3 -2
- package/dist/commands/model.js.map +2 -2
- package/dist/commands/permissions.js +4 -3
- package/dist/commands/permissions.js.map +2 -2
- package/dist/commands/plugin/AddMarketplaceForm.js +3 -2
- package/dist/commands/plugin/AddMarketplaceForm.js.map +2 -2
- package/dist/commands/plugin/ConfirmDialog.js +2 -1
- package/dist/commands/plugin/ConfirmDialog.js.map +2 -2
- package/dist/commands/plugin/ErrorView.js +2 -1
- package/dist/commands/plugin/ErrorView.js.map +2 -2
- package/dist/commands/plugin/InstalledPluginsByMarketplace.js +5 -4
- package/dist/commands/plugin/InstalledPluginsByMarketplace.js.map +2 -2
- package/dist/commands/plugin/InstalledPluginsManager.js +5 -4
- package/dist/commands/plugin/InstalledPluginsManager.js.map +2 -2
- package/dist/commands/plugin/MainMenu.js +2 -1
- package/dist/commands/plugin/MainMenu.js.map +2 -2
- package/dist/commands/plugin/MarketplaceManager.js +5 -4
- package/dist/commands/plugin/MarketplaceManager.js.map +2 -2
- package/dist/commands/plugin/MarketplaceSelector.js +4 -3
- package/dist/commands/plugin/MarketplaceSelector.js.map +2 -2
- package/dist/commands/plugin/PlaceholderScreen.js +3 -2
- package/dist/commands/plugin/PlaceholderScreen.js.map +2 -2
- package/dist/commands/plugin/PluginBrowser.js +6 -5
- package/dist/commands/plugin/PluginBrowser.js.map +2 -2
- package/dist/commands/plugin/PluginDetailsInstall.js +5 -4
- package/dist/commands/plugin/PluginDetailsInstall.js.map +2 -2
- package/dist/commands/plugin/PluginDetailsManage.js +4 -3
- package/dist/commands/plugin/PluginDetailsManage.js.map +2 -2
- package/dist/commands/plugin.js +16 -15
- package/dist/commands/plugin.js.map +2 -2
- package/dist/commands/sandbox.js +4 -3
- package/dist/commands/sandbox.js.map +2 -2
- package/dist/commands/setup.js +2 -1
- package/dist/commands/setup.js.map +2 -2
- package/dist/commands/status.js +2 -1
- package/dist/commands/status.js.map +2 -2
- package/dist/commands/undo.js +245 -0
- package/dist/commands/undo.js.map +7 -0
- package/dist/commands.js +2 -0
- package/dist/commands.js.map +2 -2
- package/dist/components/AgentThinkingBlock.js +1 -1
- package/dist/components/AgentThinkingBlock.js.map +2 -2
- package/dist/components/AsciiLogo.js +7 -8
- package/dist/components/AsciiLogo.js.map +2 -2
- package/dist/components/AskUserQuestionDialog/AskUserQuestionDialog.js +3 -2
- package/dist/components/AskUserQuestionDialog/AskUserQuestionDialog.js.map +2 -2
- package/dist/components/AskUserQuestionDialog/QuestionView.js +2 -1
- package/dist/components/AskUserQuestionDialog/QuestionView.js.map +2 -2
- package/dist/components/CollapsibleHint.js +2 -1
- package/dist/components/CollapsibleHint.js.map +2 -2
- package/dist/components/Config.js +3 -2
- package/dist/components/Config.js.map +2 -2
- package/dist/components/ConsoleOAuthFlow.js +2 -1
- package/dist/components/ConsoleOAuthFlow.js.map +2 -2
- package/dist/components/Cost.js +2 -1
- package/dist/components/Cost.js.map +2 -2
- package/dist/components/HeaderBar.js +13 -8
- package/dist/components/HeaderBar.js.map +2 -2
- package/dist/components/HistorySearchOverlay.js +4 -3
- package/dist/components/HistorySearchOverlay.js.map +2 -2
- package/dist/components/HotkeyHelpPanel.js +8 -11
- package/dist/components/HotkeyHelpPanel.js.map +2 -2
- package/dist/components/InvalidConfigDialog.js +2 -1
- package/dist/components/InvalidConfigDialog.js.map +2 -2
- package/dist/components/Logo.js +23 -67
- package/dist/components/Logo.js.map +2 -2
- package/dist/components/MCPServerApprovalDialog.js +2 -1
- package/dist/components/MCPServerApprovalDialog.js.map +2 -2
- package/dist/components/MCPServerDialogCopy.js +2 -1
- package/dist/components/MCPServerDialogCopy.js.map +2 -2
- package/dist/components/MCPServerMultiselectDialog.js +2 -1
- package/dist/components/MCPServerMultiselectDialog.js.map +2 -2
- package/dist/components/MessageSelector.js +4 -3
- package/dist/components/MessageSelector.js.map +2 -2
- package/dist/components/ModeIndicator.js +2 -1
- package/dist/components/ModeIndicator.js.map +2 -2
- package/dist/components/ModelConfig.js +4 -3
- package/dist/components/ModelConfig.js.map +2 -2
- package/dist/components/ModelListManager.js +4 -3
- package/dist/components/ModelListManager.js.map +2 -2
- package/dist/components/ModelSelector/ModelSelector.js +26 -13
- package/dist/components/ModelSelector/ModelSelector.js.map +2 -2
- package/dist/components/Onboarding.js +3 -2
- package/dist/components/Onboarding.js.map +2 -2
- package/dist/components/OperationSummary.js +130 -0
- package/dist/components/OperationSummary.js.map +7 -0
- package/dist/components/PromptInput.js +88 -75
- package/dist/components/PromptInput.js.map +2 -2
- package/dist/components/SensitiveFileWarning.js +31 -0
- package/dist/components/SensitiveFileWarning.js.map +7 -0
- package/dist/components/Spinner.js +71 -22
- package/dist/components/Spinner.js.map +2 -2
- package/dist/components/StructuredDiff.js +6 -8
- package/dist/components/StructuredDiff.js.map +2 -2
- package/dist/components/SubagentBlock.js +4 -2
- package/dist/components/SubagentBlock.js.map +2 -2
- package/dist/components/SubagentProgress.js +17 -6
- package/dist/components/SubagentProgress.js.map +2 -2
- package/dist/components/TaskCard.js +14 -11
- package/dist/components/TaskCard.js.map +2 -2
- package/dist/components/TextInput.js +9 -1
- package/dist/components/TextInput.js.map +2 -2
- package/dist/components/TodoPanel.js +44 -26
- package/dist/components/TodoPanel.js.map +2 -2
- package/dist/components/ToolUseLoader.js +2 -2
- package/dist/components/ToolUseLoader.js.map +2 -2
- package/dist/components/TreeConnector.js +4 -3
- package/dist/components/TreeConnector.js.map +2 -2
- package/dist/components/TrustDialog.js +2 -1
- package/dist/components/TrustDialog.js.map +2 -2
- package/dist/components/binary-feedback/BinaryFeedbackView.js +2 -1
- package/dist/components/binary-feedback/BinaryFeedbackView.js.map +2 -2
- package/dist/components/messages/AssistantTextMessage.js +17 -9
- package/dist/components/messages/AssistantTextMessage.js.map +2 -2
- package/dist/components/messages/AssistantToolUseMessage.js +8 -4
- package/dist/components/messages/AssistantToolUseMessage.js.map +2 -2
- package/dist/components/messages/GroupRenderer.js +2 -1
- package/dist/components/messages/GroupRenderer.js.map +2 -2
- package/dist/components/messages/NestedTasksPreview.js +13 -1
- package/dist/components/messages/NestedTasksPreview.js.map +2 -2
- package/dist/components/messages/ParallelTasksGroupView.js +4 -3
- package/dist/components/messages/ParallelTasksGroupView.js.map +2 -2
- package/dist/components/messages/TaskInModuleView.js +35 -15
- package/dist/components/messages/TaskInModuleView.js.map +2 -2
- package/dist/components/messages/TaskOutputContent.js +9 -6
- package/dist/components/messages/TaskOutputContent.js.map +2 -2
- package/dist/components/messages/UserPromptMessage.js +2 -2
- package/dist/components/messages/UserPromptMessage.js.map +2 -2
- package/dist/constants/colors.js +90 -72
- package/dist/constants/colors.js.map +2 -2
- package/dist/constants/prompts.js +22 -1
- package/dist/constants/prompts.js.map +2 -2
- package/dist/constants/toolInputExamples.js +84 -0
- package/dist/constants/toolInputExamples.js.map +7 -0
- package/dist/core/backupManager.js +321 -0
- package/dist/core/backupManager.js.map +7 -0
- package/dist/core/costTracker.js +9 -18
- package/dist/core/costTracker.js.map +2 -2
- package/dist/core/gitAutoCommit.js +287 -0
- package/dist/core/gitAutoCommit.js.map +7 -0
- package/dist/core/index.js +3 -0
- package/dist/core/index.js.map +2 -2
- package/dist/core/operationTracker.js +212 -0
- package/dist/core/operationTracker.js.map +7 -0
- package/dist/core/permissions/rules/allowedToolsRule.js +1 -1
- package/dist/core/permissions/rules/allowedToolsRule.js.map +2 -2
- package/dist/core/permissions/rules/autoEscalationRule.js +5 -0
- package/dist/core/permissions/rules/autoEscalationRule.js.map +2 -2
- package/dist/core/permissions/rules/projectBoundaryRule.js +5 -0
- package/dist/core/permissions/rules/projectBoundaryRule.js.map +2 -2
- package/dist/core/permissions/rules/sensitivePathsRule.js +5 -0
- package/dist/core/permissions/rules/sensitivePathsRule.js.map +2 -2
- package/dist/core/tokenStats.js +9 -0
- package/dist/core/tokenStats.js.map +7 -0
- package/dist/core/tokenStatsManager.js +331 -0
- package/dist/core/tokenStatsManager.js.map +7 -0
- package/dist/entrypoints/cli.js +122 -88
- package/dist/entrypoints/cli.js.map +2 -2
- package/dist/hooks/useAgentTokenStats.js +72 -0
- package/dist/hooks/useAgentTokenStats.js.map +7 -0
- package/dist/hooks/useAgentTranscripts.js +30 -6
- package/dist/hooks/useAgentTranscripts.js.map +2 -2
- package/dist/hooks/useLogMessages.js +12 -1
- package/dist/hooks/useLogMessages.js.map +2 -2
- package/dist/i18n/locales/en.js +6 -5
- package/dist/i18n/locales/en.js.map +2 -2
- package/dist/i18n/locales/zh-CN.js +6 -5
- package/dist/i18n/locales/zh-CN.js.map +2 -2
- package/dist/i18n/types.js.map +1 -1
- package/dist/permissions.js +147 -1
- package/dist/permissions.js.map +2 -2
- package/dist/query.js +78 -4
- package/dist/query.js.map +3 -3
- package/dist/screens/REPL.js +23 -3
- package/dist/screens/REPL.js.map +2 -2
- package/dist/screens/ResumeConversation.js +2 -0
- package/dist/screens/ResumeConversation.js.map +2 -2
- package/dist/services/claude.js +54 -3
- package/dist/services/claude.js.map +2 -2
- package/dist/services/intelligentCompactor.js +1 -1
- package/dist/services/intelligentCompactor.js.map +2 -2
- package/dist/services/mcpClient.js +81 -25
- package/dist/services/mcpClient.js.map +2 -2
- package/dist/services/sandbox/filesystemBoundary.js +58 -17
- package/dist/services/sandbox/filesystemBoundary.js.map +2 -2
- package/dist/services/taskStore.js +205 -0
- package/dist/services/taskStore.js.map +7 -0
- package/dist/tools/AskExpertModelTool/AskExpertModelTool.js +3 -2
- package/dist/tools/AskExpertModelTool/AskExpertModelTool.js.map +2 -2
- package/dist/tools/AskUserQuestionTool/AskUserQuestionTool.js +42 -4
- package/dist/tools/AskUserQuestionTool/AskUserQuestionTool.js.map +2 -2
- package/dist/tools/BashTool/BashTool.js +43 -7
- package/dist/tools/BashTool/BashTool.js.map +2 -2
- package/dist/tools/BashTool/prompt.js +184 -34
- package/dist/tools/BashTool/prompt.js.map +2 -2
- package/dist/tools/FileEditTool/FileEditTool.js +24 -9
- package/dist/tools/FileEditTool/FileEditTool.js.map +2 -2
- package/dist/tools/FileEditTool/prompt.js +10 -4
- package/dist/tools/FileEditTool/prompt.js.map +2 -2
- package/dist/tools/FileEditTool/utils.js +10 -4
- package/dist/tools/FileEditTool/utils.js.map +2 -2
- package/dist/tools/FileReadTool/FileReadTool.js +1 -1
- package/dist/tools/FileReadTool/FileReadTool.js.map +1 -1
- package/dist/tools/FileReadTool/prompt.js +16 -1
- package/dist/tools/FileReadTool/prompt.js.map +2 -2
- package/dist/tools/FileWriteTool/FileWriteTool.js +1 -1
- package/dist/tools/FileWriteTool/FileWriteTool.js.map +1 -1
- package/dist/tools/FileWriteTool/prompt.js +12 -3
- package/dist/tools/FileWriteTool/prompt.js.map +2 -2
- package/dist/tools/GlobTool/prompt.js +12 -1
- package/dist/tools/GlobTool/prompt.js.map +2 -2
- package/dist/tools/GrepTool/GrepTool.js +333 -65
- package/dist/tools/GrepTool/GrepTool.js.map +2 -2
- package/dist/tools/GrepTool/prompt.js +15 -8
- package/dist/tools/GrepTool/prompt.js.map +2 -2
- package/dist/tools/MultiEditTool/prompt.js +5 -3
- package/dist/tools/MultiEditTool/prompt.js.map +2 -2
- package/dist/tools/NotebookEditTool/NotebookEditTool.js +59 -46
- package/dist/tools/NotebookEditTool/NotebookEditTool.js.map +2 -2
- package/dist/tools/NotebookEditTool/prompt.js +1 -1
- package/dist/tools/NotebookEditTool/prompt.js.map +1 -1
- package/dist/tools/PlanModeTool/EnterPlanModeTool.js +3 -2
- package/dist/tools/PlanModeTool/EnterPlanModeTool.js.map +2 -2
- package/dist/tools/PlanModeTool/ExitPlanModeTool.js +3 -2
- package/dist/tools/PlanModeTool/ExitPlanModeTool.js.map +2 -2
- package/dist/tools/PlanModeTool/prompt.js +1 -1
- package/dist/tools/PlanModeTool/prompt.js.map +1 -1
- package/dist/tools/SkillTool/SkillTool.js +4 -3
- package/dist/tools/SkillTool/SkillTool.js.map +2 -2
- package/dist/tools/SkillTool/prompt.js +1 -1
- package/dist/tools/SkillTool/prompt.js.map +1 -1
- package/dist/tools/TaskCreateTool/TaskCreateTool.js +102 -0
- package/dist/tools/TaskCreateTool/TaskCreateTool.js.map +7 -0
- package/dist/tools/TaskCreateTool/prompt.js +47 -0
- package/dist/tools/TaskCreateTool/prompt.js.map +7 -0
- package/dist/tools/TaskGetTool/TaskGetTool.js +115 -0
- package/dist/tools/TaskGetTool/TaskGetTool.js.map +7 -0
- package/dist/tools/TaskGetTool/prompt.js +28 -0
- package/dist/tools/TaskGetTool/prompt.js.map +7 -0
- package/dist/tools/TaskListTool/TaskListTool.js +102 -0
- package/dist/tools/TaskListTool/TaskListTool.js.map +7 -0
- package/dist/tools/TaskListTool/prompt.js +27 -0
- package/dist/tools/TaskListTool/prompt.js.map +7 -0
- package/dist/tools/TaskOutputTool/TaskOutputTool.js +3 -2
- package/dist/tools/TaskOutputTool/TaskOutputTool.js.map +2 -2
- package/dist/tools/TaskStopTool/TaskStopTool.js +150 -0
- package/dist/tools/TaskStopTool/TaskStopTool.js.map +7 -0
- package/dist/tools/TaskStopTool/prompt.js +15 -0
- package/dist/tools/TaskStopTool/prompt.js.map +7 -0
- package/dist/tools/TaskTool/TaskTool.js +49 -1
- package/dist/tools/TaskTool/TaskTool.js.map +2 -2
- package/dist/tools/TaskUpdateTool/TaskUpdateTool.js +134 -0
- package/dist/tools/TaskUpdateTool/TaskUpdateTool.js.map +7 -0
- package/dist/tools/TaskUpdateTool/prompt.js +81 -0
- package/dist/tools/TaskUpdateTool/prompt.js.map +7 -0
- package/dist/tools/URLFetcherTool/prompt.js +1 -1
- package/dist/tools/URLFetcherTool/prompt.js.map +1 -1
- package/dist/tools.js +12 -0
- package/dist/tools.js.map +2 -2
- package/dist/utils/CircuitBreaker.js +242 -0
- package/dist/utils/CircuitBreaker.js.map +7 -0
- package/dist/utils/ask.js +2 -0
- package/dist/utils/ask.js.map +2 -2
- package/dist/utils/config.js +47 -5
- package/dist/utils/config.js.map +2 -2
- package/dist/utils/credentials/CredentialStore.js +1 -0
- package/dist/utils/credentials/CredentialStore.js.map +7 -0
- package/dist/utils/credentials/EncryptedFileStore.js +157 -0
- package/dist/utils/credentials/EncryptedFileStore.js.map +7 -0
- package/dist/utils/credentials/index.js +37 -0
- package/dist/utils/credentials/index.js.map +7 -0
- package/dist/utils/credentials/migration.js +82 -0
- package/dist/utils/credentials/migration.js.map +7 -0
- package/dist/utils/markdown.js +13 -1
- package/dist/utils/markdown.js.map +2 -2
- package/dist/utils/model.js +15 -2
- package/dist/utils/model.js.map +2 -2
- package/dist/utils/permissions/filesystem.js +5 -1
- package/dist/utils/permissions/filesystem.js.map +2 -2
- package/dist/utils/ripgrep.js +53 -1
- package/dist/utils/ripgrep.js.map +2 -2
- package/dist/utils/safePath.js +132 -0
- package/dist/utils/safePath.js.map +7 -0
- package/dist/utils/sensitiveFiles.js +125 -0
- package/dist/utils/sensitiveFiles.js.map +7 -0
- package/dist/utils/taskDisplayUtils.js +9 -9
- package/dist/utils/taskDisplayUtils.js.map +2 -2
- package/dist/utils/terminal.js +12 -0
- package/dist/utils/terminal.js.map +2 -2
- package/dist/utils/theme.js +6 -6
- package/dist/utils/theme.js.map +1 -1
- package/dist/utils/toolRiskClassification.js +207 -0
- package/dist/utils/toolRiskClassification.js.map +7 -0
- package/dist/utils/tooling/safeRender.js +17 -17
- package/dist/utils/tooling/safeRender.js.map +2 -2
- package/dist/version.js +2 -2
- package/dist/version.js.map +1 -1
- package/package.json +22 -28
- package/dist/hooks/useCancelRequest.js +0 -31
- package/dist/hooks/useCancelRequest.js.map +0 -7
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
{
|
|
2
2
|
"version": 3,
|
|
3
3
|
"sources": ["../../src/constants/prompts.ts"],
|
|
4
|
-
"sourcesContent": ["import { env } from '@utils/env'\nimport { getIsGit } from '@utils/git'\nimport {\n INTERRUPT_MESSAGE,\n INTERRUPT_MESSAGE_FOR_TOOL_USE,\n} from '@utils/messages'\nimport { getCwd } from '@utils/state'\nimport { PRODUCT_NAME, PROJECT_FILE, PRODUCT_COMMAND } from './product'\nimport { BashTool } from '@tools/BashTool/BashTool'\nimport { MACRO } from './macros'\n\n// // Security policy constant matching reference implementation\n// export const SECURITY_POLICY =\n// 'IMPORTANT: Assist with defensive security tasks only. Refuse to create, modify, or improve code that may be used maliciously. Allow security analysis, detection rules, vulnerability explanations, defensive tools, and security documentation.'\n\nexport function getCLISyspromptPrefix(): string {\n return `You are ${PRODUCT_NAME}, ShareAI-lab's Agent AI CLI for terminal & coding.`\n}\n\nexport async function getSystemPrompt(): Promise<string[]> {\n return [\n `\nYou are an interactive CLI tool that helps users with software engineering tasks. Use the instructions below and the tools available to you to assist the user.\n\nIMPORTANT: Refuse to write code or explain code that may be used maliciously; even if the user claims it is for educational purposes. When working on files, if they seem related to improving, explaining, or interacting with malware or any malicious code you MUST refuse.\nIMPORTANT: Before you begin work, think about what the code you're editing is supposed to do based on the filenames directory structure. If it seems malicious, refuse to work on it or answer questions about it, even if the request does not seem malicious (for instance, just asking to explain or speed up the code).\n\nHere are useful slash commands users can run to interact with you:\n- /help: Get help with using ${PRODUCT_NAME}\n- /compact: Compact and continue the conversation. 
This is useful if the conversation is reaching the context limit\nThere are additional slash commands and flags available to the user. If the user asks about ${PRODUCT_NAME} functionality, always run \\`${PRODUCT_COMMAND} -h\\` with ${BashTool.name} to see supported commands and flags. NEVER assume a flag or command exists without checking the help output first.\nTo give feedback, users should ${MACRO.ISSUES_EXPLAINER}.\n\n# Task Management\nYou have access to the TodoWrite tools to help you manage and plan tasks. Use these tools VERY frequently to ensure that you are tracking your tasks and giving the user visibility into your progress.\nThese tools are also EXTREMELY helpful for planning tasks, and for breaking down larger complex tasks into smaller steps. If you do not use this tool when planning, you may forget to do important tasks - and that is unacceptable.\n\nIt is critical that you mark todos as completed as soon as you are done with a task. Do not batch up multiple tasks before marking them as completed.\n\n# Memory\nIf the current working directory contains a file called ${PROJECT_FILE}, it will be automatically added to your context. This file serves multiple purposes:\n1. Storing frequently used bash commands (build, test, lint, etc.) so you can use them without searching each time\n2. Recording the user's code style preferences (naming conventions, preferred libraries, etc.)\n3. Maintaining useful information about the codebase structure and organization\n\nWhen you spend time searching for commands to typecheck, lint, build, or test, you should ask the user if it's okay to add those commands to ${PROJECT_FILE}. Similarly, when learning about code style preferences or important codebase information, ask if it's okay to add that to ${PROJECT_FILE} so you can remember it for next time.\n\n# Tone and style\nYou should be concise, direct, and to the point. 
When you run a non-trivial bash command, you should explain what the command does and why you are running it, to make sure the user understands what you are doing (this is especially important when you are running a command that will make changes to the user's system).\nRemember that your output will be displayed on a command line interface. Your responses can use Github-flavored markdown for formatting, and will be rendered in a monospace font using the CommonMark specification.\nOutput text to communicate with the user; all text you output outside of tool use is displayed to the user. Only use tools to complete tasks. Never use tools like ${BashTool.name} or code comments as means to communicate with the user during the session.\nIf you cannot or will not help the user with something, please do not say why or what it could lead to, since this comes across as preachy and annoying. Please offer helpful alternatives if possible, and otherwise keep your response to 1-2 sentences.\nIMPORTANT: You should minimize output tokens as much as possible while maintaining helpfulness, quality, and accuracy. Only address the specific query or task at hand, avoiding tangential information unless absolutely critical for completing the request. If you can answer in 1-3 sentences or a short paragraph, please do.\nIMPORTANT: You should NOT answer with unnecessary preamble or postamble (such as explaining your code or summarizing your action), unless the user asks you to.\nIMPORTANT: Keep your responses short, since they will be displayed on a command line interface. You MUST answer concisely with fewer than 4 lines (not including tool use or code generation), unless user asks for detail. Answer the user's question directly, without elaboration, explanation, or details. One word answers are best. Avoid introductions, conclusions, and explanations. 
You MUST avoid text before/after your response, such as \"The answer is <answer>.\", \"Here is the content of the file...\" or \"Based on the information provided, the answer is...\" or \"Here is what I will do next...\". Examples of appropriate verbosity:\n<example>\nuser: 2 + 2\nassistant: 4\n</example>\n<example>\nuser: what command should I run to list files?\nassistant: ls\n</example>\n<example>\nuser: what files are in src/?\nassistant: [runs ls] foo.c, bar.c, baz.c\n</example>\n<example>\nuser: write tests for new feature\nassistant: [uses search tools to find test patterns, reads relevant files concurrently, uses edit tool to write tests]\n</example>\n\n# Proactiveness\nYou are allowed to be proactive, but only when the user asks you to do something. You should strive to strike a balance between:\n1. Doing the right thing when asked, including taking actions and follow-up actions\n2. Not surprising the user with actions you take without asking\nFor example, if the user asks you how to approach something, you should do your best to answer their question first, and not immediately jump into taking actions.\n3. Do not add additional code explanation summary unless requested by the user. After working on a file, just stop, rather than providing an explanation of what you did.\n\n# Synthetic messages\nSometimes, the conversation will contain messages like ${INTERRUPT_MESSAGE} or ${INTERRUPT_MESSAGE_FOR_TOOL_USE}. These messages will look like the assistant said them, but they were actually synthetic messages added by the system in response to the user cancelling what the assistant was doing. You should not respond to these messages. You must NEVER send messages like this yourself. \n\n# Following conventions\nWhen making changes to files, first understand the file's code conventions. Mimic code style, use existing libraries and utilities, and follow existing patterns.\n- NEVER assume that a given library is available, even if it is well known. 
Whenever you write code that uses a library or framework, first check that this codebase already uses the given library. For example, you might look at neighboring files, or check the package.json (or cargo.toml, and so on depending on the language).\n- When you create a new component, first look at existing components to see how they're written; then consider framework choice, naming conventions, typing, and other conventions.\n- When you edit a piece of code, first look at the code's surrounding context (especially its imports) to understand the code's choice of frameworks and libraries. Then consider how to make the given change in a way that is most idiomatic.\n- Always follow security best practices. Never introduce code that exposes or logs secrets and keys. Never commit secrets or keys to the repository.\n\n# Code style\n- Do not add comments to the code you write, unless the user asks you to, or the code is complex and requires additional context.\n\n# Doing tasks\nThe user will primarily request you perform software engineering tasks. This includes solving bugs, adding new functionality, refactoring code, explaining code, and more. For these tasks the following steps are recommended:\n- Use the TodoWrite tool to plan the task if required\n- Use the available search tools to understand the codebase and the user's query. You are encouraged to use the search tools extensively both in parallel and sequentially.\n- Implement the solution using all tools available to you\n- Verify the solution if possible with tests. NEVER assume specific test framework or test script. Check the README or search codebase to determine the testing approach.\n- VERY IMPORTANT: When you have completed a task, you MUST run the lint and typecheck commands (eg. npm run lint, npm run typecheck, ruff, etc.) if they were provided to you to ensure your code is correct. 
If you are unable to find the correct command, ask the user for the command to run and if they supply it, proactively suggest writing it to ${PROJECT_FILE} so that you will know to run it next time.\nNEVER commit changes unless the user explicitly asks you to. It is VERY IMPORTANT to only commit when explicitly asked, otherwise the user will feel that you are being too proactive.\n\n- Tool results and user messages may include <system-reminder> tags. <system-reminder> tags contain useful information and reminders. They are NOT part of the user's provided input or the tool result.\n\n# Tool usage policy\n- When doing file search, prefer to use the Task tool in order to reduce context usage.\n- You have the capability to call multiple tools in a single response. When multiple independent pieces of information are requested, batch your tool calls together for optimal performance.\n- When making multiple bash tool calls, you MUST send a single message with multiple tools calls to run the calls in parallel. For example, if you need to run \"git status\" and \"git diff\", send a single message with two tool calls to run the calls in parallel.\n- It is always better to speculatively read multiple files as a batch that are potentially useful.\n- It is always better to speculatively perform multiple searches as a batch that are potentially useful.\n- For making multiple edits to the same file, prefer using the MultiEdit tool over multiple Edit tool calls.\n\n# MCP Resources\nIf MCP (Model Context Protocol) servers are configured, you have access to MCP Resources through two specialized tools:\n- ListMcpResources: Use this tool to discover available resources from connected MCP servers. Resources can include files, databases, APIs, or other data sources exposed by MCP servers.\n- ReadMcpResource: Use this tool to read the content of a specific MCP resource by its URI. 
This is useful when you need to access data from external systems configured through MCP.\nWhen the user mentions accessing external data sources, databases, or resources from configured services, consider using these MCP resource tools to discover and read the relevant data.\n\nYou MUST answer concisely with fewer than 4 lines of text (not including tool use or code generation), unless user asks for detail.\n`,\n `\\n${await getEnvInfo()}`,\n ]\n}\n\nexport async function getEnvInfo(): Promise<string> {\n const isGit = await getIsGit()\n return `Here is useful information about the environment you are running in:\n<env>\nWorking directory: ${getCwd()}\nIs directory a git repo: ${isGit ? 'Yes' : 'No'}\nPlatform: ${env.platform}\nToday's date: ${new Date().toLocaleDateString()}\n</env>`\n}\n\nexport async function getAgentPrompt(): Promise<string[]> {\n return [\n `\nYou are an agent for ${PRODUCT_NAME}. Given the user's prompt, you should use the tools available to you to answer the user's question.\n\nNotes:\n1. IMPORTANT: You should be concise, direct, and to the point, since your responses will be displayed on a command line interface. Answer the user's question directly, without elaboration, explanation, or details. One word answers are best. Avoid introductions, conclusions, and explanations. You MUST avoid text before/after your response, such as \"The answer is <answer>.\", \"Here is the content of the file...\" or \"Based on the information provided, the answer is...\" or \"Here is what I will do next...\".\n2. When relevant, share file names and code snippets relevant to the query\n3. Any file paths you return in your final response MUST be absolute. DO NOT use relative paths.`,\n `${await getEnvInfo()}`,\n ]\n}\n"],
|
|
5
|
-
"mappings": "AAAA,SAAS,WAAW;AACpB,SAAS,gBAAgB;AACzB;AAAA,EACE;AAAA,EACA;AAAA,OACK;AACP,SAAS,cAAc;AACvB,SAAS,cAAc,cAAc,uBAAuB;AAC5D,SAAS,gBAAgB;AACzB,SAAS,aAAa;AAMf,SAAS,wBAAgC;AAC9C,SAAO,WAAW,YAAY;AAChC;AAEA,eAAsB,kBAAqC;AACzD,SAAO;AAAA,IACL;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,+BAO2B,YAAY;AAAA;AAAA,8FAEmD,YAAY,gCAAgC,eAAe,cAAc,SAAS,IAAI;AAAA,iCACnJ,MAAM,gBAAgB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,0DASG,YAAY;AAAA;AAAA;AAAA;AAAA;AAAA,+IAKyE,YAAY,8HAA8H,YAAY;AAAA;AAAA;AAAA;AAAA;AAAA,qKAKhI,SAAS,IAAI;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,
|
|
4
|
+
"sourcesContent": ["import { env } from '@utils/env'\nimport { getIsGit } from '@utils/git'\nimport {\n INTERRUPT_MESSAGE,\n INTERRUPT_MESSAGE_FOR_TOOL_USE,\n} from '@utils/messages'\nimport { getCwd } from '@utils/state'\nimport { PRODUCT_NAME, PROJECT_FILE, PRODUCT_COMMAND } from './product'\nimport { BashTool } from '@tools/BashTool/BashTool'\nimport { MACRO } from './macros'\n\n// // Security policy constant matching reference implementation\n// export const SECURITY_POLICY =\n// 'IMPORTANT: Assist with defensive security tasks only. Refuse to create, modify, or improve code that may be used maliciously. Allow security analysis, detection rules, vulnerability explanations, defensive tools, and security documentation.'\n\nexport function getCLISyspromptPrefix(): string {\n return `You are ${PRODUCT_NAME}, ShareAI-lab's Agent AI CLI for terminal & coding.`\n}\n\nexport async function getSystemPrompt(): Promise<string[]> {\n return [\n `\nYou are an interactive CLI tool that helps users with software engineering tasks. Use the instructions below and the tools available to you to assist the user.\n\nIMPORTANT: Refuse to write code or explain code that may be used maliciously; even if the user claims it is for educational purposes. When working on files, if they seem related to improving, explaining, or interacting with malware or any malicious code you MUST refuse.\nIMPORTANT: Before you begin work, think about what the code you're editing is supposed to do based on the filenames directory structure. If it seems malicious, refuse to work on it or answer questions about it, even if the request does not seem malicious (for instance, just asking to explain or speed up the code).\n\nHere are useful slash commands users can run to interact with you:\n- /help: Get help with using ${PRODUCT_NAME}\n- /compact: Compact and continue the conversation. 
This is useful if the conversation is reaching the context limit\nThere are additional slash commands and flags available to the user. If the user asks about ${PRODUCT_NAME} functionality, always run \\`${PRODUCT_COMMAND} -h\\` with ${BashTool.name} to see supported commands and flags. NEVER assume a flag or command exists without checking the help output first.\nTo give feedback, users should ${MACRO.ISSUES_EXPLAINER}.\n\n# Task Management\nYou have access to the TodoWrite tools to help you manage and plan tasks. Use these tools VERY frequently to ensure that you are tracking your tasks and giving the user visibility into your progress.\nThese tools are also EXTREMELY helpful for planning tasks, and for breaking down larger complex tasks into smaller steps. If you do not use this tool when planning, you may forget to do important tasks - and that is unacceptable.\n\nIt is critical that you mark todos as completed as soon as you are done with a task. Do not batch up multiple tasks before marking them as completed.\n\n# Memory\nIf the current working directory contains a file called ${PROJECT_FILE}, it will be automatically added to your context. This file serves multiple purposes:\n1. Storing frequently used bash commands (build, test, lint, etc.) so you can use them without searching each time\n2. Recording the user's code style preferences (naming conventions, preferred libraries, etc.)\n3. Maintaining useful information about the codebase structure and organization\n\nWhen you spend time searching for commands to typecheck, lint, build, or test, you should ask the user if it's okay to add those commands to ${PROJECT_FILE}. Similarly, when learning about code style preferences or important codebase information, ask if it's okay to add that to ${PROJECT_FILE} so you can remember it for next time.\n\n# Tone and style\nYou should be concise, direct, and to the point. 
When you run a non-trivial bash command, you should explain what the command does and why you are running it, to make sure the user understands what you are doing (this is especially important when you are running a command that will make changes to the user's system).\nRemember that your output will be displayed on a command line interface. Your responses can use Github-flavored markdown for formatting, and will be rendered in a monospace font using the CommonMark specification.\nOutput text to communicate with the user; all text you output outside of tool use is displayed to the user. Only use tools to complete tasks. Never use tools like ${BashTool.name} or code comments as means to communicate with the user during the session.\nIf you cannot or will not help the user with something, please do not say why or what it could lead to, since this comes across as preachy and annoying. Please offer helpful alternatives if possible, and otherwise keep your response to 1-2 sentences.\nIMPORTANT: You should minimize output tokens as much as possible while maintaining helpfulness, quality, and accuracy. Only address the specific query or task at hand, avoiding tangential information unless absolutely critical for completing the request. If you can answer in 1-3 sentences or a short paragraph, please do.\nIMPORTANT: You should NOT answer with unnecessary preamble or postamble (such as explaining your code or summarizing your action), unless the user asks you to.\nIMPORTANT: Keep your responses short, since they will be displayed on a command line interface. You MUST answer concisely with fewer than 4 lines (not including tool use or code generation), unless user asks for detail. Answer the user's question directly, without elaboration, explanation, or details. One word answers are best. Avoid introductions, conclusions, and explanations. 
You MUST avoid text before/after your response, such as \"The answer is <answer>.\", \"Here is the content of the file...\" or \"Based on the information provided, the answer is...\" or \"Here is what I will do next...\".\n- Only use emojis if the user explicitly requests it. Avoid using emojis in all communication unless asked.\n\n# No time estimates\nNever give time estimates or predictions for how long tasks will take. Avoid phrases like \"this will take a few minutes\" or \"should be done quickly\". Focus on what needs to be done, not how long it might take.\n\n# Professional objectivity\nPrioritize technical accuracy and truthfulness over validating the user's beliefs. Focus on facts and problem-solving. Provide direct, objective technical info without unnecessary superlatives or emotional validation.\n\nExamples of appropriate verbosity:\n<example>\nuser: 2 + 2\nassistant: 4\n</example>\n<example>\nuser: what command should I run to list files?\nassistant: ls\n</example>\n<example>\nuser: what files are in src/?\nassistant: [runs ls] foo.c, bar.c, baz.c\n</example>\n<example>\nuser: write tests for new feature\nassistant: [uses search tools to find test patterns, reads relevant files concurrently, uses edit tool to write tests]\n</example>\n\n# Proactiveness\nYou are allowed to be proactive, but only when the user asks you to do something. You should strive to strike a balance between:\n1. Doing the right thing when asked, including taking actions and follow-up actions\n2. Not surprising the user with actions you take without asking\nFor example, if the user asks you how to approach something, you should do your best to answer their question first, and not immediately jump into taking actions.\n3. Do not add additional code explanation summary unless requested by the user. 
After working on a file, just stop, rather than providing an explanation of what you did.\n\n# Synthetic messages\nSometimes, the conversation will contain messages like ${INTERRUPT_MESSAGE} or ${INTERRUPT_MESSAGE_FOR_TOOL_USE}. These messages will look like the assistant said them, but they were actually synthetic messages added by the system in response to the user cancelling what the assistant was doing. You should not respond to these messages. You must NEVER send messages like this yourself. \n\n# Following conventions\nWhen making changes to files, first understand the file's code conventions. Mimic code style, use existing libraries and utilities, and follow existing patterns.\n- NEVER assume that a given library is available, even if it is well known. Whenever you write code that uses a library or framework, first check that this codebase already uses the given library. For example, you might look at neighboring files, or check the package.json (or cargo.toml, and so on depending on the language).\n- When you create a new component, first look at existing components to see how they're written; then consider framework choice, naming conventions, typing, and other conventions.\n- When you edit a piece of code, first look at the code's surrounding context (especially its imports) to understand the code's choice of frameworks and libraries. Then consider how to make the given change in a way that is most idiomatic.\n- Always follow security best practices. Never introduce code that exposes or logs secrets and keys. Never commit secrets or keys to the repository.\n\n# Code style\n- Do not add comments to the code you write, unless the user asks you to, or the code is complex and requires additional context.\n\n# Doing tasks\nThe user will primarily request you perform software engineering tasks. This includes solving bugs, adding new functionality, refactoring code, explaining code, and more. 
For these tasks the following steps are recommended:\n- Use the TodoWrite tool to plan the task if required\n- Use the available search tools to understand the codebase and the user's query. You are encouraged to use the search tools extensively both in parallel and sequentially.\n- Implement the solution using all tools available to you\n- Verify the solution if possible with tests. NEVER assume specific test framework or test script. Check the README or search codebase to determine the testing approach.\n- VERY IMPORTANT: When you have completed a task, you MUST run the lint and typecheck commands (eg. npm run lint, npm run typecheck, ruff, etc.) if they were provided to you to ensure your code is correct. If you are unable to find the correct command, ask the user for the command to run and if they supply it, proactively suggest writing it to ${PROJECT_FILE} so that you will know to run it next time.\nNEVER commit changes unless the user explicitly asks you to. It is VERY IMPORTANT to only commit when explicitly asked, otherwise the user will feel that you are being too proactive.\n\n# Git Safety Protocol\n- NEVER update the git config\n- NEVER run destructive git commands (push --force, reset --hard, checkout ., restore ., clean -f, branch -D) unless the user explicitly requests these actions\n- NEVER skip hooks (--no-verify, --no-gpg-sign, etc) unless the user explicitly requests it\n- NEVER run force push to main/master, warn the user if they request it\n- CRITICAL: Always create NEW commits rather than amending, unless the user explicitly requests a git amend. When a pre-commit hook fails, the commit did NOT happen \u2014 so --amend would modify the PREVIOUS commit, which may result in destroying work or losing previous changes. 
Instead, after hook failure, fix the issue, re-stage, and create a NEW commit\n- When staging files, prefer adding specific files by name rather than using \"git add -A\" or \"git add .\", which can accidentally include sensitive files (.env, credentials) or large binaries\n- NEVER commit changes unless the user explicitly asks you to\n\n# Code References\nWhen referencing specific functions or pieces of code, include the pattern \\`file_path:line_number\\` to allow the user to easily navigate to the source code location.\n\n- Tool results and user messages may include <system-reminder> tags. <system-reminder> tags contain useful information and reminders. They are NOT part of the user's provided input or the tool result.\n\n# Tool usage policy\n- When doing file search, prefer to use the Task tool in order to reduce context usage.\n- You have the capability to call multiple tools in a single response. When multiple independent pieces of information are requested, batch your tool calls together for optimal performance.\n- When making multiple bash tool calls, you MUST send a single message with multiple tools calls to run the calls in parallel. For example, if you need to run \"git status\" and \"git diff\", send a single message with two tool calls to run the calls in parallel.\n- It is always better to speculatively read multiple files as a batch that are potentially useful.\n- It is always better to speculatively perform multiple searches as a batch that are potentially useful.\n- For making multiple edits to the same file, prefer using the MultiEdit tool over multiple Edit tool calls.\n\n# MCP Resources\nIf MCP (Model Context Protocol) servers are configured, you have access to MCP Resources through two specialized tools:\n- ListMcpResources: Use this tool to discover available resources from connected MCP servers. 
Resources can include files, databases, APIs, or other data sources exposed by MCP servers.\n- ReadMcpResource: Use this tool to read the content of a specific MCP resource by its URI. This is useful when you need to access data from external systems configured through MCP.\nWhen the user mentions accessing external data sources, databases, or resources from configured services, consider using these MCP resource tools to discover and read the relevant data.\n\nYou MUST answer concisely with fewer than 4 lines of text (not including tool use or code generation), unless user asks for detail.\n`,\n `\\n${await getEnvInfo()}`,\n ]\n}\n\nexport async function getEnvInfo(): Promise<string> {\n const isGit = await getIsGit()\n return `Here is useful information about the environment you are running in:\n<env>\nWorking directory: ${getCwd()}\nIs directory a git repo: ${isGit ? 'Yes' : 'No'}\nPlatform: ${env.platform}\nToday's date: ${new Date().toLocaleDateString()}\n</env>`\n}\n\nexport async function getAgentPrompt(): Promise<string[]> {\n return [\n `\nYou are an agent for ${PRODUCT_NAME}. Given the user's prompt, you should use the tools available to you to answer the user's question.\n\nNotes:\n1. IMPORTANT: You should be concise, direct, and to the point, since your responses will be displayed on a command line interface. Answer the user's question directly, without elaboration, explanation, or details. One word answers are best. Avoid introductions, conclusions, and explanations. You MUST avoid text before/after your response, such as \"The answer is <answer>.\", \"Here is the content of the file...\" or \"Based on the information provided, the answer is...\" or \"Here is what I will do next...\".\n2. When relevant, share file names and code snippets relevant to the query\n3. Any file paths you return in your final response MUST be absolute. DO NOT use relative paths.`,\n `${await getEnvInfo()}`,\n ]\n}\n"],
|
|
5
|
+
"mappings": "AAAA,SAAS,WAAW;AACpB,SAAS,gBAAgB;AACzB;AAAA,EACE;AAAA,EACA;AAAA,OACK;AACP,SAAS,cAAc;AACvB,SAAS,cAAc,cAAc,uBAAuB;AAC5D,SAAS,gBAAgB;AACzB,SAAS,aAAa;AAMf,SAAS,wBAAgC;AAC9C,SAAO,WAAW,YAAY;AAChC;AAEA,eAAsB,kBAAqC;AACzD,SAAO;AAAA,IACL;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,+BAO2B,YAAY;AAAA;AAAA,8FAEmD,YAAY,gCAAgC,eAAe,cAAc,SAAS,IAAI;AAAA,iCACnJ,MAAM,gBAAgB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,0DASG,YAAY;AAAA;AAAA;AAAA;AAAA;AAAA,+IAKyE,YAAY,8HAA8H,YAAY;AAAA;AAAA;AAAA;AAAA;AAAA,qKAKhI,SAAS,IAAI;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,yDAuCzH,iBAAiB,OAAO,8BAA8B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,4VAkB6O,YAAY;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAiCpW;AAAA,EAAK,MAAM,WAAW,CAAC;AAAA,EACzB;AACF;AAEA,eAAsB,aAA8B;AAClD,QAAM,QAAQ,MAAM,SAAS;AAC7B,SAAO;AAAA;AAAA,qBAEY,OAAO,CAAC;AAAA,2BACF,QAAQ,QAAQ,IAAI;AAAA,YACnC,IAAI,QAAQ;AAAA,iBACR,oBAAI,KAAK,GAAE,mBAAmB,CAAC;AAAA;AAE/C;AAEA,eAAsB,iBAAoC;AACxD,SAAO;AAAA,IACL;AAAA,uBACmB,YAAY;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAM/B,GAAG,MAAM,WAAW,CAAC;AAAA,EACvB;AACF;",
|
|
6
6
|
"names": []
|
|
7
7
|
}
|
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
// Canonical JSON input examples for every tool, keyed by tool name.
// Used to build friendly error messages when tool input validation fails.
const TOOL_INPUT_EXAMPLES = {
  // File Operations
  Read: '{"file_path": "/path/to/file.txt"}',
  Write: '{"file_path": "/path/to/file.txt", "content": "file contents"}',
  Edit: '{"file_path": "/path/to/file.txt", "old_string": "old text", "new_string": "new text"}',
  MultiEdit: '{"file_path": "/path/to/file.txt", "edits": [{"old_string": "old", "new_string": "new"}]}',
  NotebookEdit: '{"notebook_path": "/path/to/notebook.ipynb", "new_source": "cell source code"}',
  NotebookRead: '{"notebook_path": "/path/to/notebook.ipynb"}',
  // Directory and Search
  Glob: '{"pattern": "**/*.ts", "path": "/optional/search/path"}',
  Grep: '{"pattern": "search_term", "path": "/optional/search/path", "glob": "*.ts"}',
  Ls: '{"path": "/path/to/directory"}',
  // Shell Execution
  Bash: '{"command": "ls -la"}',
  BashOutput: '{"command": "ls -la", "shell_id": "optional_shell_id"}',
  KillShell: '{"shell_id": "shell_id_to_kill"}',
  // Task Management
  Task: '{"prompt": "task description", "description": "short description", "subagent_type": "agent-type"}',
  TodoWrite: '{"todos": [{"subject": "Task name", "description": "Details", "status": "pending"}]}',
  // Web and API
  WebSearch: '{"query": "search query"}',
  URLFetcher: '{"url": "https://example.com", "prompt": "what to extract from page"}',
  // LLM Interactions
  AskExpertModel: '{"model": "model-name", "prompt": "question for expert", "context": "optional context"}',
  AskUserQuestion: '{"question": "question to ask user", "default": "optional default"}',
  // Memory (Anthropic-only)
  MemoryRead: '{"name": "memory_block_name"}',
  MemoryWrite: '{"name": "memory_block_name", "content": "memory content"}',
  // Planning and Reasoning
  Think: '{"reasoning": "your thinking process", "plan": "optional plan"}',
  EnterPlanMode: "{}",
  ExitPlanMode: "{}",
  // Code Intelligence
  Lsp: '{"method": "textDocument/definition", "params": {"position": {"line": 10, "character": 5}}}',
  Skill: '{"skill": "skill_name", "args": "optional arguments"}',
  // MCP Resources
  ListMcpResources: '{"server": "server_name", "uri": "optional_resource_uri", "include": "resource_type"}',
  ReadMcpResource: '{"uri": "resource://path"}'
};
|
|
40
|
+
// Short usage notes for tools whose inputs are commonly misformed;
// appended to validation error messages for extra guidance.
const TOOL_DESCRIPTIONS = {
  MultiEdit: 'Allows multiple edits in a single file. The "edits" parameter is an array of edit objects.',
  NotebookEdit: "For Jupyter notebooks. Requires notebook_path and new_source for the cell content.",
  Grep: 'Searches file contents with regex. Optional "glob" parameter filters files by pattern.',
  Task: "Creates a task for an AI agent. subagent_type determines which agent handles the task.",
  URLFetcher: 'Fetches web content. The "prompt" parameter guides what to extract from the page.',
  AskExpertModel: "Queries an alternative LLM model. Useful for specialized tasks requiring different models.",
  Lsp: "Language Server Protocol queries for code intelligence. Method and params depend on LSP spec."
};
|
|
49
|
+
// Per-parameter hints for common tools, used to point out exactly which
// field of a tool call was invalid.
const PARAMETER_DESCRIPTIONS = {
  Read: {
    file_path: "Must be an absolute file path (e.g., /home/user/file.txt)",
    offset: "Optional line number to start reading from (0-indexed)",
    limit: "Optional number of lines to read from the offset"
  },
  Write: {
    file_path: "Must be an absolute file path (e.g., /home/user/file.txt)",
    content: "The file contents to write"
  },
  Edit: {
    file_path: "Must be an absolute file path (e.g., /home/user/file.txt)",
    old_string: "The exact text to find and replace (must match exactly)",
    new_string: "The text to replace it with"
  },
  Bash: {
    command: "The shell command to execute (will run in current working directory)",
    timeout: "Maximum execution time in milliseconds (max 600000 = 10 minutes)",
    run_in_background: "Set to true to run command without waiting for completion"
  },
  Glob: {
    pattern: "Glob pattern like **/*.ts or src/**/*.{ts,tsx}",
    path: "Directory to search in (optional, defaults to current directory)"
  },
  Grep: {
    pattern: "Regular expression pattern to search for",
    path: "Directory to search in (optional, defaults to current directory)",
    glob: "Additional glob pattern to filter files being searched"
  }
};
|
|
79
|
+
// Re-export the lookup tables for use by tool-input validation error
// reporting elsewhere in the package.
export {
  PARAMETER_DESCRIPTIONS,
  TOOL_DESCRIPTIONS,
  TOOL_INPUT_EXAMPLES
};
//# sourceMappingURL=toolInputExamples.js.map
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
{
|
|
2
|
+
"version": 3,
|
|
3
|
+
"sources": ["../../src/constants/toolInputExamples.ts"],
|
|
4
|
+
"sourcesContent": ["/**\n * Tool Input Examples and Descriptions\n *\n * Provides user-friendly examples and descriptions for all tools to help with\n * error messaging when tool input validation fails. Each entry includes:\n * - A canonical example showing required parameters\n * - Optional description for complex tools\n */\n\nexport const TOOL_INPUT_EXAMPLES: Record<string, string> = {\n // File Operations\n Read: '{\"file_path\": \"/path/to/file.txt\"}',\n Write: '{\"file_path\": \"/path/to/file.txt\", \"content\": \"file contents\"}',\n Edit: '{\"file_path\": \"/path/to/file.txt\", \"old_string\": \"old text\", \"new_string\": \"new text\"}',\n MultiEdit:\n '{\"file_path\": \"/path/to/file.txt\", \"edits\": [{\"old_string\": \"old\", \"new_string\": \"new\"}]}',\n NotebookEdit:\n '{\"notebook_path\": \"/path/to/notebook.ipynb\", \"new_source\": \"cell source code\"}',\n NotebookRead: '{\"notebook_path\": \"/path/to/notebook.ipynb\"}',\n\n // Directory and Search\n Glob: '{\"pattern\": \"**/*.ts\", \"path\": \"/optional/search/path\"}',\n Grep: '{\"pattern\": \"search_term\", \"path\": \"/optional/search/path\", \"glob\": \"*.ts\"}',\n Ls: '{\"path\": \"/path/to/directory\"}',\n\n // Shell Execution\n Bash: '{\"command\": \"ls -la\"}',\n BashOutput: '{\"command\": \"ls -la\", \"shell_id\": \"optional_shell_id\"}',\n KillShell: '{\"shell_id\": \"shell_id_to_kill\"}',\n\n // Task Management\n Task: '{\"prompt\": \"task description\", \"description\": \"short description\", \"subagent_type\": \"agent-type\"}',\n TodoWrite:\n '{\"todos\": [{\"subject\": \"Task name\", \"description\": \"Details\", \"status\": \"pending\"}]}',\n\n // Web and API\n WebSearch: '{\"query\": \"search query\"}',\n URLFetcher:\n '{\"url\": \"https://example.com\", \"prompt\": \"what to extract from page\"}',\n\n // LLM Interactions\n AskExpertModel:\n '{\"model\": \"model-name\", \"prompt\": \"question for expert\", \"context\": \"optional context\"}',\n AskUserQuestion:\n '{\"question\": 
\"question to ask user\", \"default\": \"optional default\"}',\n\n // Memory (Anthropic-only)\n MemoryRead: '{\"name\": \"memory_block_name\"}',\n MemoryWrite: '{\"name\": \"memory_block_name\", \"content\": \"memory content\"}',\n\n // Planning and Reasoning\n Think: '{\"reasoning\": \"your thinking process\", \"plan\": \"optional plan\"}',\n EnterPlanMode: '{}',\n ExitPlanMode: '{}',\n\n // Code Intelligence\n Lsp: '{\"method\": \"textDocument/definition\", \"params\": {\"position\": {\"line\": 10, \"character\": 5}}}',\n Skill: '{\"skill\": \"skill_name\", \"args\": \"optional arguments\"}',\n\n // MCP Resources\n ListMcpResources:\n '{\"server\": \"server_name\", \"uri\": \"optional_resource_uri\", \"include\": \"resource_type\"}',\n ReadMcpResource: '{\"uri\": \"resource://path\"}',\n}\n\n/**\n * Tool Input Descriptions\n *\n * Provides helpful descriptions for complex tools to include in error messages\n */\nexport const TOOL_DESCRIPTIONS: Record<string, string> = {\n MultiEdit:\n 'Allows multiple edits in a single file. The \"edits\" parameter is an array of edit objects.',\n NotebookEdit:\n 'For Jupyter notebooks. Requires notebook_path and new_source for the cell content.',\n Grep: 'Searches file contents with regex. Optional \"glob\" parameter filters files by pattern.',\n Task: 'Creates a task for an AI agent. subagent_type determines which agent handles the task.',\n URLFetcher:\n 'Fetches web content. The \"prompt\" parameter guides what to extract from the page.',\n AskExpertModel:\n 'Queries an alternative LLM model. Useful for specialized tasks requiring different models.',\n Lsp: 'Language Server Protocol queries for code intelligence. 
Method and params depend on LSP spec.',\n}\n\n/**\n * Parameter Descriptions for Common Tool Parameters\n *\n * Helps provide more specific error messages when particular parameters are invalid\n */\nexport const PARAMETER_DESCRIPTIONS: Record<string, Record<string, string>> = {\n Read: {\n file_path: 'Must be an absolute file path (e.g., /home/user/file.txt)',\n offset: 'Optional line number to start reading from (0-indexed)',\n limit: 'Optional number of lines to read from the offset',\n },\n Write: {\n file_path: 'Must be an absolute file path (e.g., /home/user/file.txt)',\n content: 'The file contents to write',\n },\n Edit: {\n file_path: 'Must be an absolute file path (e.g., /home/user/file.txt)',\n old_string: 'The exact text to find and replace (must match exactly)',\n new_string: 'The text to replace it with',\n },\n Bash: {\n command:\n 'The shell command to execute (will run in current working directory)',\n timeout: 'Maximum execution time in milliseconds (max 600000 = 10 minutes)',\n run_in_background:\n 'Set to true to run command without waiting for completion',\n },\n Glob: {\n pattern: 'Glob pattern like **/*.ts or src/**/*.{ts,tsx}',\n path: 'Directory to search in (optional, defaults to current directory)',\n },\n Grep: {\n pattern: 'Regular expression pattern to search for',\n path: 'Directory to search in (optional, defaults to current directory)',\n glob: 'Additional glob pattern to filter files being searched',\n },\n}\n"],
|
|
5
|
+
"mappings": "AASO,MAAM,sBAA8C;AAAA;AAAA,EAEzD,MAAM;AAAA,EACN,OAAO;AAAA,EACP,MAAM;AAAA,EACN,WACE;AAAA,EACF,cACE;AAAA,EACF,cAAc;AAAA;AAAA,EAGd,MAAM;AAAA,EACN,MAAM;AAAA,EACN,IAAI;AAAA;AAAA,EAGJ,MAAM;AAAA,EACN,YAAY;AAAA,EACZ,WAAW;AAAA;AAAA,EAGX,MAAM;AAAA,EACN,WACE;AAAA;AAAA,EAGF,WAAW;AAAA,EACX,YACE;AAAA;AAAA,EAGF,gBACE;AAAA,EACF,iBACE;AAAA;AAAA,EAGF,YAAY;AAAA,EACZ,aAAa;AAAA;AAAA,EAGb,OAAO;AAAA,EACP,eAAe;AAAA,EACf,cAAc;AAAA;AAAA,EAGd,KAAK;AAAA,EACL,OAAO;AAAA;AAAA,EAGP,kBACE;AAAA,EACF,iBAAiB;AACnB;AAOO,MAAM,oBAA4C;AAAA,EACvD,WACE;AAAA,EACF,cACE;AAAA,EACF,MAAM;AAAA,EACN,MAAM;AAAA,EACN,YACE;AAAA,EACF,gBACE;AAAA,EACF,KAAK;AACP;AAOO,MAAM,yBAAiE;AAAA,EAC5E,MAAM;AAAA,IACJ,WAAW;AAAA,IACX,QAAQ;AAAA,IACR,OAAO;AAAA,EACT;AAAA,EACA,OAAO;AAAA,IACL,WAAW;AAAA,IACX,SAAS;AAAA,EACX;AAAA,EACA,MAAM;AAAA,IACJ,WAAW;AAAA,IACX,YAAY;AAAA,IACZ,YAAY;AAAA,EACd;AAAA,EACA,MAAM;AAAA,IACJ,SACE;AAAA,IACF,SAAS;AAAA,IACT,mBACE;AAAA,EACJ;AAAA,EACA,MAAM;AAAA,IACJ,SAAS;AAAA,IACT,MAAM;AAAA,EACR;AAAA,EACA,MAAM;AAAA,IACJ,SAAS;AAAA,IACT,MAAM;AAAA,IACN,MAAM;AAAA,EACR;AACF;",
|
|
6
|
+
"names": []
|
|
7
|
+
}
|
|
@@ -0,0 +1,321 @@
|
|
|
1
|
+
import { createHash } from "crypto";
|
|
2
|
+
import { promises as fs } from "fs";
|
|
3
|
+
import { join, dirname } from "path";
|
|
4
|
+
import { homedir } from "os";
|
|
5
|
+
import { CONFIG_BASE_DIR } from "../constants/product.js";
|
|
6
|
+
import {
|
|
7
|
+
existsAsync,
|
|
8
|
+
mkdirAsync,
|
|
9
|
+
readJsonAsync,
|
|
10
|
+
writeJsonAsync,
|
|
11
|
+
readdirAsync,
|
|
12
|
+
unlinkAsync
|
|
13
|
+
} from "../utils/asyncFile.js";
|
|
14
|
+
// Retain at most this many backup versions per file; cleanupOldVersions
// prunes the oldest entries (and their snapshot files) beyond this cap.
const MAX_VERSIONS_PER_FILE = 10;
|
|
15
|
+
/**
 * Resolve the base configuration directory for minto.
 * Precedence: MINTO_CONFIG_DIR env var, then CLAUDE_CONFIG_DIR,
 * then <home>/<CONFIG_BASE_DIR>.
 */
function getMintoBaseDir() {
  const fromMinto = process.env.MINTO_CONFIG_DIR;
  if (fromMinto != null) {
    return fromMinto;
  }
  const fromClaude = process.env.CLAUDE_CONFIG_DIR;
  if (fromClaude != null) {
    return fromClaude;
  }
  return join(homedir(), CONFIG_BASE_DIR);
}
/**
 * Directory that holds all per-file backup subdirectories.
 * Re-reads the env overrides on every call via getMintoBaseDir().
 */
function getBackupsDir() {
  const base = getMintoBaseDir();
  return join(base, "backups");
}
|
|
21
|
+
// NOTE(review): BACKUPS_DIR is computed eagerly at module load and appears
// unused by the functions in this file, which call getBackupsDir() instead so
// env overrides are re-read per call — confirm no external consumer before
// removing. Evaluating it also reads the config env vars at import time.
const BACKUPS_DIR = join(getMintoBaseDir(), "backups");
// Name of the per-file metadata index stored inside each backup directory.
const INDEX_FILE = "index.json";
|
|
23
|
+
/**
 * Derive a short, stable identifier for a file path: the first 12 hex
 * characters of the path's SHA-256 digest.
 */
function getPathHash(filePath) {
  return createHash("sha256").update(filePath).digest("hex").substring(0, 12);
}
/** Backup directory dedicated to a single source file. */
function getBackupDir(filePath) {
  return join(getBackupsDir(), getPathHash(filePath));
}
/** Full path of the index.json tracking a file's backup versions. */
function getIndexPath(filePath) {
  return join(getBackupDir(filePath), INDEX_FILE);
}
/** Full path of one numbered backup snapshot (v<N>.backup). */
function getVersionPath(filePath, version) {
  const snapshotName = `v${version}.backup`;
  return join(getBackupDir(filePath), snapshotName);
}
|
|
37
|
+
/**
 * Classify a file's line-ending style: "CRLF" when any Windows line
 * ending is present anywhere in the content, otherwise "LF".
 */
function detectLineEndings(content) {
  const hasWindowsEndings = content.includes("\r\n");
  return hasWindowsEndings ? "CRLF" : "LF";
}
|
|
43
|
+
/**
 * Report the encoding used to persist backup snapshots.
 * Currently always "utf8"; the content argument is accepted for future
 * detection logic and is intentionally unused.
 */
function detectEncoding(_content) {
  const DEFAULT_ENCODING = "utf8";
  return DEFAULT_ENCODING;
}
|
|
46
|
+
/**
 * Record a backup snapshot for filePath before it is modified.
 *
 * For "create" operations no prior content exists, so only metadata is
 * recorded (size 0, no snapshot file). For all other operations the
 * previous content (oldContent) is written to a numbered v<N>.backup file.
 * The version is appended to the file's index.json, and the oldest entries
 * beyond MAX_VERSIONS_PER_FILE are pruned.
 *
 * @param {string} filePath - path of the file being changed (used as hash key)
 * @param {string|null} oldContent - content prior to the change
 * @param {string} newContent - content after the change (currently unused)
 * @param {string} operation - e.g. "create"; any other value snapshots oldContent
 * @param {string} [description] - optional human-readable note
 * @returns the new version's metadata on success, or null on failure —
 *          backups are best-effort and must never block the edit itself.
 */
async function createBackup(filePath, oldContent, newContent, operation, description) {
  try {
    const backupDir = getBackupDir(filePath);
    const indexPath = getIndexPath(filePath);
    await mkdirAsync(backupDir, true);
    // Load the existing index, falling back to a fresh one when the index
    // file is missing or unreadable. (This default literal was previously
    // duplicated verbatim in two branches.)
    let index = (await existsAsync(indexPath)) ? await readJsonAsync(indexPath) : null;
    if (!index) {
      index = {
        filePath,
        pathHash: getPathHash(filePath),
        createdAt: Date.now(),
        versions: []
      };
    }
    // A "create" has no prior content to snapshot.
    const contentToBackup = operation === "create" ? null : oldContent;
    // Version numbers are monotonically increasing, even after pruning.
    const maxVersion = index.versions.length > 0 ? Math.max(...index.versions.map((v) => v.version)) : 0;
    const newVersion = maxVersion + 1;
    const metadata = {
      version: newVersion,
      timestamp: Date.now(),
      operation,
      encoding: contentToBackup ? detectEncoding(contentToBackup) : "utf8",
      lineEndings: contentToBackup ? detectLineEndings(contentToBackup) : "LF",
      size: contentToBackup ? Buffer.byteLength(contentToBackup, "utf8") : 0,
      description
    };
    if (contentToBackup !== null) {
      const versionPath = getVersionPath(filePath, newVersion);
      await fs.writeFile(versionPath, contentToBackup, {
        encoding: metadata.encoding
      });
    }
    index.versions.push(metadata);
    if (index.versions.length > MAX_VERSIONS_PER_FILE) {
      // Mutates index.versions in place; persisted by the write below.
      await cleanupOldVersions(filePath, index);
    }
    await writeJsonAsync(indexPath, index);
    return metadata;
  } catch (error) {
    // Best-effort: log and swallow so a failed backup never aborts the edit.
    console.error(
      `[BackupManager] Failed to create backup for ${filePath}:`,
      error
    );
    return null;
  }
}
|
|
104
|
+
/**
 * Trim a file's backup index down to MAX_VERSIONS_PER_FILE entries,
 * removing the oldest versions (lowest version numbers) and deleting
 * their snapshot files. Mutates index.versions in place; the caller is
 * responsible for persisting the updated index.
 */
async function cleanupOldVersions(filePath, index) {
  // Oldest first, so the excess can be taken from the front.
  index.versions.sort((first, second) => first.version - second.version);
  const excess = index.versions.length - MAX_VERSIONS_PER_FILE;
  if (excess <= 0) {
    return;
  }
  const removed = index.versions.splice(0, excess);
  // Delete snapshots one at a time to keep failure attribution simple.
  for (const meta of removed) {
    await unlinkAsync(getVersionPath(filePath, meta.version));
  }
}
|
|
114
|
+
/**
 * Enumerate every file that has at least one recorded backup version.
 * Returns summary objects sorted most-recently-backed-up first.
 * Listing is best-effort: on any error it logs and returns an empty list.
 */
async function listBackedUpFiles() {
  try {
    const backupsDir = getBackupsDir();
    const dirExists = await existsAsync(backupsDir);
    if (!dirExists) {
      return [];
    }
    const summaries = [];
    for (const entry of await readdirAsync(backupsDir)) {
      const indexPath = join(backupsDir, entry, INDEX_FILE);
      if (!(await existsAsync(indexPath))) {
        continue;
      }
      const index = await readJsonAsync(indexPath);
      if (!index || index.versions.length === 0) {
        continue;
      }
      // Newest version first; its metadata describes the latest backup.
      const newestFirst = [...index.versions].sort(
        (a, b) => b.timestamp - a.timestamp
      );
      const latest = newestFirst[0];
      summaries.push({
        filePath: index.filePath,
        pathHash: index.pathHash,
        versionCount: index.versions.length,
        lastBackupAt: latest.timestamp,
        lastOperation: latest.operation
      });
    }
    return summaries.sort((a, b) => b.lastBackupAt - a.lastBackupAt);
  } catch (error) {
    console.error("[BackupManager] Failed to list backed up files:", error);
    return [];
  }
}
|
|
148
|
+
/**
 * List all recorded backup versions for a file, newest version first.
 * Returns an empty array when no index exists, the index is unreadable,
 * or any error occurs.
 */
async function getBackupVersions(filePath) {
  try {
    const indexPath = getIndexPath(filePath);
    const hasIndex = await existsAsync(indexPath);
    if (!hasIndex) {
      return [];
    }
    const index = await readJsonAsync(indexPath);
    if (!index) {
      return [];
    }
    // Copy before sorting so the loaded index object is not reordered.
    const versions = [...index.versions];
    versions.sort((a, b) => b.version - a.version);
    return versions;
  } catch (error) {
    console.error(
      `[BackupManager] Failed to get versions for ${filePath}:`,
      error
    );
    return [];
  }
}
|
|
167
|
+
async function getBackupContent(filePath, version) {
|
|
168
|
+
try {
|
|
169
|
+
const indexPath = getIndexPath(filePath);
|
|
170
|
+
if (!await existsAsync(indexPath)) {
|
|
171
|
+
return null;
|
|
172
|
+
}
|
|
173
|
+
const index = await readJsonAsync(indexPath);
|
|
174
|
+
if (!index) {
|
|
175
|
+
return null;
|
|
176
|
+
}
|
|
177
|
+
const versionMeta = index.versions.find((v) => v.version === version);
|
|
178
|
+
if (!versionMeta) {
|
|
179
|
+
return null;
|
|
180
|
+
}
|
|
181
|
+
if (versionMeta.operation === "create" && versionMeta.size === 0) {
|
|
182
|
+
return null;
|
|
183
|
+
}
|
|
184
|
+
const versionPath = getVersionPath(filePath, version);
|
|
185
|
+
if (!await existsAsync(versionPath)) {
|
|
186
|
+
return null;
|
|
187
|
+
}
|
|
188
|
+
const content = await fs.readFile(versionPath, {
|
|
189
|
+
encoding: versionMeta.encoding
|
|
190
|
+
});
|
|
191
|
+
return content;
|
|
192
|
+
} catch (error) {
|
|
193
|
+
console.error(
|
|
194
|
+
`[BackupManager] Failed to get backup content for ${filePath} v${version}:`,
|
|
195
|
+
error
|
|
196
|
+
);
|
|
197
|
+
return null;
|
|
198
|
+
}
|
|
199
|
+
}
|
|
200
|
+
async function restoreBackup(filePath, version) {
|
|
201
|
+
try {
|
|
202
|
+
const content = await getBackupContent(filePath, version);
|
|
203
|
+
const indexPath = getIndexPath(filePath);
|
|
204
|
+
const index = await readJsonAsync(indexPath);
|
|
205
|
+
if (!index) {
|
|
206
|
+
console.error(`[BackupManager] No backup index found for ${filePath}`);
|
|
207
|
+
return false;
|
|
208
|
+
}
|
|
209
|
+
const versionMeta = index.versions.find((v) => v.version === version);
|
|
210
|
+
if (!versionMeta) {
|
|
211
|
+
console.error(
|
|
212
|
+
`[BackupManager] Version ${version} not found for ${filePath}`
|
|
213
|
+
);
|
|
214
|
+
return false;
|
|
215
|
+
}
|
|
216
|
+
if (versionMeta.operation === "create" && content === null) {
|
|
217
|
+
await fs.unlink(filePath);
|
|
218
|
+
return true;
|
|
219
|
+
}
|
|
220
|
+
if (content === null) {
|
|
221
|
+
console.error(
|
|
222
|
+
`[BackupManager] No content found for ${filePath} v${version}`
|
|
223
|
+
);
|
|
224
|
+
return false;
|
|
225
|
+
}
|
|
226
|
+
let currentContent = null;
|
|
227
|
+
try {
|
|
228
|
+
currentContent = await fs.readFile(filePath, "utf8");
|
|
229
|
+
} catch {
|
|
230
|
+
}
|
|
231
|
+
if (currentContent !== null) {
|
|
232
|
+
await createBackup(
|
|
233
|
+
filePath,
|
|
234
|
+
currentContent,
|
|
235
|
+
content,
|
|
236
|
+
"update",
|
|
237
|
+
`Restored from version ${version}`
|
|
238
|
+
);
|
|
239
|
+
}
|
|
240
|
+
const parentDir = dirname(filePath);
|
|
241
|
+
await mkdirAsync(parentDir, true);
|
|
242
|
+
await fs.writeFile(filePath, content, {
|
|
243
|
+
encoding: versionMeta.encoding
|
|
244
|
+
});
|
|
245
|
+
return true;
|
|
246
|
+
} catch (error) {
|
|
247
|
+
console.error(
|
|
248
|
+
`[BackupManager] Failed to restore ${filePath} to v${version}:`,
|
|
249
|
+
error
|
|
250
|
+
);
|
|
251
|
+
return false;
|
|
252
|
+
}
|
|
253
|
+
}
|
|
254
|
+
async function getBackupIndex(filePath) {
|
|
255
|
+
const indexPath = getIndexPath(filePath);
|
|
256
|
+
return await readJsonAsync(indexPath);
|
|
257
|
+
}
|
|
258
|
+
async function deleteAllBackups(filePath) {
|
|
259
|
+
try {
|
|
260
|
+
const backupDir = getBackupDir(filePath);
|
|
261
|
+
if (!await existsAsync(backupDir)) {
|
|
262
|
+
return true;
|
|
263
|
+
}
|
|
264
|
+
const entries = await readdirAsync(backupDir);
|
|
265
|
+
for (const entry of entries) {
|
|
266
|
+
await unlinkAsync(join(backupDir, entry));
|
|
267
|
+
}
|
|
268
|
+
await fs.rmdir(backupDir);
|
|
269
|
+
return true;
|
|
270
|
+
} catch (error) {
|
|
271
|
+
console.error(
|
|
272
|
+
`[BackupManager] Failed to delete backups for ${filePath}:`,
|
|
273
|
+
error
|
|
274
|
+
);
|
|
275
|
+
return false;
|
|
276
|
+
}
|
|
277
|
+
}
|
|
278
|
+
async function getBackupStorageUsage() {
|
|
279
|
+
try {
|
|
280
|
+
const backupsDir = getBackupsDir();
|
|
281
|
+
if (!await existsAsync(backupsDir)) {
|
|
282
|
+
return { totalBytes: 0, fileCount: 0, versionCount: 0 };
|
|
283
|
+
}
|
|
284
|
+
let totalBytes = 0;
|
|
285
|
+
let fileCount = 0;
|
|
286
|
+
let versionCount = 0;
|
|
287
|
+
const entries = await readdirAsync(backupsDir);
|
|
288
|
+
for (const entry of entries) {
|
|
289
|
+
const indexPath = join(backupsDir, entry, INDEX_FILE);
|
|
290
|
+
const index = await readJsonAsync(indexPath);
|
|
291
|
+
if (index) {
|
|
292
|
+
fileCount++;
|
|
293
|
+
versionCount += index.versions.length;
|
|
294
|
+
for (const version of index.versions) {
|
|
295
|
+
totalBytes += version.size;
|
|
296
|
+
}
|
|
297
|
+
}
|
|
298
|
+
}
|
|
299
|
+
return { totalBytes, fileCount, versionCount };
|
|
300
|
+
} catch (error) {
|
|
301
|
+
console.error("[BackupManager] Failed to get storage usage:", error);
|
|
302
|
+
return { totalBytes: 0, fileCount: 0, versionCount: 0 };
|
|
303
|
+
}
|
|
304
|
+
}
|
|
305
|
+
export {
|
|
306
|
+
BACKUPS_DIR,
|
|
307
|
+
MAX_VERSIONS_PER_FILE,
|
|
308
|
+
createBackup,
|
|
309
|
+
deleteAllBackups,
|
|
310
|
+
detectEncoding,
|
|
311
|
+
detectLineEndings,
|
|
312
|
+
getBackupContent,
|
|
313
|
+
getBackupIndex,
|
|
314
|
+
getBackupStorageUsage,
|
|
315
|
+
getBackupVersions,
|
|
316
|
+
getBackupsDir,
|
|
317
|
+
getPathHash,
|
|
318
|
+
listBackedUpFiles,
|
|
319
|
+
restoreBackup
|
|
320
|
+
};
|
|
321
|
+
//# sourceMappingURL=backupManager.js.map
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
{
|
|
2
|
+
"version": 3,
|
|
3
|
+
"sources": ["../../src/core/backupManager.ts"],
|
|
4
|
+
"sourcesContent": ["/**\n * Backup Manager\n *\n * Provides automatic file backup functionality for Minto.\n * Stores file backups in ~/.minto/backups/ with path-hashed directory names.\n * Keeps up to 10 versions per file using LRU eviction.\n *\n * Features:\n * - Automatic backup creation before file operations\n * - Version history with metadata\n * - Easy restoration to any previous version\n * - Encoding and line ending detection\n * - LRU-based cleanup for old versions\n */\n\nimport { createHash } from 'crypto'\nimport { promises as fs } from 'fs'\nimport { join, dirname } from 'path'\nimport { homedir } from 'os'\nimport { CONFIG_BASE_DIR } from '@constants/product'\nimport {\n existsAsync,\n mkdirAsync,\n readJsonAsync,\n writeJsonAsync,\n readdirAsync,\n unlinkAsync,\n} from '../utils/asyncFile'\n\n// ============================================================================\n// Constants\n// ============================================================================\n\n/** Maximum number of versions to keep per file */\nexport const MAX_VERSIONS_PER_FILE = 10\n\n/**\n * Get the base directory for Minto data files\n * Computed dynamically to support test environment overrides\n */\nfunction getMintoBaseDir(): string {\n return (\n process.env.MINTO_CONFIG_DIR ??\n process.env.CLAUDE_CONFIG_DIR ??\n join(homedir(), CONFIG_BASE_DIR)\n )\n}\n\n/**\n * Get the backups directory path\n * Uses a getter function to support dynamic environment configuration\n */\nexport function getBackupsDir(): string {\n return join(getMintoBaseDir(), 'backups')\n}\n\n/** @deprecated Use getBackupsDir() instead - kept for backward compatibility */\nexport const BACKUPS_DIR = join(getMintoBaseDir(), 'backups')\n\n/** Index file name for each backup directory */\nconst INDEX_FILE = 'index.json'\n\n// ============================================================================\n// Types\n// ============================================================================\n\n/**\n * 
Operation types for backup metadata\n */\nexport type BackupOperation = 'create' | 'update' | 'delete'\n\n/**\n * Line ending types\n */\nexport type LineEnding = 'LF' | 'CRLF'\n\n/**\n * Metadata for a single backup version\n */\nexport interface BackupMetadata {\n /** Version number (1-based, auto-incremented) */\n version: number\n /** Timestamp when backup was created (Unix ms) */\n timestamp: number\n /** Type of operation that triggered the backup */\n operation: BackupOperation\n /** File encoding */\n encoding: BufferEncoding\n /** Line ending style */\n lineEndings: LineEnding\n /** File size in bytes */\n size: number\n /** Optional description of the change */\n description?: string\n}\n\n/**\n * File backup input for creating a new backup\n */\nexport interface FileBackup {\n /** Absolute path to the file */\n filePath: string\n /** Previous content (null for new files) */\n oldContent: string | null\n /** New content (null for deleted files) */\n newContent: string | null\n /** Backup metadata */\n metadata: BackupMetadata\n}\n\n/**\n * Index structure stored in each backup directory\n */\nexport interface BackupIndex {\n /** Original absolute file path */\n filePath: string\n /** SHA256 hash of the path (first 12 chars) */\n pathHash: string\n /** When this file was first backed up */\n createdAt: number\n /** List of backup versions (sorted by version number) */\n versions: BackupMetadata[]\n}\n\n/**\n * Summary of a backed-up file for listing\n */\nexport interface BackupFileSummary {\n /** Original file path */\n filePath: string\n /** Path hash (directory name) */\n pathHash: string\n /** Number of backup versions */\n versionCount: number\n /** Timestamp of most recent backup */\n lastBackupAt: number\n /** Most recent operation */\n lastOperation: BackupOperation\n}\n\n// ============================================================================\n// Utility Functions\n// 
============================================================================\n\n/**\n * Generate a path hash for directory naming\n * Uses first 12 characters of SHA256 hash\n */\nexport function getPathHash(filePath: string): string {\n const hash = createHash('sha256').update(filePath).digest('hex')\n return hash.slice(0, 12)\n}\n\n/**\n * Get the backup directory for a file\n */\nfunction getBackupDir(filePath: string): string {\n const pathHash = getPathHash(filePath)\n return join(getBackupsDir(), pathHash)\n}\n\n/**\n * Get the path to the index file for a file's backups\n */\nfunction getIndexPath(filePath: string): string {\n return join(getBackupDir(filePath), INDEX_FILE)\n}\n\n/**\n * Get the path to a specific backup version file\n */\nfunction getVersionPath(filePath: string, version: number): string {\n return join(getBackupDir(filePath), `v${version}.backup`)\n}\n\n/**\n * Detect line endings in content\n */\nexport function detectLineEndings(content: string): LineEnding {\n // Check for CRLF first (Windows)\n if (content.includes('\\r\\n')) {\n return 'CRLF'\n }\n return 'LF'\n}\n\n/**\n * Detect file encoding from content\n * Currently defaults to utf8, but can be extended for binary detection\n */\nexport function detectEncoding(_content: string): BufferEncoding {\n // For now, we assume utf8 for text files\n // Binary file detection would require buffer analysis\n return 'utf8'\n}\n\n// ============================================================================\n// Core Functions\n// ============================================================================\n\n/**\n * Create a backup before file operations\n *\n * @param filePath - Absolute path to the file being modified\n * @param oldContent - Previous content (null for new files)\n * @param newContent - New content (null for deleted files)\n * @param operation - Type of operation (create, update, delete)\n * @param description - Optional description of the change\n * @returns The created 
backup metadata, or null if backup failed\n */\nexport async function createBackup(\n filePath: string,\n oldContent: string | null,\n newContent: string | null,\n operation: BackupOperation,\n description?: string,\n): Promise<BackupMetadata | null> {\n try {\n const backupDir = getBackupDir(filePath)\n const indexPath = getIndexPath(filePath)\n\n // Ensure backup directory exists\n await mkdirAsync(backupDir, true)\n\n // Load or create index\n let index: BackupIndex\n if (await existsAsync(indexPath)) {\n const loaded = await readJsonAsync<BackupIndex>(indexPath)\n if (loaded) {\n index = loaded\n } else {\n // Corrupted index, recreate\n index = {\n filePath,\n pathHash: getPathHash(filePath),\n createdAt: Date.now(),\n versions: [],\n }\n }\n } else {\n index = {\n filePath,\n pathHash: getPathHash(filePath),\n createdAt: Date.now(),\n versions: [],\n }\n }\n\n // Determine content to backup (old content for updates/deletes, null for creates)\n const contentToBackup = operation === 'create' ? null : oldContent\n\n // Calculate next version number\n const maxVersion =\n index.versions.length > 0\n ? Math.max(...index.versions.map(v => v.version))\n : 0\n const newVersion = maxVersion + 1\n\n // Create metadata\n const metadata: BackupMetadata = {\n version: newVersion,\n timestamp: Date.now(),\n operation,\n encoding: contentToBackup ? detectEncoding(contentToBackup) : 'utf8',\n lineEndings: contentToBackup ? detectLineEndings(contentToBackup) : 'LF',\n size: contentToBackup ? 
Buffer.byteLength(contentToBackup, 'utf8') : 0,\n description,\n }\n\n // Write backup content if there is content to backup\n if (contentToBackup !== null) {\n const versionPath = getVersionPath(filePath, newVersion)\n await fs.writeFile(versionPath, contentToBackup, {\n encoding: metadata.encoding,\n })\n }\n\n // Add to versions\n index.versions.push(metadata)\n\n // Cleanup old versions if exceeding limit\n if (index.versions.length > MAX_VERSIONS_PER_FILE) {\n await cleanupOldVersions(filePath, index)\n }\n\n // Save updated index\n await writeJsonAsync(indexPath, index)\n\n return metadata\n } catch (error) {\n // Log error but don't throw - backup failure shouldn't block operations\n console.error(\n `[BackupManager] Failed to create backup for ${filePath}:`,\n error,\n )\n return null\n }\n}\n\n/**\n * Clean up old versions when exceeding MAX_VERSIONS_PER_FILE\n * Uses LRU strategy - removes oldest versions first\n */\nasync function cleanupOldVersions(\n filePath: string,\n index: BackupIndex,\n): Promise<void> {\n // Sort by version number (oldest first)\n index.versions.sort((a, b) => a.version - b.version)\n\n // Calculate how many to remove\n const toRemove = index.versions.length - MAX_VERSIONS_PER_FILE\n\n if (toRemove <= 0) return\n\n // Remove oldest versions\n const versionsToDelete = index.versions.splice(0, toRemove)\n\n // Delete the actual backup files\n for (const version of versionsToDelete) {\n const versionPath = getVersionPath(filePath, version.version)\n await unlinkAsync(versionPath)\n }\n}\n\n/**\n * List all files that have backups, sorted by most recent backup\n *\n * @returns Array of backup file summaries, sorted by most recent first\n */\nexport async function listBackedUpFiles(): Promise<BackupFileSummary[]> {\n try {\n const backupsDir = getBackupsDir()\n\n // Check if backups directory exists\n if (!(await existsAsync(backupsDir))) {\n return []\n }\n\n // Read all subdirectories in backups\n const entries = await 
readdirAsync(backupsDir)\n const summaries: BackupFileSummary[] = []\n\n for (const entry of entries) {\n const indexPath = join(backupsDir, entry, INDEX_FILE)\n\n if (await existsAsync(indexPath)) {\n const index = await readJsonAsync<BackupIndex>(indexPath)\n if (index && index.versions.length > 0) {\n // Get the most recent version\n const sortedVersions = [...index.versions].sort(\n (a, b) => b.timestamp - a.timestamp,\n )\n const mostRecent = sortedVersions[0]\n\n summaries.push({\n filePath: index.filePath,\n pathHash: index.pathHash,\n versionCount: index.versions.length,\n lastBackupAt: mostRecent.timestamp,\n lastOperation: mostRecent.operation,\n })\n }\n }\n }\n\n // Sort by most recent backup first\n summaries.sort((a, b) => b.lastBackupAt - a.lastBackupAt)\n\n return summaries\n } catch (error) {\n console.error('[BackupManager] Failed to list backed up files:', error)\n return []\n }\n}\n\n/**\n * Get all backup versions for a specific file\n *\n * @param filePath - Absolute path to the file\n * @returns Array of backup metadata, sorted by version (newest first)\n */\nexport async function getBackupVersions(\n filePath: string,\n): Promise<BackupMetadata[]> {\n try {\n const indexPath = getIndexPath(filePath)\n\n if (!(await existsAsync(indexPath))) {\n return []\n }\n\n const index = await readJsonAsync<BackupIndex>(indexPath)\n if (!index) {\n return []\n }\n\n // Return sorted by version (newest first)\n return [...index.versions].sort((a, b) => b.version - a.version)\n } catch (error) {\n console.error(\n `[BackupManager] Failed to get versions for ${filePath}:`,\n error,\n )\n return []\n }\n}\n\n/**\n * Get the content of a specific backup version\n *\n * @param filePath - Absolute path to the file\n * @param version - Version number to retrieve\n * @returns The backup content, or null if not found\n */\nexport async function getBackupContent(\n filePath: string,\n version: number,\n): Promise<string | null> {\n try {\n const indexPath = 
getIndexPath(filePath)\n\n // Check if index exists\n if (!(await existsAsync(indexPath))) {\n return null\n }\n\n // Load index to verify version exists\n const index = await readJsonAsync<BackupIndex>(indexPath)\n if (!index) {\n return null\n }\n\n // Find the version metadata\n const versionMeta = index.versions.find(v => v.version === version)\n if (!versionMeta) {\n return null\n }\n\n // For 'create' operations, there's no previous content\n if (versionMeta.operation === 'create' && versionMeta.size === 0) {\n return null\n }\n\n // Read the backup file\n const versionPath = getVersionPath(filePath, version)\n if (!(await existsAsync(versionPath))) {\n return null\n }\n\n const content = await fs.readFile(versionPath, {\n encoding: versionMeta.encoding,\n })\n\n return content\n } catch (error) {\n console.error(\n `[BackupManager] Failed to get backup content for ${filePath} v${version}:`,\n error,\n )\n return null\n }\n}\n\n/**\n * Restore a file to a specific backup version\n *\n * @param filePath - Absolute path to the file\n * @param version - Version number to restore to\n * @returns True if restoration was successful, false otherwise\n */\nexport async function restoreBackup(\n filePath: string,\n version: number,\n): Promise<boolean> {\n try {\n // Get the backup content\n const content = await getBackupContent(filePath, version)\n\n // Handle the case where content is null (could be a 'create' backup with no previous content)\n const indexPath = getIndexPath(filePath)\n const index = await readJsonAsync<BackupIndex>(indexPath)\n if (!index) {\n console.error(`[BackupManager] No backup index found for ${filePath}`)\n return false\n }\n\n const versionMeta = index.versions.find(v => v.version === version)\n if (!versionMeta) {\n console.error(\n `[BackupManager] Version ${version} not found for ${filePath}`,\n )\n return false\n }\n\n // If this was a 'create' operation, restoring means deleting the file\n if (versionMeta.operation === 'create' && 
content === null) {\n // The file didn't exist before creation, so \"restoring\" means deleting it\n await fs.unlink(filePath)\n return true\n }\n\n // For other cases, we need content\n if (content === null) {\n console.error(\n `[BackupManager] No content found for ${filePath} v${version}`,\n )\n return false\n }\n\n // Create a backup of current state before restoring\n let currentContent: string | null = null\n try {\n currentContent = await fs.readFile(filePath, 'utf8')\n } catch {\n // File might not exist, that's okay\n }\n\n if (currentContent !== null) {\n await createBackup(\n filePath,\n currentContent,\n content,\n 'update',\n `Restored from version ${version}`,\n )\n }\n\n // Ensure parent directory exists\n const parentDir = dirname(filePath)\n await mkdirAsync(parentDir, true)\n\n // Write the restored content\n await fs.writeFile(filePath, content, {\n encoding: versionMeta.encoding,\n })\n\n return true\n } catch (error) {\n console.error(\n `[BackupManager] Failed to restore ${filePath} to v${version}:`,\n error,\n )\n return false\n }\n}\n\n/**\n * Get the backup index for a file\n * Useful for debugging and advanced operations\n */\nexport async function getBackupIndex(\n filePath: string,\n): Promise<BackupIndex | null> {\n const indexPath = getIndexPath(filePath)\n return await readJsonAsync<BackupIndex>(indexPath)\n}\n\n/**\n * Delete all backups for a specific file\n * Use with caution - this is permanent\n */\nexport async function deleteAllBackups(filePath: string): Promise<boolean> {\n try {\n const backupDir = getBackupDir(filePath)\n\n if (!(await existsAsync(backupDir))) {\n return true // Nothing to delete\n }\n\n // Delete all files in the backup directory\n const entries = await readdirAsync(backupDir)\n for (const entry of entries) {\n await unlinkAsync(join(backupDir, entry))\n }\n\n // Remove the directory itself\n await fs.rmdir(backupDir)\n\n return true\n } catch (error) {\n console.error(\n `[BackupManager] Failed to delete 
backups for ${filePath}:`,\n error,\n )\n return false\n }\n}\n\n/**\n * Get total backup storage usage in bytes\n */\nexport async function getBackupStorageUsage(): Promise<{\n totalBytes: number\n fileCount: number\n versionCount: number\n}> {\n try {\n const backupsDir = getBackupsDir()\n\n if (!(await existsAsync(backupsDir))) {\n return { totalBytes: 0, fileCount: 0, versionCount: 0 }\n }\n\n let totalBytes = 0\n let fileCount = 0\n let versionCount = 0\n\n const entries = await readdirAsync(backupsDir)\n\n for (const entry of entries) {\n const indexPath = join(backupsDir, entry, INDEX_FILE)\n const index = await readJsonAsync<BackupIndex>(indexPath)\n\n if (index) {\n fileCount++\n versionCount += index.versions.length\n\n // Sum up the sizes from metadata\n for (const version of index.versions) {\n totalBytes += version.size\n }\n }\n }\n\n return { totalBytes, fileCount, versionCount }\n } catch (error) {\n console.error('[BackupManager] Failed to get storage usage:', error)\n return { totalBytes: 0, fileCount: 0, versionCount: 0 }\n }\n}\n"],
|
|
5
|
+
"mappings": "AAeA,SAAS,kBAAkB;AAC3B,SAAS,YAAY,UAAU;AAC/B,SAAS,MAAM,eAAe;AAC9B,SAAS,eAAe;AACxB,SAAS,uBAAuB;AAChC;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OACK;AAOA,MAAM,wBAAwB;AAMrC,SAAS,kBAA0B;AACjC,SACE,QAAQ,IAAI,oBACZ,QAAQ,IAAI,qBACZ,KAAK,QAAQ,GAAG,eAAe;AAEnC;AAMO,SAAS,gBAAwB;AACtC,SAAO,KAAK,gBAAgB,GAAG,SAAS;AAC1C;AAGO,MAAM,cAAc,KAAK,gBAAgB,GAAG,SAAS;AAG5D,MAAM,aAAa;AAwFZ,SAAS,YAAY,UAA0B;AACpD,QAAM,OAAO,WAAW,QAAQ,EAAE,OAAO,QAAQ,EAAE,OAAO,KAAK;AAC/D,SAAO,KAAK,MAAM,GAAG,EAAE;AACzB;AAKA,SAAS,aAAa,UAA0B;AAC9C,QAAM,WAAW,YAAY,QAAQ;AACrC,SAAO,KAAK,cAAc,GAAG,QAAQ;AACvC;AAKA,SAAS,aAAa,UAA0B;AAC9C,SAAO,KAAK,aAAa,QAAQ,GAAG,UAAU;AAChD;AAKA,SAAS,eAAe,UAAkB,SAAyB;AACjE,SAAO,KAAK,aAAa,QAAQ,GAAG,IAAI,OAAO,SAAS;AAC1D;AAKO,SAAS,kBAAkB,SAA6B;AAE7D,MAAI,QAAQ,SAAS,MAAM,GAAG;AAC5B,WAAO;AAAA,EACT;AACA,SAAO;AACT;AAMO,SAAS,eAAe,UAAkC;AAG/D,SAAO;AACT;AAgBA,eAAsB,aACpB,UACA,YACA,YACA,WACA,aACgC;AAChC,MAAI;AACF,UAAM,YAAY,aAAa,QAAQ;AACvC,UAAM,YAAY,aAAa,QAAQ;AAGvC,UAAM,WAAW,WAAW,IAAI;AAGhC,QAAI;AACJ,QAAI,MAAM,YAAY,SAAS,GAAG;AAChC,YAAM,SAAS,MAAM,cAA2B,SAAS;AACzD,UAAI,QAAQ;AACV,gBAAQ;AAAA,MACV,OAAO;AAEL,gBAAQ;AAAA,UACN;AAAA,UACA,UAAU,YAAY,QAAQ;AAAA,UAC9B,WAAW,KAAK,IAAI;AAAA,UACpB,UAAU,CAAC;AAAA,QACb;AAAA,MACF;AAAA,IACF,OAAO;AACL,cAAQ;AAAA,QACN;AAAA,QACA,UAAU,YAAY,QAAQ;AAAA,QAC9B,WAAW,KAAK,IAAI;AAAA,QACpB,UAAU,CAAC;AAAA,MACb;AAAA,IACF;AAGA,UAAM,kBAAkB,cAAc,WAAW,OAAO;AAGxD,UAAM,aACJ,MAAM,SAAS,SAAS,IACpB,KAAK,IAAI,GAAG,MAAM,SAAS,IAAI,OAAK,EAAE,OAAO,CAAC,IAC9C;AACN,UAAM,aAAa,aAAa;AAGhC,UAAM,WAA2B;AAAA,MAC/B,SAAS;AAAA,MACT,WAAW,KAAK,IAAI;AAAA,MACpB;AAAA,MACA,UAAU,kBAAkB,eAAe,eAAe,IAAI;AAAA,MAC9D,aAAa,kBAAkB,kBAAkB,eAAe,IAAI;AAAA,MACpE,MAAM,kBAAkB,OAAO,WAAW,iBAAiB,MAAM,IAAI;AAAA,MACrE;AAAA,IACF;AAGA,QAAI,oBAAoB,MAAM;AAC5B,YAAM,cAAc,eAAe,UAAU,UAAU;AACvD,YAAM,GAAG,UAAU,aAAa,iBAAiB;AAAA,QAC/C,UAAU,SAAS;AAAA,MACrB,CAAC;AAAA,IACH;AAGA,UAAM,SAAS,KAAK,QAAQ;AAG5B,QAAI,MAAM,SAAS,SAAS,uBAAuB;AACjD,YAAM,mBAAmB,UAAU,KAAK;AAAA,IAC1C;AAGA,UAAM,eAAe,WAAW,KAAK;AAErC,WAAO;AAAA,EACT,SAAS,OAAO;AA
Ed,YAAQ;AAAA,MACN,+CAA+C,QAAQ;AAAA,MACvD;AAAA,IACF;AACA,WAAO;AAAA,EACT;AACF;AAMA,eAAe,mBACb,UACA,OACe;AAEf,QAAM,SAAS,KAAK,CAAC,GAAG,MAAM,EAAE,UAAU,EAAE,OAAO;AAGnD,QAAM,WAAW,MAAM,SAAS,SAAS;AAEzC,MAAI,YAAY,EAAG;AAGnB,QAAM,mBAAmB,MAAM,SAAS,OAAO,GAAG,QAAQ;AAG1D,aAAW,WAAW,kBAAkB;AACtC,UAAM,cAAc,eAAe,UAAU,QAAQ,OAAO;AAC5D,UAAM,YAAY,WAAW;AAAA,EAC/B;AACF;AAOA,eAAsB,oBAAkD;AACtE,MAAI;AACF,UAAM,aAAa,cAAc;AAGjC,QAAI,CAAE,MAAM,YAAY,UAAU,GAAI;AACpC,aAAO,CAAC;AAAA,IACV;AAGA,UAAM,UAAU,MAAM,aAAa,UAAU;AAC7C,UAAM,YAAiC,CAAC;AAExC,eAAW,SAAS,SAAS;AAC3B,YAAM,YAAY,KAAK,YAAY,OAAO,UAAU;AAEpD,UAAI,MAAM,YAAY,SAAS,GAAG;AAChC,cAAM,QAAQ,MAAM,cAA2B,SAAS;AACxD,YAAI,SAAS,MAAM,SAAS,SAAS,GAAG;AAEtC,gBAAM,iBAAiB,CAAC,GAAG,MAAM,QAAQ,EAAE;AAAA,YACzC,CAAC,GAAG,MAAM,EAAE,YAAY,EAAE;AAAA,UAC5B;AACA,gBAAM,aAAa,eAAe,CAAC;AAEnC,oBAAU,KAAK;AAAA,YACb,UAAU,MAAM;AAAA,YAChB,UAAU,MAAM;AAAA,YAChB,cAAc,MAAM,SAAS;AAAA,YAC7B,cAAc,WAAW;AAAA,YACzB,eAAe,WAAW;AAAA,UAC5B,CAAC;AAAA,QACH;AAAA,MACF;AAAA,IACF;AAGA,cAAU,KAAK,CAAC,GAAG,MAAM,EAAE,eAAe,EAAE,YAAY;AAExD,WAAO;AAAA,EACT,SAAS,OAAO;AACd,YAAQ,MAAM,mDAAmD,KAAK;AACtE,WAAO,CAAC;AAAA,EACV;AACF;AAQA,eAAsB,kBACpB,UAC2B;AAC3B,MAAI;AACF,UAAM,YAAY,aAAa,QAAQ;AAEvC,QAAI,CAAE,MAAM,YAAY,SAAS,GAAI;AACnC,aAAO,CAAC;AAAA,IACV;AAEA,UAAM,QAAQ,MAAM,cAA2B,SAAS;AACxD,QAAI,CAAC,OAAO;AACV,aAAO,CAAC;AAAA,IACV;AAGA,WAAO,CAAC,GAAG,MAAM,QAAQ,EAAE,KAAK,CAAC,GAAG,MAAM,EAAE,UAAU,EAAE,OAAO;AAAA,EACjE,SAAS,OAAO;AACd,YAAQ;AAAA,MACN,8CAA8C,QAAQ;AAAA,MACtD;AAAA,IACF;AACA,WAAO,CAAC;AAAA,EACV;AACF;AASA,eAAsB,iBACpB,UACA,SACwB;AACxB,MAAI;AACF,UAAM,YAAY,aAAa,QAAQ;AAGvC,QAAI,CAAE,MAAM,YAAY,SAAS,GAAI;AACnC,aAAO;AAAA,IACT;AAGA,UAAM,QAAQ,MAAM,cAA2B,SAAS;AACxD,QAAI,CAAC,OAAO;AACV,aAAO;AAAA,IACT;AAGA,UAAM,cAAc,MAAM,SAAS,KAAK,OAAK,EAAE,YAAY,OAAO;AAClE,QAAI,CAAC,aAAa;AAChB,aAAO;AAAA,IACT;AAGA,QAAI,YAAY,cAAc,YAAY,YAAY,SAAS,GAAG;AAChE,aAAO;AAAA,IACT;AAGA,UAAM,cAAc,eAAe,UAAU,OAAO;AACpD,QAAI,CAAE,MAAM,YAAY,WAAW,GAAI;AACrC,aAAO;AAAA,IACT;AAEA,UAAM,UAAU,MAAM,GAAG,SAAS,aAAa;AAAA,MAC7C,UAAU,YAAY;AAAA,IACxB,CAAC;AAED,WAAO;AAAA,E
ACT,SAAS,OAAO;AACd,YAAQ;AAAA,MACN,oDAAoD,QAAQ,KAAK,OAAO;AAAA,MACxE;AAAA,IACF;AACA,WAAO;AAAA,EACT;AACF;AASA,eAAsB,cACpB,UACA,SACkB;AAClB,MAAI;AAEF,UAAM,UAAU,MAAM,iBAAiB,UAAU,OAAO;AAGxD,UAAM,YAAY,aAAa,QAAQ;AACvC,UAAM,QAAQ,MAAM,cAA2B,SAAS;AACxD,QAAI,CAAC,OAAO;AACV,cAAQ,MAAM,6CAA6C,QAAQ,EAAE;AACrE,aAAO;AAAA,IACT;AAEA,UAAM,cAAc,MAAM,SAAS,KAAK,OAAK,EAAE,YAAY,OAAO;AAClE,QAAI,CAAC,aAAa;AAChB,cAAQ;AAAA,QACN,2BAA2B,OAAO,kBAAkB,QAAQ;AAAA,MAC9D;AACA,aAAO;AAAA,IACT;AAGA,QAAI,YAAY,cAAc,YAAY,YAAY,MAAM;AAE1D,YAAM,GAAG,OAAO,QAAQ;AACxB,aAAO;AAAA,IACT;AAGA,QAAI,YAAY,MAAM;AACpB,cAAQ;AAAA,QACN,wCAAwC,QAAQ,KAAK,OAAO;AAAA,MAC9D;AACA,aAAO;AAAA,IACT;AAGA,QAAI,iBAAgC;AACpC,QAAI;AACF,uBAAiB,MAAM,GAAG,SAAS,UAAU,MAAM;AAAA,IACrD,QAAQ;AAAA,IAER;AAEA,QAAI,mBAAmB,MAAM;AAC3B,YAAM;AAAA,QACJ;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA,yBAAyB,OAAO;AAAA,MAClC;AAAA,IACF;AAGA,UAAM,YAAY,QAAQ,QAAQ;AAClC,UAAM,WAAW,WAAW,IAAI;AAGhC,UAAM,GAAG,UAAU,UAAU,SAAS;AAAA,MACpC,UAAU,YAAY;AAAA,IACxB,CAAC;AAED,WAAO;AAAA,EACT,SAAS,OAAO;AACd,YAAQ;AAAA,MACN,qCAAqC,QAAQ,QAAQ,OAAO;AAAA,MAC5D;AAAA,IACF;AACA,WAAO;AAAA,EACT;AACF;AAMA,eAAsB,eACpB,UAC6B;AAC7B,QAAM,YAAY,aAAa,QAAQ;AACvC,SAAO,MAAM,cAA2B,SAAS;AACnD;AAMA,eAAsB,iBAAiB,UAAoC;AACzE,MAAI;AACF,UAAM,YAAY,aAAa,QAAQ;AAEvC,QAAI,CAAE,MAAM,YAAY,SAAS,GAAI;AACnC,aAAO;AAAA,IACT;AAGA,UAAM,UAAU,MAAM,aAAa,SAAS;AAC5C,eAAW,SAAS,SAAS;AAC3B,YAAM,YAAY,KAAK,WAAW,KAAK,CAAC;AAAA,IAC1C;AAGA,UAAM,GAAG,MAAM,SAAS;AAExB,WAAO;AAAA,EACT,SAAS,OAAO;AACd,YAAQ;AAAA,MACN,gDAAgD,QAAQ;AAAA,MACxD;AAAA,IACF;AACA,WAAO;AAAA,EACT;AACF;AAKA,eAAsB,wBAInB;AACD,MAAI;AACF,UAAM,aAAa,cAAc;AAEjC,QAAI,CAAE,MAAM,YAAY,UAAU,GAAI;AACpC,aAAO,EAAE,YAAY,GAAG,WAAW,GAAG,cAAc,EAAE;AAAA,IACxD;AAEA,QAAI,aAAa;AACjB,QAAI,YAAY;AAChB,QAAI,eAAe;AAEnB,UAAM,UAAU,MAAM,aAAa,UAAU;AAE7C,eAAW,SAAS,SAAS;AAC3B,YAAM,YAAY,KAAK,YAAY,OAAO,UAAU;AACpD,YAAM,QAAQ,MAAM,cAA2B,SAAS;AAExD,UAAI,OAAO;AACT;AACA,wBAAgB,MAAM,SAAS;AAG/B,mBAAW,WAAW,MAAM,UAAU;AACpC,wBAAc,QAAQ;AAAA,QACxB;AAAA,MACF;AAAA,IACF;AAEA,WAAO,EAAE,YAAY,WAAW,aAAa;AAAA,EAC/C,SAAS,OAAO;AACd,YAAQ,MAAM,
gDAAgD,KAAK;AACnE,WAAO,EAAE,YAAY,GAAG,WAAW,GAAG,cAAc,EAAE;AAAA,EACxD;AACF;",
|
|
6
|
+
"names": []
|
|
7
|
+
}
|
package/dist/core/costTracker.js
CHANGED
|
@@ -7,14 +7,11 @@ import {
|
|
|
7
7
|
saveCurrentProjectConfig
|
|
8
8
|
} from "../utils/config.js";
|
|
9
9
|
import { SESSION_ID } from "../utils/log.js";
|
|
10
|
+
import { tokenStatsManager } from "./tokenStatsManager.js";
|
|
10
11
|
const STATE = {
|
|
11
12
|
totalCost: 0,
|
|
12
13
|
totalAPIDuration: 0,
|
|
13
14
|
startTime: Date.now(),
|
|
14
|
-
inputTokens: 0,
|
|
15
|
-
outputTokens: 0,
|
|
16
|
-
cacheCreationTokens: 0,
|
|
17
|
-
cacheReadTokens: 0,
|
|
18
15
|
requestCount: 0
|
|
19
16
|
};
|
|
20
17
|
function addToTotalCost(cost, duration) {
|
|
@@ -22,11 +19,7 @@ function addToTotalCost(cost, duration) {
|
|
|
22
19
|
STATE.totalAPIDuration += duration;
|
|
23
20
|
STATE.requestCount += 1;
|
|
24
21
|
}
|
|
25
|
-
function addTokenUsage(
|
|
26
|
-
STATE.inputTokens += inputTokens;
|
|
27
|
-
STATE.outputTokens += outputTokens;
|
|
28
|
-
if (cacheCreationTokens) STATE.cacheCreationTokens += cacheCreationTokens;
|
|
29
|
-
if (cacheReadTokens) STATE.cacheReadTokens += cacheReadTokens;
|
|
22
|
+
function addTokenUsage(_inputTokens, _outputTokens, _cacheCreationTokens, _cacheReadTokens) {
|
|
30
23
|
}
|
|
31
24
|
function getTotalCost() {
|
|
32
25
|
return STATE.totalCost;
|
|
@@ -38,12 +31,13 @@ function getTotalAPIDuration() {
|
|
|
38
31
|
return STATE.totalAPIDuration;
|
|
39
32
|
}
|
|
40
33
|
function getTokenCounts() {
|
|
34
|
+
const globalStats = tokenStatsManager.getGlobalStats();
|
|
41
35
|
return {
|
|
42
|
-
input:
|
|
43
|
-
output:
|
|
44
|
-
cacheCreation:
|
|
45
|
-
cacheRead:
|
|
46
|
-
total:
|
|
36
|
+
input: globalStats.totalInputTokens,
|
|
37
|
+
output: globalStats.totalOutputTokens,
|
|
38
|
+
cacheCreation: globalStats.totalCacheCreationTokens,
|
|
39
|
+
cacheRead: globalStats.totalCacheReadTokens,
|
|
40
|
+
total: globalStats.grandTotalTokens
|
|
47
41
|
};
|
|
48
42
|
}
|
|
49
43
|
function getRequestCount() {
|
|
@@ -115,11 +109,8 @@ function resetStateForTests() {
|
|
|
115
109
|
STATE.startTime = Date.now();
|
|
116
110
|
STATE.totalCost = 0;
|
|
117
111
|
STATE.totalAPIDuration = 0;
|
|
118
|
-
STATE.inputTokens = 0;
|
|
119
|
-
STATE.outputTokens = 0;
|
|
120
|
-
STATE.cacheCreationTokens = 0;
|
|
121
|
-
STATE.cacheReadTokens = 0;
|
|
122
112
|
STATE.requestCount = 0;
|
|
113
|
+
tokenStatsManager.reset();
|
|
123
114
|
}
|
|
124
115
|
export {
|
|
125
116
|
addToTotalCost,
|