omni-context-cli 0.0.74 → 0.0.77

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92) hide show
  1. package/README.md +68 -112
  2. package/README.zh-CN.md +68 -112
  3. package/dist/agents/explore.md +26 -19
  4. package/dist/agents/glance.md +7 -22
  5. package/dist/agents/{slice.md → search.md} +4 -4
  6. package/dist/assets/cone-bw-white.svg +24 -7
  7. package/dist/assets/cone-bw.svg +24 -7
  8. package/dist/assets/cone-mac@256.png +0 -0
  9. package/dist/assets/cone-mac@512.png +0 -0
  10. package/dist/assets/cone.svg +24 -2
  11. package/dist/assets/cone@128.png +0 -0
  12. package/dist/assets/cone@16.png +0 -0
  13. package/dist/assets/cone@24.png +0 -0
  14. package/dist/assets/cone@256.png +0 -0
  15. package/dist/assets/cone@32.png +0 -0
  16. package/dist/assets/cone@512.png +0 -0
  17. package/dist/assets/cone@64.png +0 -0
  18. package/dist/assets/icon.ico +0 -0
  19. package/dist/assets/omnicontext-cli-logo-white.svg +24 -7
  20. package/dist/assets/omnicontext-cli-logo.svg +24 -7
  21. package/dist/cli.js +7 -7
  22. package/dist/clients/extension.vsix +0 -0
  23. package/dist/clients/web/assets/{_baseUniq-CH4b0N5_.js → _baseUniq-BbAI5D6T.js} +1 -1
  24. package/dist/clients/web/assets/{arc-BQVP7-Bp.js → arc-C01cHi_J.js} +1 -1
  25. package/dist/clients/web/assets/{architectureDiagram-VXUJARFQ-CZtE2OyB.js → architectureDiagram-VXUJARFQ-C4SW9geU.js} +1 -1
  26. package/dist/clients/web/assets/{blockDiagram-VD42YOAC-CtSz7Vx1.js → blockDiagram-VD42YOAC-BJBjUKKm.js} +1 -1
  27. package/dist/clients/web/assets/{c4Diagram-YG6GDRKO-CeUU0lHA.js → c4Diagram-YG6GDRKO-DRMS2exe.js} +1 -1
  28. package/dist/clients/web/assets/channel-BFxt5Aug.js +1 -0
  29. package/dist/clients/web/assets/{chunk-4BX2VUAB-Df-kQvTf.js → chunk-4BX2VUAB-DdM_Xgvz.js} +1 -1
  30. package/dist/clients/web/assets/{chunk-55IACEB6-DR-uhhX0.js → chunk-55IACEB6-sCGtW4jw.js} +1 -1
  31. package/dist/clients/web/assets/{chunk-B4BG7PRW-DOrSo5dD.js → chunk-B4BG7PRW-CrTfpFlk.js} +1 -1
  32. package/dist/clients/web/assets/{chunk-DI55MBZ5-BbUWe3KA.js → chunk-DI55MBZ5-BJxcEBin.js} +1 -1
  33. package/dist/clients/web/assets/{chunk-FMBD7UC4-BnLGK1jf.js → chunk-FMBD7UC4-ZCzWQMnr.js} +1 -1
  34. package/dist/clients/web/assets/{chunk-QN33PNHL-VCJVvyMp.js → chunk-QN33PNHL-7lsjOUTT.js} +1 -1
  35. package/dist/clients/web/assets/{chunk-QZHKN3VN-CPfXJo_T.js → chunk-QZHKN3VN-DM6taDix.js} +1 -1
  36. package/dist/clients/web/assets/{chunk-TZMSLE5B-BkHNVsyH.js → chunk-TZMSLE5B-Dc2DN3ii.js} +1 -1
  37. package/dist/clients/web/assets/classDiagram-2ON5EDUG-CMfvUA3j.js +1 -0
  38. package/dist/clients/web/assets/classDiagram-v2-WZHVMYZB-CMfvUA3j.js +1 -0
  39. package/dist/clients/web/assets/clone-BWI7Baa7.js +1 -0
  40. package/dist/clients/web/assets/{cose-bilkent-S5V4N54A-C9Wxnne5.js → cose-bilkent-S5V4N54A-r8T3f5yK.js} +1 -1
  41. package/dist/clients/web/assets/{dagre-6UL2VRFP-B6-ozXSh.js → dagre-6UL2VRFP-UnxnSeQH.js} +1 -1
  42. package/dist/clients/web/assets/{diagram-PSM6KHXK-D2TK1Mkk.js → diagram-PSM6KHXK-D2ENZ6qS.js} +1 -1
  43. package/dist/clients/web/assets/{diagram-QEK2KX5R-C5OGEwzB.js → diagram-QEK2KX5R-Eye3JgZm.js} +1 -1
  44. package/dist/clients/web/assets/{diagram-S2PKOQOG-CjdFw3eQ.js → diagram-S2PKOQOG-Blf5P9RJ.js} +1 -1
  45. package/dist/clients/web/assets/{erDiagram-Q2GNP2WA-hQIEDub-.js → erDiagram-Q2GNP2WA-wyIeLoVZ.js} +1 -1
  46. package/dist/clients/web/assets/{flowDiagram-NV44I4VS-WDuMquGw.js → flowDiagram-NV44I4VS-D99ef0t0.js} +1 -1
  47. package/dist/clients/web/assets/{ganttDiagram-JELNMOA3-BT_k8mao.js → ganttDiagram-JELNMOA3-CvsjeBuE.js} +1 -1
  48. package/dist/clients/web/assets/{gitGraphDiagram-NY62KEGX-Dq5JUR6E.js → gitGraphDiagram-NY62KEGX-B2ylogok.js} +1 -1
  49. package/dist/clients/web/assets/{graph-EhiCIhKK.js → graph-BEwCLq4h.js} +1 -1
  50. package/dist/clients/web/assets/index-30onxp6I.js +2 -0
  51. package/dist/clients/web/assets/index-pGSHhaP9.css +1 -0
  52. package/dist/clients/web/assets/{infoDiagram-WHAUD3N6-BrkLiOdx.js → infoDiagram-WHAUD3N6-C9HLlPj3.js} +1 -1
  53. package/dist/clients/web/assets/{journeyDiagram-XKPGCS4Q-2iAEjN27.js → journeyDiagram-XKPGCS4Q-BGMoCoGu.js} +1 -1
  54. package/dist/clients/web/assets/{kanban-definition-3W4ZIXB7-C222cIK8.js → kanban-definition-3W4ZIXB7-Z1B1b4dY.js} +1 -1
  55. package/dist/clients/web/assets/{layout-DDV6LJLJ.js → layout-KBr90mM8.js} +1 -1
  56. package/dist/clients/web/assets/{linear-Cj0itOnY.js → linear-VOTetRqU.js} +1 -1
  57. package/dist/clients/web/assets/{min-DB8NGWI2.js → min-BcFr1Xgw.js} +1 -1
  58. package/dist/clients/web/assets/{mindmap-definition-VGOIOE7T-CCAhA60J.js → mindmap-definition-VGOIOE7T-Bf9fJD2v.js} +1 -1
  59. package/dist/clients/web/assets/{pieDiagram-ADFJNKIX-D-XXo3zP.js → pieDiagram-ADFJNKIX-DITjn_1i.js} +1 -1
  60. package/dist/clients/web/assets/{quadrantDiagram-AYHSOK5B-5IBnb1WQ.js → quadrantDiagram-AYHSOK5B-CG8qEQsc.js} +1 -1
  61. package/dist/clients/web/assets/{requirementDiagram-UZGBJVZJ-4woYoyhj.js → requirementDiagram-UZGBJVZJ-_fiifEP6.js} +1 -1
  62. package/dist/clients/web/assets/{sankeyDiagram-TZEHDZUN-CQITNuFm.js → sankeyDiagram-TZEHDZUN-dGOtogtu.js} +1 -1
  63. package/dist/clients/web/assets/{sequenceDiagram-WL72ISMW-BIUgU_on.js → sequenceDiagram-WL72ISMW-BPWg-aZ3.js} +1 -1
  64. package/dist/clients/web/assets/{stateDiagram-FKZM4ZOC-C3Wi-EZk.js → stateDiagram-FKZM4ZOC-XlU9RkKz.js} +1 -1
  65. package/dist/clients/web/assets/stateDiagram-v2-4FDKWEC3-Rsaa7fF9.js +1 -0
  66. package/dist/clients/web/assets/{timeline-definition-IT6M3QCI-CwRxrax-.js → timeline-definition-IT6M3QCI-CQGxtqhi.js} +1 -1
  67. package/dist/clients/web/assets/{treemap-KMMF4GRG-i_IDwp2d.js → treemap-KMMF4GRG-Do7kW41s.js} +1 -1
  68. package/dist/clients/web/assets/{xychartDiagram-PRI3JC2R-BIS0b9I9.js → xychartDiagram-PRI3JC2R-C_QymW0P.js} +1 -1
  69. package/dist/clients/web/favicon.png +0 -0
  70. package/dist/clients/web/index.html +2 -2
  71. package/dist/skills/simplify/SKILL.md +53 -0
  72. package/dist/workflows/general.md +19 -0
  73. package/dist/workflows/programming.md +33 -0
  74. package/package.json +2 -3
  75. package/scripts/bump-version.mjs +0 -3
  76. package/scripts/clean.mjs +17 -0
  77. package/scripts/release-build.mjs +8 -129
  78. package/dist/agents/pluck.md +0 -49
  79. package/dist/agents/quest.md +0 -47
  80. package/dist/agents/ripple.md +0 -58
  81. package/dist/agents/sculpt.md +0 -63
  82. package/dist/agents/spark.md +0 -53
  83. package/dist/agents/sweep.md +0 -44
  84. package/dist/agents/weave.md +0 -54
  85. package/dist/clients/web/assets/channel-D-ssYovj.js +0 -1
  86. package/dist/clients/web/assets/classDiagram-2ON5EDUG-W6DMTXK_.js +0 -1
  87. package/dist/clients/web/assets/classDiagram-v2-WZHVMYZB-W6DMTXK_.js +0 -1
  88. package/dist/clients/web/assets/clone-Dn7sFHUy.js +0 -1
  89. package/dist/clients/web/assets/index-B5OdXvjH.js +0 -27
  90. package/dist/clients/web/assets/index-BP79FsyI.css +0 -1
  91. package/dist/clients/web/assets/stateDiagram-v2-4FDKWEC3-BudH3yEK.js +0 -1
  92. package/dist/slash/ping.md +0 -4
package/README.md CHANGED
@@ -1,153 +1,109 @@
1
1
  # OmniContext CLI
2
2
 
3
- **Precision context. Minimal cost.**
3
+ A zero-telemetry coding assistant that runs in your terminal and extends into VS Code, Office, the browser, and mobile. Most AI coding tools bolt a chat interface onto an LLM and call it a day. OmniContext CLI takes a different approach: it treats the context window as a scarce resource and engineers every layer of the system to use it efficiently. Lean system prompts, agent sub-delegation, automatic context editing, and native prompt caching work together so your tokens go toward solving the problem, not repeating boilerplate. Each LLM protocol (Anthropic, OpenAI, Gemini, Responses API) has its own dedicated request builder and stream handler with zero translation overhead. Custom workflows, agents, skills, and MCP servers make it fully extensible without touching the core.
4
4
 
5
- OmniContext CLI is a terminal-native coding assistant that treats context as a first-class resource. Lean system prompts keep overhead low. Specialist delegation routes grunt work to cheaper models while keeping your main context clean. Zero telemetry means your code never leaves your machine. And it extends into VS Code, Office, the browser, Figma, Obsidian, and Zed.
5
+ ## Context-First Architecture
6
6
 
7
- ```bash
8
- npm install -g omni-context-cli && omx
9
- ```
10
-
11
- ## How It Works
12
-
13
- Traditional assistants call basic tools one at a time, resending your entire context with every round. OmniContext CLI delegates multi-step operations to agentic sub-agents running on a cheaper model -- your expensive model stays focused on reasoning, not file I/O.
14
-
15
- **Task: "Find the definition of `handleAuth`"**
7
+ Every token matters. OmniContext CLI is engineered from the ground up to squeeze maximum value out of every context window.
16
8
 
17
- Traditional approach:
9
+ **Lean system prompts.** The built-in system prompts are short, focused, and free of boilerplate. Tool descriptions are minimal. Your context budget goes toward actual work, not framework overhead.
18
10
 
19
- | Round | Call | Result |
20
- |-------|------|--------|
21
- | R1 | `glob("src/**/*.ts")` | 43 files returned |
22
- | R2 | `grep("handleAuth", ...)` | 7 matches in 4 files |
23
- | R3 | `read("src/middleware/auth.ts")` | 186 lines -- wrong file |
24
- | R4 | `read("src/routes/login.ts")` | 124 lines -- still looking |
25
- | R5 | `read("src/services/auth.ts", 40-90)` | Found it -- 50 more lines |
11
+ **Agent sub-delegation.** Exploratory tasks (searching code, surveying project structure, previewing files) run as autonomous sub-agents on a cheaper model. Their intermediate tool calls, file contents, and reasoning never enter your main context. You get a concise answer back; the scratch work stays off the books.
26
12
 
27
- > 5 rounds, ~12K context added, all on main model
13
+ **Context editing.** As a conversation grows, older rounds accumulate tool call payloads and thinking blocks that are no longer relevant. Context editing automatically compresses these, replacing bulky tool inputs and outputs with compact placeholders and stripping reasoning traces, so the model sees a clean, focused history instead of a bloated one.
28
14
 
29
- Specialist mode:
15
+ **Auto-compaction.** When token usage hits 80% of the model's context limit, the conversation is automatically summarized, key memories are extracted, and a fresh session picks up seamlessly. You never have to manually manage context overflow.
30
16
 
31
- | Round | Call | Result |
32
- |-------|------|--------|
33
- | R1 | `Pluck("handleAuth definition")` | Sub-agent (cheap model): glob -> grep -> read -> locate -> extract |
17
+ **Native prompt caching.** Automatic cache control for Anthropic and Gemini with configurable TTL (5-minute or 1-hour). Repeated context blocks are served from cache instead of being re-processed.
34
18
 
35
- > 1 round, ~1K context added, grunt work on cheap model
19
+ ## Workflow System
36
20
 
37
- ## Agentic Tools
21
+ A workflow controls everything about how OmniContext CLI behaves: the system prompt, which tools are available, and how the assistant interacts with you. Two workflows ship built-in:
38
22
 
39
- Each tool runs as an autonomous sub-agent on a cheaper model. It handles file I/O, error recovery, and retries internally -- keeping intermediate output out of your main context and your token bill down.
23
+ | Preset | Use Case | Description |
24
+ |--------|----------|-------------|
25
+ | **Programming** (default) | Terminal, VS Code | Coding assistant with base tools, agent tools, and MCP integration. Concise output, minimal overhead. |
26
+ | **General** | Office, browser sidebar | Personal assistant for documents, spreadsheets, and presentations. Proactive with tools, conversational tone. |
40
27
 
41
- | Tool | Purpose |
42
- |------|---------|
43
- | **Explore** | Survey project architecture -- directory layout, key files, and how the codebase is organized |
44
- | **Spark** | Run shell commands with automatic error detection and retry |
45
- | **Sculpt** | Edit files with surgical precision, find the right location, make the change, validate the result |
46
- | **Weave** | Write entire files from scratch with auto-validation |
47
- | **Sweep** | Find files matching complex criteria by name, content, or structure |
48
- | **Pluck** | Extract specific code segments -- functions, classes, or blocks you need |
49
- | **Ripple** | Trace symbol references across your codebase |
50
- | **Slice** | Answer targeted code questions by reading only the relevant parts |
51
- | **Quest** | Research topics via web search |
52
- | **Glance** | Preview multiple files at once with brief summaries |
28
+ Create your own by dropping a markdown file in `~/.omx/workflows/` or `.omx/workflows/`. Each workflow is a markdown file with YAML frontmatter that defines the tool set and a body that becomes the system prompt:
53
29
 
54
- ## Workflow Presets
55
-
56
- Switch how OmniContext CLI behaves with a single command. Each preset changes the tools available, the system prompt, and the response style.
30
+ ```markdown
31
+ ---
32
+ name: My Workflow
33
+ allowBaseTools: true
34
+ allowBuiltinAgents: true
35
+ allowCustomAgents: true
36
+ allowMcpTools: true
37
+ allowRemoteTools: true
38
+ ---
39
+ Your system prompt here. Template variables like {{OS_TYPE}},
40
+ {{PLATFORM}}, {{ARCH}}, {{CWD}}, and {{TODAY}} are available.
41
+ ```
57
42
 
58
- | Preset | Description |
59
- |--------|-------------|
60
- | **Specialist** (default) | Your main model reasons, a cheaper agent model executes. Fewer rounds, cleaner context, lower cost. |
61
- | **Explorer** | Research-first mode. Launches multiple web searches before answering. Great for current events, docs, and fact-checking. |
62
- | **Artist** | Visual-first responses. Prioritizes image generation when the model supports it. Ideal for design exploration and mockups. |
63
- | **Assistant** | Personal assistant for app integrations. Controls browser tabs, Office documents, and Figma designs through natural language. |
64
- | **Normal** | Basic tools with manual orchestration. Direct read, write, edit, and bash access. Full control, no abstraction. |
43
+ Switch workflows at startup with `omx --workflow my-workflow` or set a default in `omx.json`.
65
44
 
66
45
  ## Native Multi-Protocol
67
46
 
68
- Most tools funnel everything through a single API format and hope for the best. OmniContext CLI has a dedicated request builder and stream handler for each protocol. Prompt caching, extended thinking, and provider-specific features work exactly as the vendor intended -- no lossy translation layer in between.
47
+ Each LLM API protocol has its own dedicated request builder and stream handler. Prompt caching, extended thinking, structured outputs, and every other provider-specific feature work exactly as the vendor designed them. There is no translation layer. Anthropic requests are not converted into OpenAI format or vice versa.
69
48
 
70
49
  | Protocol | Description |
71
50
  |----------|-------------|
72
- | **Anthropic** | Native Messages API with prompt caching, extended thinking, and streaming. Token-level cache control via custom TTL. |
73
- | **OpenAI** | Native Chat Completions API. Compatible with any endpoint that speaks the OpenAI format. |
74
- | **Gemini** | Native generateContent API with Gemini-specific streaming. Tools and function calling use Gemini's own schema. |
75
- | **Responses API** | OpenAI's newer Responses API with built-in tool orchestration. Separate path from Chat Completions. |
76
-
77
- ## Cost Optimization
78
-
79
- Every API call resends your full conversation history. Fewer rounds means fewer cache reads. Cleaner context means fewer tokens written. Specialist mode cuts both -- and offloads the grunt work to a cheaper model.
80
-
81
- - **Fewer API rounds** -- Traditional tools need 5 rounds to find a function definition. Specialist mode does it in 1. That's 4 fewer full-context resends -- saving cache read costs on every skipped round.
82
- - **Smaller context growth** -- Basic tools dump ~10KB of intermediate output into your conversation. Agentic tools return only the final result. Context editing automatically trims old tool payloads and thinking blocks, keeping growth in check even over long sessions.
83
- - **Cheap model for execution** -- Sub-agents run on a low-cost model while your main model handles only planning and decisions. The expensive model never does file I/O.
84
- - **1-hour cache for deep work** -- The default 5-minute prompt cache expires if you pause to think. Switch to 1-hour for debugging, refactoring, or research -- it eliminates repeated cache rebuilds across a session.
51
+ | **Anthropic** | Messages API with prompt caching, extended thinking, and streaming |
52
+ | **OpenAI** | Chat Completions API, compatible with any OpenAI-format endpoint |
53
+ | **Gemini** | generateContent API with native streaming and function calling |
54
+ | **Responses API** | OpenAI's Responses API with built-in tool orchestration |
85
55
 
86
- **Simulated cost comparison: "Find the definition of handleAuth"**
56
+ Provider-specific interceptors handle the quirks of individual model sources (DeepSeek, Kimi, MiniMax, xAI, Zhipu, Zenmux, and others) without compromising the protocol.
87
57
 
88
- | | Traditional | Specialist | Saved |
89
- |---|---|---|---|
90
- | API rounds | 5 | 1 | -4 rounds |
91
- | Cache read per round | ~20K tokens x 5 | ~20K tokens x 1 | -80K tokens |
92
- | New context added | ~10KB | ~3KB | -70% |
93
- | Cache write (new tokens) | ~2.5K tokens | ~1K tokens | -60% |
94
- | Execution model | Expensive model only | Expensive + cheap | ~30% cheaper |
95
-
96
- *Based on a 20K-token conversation finding a function across a TypeScript project. Actual savings depend on project size and model pricing.*
97
-
98
- ## Model Providers
99
-
100
- One command to add all your models. OmniContext CLI ships with built-in provider presets -- pick one, paste your API key, and every model from that service is ready to use.
58
+ One command to add all models from a provider:
101
59
 
102
60
  ```bash
103
- # List available providers
104
- $ omx --list-providers
105
-
106
- # Add all models from a provider in one go
107
- $ omx --add-provider zenmux --api-key zmx-...
108
-
109
- # Remove a provider just as easily
110
- $ omx --remove-provider zenmux
61
+ omx --list-providers
62
+ omx --add-provider openrouter --api-key sk-...
63
+ omx --remove-provider openrouter
111
64
  ```
112
65
 
113
- Built-in providers: **Zenmux**, **DeepSeek**, **OpenRouter**, **Zhipu (GLM)**, **MiniMax**
66
+ Built-in providers: **Zenmux**, **DeepSeek**, **Kimi for Coding**, **OpenRouter**, **Zhipu (GLM)**, **MiniMax**
114
67
 
115
68
  ## Cross-Session Memory
116
69
 
117
- OmniContext CLI remembers your coding style, project patterns, and past decisions across sessions. Key points are extracted from every conversation and injected into future sessions. Helpful points gain score, harmful ones drop fast, unused ones decay naturally. Each project has its own memory file -- edit it directly if you want full control.
70
+ Memory extraction runs automatically when a conversation is compacted. The model reflects on the session, pulls out key points, and evaluates existing ones. Each point carries a score that starts at 0 and shifts based on how useful it was in the conversation:
118
71
 
119
- ## Integrations
72
+ | Rating | Delta |
73
+ |--------|-------|
74
+ | Helpful | +3 |
75
+ | Neutral | -1 |
76
+ | Harmful | -6 |
120
77
 
121
- Terminal is home base, but OmniContext CLI reaches into every tool you use. One AI, consistent context, zero context switching.
78
+ Points that drop below -10 are pruned. This means good insights accumulate weight over multiple sessions while bad advice is flushed quickly and stale knowledge decays on its own. Every project maintains its own memory file at `.omx/memory.json`. Edit it directly for full control.
122
79
 
123
- - **VS Code Extension** -- full IDE integration with file context, diagnostics, and diff views
124
- - **Desktop App** -- standalone GUI that acts as the local hub connecting Office, browser, and Figma extensions
125
- - **Chrome Extension** -- sidebar on any webpage for summarization, data extraction, and browser automation
126
- - **Office Add-in** -- AI panel inside Word, Excel, and PowerPoint
127
- - **Figma Plugin** -- inspect layouts, create shapes, modify nodes, and export assets through chat
128
- - **Zed Editor** -- external agent via Agent Client Protocol with full tool access
129
- - **Web Client** -- browser UI with LaTeX, Mermaid diagrams, file attachments, and drag-and-drop
130
- - **Mobile Access** -- run `omx --serve` and connect from your phone
80
+ ## Desktop & Integrations
81
+
82
+ Terminal is home base, but OmniContext CLI extends into every tool you use. The desktop app ties it all together as a lightweight server launcher that connects your IDE, browser, and Office apps to a single OmniContext CLI instance.
83
+
84
+ - **Desktop App** - workspace management, model configuration, and one-click server launch that bridges all the integrations below
85
+ - **VS Code Extension** - IDE integration with file context, diagnostics, and diff views
86
+ - **Chrome Extension** - sidebar on any webpage for summarization, data extraction, and browser automation
87
+ - **Office Add-in** - AI panel inside Word, Excel, and PowerPoint
88
+ - **Zed Editor** - external agent via Agent Client Protocol with full tool access
89
+ - **Web Client** - browser UI with LaTeX, Mermaid diagrams, file attachments, and drag-and-drop
90
+ - **Mobile Access** - run `omx --serve` and connect from your phone
131
91
 
132
92
  ## Extensibility
133
93
 
134
- Custom agents, skills, slash commands, and MCP servers. Everything is a markdown file or JSON config.
94
+ Custom workflows, agents, skills, and MCP servers. Everything is a markdown file or JSON config.
135
95
 
136
- - **Custom SubAgents** -- write a markdown file with a prompt template and tool permissions. It becomes a new agentic tool instantly. Add `OMX-AGENTS.md` for global agent instructions.
137
- - **Custom Skills** -- teach OmniContext CLI domain-specific knowledge and workflows. Skills inject instructions into the current conversation.
138
- - **Slash Commands** -- create shortcuts for common prompts with Handlebars templating.
139
- - **MCP Servers** -- connect external tools and data sources via Model Context Protocol. Stdio and HTTP transports supported.
96
+ - **Custom Workflows** - define your own system prompt and control exactly which tools are available: base tools, agents, MCP servers, and remote tools.
97
+ - **Custom Agents** - write a markdown file with a prompt template, parameter schema, and tool permissions. It becomes a callable agent tool instantly. Add `OMX-AGENTS.md` for global agent instructions.
98
+ - **Custom Skills** - directory-based prompt capabilities invoked as slash commands (`/skill-name`). Skills inject instructions into the current conversation for domain-specific knowledge and workflows. Compatible with Claude Code skill format.
99
+ - **MCP Servers** - connect external tools and data sources via Model Context Protocol. Stdio and HTTP transports supported.
100
+ - **Project Instructions** - drop an `OMX.md` or `CLAUDE.md` in your repo root and everyone on the team gets the same conventions and context.
140
101
 
141
- ## The Details
102
+ ## Install
142
103
 
143
- - **Lean system prompts** -- minimal, focused instructions and concise tool descriptions. Your tokens go toward actual work, not bloated framework overhead.
144
- - **Zero telemetry** -- no usage tracking, no analytics, no data collection.
145
- - **Context editing** -- automatically trims old tool call payloads and thinking blocks from your conversation history.
146
- - **Extended thinking** -- enable deeper reasoning for complex tasks with configurable budget limits.
147
- - **CLAUDE.md compatible** -- already have a CLAUDE.md in your repo? OmniContext CLI reads it automatically.
148
- - **Auto-compaction** -- when context hits 80% capacity, the conversation is compacted, key memories are extracted, and a fresh session picks up where you left off.
149
- - **Native prompt caching** -- automatic cache control for Anthropic and Gemini with custom TTL settings.
150
- - **Project instructions** -- drop an `OMX.md` in your repo root and everyone on the team gets the same conventions and context.
104
+ ```bash
105
+ npm install -g omni-context-cli && omx
106
+ ```
151
107
 
152
108
  ## Build & Release
153
109
 
@@ -155,7 +111,7 @@ Custom agents, skills, slash commands, and MCP servers. Everything is a markdown
155
111
  npm run release
156
112
  ```
157
113
 
158
- One command builds the CLI, all clients, packages release zips, and builds the desktop app for the current platform. Artifacts go to `release/`.
114
+ One command builds the CLI, all clients, and packages the desktop app for the current platform.
159
115
 
160
116
  ## Documentation
161
117
 
package/README.zh-CN.md CHANGED
@@ -1,153 +1,109 @@
1
1
  # OmniContext CLI
2
2
 
3
- **精准上下文,最小成本。**
3
+ 一个零遥测的编程助手,运行在终端,延伸到 VS Code、Office、浏览器和移动端。大多数 AI 编程工具只是给 LLM 套一个聊天界面就完事了。OmniContext CLI 走了不同的路,它把上下文窗口视为稀缺资源,从每一层设计上压榨它的利用效率。精简的系统提示词、Agent 子委托、自动上下文编辑和原生提示词缓存协同工作,让你的 token 花在解决问题上,而不是重复模板废话。每种 LLM 协议(Anthropic、OpenAI、Gemini、Responses API)都有专用的请求构建器和流处理器,零转换开销。自定义工作流、Agent、技能和 MCP 服务器提供完整的可扩展性,无需修改核心代码。
4
4
 
5
- OmniContext CLI 是一个终端原生的编程助手,把上下文当作一等资源来管理。精简的系统提示词控制开销。专家委派机制把脏活路由给便宜的模型,同时保持主上下文的干净。零遥测意味着你的代码不会离开你的机器。它还能延伸到 VS Code、Office、浏览器、Figma、Obsidian 和 Zed。
5
+ ## 上下文优先架构
6
6
 
7
- ```bash
8
- npm install -g omni-context-cli && omx
9
- ```
7
+ 每一个 token 都很重要。OmniContext CLI 从底层开始就为最大化利用上下文窗口而设计。
10
8
 
11
- ## 工作原理
9
+ **精简系统提示词。** 内置提示词短小、聚焦、没有模板废话。工具描述力求精简。你的上下文预算用在实际工作上,而不是框架开销。
12
10
 
13
- 传统助手逐个调用基础工具,每一轮都重新发送完整上下文。OmniContext CLI 把多步操作委派给运行在便宜模型上的 Agentic 子代理——贵价模型专注推理,不做文件 I/O。
11
+ **Agent 子委托。** 探索性任务(搜索代码、勘察项目结构、预览文件)由自主子 Agent 在更便宜的模型上运行。它们的中间工具调用、文件内容和推理过程不会进入你的主上下文。你只拿到一个简洁的结论,中间草稿完全不占空间。
14
12
 
15
- **任务:"找到 `handleAuth` 的定义"**
13
+ **上下文编辑。** 随着对话增长,早期的工具调用负载和思考块变得不再相关。上下文编辑自动压缩这些内容,用紧凑的占位符替换庞大的工具输入输出,剥离推理痕迹,让模型看到干净、聚焦的历史,而不是臃肿的堆积。
16
14
 
17
- 传统模式:
15
+ **自动压缩。** 当 token 用量达到模型上下文限制的 80% 时,对话自动摘要,关键记忆被提取,新会话无缝接续。你永远不需要手动管理上下文溢出。
18
16
 
19
- | 轮次 | 调用 | 结果 |
20
- |------|------|------|
21
- | R1 | `glob("src/**/*.ts")` | 返回 43 个文件 |
22
- | R2 | `grep("handleAuth", ...)` | 4 个文件中有 7 处匹配 |
23
- | R3 | `read("src/middleware/auth.ts")` | 186 行——找错文件了 |
24
- | R4 | `read("src/routes/login.ts")` | 124 行——还在找 |
25
- | R5 | `read("src/services/auth.ts", 40-90)` | 找到了——又多 50 行 |
17
+ **原生提示词缓存。** Anthropic 和 Gemini 的自动缓存控制,支持可配置的 TTL(5 分钟或 1 小时)。重复的上下文块从缓存读取,而不是重新处理。
26
18
 
27
- > 5 轮,新增 ~12K 上下文,全部在主模型上执行
19
+ ## 工作流系统
28
20
 
29
- 专家模式:
21
+ 工作流控制 OmniContext CLI 行为的方方面面:系统提示词、可用工具列表、助手交互方式。内置两套工作流:
30
22
 
31
- | 轮次 | 调用 | 结果 |
32
- |------|------|------|
33
- | R1 | `Pluck("handleAuth definition")` | 子代理(便宜模型):glob -> grep -> read -> locate -> extract |
23
+ | 预设 | 使用场景 | 说明 |
24
+ |------|----------|------|
25
+ | **Programming**(默认) | 终端、VS Code | 编程助手,包含基础工具、Agent 工具和 MCP 集成。简洁输出,最小开销。 |
26
+ | **General** | Office、浏览器侧边栏 | 多功能个人助理,处理文档、表格和演示文稿。主动使用工具,匹配对话语气。 |
34
27
 
35
- > 1 轮,新增 ~1K 上下文,脏活在便宜模型上完成
28
+ 在 `~/.omx/workflows/` 或 `.omx/workflows/` 中放一个 Markdown 文件即可创建自定义工作流。每个工作流是一个带 YAML frontmatter 的 Markdown 文件,frontmatter 定义工具集,正文成为系统提示词:
36
29
 
37
- ## Agentic 工具
38
-
39
- 每个工具作为自主子代理运行在便宜模型上,内部处理文件 I/O、错误恢复和重试——中间输出不会进入你的主上下文,token 账单也不会膨胀。
30
+ ```markdown
31
+ ---
32
+ name: My Workflow
33
+ allowBaseTools: true
34
+ allowBuiltinAgents: true
35
+ allowCustomAgents: true
36
+ allowMcpTools: true
37
+ allowRemoteTools: true
38
+ ---
39
+ 你的系统提示词。可以使用模板变量 {{OS_TYPE}}、
40
+ {{PLATFORM}}、{{ARCH}}、{{CWD}} 和 {{TODAY}}。
41
+ ```
40
42
 
41
- | 工具 | 用途 |
42
- |------|------|
43
- | **Explore** | 勘察项目架构——目录布局、关键文件和代码组织方式 |
44
- | **Spark** | 执行 shell 命令,自动检测错误并重试 |
45
- | **Sculpt** | 精准编辑文件,定位正确位置,修改并验证结果 |
46
- | **Weave** | 从头写入完整文件,自动验证 |
47
- | **Sweep** | 按名称、内容或结构查找匹配的文件 |
48
- | **Pluck** | 提取特定代码片段——函数、类或你需要的代码块 |
49
- | **Ripple** | 追踪符号在代码库中的所有引用 |
50
- | **Slice** | 只读取相关部分来回答针对性的代码问题 |
51
- | **Quest** | 通过网络搜索调研主题 |
52
- | **Glance** | 一次预览多个文件,附带简要摘要 |
53
-
54
- ## 工作流预设
55
-
56
- 一条命令切换 OmniContext CLI 的行为模式。每个预设改变可用工具、系统提示词和响应风格。
57
-
58
- | 预设 | 说明 |
59
- |------|------|
60
- | **Specialist**(默认) | 主模型负责推理,便宜的代理模型负责执行。更少轮次,更干净的上下文,更低的成本。 |
61
- | **Explorer** | 调研优先模式。先发起多次网络搜索再回答。适合时事、文档查阅和事实核查。 |
62
- | **Artist** | 视觉优先响应。在模型支持时优先生成图像。适合设计探索和原型。 |
63
- | **Assistant** | 应用集成的个人助理。通过自然语言控制浏览器标签页、Office 文档和 Figma 设计。 |
64
- | **Normal** | 基础工具加手动编排。直接使用 read、write、edit 和 bash。完全控制,没有抽象。 |
43
+ 启动时用 `omx --workflow my-workflow` 切换工作流,或在 `omx.json` 中设置默认值。
65
44
 
66
45
  ## 原生多协议
67
46
 
68
- 大多数工具把所有请求转换成单一 API 格式。OmniContext CLI 为每种协议提供专用的请求构建器和流处理器。提示词缓存、扩展思考和供应商专属特性按原厂设计工作——没有有损的转换层。
47
+ 每种 LLM API 协议都有专用的请求构建器和流处理器。提示词缓存、扩展思考、结构化输出和每一个供应商专属特性都按原厂设计工作。没有转换层,不会把 Anthropic 请求转成 OpenAI 格式,也不会反过来。
69
48
 
70
49
  | 协议 | 说明 |
71
50
  |------|------|
72
- | **Anthropic** | 原生 Messages API,支持提示词缓存、扩展思考和流式传输。通过自定义 TTL 实现 token 级缓存控制。 |
73
- | **OpenAI** | 原生 Chat Completions API。兼容任何 OpenAI 格式的接口。 |
74
- | **Gemini** | 原生 generateContent API,Gemini 专用流式传输。工具和函数调用使用 Gemini 自己的 schema。 |
75
- | **Responses API** | OpenAI 新一代 Responses API,内置工具编排。独立于 Chat Completions 的路径。 |
76
-
77
- ## 成本优化
78
-
79
- 每次 API 调用都会重新发送完整的对话历史。更少的轮次意味着更少的缓存读取。更干净的上下文意味着更少的 token 写入。专家模式两者兼省——并且把脏活卸载给便宜的模型。
80
-
81
- - **更少的 API 轮次** ——传统工具需要 5 轮才能找到一个函数定义,专家模式只要 1 轮。省掉 4 次完整上下文重传,每省一轮都节省缓存读取成本。
82
- - **更小的上下文增长** ——基础工具往对话里塞 ~10KB 中间输出,Agentic 工具只返回最终结果。上下文编辑自动裁剪旧的工具负载和思考块,长会话也能控制增长。
83
- - **便宜模型做执行** ——子代理运行在低成本模型上,主模型只负责规划和决策。贵价模型永远不做文件 I/O。
84
- - **1 小时缓存应对深度工作** ——默认 5 分钟提示词缓存在你暂停思考时就会过期。切换到 1 小时缓存适合调试、重构或调研——消除会话中反复重建缓存的开销。
51
+ | **Anthropic** | Messages API,支持提示词缓存、扩展思考和流式传输 |
52
+ | **OpenAI** | Chat Completions API,兼容任何 OpenAI 格式的接口 |
53
+ | **Gemini** | generateContent API,原生流式传输和函数调用 |
54
+ | **Responses API** | OpenAI Responses API,内置工具编排 |
85
55
 
86
- **模拟成本对比:"找到 handleAuth 的定义"**
56
+ 供应商专属拦截器处理各模型源(DeepSeek、Kimi、MiniMax、xAI、Zhipu、Zenmux 等)的差异,同时不影响协议本身。
87
57
 
88
- | | 传统模式 | 专家模式 | 节省 |
89
- |---|---|---|---|
90
- | API 轮次 | 5 | 1 | -4 轮 |
91
- | 每轮缓存读取 | ~20K tokens x 5 | ~20K tokens x 1 | -80K tokens |
92
- | 新增上下文 | ~10KB | ~3KB | -70% |
93
- | 缓存写入(新 token) | ~2.5K tokens | ~1K tokens | -60% |
94
- | 执行模型 | 仅贵价模型 | 贵价 + 便宜 | 便宜 ~30% |
95
-
96
- *基于在 TypeScript 项目中查找函数的 20K token 对话。实际节省取决于项目规模和模型定价。*
97
-
98
- ## 模型供应商
99
-
100
- 一条命令添加所有模型。OmniContext CLI 内置供应商预设——选一个,粘贴 API key,该服务的所有模型就可以使用了。
58
+ 一条命令添加供应商的所有模型:
101
59
 
102
60
  ```bash
103
- # 列出可用供应商
104
- $ omx --list-providers
105
-
106
- # 一次性添加供应商的所有模型
107
- $ omx --add-provider zenmux --api-key zmx-...
108
-
109
- # 移除同样简单
110
- $ omx --remove-provider zenmux
61
+ omx --list-providers
62
+ omx --add-provider openrouter --api-key sk-...
63
+ omx --remove-provider openrouter
111
64
  ```
112
65
 
113
- 内置供应商:**Zenmux**、**DeepSeek**、**OpenRouter**、**Zhipu (GLM)**、**MiniMax**
66
+ 内置供应商:**Zenmux**、**DeepSeek**、**Kimi for Coding**、**OpenRouter**、**Zhipu (GLM)**、**MiniMax**
114
67
 
115
68
  ## 跨会话记忆
116
69
 
117
- OmniContext CLI 跨会话记住你的编码风格、项目模式和历史决策。关键要点从每次对话中提取并注入未来的会话。有用的要点加分(+1),有害的快速扣分(-3),不再使用的自然衰减。每个项目有自己的记忆文件——想要完全控制可以直接编辑。
70
+ 记忆提取在对话压缩时自动触发。模型回顾当前会话,提炼关键要点,并评估已有的要点。每个要点的评分从 0 开始,根据它在对话中的表现变动:
118
71
 
119
- ## 集成
72
+ | 评价 | 分值变化 |
73
+ |------|----------|
74
+ | 有帮助 | +3 |
75
+ | 中性 | -1 |
76
+ | 有害 | -6 |
120
77
 
121
- 终端是大本营,但 OmniContext CLI 延伸到你使用的每个工具。一个 AI,一致的上下文,零切换成本。
78
+ 评分低于 -10 的要点被剪枝。好的见解在多次会话中不断积累权重,坏的建议很快被清除,过时的知识自然衰减。每个项目在 `.omx/memory.json` 中维护自己的记忆文件,想要完全控制可以直接编辑。
122
79
 
123
- - **VS Code 扩展** ——完整的 IDE 集成,感知打开文件、诊断信息和 diff 视图
124
- - **桌面应用** ——独立 GUI,作为本地中枢连接 Office、浏览器和 Figma 扩展
125
- - **Chrome 扩展** ——任意网页上的侧边栏,支持摘要、数据提取和浏览器自动化
126
- - **Office 插件** ——Word、Excel 和 PowerPoint 内的 AI 面板
127
- - **Figma 插件** ——通过聊天面板检查布局、创建图形、修改节点和导出资源
128
- - **Zed 编辑器** ——通过 Agent Client Protocol 作为外部代理接入,拥有完整工具访问
129
- - **Web 客户端** ——浏览器 UI,支持 LaTeX、Mermaid 图表、文件附件和拖拽
130
- - **移动端访问** ——运行 `omx --serve` 后从手机连接
80
+ ## 桌面版与集成
81
+
82
+ 终端是大本营,但 OmniContext CLI 延伸到你使用的每个工具。桌面版把一切串联起来,一个轻量级服务启动器,将你的 IDE、浏览器和 Office 应用连接到同一个 OmniContext CLI 实例。
83
+
84
+ - **桌面应用** - 工作区管理、模型配置、一键启动服务并桥接下面所有集成
85
+ - **VS Code 扩展** - IDE 集成,感知打开文件、诊断信息和 diff 视图
86
+ - **Chrome 扩展** - 任意网页上的侧边栏,支持摘要、数据提取和浏览器自动化
87
+ - **Office 插件** - Word、Excel 和 PowerPoint 内的 AI 面板
88
+ - **Zed 编辑器** - 通过 Agent Client Protocol 作为外部代理接入
89
+ - **Web 客户端** - 浏览器 UI,支持 LaTeX、Mermaid 图表、文件附件和拖拽
90
+ - **移动端访问** - 运行 `omx --serve` 后从手机连接
131
91
 
132
92
  ## 可扩展性
133
93
 
134
- 自定义 Agent、技能、斜杠命令和 MCP 服务器。一切都是 Markdown 文件或 JSON 配置。
94
+ 自定义工作流、Agent、技能和 MCP 服务器。一切都是 Markdown 文件或 JSON 配置。
135
95
 
136
- - **自定义子代理** ——写一个带提示词模板和工具权限的 Markdown 文件,它立刻成为新的 Agentic 工具。添加 `OMX-AGENTS.md` 作为全局代理指令。
137
- - **自定义技能** ——教 OmniContext CLI 领域知识和工作流。技能会注入当前对话。
138
- - **斜杠命令** ——为常用提示词创建快捷方式,支持 Handlebars 模板。
139
- - **MCP 服务器** ——通过 Model Context Protocol 接入外部工具和数据源。支持 stdio 和 HTTP 传输。
96
+ - **自定义工作流** - 定义自己的系统提示词,精确控制可用工具列表:基础工具、Agent、MCP 服务器和远程工具。
97
+ - **自定义 Agent** - 写一个带提示词模板、参数定义和工具权限的 Markdown 文件,立刻成为可调用的 Agent 工具。添加 `OMX-AGENTS.md` 作为全局 Agent 指令。
98
+ - **自定义技能** - 基于目录的提示词能力,通过斜杠命令(`/技能名`)调用。技能注入当前对话,提供领域知识和工作流。兼容 Claude Code 技能格式。
99
+ - **MCP 服务器** - 通过 Model Context Protocol 接入外部工具和数据源,支持 stdio 和 HTTP 传输。
100
+ - **项目指令** - 在仓库根目录放一个 `OMX.md` 或 `CLAUDE.md`,团队里每个人都能得到相同的约定和上下文。
140
101
 
141
- ## 细节
102
+ ## 安装
142
103
 
143
- - **精简的系统提示词** ——最小化、聚焦的指令和简洁的工具描述。你的 token 用在实际工作上,而不是臃肿的框架开销。
144
- - **零遥测** ——没有使用追踪,没有数据分析,没有数据收集。
145
- - **上下文编辑** ——自动裁剪对话历史中旧的工具调用负载和思考块。
146
- - **扩展思考** ——为复杂任务启用深度推理,支持可配置的预算限制。
147
- - **兼容 CLAUDE.md** ——仓库里已经有 CLAUDE.md?OmniContext CLI 会自动读取。
148
- - **自动压缩** ——上下文达到 80% 容量时,对话被压缩,关键记忆被提取,新会话无缝接续。
149
- - **原生提示词缓存** ——Anthropic 和 Gemini 的自动缓存控制,支持自定义 TTL 设置。
150
- - **项目指令** ——在仓库根目录放一个 `OMX.md`,团队里每个人都能得到相同的约定和上下文。
104
+ ```bash
105
+ npm install -g omni-context-cli && omx
106
+ ```
151
107
 
152
108
  ## 构建与发布
153
109
 
@@ -155,7 +111,7 @@ OmniContext CLI 跨会话记住你的编码风格、项目模式和历史决策
155
111
  npm run release
156
112
  ```
157
113
 
158
- 一条命令构建 CLI 和所有客户端,打包发布 zip,并为当前平台构建桌面应用。产物输出到 `release/`。
114
+ 一条命令构建 CLI 和所有客户端,并为当前平台打包桌面应用。
159
115
 
160
116
  ## 文档
161
117
 
@@ -1,32 +1,39 @@
1
1
  ---
2
2
  name: Explore
3
- description: Survey project structure and architecture. Shows directory layout, where features live, and how the codebase is organized. For structural overview, not detailed analysis.
4
- allowedTools: [Read, Glob, Grep, Bash, BashOutput]
5
- displayFields: [query, directory]
3
+ description: Get a high-level overview of how a project or directory is organized. Use this to understand what exists and where things live, not to answer specific code questions.
4
+ allowBaseTools: [Read, Glob, Grep, Bash, BashOutput]
5
+ displayFields: [directory]
6
6
  parameters:
7
7
  properties:
8
- query:
9
- type: string
10
- description: What aspect of the project structure to explore
11
8
  directory:
12
9
  type: string
13
- description: Limit the search to this directory. If not provided, searches the entire project.
14
- required: [query]
10
+ description: Directory to explore. If not provided, explores the entire project.
15
11
  ---
16
12
 
17
- Survey the project structure and find out: {{query}}
13
+ Survey the project structure.
14
+
15
+ {{#if directory}}Limit the exploration to this directory: {{directory}}.{{/if}}
16
+
17
+ Use glob to map out the directory tree. Start broad, then selectively read key files (READMEs, configs, entry points) to understand their roles.
18
+
19
+ Focus on the big picture: what directories exist, how the project is organized, where different concerns are separated. Don't dive deep into implementation details.
20
+
21
+ Return the results in this format:
18
22
 
19
- {{#if directory}}Limit the search to this directory: {{directory}}.{{/if}}
23
+ ```
24
+ Overview: One-paragraph summary of the project and its tech stack.
20
25
 
21
- Use glob to survey the file structure and identify relevant areas. Use grep to scan for where functionality lives.
26
+ Directory Structure:
27
+ src/ - Source code, organized by feature
28
+ tests/ - Test suites
29
+ scripts/ - Build and deployment scripts
22
30
 
23
- Focus on the big picture: what directories exist, how the project is organized, where different concerns are separated.
31
+ Key Files:
32
+ package.json - Dependencies and scripts
33
+ src/index.ts - Application entry point
24
34
 
25
- Return a report covering:
26
- - A brief overview of what you found
27
- - Relevant directories and their purposes
28
- - Key files and their roles
29
- - How the codebase is organized (features, layers, modules)
30
- - Where the queried functionality lives
35
+ Architecture:
36
+ How the codebase is organized (features, layers, modules, patterns).
37
+ ```
31
38
 
32
- Keep the report concise and to the point. Focus on what matters, skip the fluff.
39
+ Do not include explanations beyond the result format. Keep responses concise and structured.
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  name: Glance
3
3
  description: Preview multiple files at once. Reads files and directories, returns a brief summary for each file to help understand what's in them without reading the full contents.
4
- allowedTools: [Read, Glob, Grep]
4
+ allowBaseTools: [Read, Glob, Grep, Bash, BashOutput]
5
5
  displayFields: [paths, recursive, maxFiles]
6
6
  parameters:
7
7
  properties:
@@ -27,33 +27,18 @@ Maximum files to process: {{#if maxFiles}}{{maxFiles}}{{else}}20 (default){{/if}
27
27
 
28
28
  First, expand any directories into file lists using glob. Then read each file and write a brief summary (under 100 words) describing what the file does, its main exports, or key contents.
29
29
 
30
- Return the summaries in this format:
30
+ For large files, focus on the top-level structure rather than reading everything.
31
+
32
+ Return the results in this format:
31
33
 
32
34
  ```
33
- path/to/file1.ts
35
+ path/to/file.ts
34
36
  Brief summary of what this file does, its purpose, main functions or exports.
35
37
 
36
- path/to/file2.tsx
37
- Brief summary of this file's contents and role in the project.
38
-
39
- path/to/file3.json
38
+ path/to/config.json
40
39
  Brief summary of the configuration or data structure.
41
40
  ```
42
41
 
43
- If a file can't be read, note it:
44
-
45
- ```
46
- path/to/file.bin
47
- [Binary file, skipped]
48
-
49
- path/to/missing.ts
50
- [File not found]
51
- ```
52
-
53
- If the file list exceeds the limit, process only the first N files and note:
54
-
55
- ```
56
- [Reached limit of N files, X more files not processed]
57
- ```
42
+ If a file can't be read (binary, missing, etc.), note it briefly and move on.
58
43
 
59
44
  Do not include explanations beyond the result format. Keep responses concise and structured.
@@ -1,7 +1,7 @@
1
1
  ---
2
- name: Slice
3
- description: Extract code snippets relevant to answering a specific question. Returns targeted code segments from across the codebase that address the query.
4
- allowedTools: [Read, Grep, Glob, Bash, BashOutput]
2
+ name: Search
3
+ description: Search the codebase to answer a specific question. Returns targeted code segments from across the project that address the query.
4
+ allowBaseTools: [Read, Glob, Grep, Bash, BashOutput]
5
5
  displayFields: [question, directory]
6
6
  parameters:
7
7
  properties:
@@ -26,7 +26,7 @@ Read the files that contain the answer to the question. Focus on the specific co
26
26
 
27
27
  For complex flows, read multiple files to understand the complete picture.
28
28
 
29
- Return the code in this format. Use line ranges when only a section is relevant, complete contents for small files:
29
+ Return the results in this format. Use line ranges when only a section is relevant, complete contents for small files:
30
30
 
31
31
  ```
32
32
  File: path/to/file.ts