@hhsw2015/task-master-ai 0.43.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57) hide show
  1. package/CHANGELOG.md +4072 -0
  2. package/LICENSE +25 -0
  3. package/README-task-master.md +648 -0
  4. package/README.md +415 -0
  5. package/dist/ai-services-unified-BgdcS4fE.js +7 -0
  6. package/dist/ai-services-unified-DVAKOPK0.js +1 -0
  7. package/dist/assets/.windsurfrules +524 -0
  8. package/dist/assets/AGENTS.md +435 -0
  9. package/dist/assets/GEMINI.md +110 -0
  10. package/dist/assets/claude/TM_COMMANDS_GUIDE.md +147 -0
  11. package/dist/assets/config.json +34 -0
  12. package/dist/assets/env.example +12 -0
  13. package/dist/assets/example_prd.txt +47 -0
  14. package/dist/assets/example_prd_rpg.txt +511 -0
  15. package/dist/assets/gitignore +25 -0
  16. package/dist/assets/hamster-art.txt +49 -0
  17. package/dist/assets/kiro-hooks/tm-code-change-task-tracker.kiro.hook +23 -0
  18. package/dist/assets/kiro-hooks/tm-complexity-analyzer.kiro.hook +16 -0
  19. package/dist/assets/kiro-hooks/tm-daily-standup-assistant.kiro.hook +13 -0
  20. package/dist/assets/kiro-hooks/tm-git-commit-task-linker.kiro.hook +13 -0
  21. package/dist/assets/kiro-hooks/tm-pr-readiness-checker.kiro.hook +13 -0
  22. package/dist/assets/kiro-hooks/tm-task-dependency-auto-progression.kiro.hook +17 -0
  23. package/dist/assets/kiro-hooks/tm-test-success-task-completer.kiro.hook +23 -0
  24. package/dist/assets/roocode/.roo/rules-architect/architect-rules +93 -0
  25. package/dist/assets/roocode/.roo/rules-ask/ask-rules +89 -0
  26. package/dist/assets/roocode/.roo/rules-code/code-rules +61 -0
  27. package/dist/assets/roocode/.roo/rules-debug/debug-rules +68 -0
  28. package/dist/assets/roocode/.roo/rules-orchestrator/orchestrator-rules +181 -0
  29. package/dist/assets/roocode/.roo/rules-test/test-rules +61 -0
  30. package/dist/assets/roocode/.roomodes +63 -0
  31. package/dist/assets/rules/cursor_rules.mdc +53 -0
  32. package/dist/assets/rules/dev_workflow.mdc +424 -0
  33. package/dist/assets/rules/hamster.mdc +173 -0
  34. package/dist/assets/rules/self_improve.mdc +72 -0
  35. package/dist/assets/rules/taskmaster.mdc +573 -0
  36. package/dist/assets/rules/taskmaster_hooks_workflow.mdc +59 -0
  37. package/dist/assets/scripts_README.md +445 -0
  38. package/dist/commands-D7m4KWx1.js +329 -0
  39. package/dist/config-manager-CvbfYtIR.js +1 -0
  40. package/dist/config-manager-cjltSxIS.js +270 -0
  41. package/dist/dependency-manager-CyOxi5uo.js +1078 -0
  42. package/dist/git-utils-DllbRE35.js +1 -0
  43. package/dist/git-utils-PBP1PRVP.js +1 -0
  44. package/dist/mcp-server.js +44 -0
  45. package/dist/profiles-DcD-JxPM.js +3528 -0
  46. package/dist/research-DN4RyyJY.js +1 -0
  47. package/dist/response-language-C5AwQSfD.js +1 -0
  48. package/dist/response-language-LzM2RD6-.js +1 -0
  49. package/dist/sentry-CBAZ4LSk.js +1 -0
  50. package/dist/tag-management-6HOtYZMj.js +1 -0
  51. package/dist/task-manager-BtFURFe0.js +1 -0
  52. package/dist/task-master.js +2 -0
  53. package/dist/update-subtask-by-id-DiWMqGfw.js +1 -0
  54. package/dist/update-task-by-id-eyL-PNVX.js +1 -0
  55. package/dist/utils-CGk8TL6x.js +1 -0
  56. package/index.js +160 -0
  57. package/package.json +183 -0
package/README.md ADDED
@@ -0,0 +1,415 @@
1
+ <a name="readme-top"></a>
2
+
3
+ <div align='center'>
4
+ <a href="https://trendshift.io/repositories/13971" target="_blank"><img src="https://trendshift.io/api/badge/repositories/13971" alt="eyaltoledano%2Fclaude-task-master | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
5
+ </div>
6
+
7
+ <p align="center">
8
+ <a href="https://task-master.dev"><img src="./images/logo.png?raw=true" alt="Taskmaster logo"></a>
9
+ </p>
10
+
11
+ <p align="center">
12
+ <b>Taskmaster</b>: A task management system for AI-driven development, designed to work seamlessly with any AI chat.
13
+ </p>
14
+
15
+ <p align="center">
16
+ <a href="https://discord.gg/taskmasterai" target="_blank"><img src="https://dcbadge.limes.pink/api/server/https://discord.gg/taskmasterai?style=flat" alt="Discord"></a> |
17
+ <a href="https://docs.task-master.dev" target="_blank">Docs</a>
18
+ </p>
19
+
20
+ <p align="center">
21
+ <a href="https://github.com/eyaltoledano/claude-task-master/actions/workflows/ci.yml"><img src="https://github.com/eyaltoledano/claude-task-master/actions/workflows/ci.yml/badge.svg" alt="CI"></a>
22
+ <a href="https://github.com/eyaltoledano/claude-task-master/stargazers"><img src="https://img.shields.io/github/stars/eyaltoledano/claude-task-master?style=social" alt="GitHub stars"></a>
23
+ <a href="https://badge.fury.io/js/task-master-ai"><img src="https://badge.fury.io/js/task-master-ai.svg" alt="npm version"></a>
24
+ <a href="LICENSE"><img src="https://img.shields.io/badge/license-MIT%20with%20Commons%20Clause-blue.svg" alt="License"></a>
25
+ </p>
26
+
27
+ <p align="center">
28
+ <a href="https://www.npmjs.com/package/task-master-ai"><img src="https://img.shields.io/npm/d18m/task-master-ai?style=flat" alt="NPM Downloads"></a>
29
+ <a href="https://www.npmjs.com/package/task-master-ai"><img src="https://img.shields.io/npm/dm/task-master-ai?style=flat" alt="NPM Downloads"></a>
30
+ <a href="https://www.npmjs.com/package/task-master-ai"><img src="https://img.shields.io/npm/dw/task-master-ai?style=flat" alt="NPM Downloads"></a>
31
+ </p>
32
+
33
+ ## By [@eyaltoledano](https://x.com/eyaltoledano) & [@RalphEcom](https://x.com/RalphEcom)
34
+
35
+ [![Twitter Follow](https://img.shields.io/twitter/follow/eyaltoledano)](https://x.com/eyaltoledano)
36
+ [![Twitter Follow](https://img.shields.io/twitter/follow/RalphEcom)](https://x.com/RalphEcom)
37
+
38
+ A task management system for AI-driven development with Claude, designed to work seamlessly with Cursor AI.
39
+
40
+ ## Documentation
41
+
42
+ 📚 **[View Full Documentation](https://docs.task-master.dev)**
43
+
44
+ For detailed guides, API references, and comprehensive examples, visit our documentation site.
45
+
46
+ ### Quick Reference
47
+
48
+ The following documentation is also available in the `docs` directory:
49
+
50
+ - [Configuration Guide](docs/configuration.md) - Set up environment variables and customize Task Master
51
+ - [Tutorial](docs/tutorial.md) - Step-by-step guide to getting started with Task Master
52
+ - [Command Reference](docs/command-reference.md) - Complete list of all available commands
53
+ - [Task Structure](docs/task-structure.md) - Understanding the task format and features
54
+ - [Example Interactions](docs/examples.md) - Common Cursor AI interaction examples
55
+ - [Migration Guide](docs/migration-guide.md) - Guide to migrating to the new project structure
56
+
57
+ #### Quick Install for Cursor 1.0+ (One-Click)
58
+
59
+ [![Add task-master-ai MCP server to Cursor](https://cursor.com/deeplink/mcp-install-dark.svg)](https://cursor.com/en/install-mcp?name=task-master-ai&config=eyJjb21tYW5kIjoibnB4IC15IC0tcGFja2FnZT10YXNrLW1hc3Rlci1haSB0YXNrLW1hc3Rlci1haSIsImVudiI6eyJBTlRIUk9QSUNfQVBJX0tFWSI6IllPVVJfQU5USFJPUElDX0FQSV9LRVlfSEVSRSIsIlBFUlBMRVhJVFlfQVBJX0tFWSI6IllPVVJfUEVSUExFWElUWV9BUElfS0VZX0hFUkUiLCJPUEVOQUlfQVBJX0tFWSI6IllPVVJfT1BFTkFJX0tFWV9IRVJFIiwiR09PR0xFX0FQSV9LRVkiOiJZT1VSX0dPT0dMRV9LRVlfSEVSRSIsIk1JU1RSQUxfQVBJX0tFWSI6IllPVVJfTUlTVFJBTF9LRVlfSEVSRSIsIkdST1FfQVBJX0tFWSI6IllPVVJfR1JPUV9LRVlfSEVSRSIsIk9QRU5ST1VURVJfQVBJX0tFWSI6IllPVVJfT1BFTlJPVVRFUl9LRVlfSEVSRSIsIlhBSV9BUElfS0VZIjoiWU9VUl9YQUlfS0VZX0hFUkUiLCJBWlVSRV9PUEVOQUlfQVBJX0tFWSI6IllPVVJfQVpVUkVfS0VZX0hFUkUiLCJPTExBTUFfQVBJX0tFWSI6IllPVVJfT0xMQU1BX0FQSV9LRVlfSEVSRSJ9fQ%3D%3D)
60
+
61
+ > **Note:** After clicking the link, you'll still need to add your API keys to the configuration. The link installs the MCP server with placeholder keys that you'll need to replace with your actual API keys.
62
+
63
+ #### Claude Code Quick Install
64
+
65
+ For Claude Code users:
66
+
67
+ ```bash
68
+ claude mcp add taskmaster-ai -- npx -y task-master-ai
69
+ ```
70
+
71
+ Don't forget to add your API keys to the configuration:
72
+ - in the root `.env` file of your project
73
+ - in the "env" section of your mcp config for taskmaster-ai
74
+
75
+
76
+ ## Requirements
77
+
78
+ Taskmaster utilizes AI across several commands, and those require a separate API key. You can use a variety of models from different AI providers provided you add your API keys. For example, if you want to use Claude 3.7, you'll need an Anthropic API key.
79
+
80
+ You can define 3 types of models to be used: the main model, the research model, and the fallback model (in case either the main or research fail). Whatever model you use, its provider API key must be present in either mcp.json or .env.
81
+
82
+ At least one (1) of the following is required:
83
+
84
+ - Anthropic API key (Claude API)
85
+ - OpenAI API key
86
+ - Google Gemini API key
87
+ - Perplexity API key (for research model)
88
+ - xAI API Key (for research or main model)
89
+ - OpenRouter API Key (for research or main model)
90
+ - Claude Code (no API key required - requires Claude Code CLI)
91
+ - Codex CLI (OAuth via ChatGPT subscription - requires Codex CLI)
92
+
93
+ Using the research model is optional but highly recommended. You will need at least ONE API key (unless using Claude Code or Codex CLI with OAuth). Adding all API keys enables you to seamlessly switch between model providers at will.
94
+
95
+ ## Quick Start
96
+
97
+ ### Option 1: MCP (Recommended)
98
+
99
+ MCP (Model Context Protocol) lets you run Task Master directly from your editor.
100
+
101
+ #### 1. Add your MCP config at the following path depending on your editor
102
+
103
+ | Editor | Scope | Linux/macOS Path | Windows Path | Key |
104
+ | ------------ | ------- | ------------------------------------- | ------------------------------------------------- | ------------ |
105
+ | **Cursor** | Global | `~/.cursor/mcp.json` | `%USERPROFILE%\.cursor\mcp.json` | `mcpServers` |
106
+ | | Project | `<project_folder>/.cursor/mcp.json` | `<project_folder>\.cursor\mcp.json` | `mcpServers` |
107
+ | **Windsurf** | Global | `~/.codeium/windsurf/mcp_config.json` | `%USERPROFILE%\.codeium\windsurf\mcp_config.json` | `mcpServers` |
108
+ | **VS Code** | Project | `<project_folder>/.vscode/mcp.json` | `<project_folder>\.vscode\mcp.json` | `servers` |
109
+ | **Q CLI** | Global | `~/.aws/amazonq/mcp.json` | | `mcpServers` |
110
+
111
+ ##### Manual Configuration
112
+
113
+ ###### Cursor & Windsurf & Q Developer CLI (`mcpServers`)
114
+
115
+ ```json
116
+ {
117
+ "mcpServers": {
118
+ "task-master-ai": {
119
+ "command": "npx",
120
+ "args": ["-y", "task-master-ai"],
121
+ "env": {
122
+ // "TASK_MASTER_TOOLS": "all", // Options: "all", "standard", "core", or comma-separated list of tools
123
+ "ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
124
+ "PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
125
+ "OPENAI_API_KEY": "YOUR_OPENAI_KEY_HERE",
126
+ "GOOGLE_API_KEY": "YOUR_GOOGLE_KEY_HERE",
127
+ "MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE",
128
+ "GROQ_API_KEY": "YOUR_GROQ_KEY_HERE",
129
+ "OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE",
130
+ "XAI_API_KEY": "YOUR_XAI_KEY_HERE",
131
+ "AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE",
132
+ "OLLAMA_API_KEY": "YOUR_OLLAMA_API_KEY_HERE"
133
+ }
134
+ }
135
+ }
136
+ }
137
+ ```
138
+
139
+ > 🔑 Replace `YOUR_…_KEY_HERE` with your real API keys. You can remove keys you don't use.
140
+
141
+ > **Note**: If you see `0 tools enabled` in the MCP settings, restart your editor and check that your API keys are correctly configured.
142
+
143
+ ###### VS Code (`servers` + `type`)
144
+
145
+ ```json
146
+ {
147
+ "servers": {
148
+ "task-master-ai": {
149
+ "command": "npx",
150
+ "args": ["-y", "task-master-ai"],
151
+ "env": {
152
+ // "TASK_MASTER_TOOLS": "all", // Options: "all", "standard", "core", or comma-separated list of tools
153
+ "ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
154
+ "PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
155
+ "OPENAI_API_KEY": "YOUR_OPENAI_KEY_HERE",
156
+ "GOOGLE_API_KEY": "YOUR_GOOGLE_KEY_HERE",
157
+ "MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE",
158
+ "GROQ_API_KEY": "YOUR_GROQ_KEY_HERE",
159
+ "OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE",
160
+ "XAI_API_KEY": "YOUR_XAI_KEY_HERE",
161
+ "AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE",
162
+ "OLLAMA_API_KEY": "YOUR_OLLAMA_API_KEY_HERE"
163
+ },
164
+ "type": "stdio"
165
+ }
166
+ }
167
+ }
168
+ ```
169
+
170
+ > 🔑 Replace `YOUR_…_KEY_HERE` with your real API keys. You can remove keys you don't use.
171
+
172
+ #### 2. (Cursor-only) Enable Taskmaster MCP
173
+
174
+ Open Cursor Settings (Ctrl+Shift+J) ➡ Click on MCP tab on the left ➡ Enable task-master-ai with the toggle
175
+
176
+ #### 3. (Optional) Configure the models you want to use
177
+
178
+ In your editor's AI chat pane, say:
179
+
180
+ ```txt
181
+ Change the main, research and fallback models to <model_name>, <model_name> and <model_name> respectively.
182
+ ```
183
+
184
+ For example, to use Claude Code (no API key required):
185
+ ```txt
186
+ Change the main model to claude-code/sonnet
187
+ ```
188
+
189
+ [Table of available models](docs/models.md) | [Claude Code setup](docs/examples/claude-code-usage.md)
190
+
191
+ #### 4. Initialize Task Master
192
+
193
+ In your editor's AI chat pane, say:
194
+
195
+ ```txt
196
+ Initialize taskmaster-ai in my project
197
+ ```
198
+
199
+ #### 5. Make sure you have a PRD (Recommended)
200
+
201
+ For **new projects**: Create your PRD at `.taskmaster/docs/prd.txt`.
202
+ For **existing projects**: You can use `scripts/prd.txt` or migrate with `task-master migrate`.
203
+
204
+ An example PRD template is available after initialization in `.taskmaster/templates/example_prd.txt`.
205
+
206
+ > [!NOTE]
207
+ > While a PRD is recommended for complex projects, you can always create individual tasks by asking "Can you help me implement [description of what you want to do]?" in chat.
208
+
209
+ **Always start with a detailed PRD.**
210
+
211
+ The more detailed your PRD, the better the generated tasks will be.
212
+
213
+ #### 6. Common Commands
214
+
215
+ Use your AI assistant to:
216
+
217
+ - Parse requirements: `Can you parse my PRD at scripts/prd.txt?`
218
+ - Plan next step: `What's the next task I should work on?`
219
+ - Implement a task: `Can you help me implement task 3?`
220
+ - View multiple tasks: `Can you show me tasks 1, 3, and 5?`
221
+ - Expand a task: `Can you help me expand task 4?`
222
+ - **Research fresh information**: `Research the latest best practices for implementing JWT authentication with Node.js`
223
+ - **Research with context**: `Research React Query v5 migration strategies for our current API implementation in src/api.js`
224
+
225
+ [More examples on how to use Task Master in chat](docs/examples.md)
226
+
227
+ ### Option 2: Using Command Line
228
+
229
+ #### Installation
230
+
231
+ ```bash
232
+ # Install globally
233
+ npm install -g task-master-ai
234
+
235
+ # OR install locally within your project
236
+ npm install task-master-ai
237
+ ```
238
+
239
+ #### Initialize a new project
240
+
241
+ ```bash
242
+ # If installed globally
243
+ task-master init
244
+
245
+ # If installed locally
246
+ npx task-master init
247
+
248
+ # Initialize project with specific rules
249
+ task-master init --rules cursor,windsurf,vscode
250
+ ```
251
+
252
+ This will prompt you for project details and set up a new project with the necessary files and structure.
253
+
254
+ #### Common Commands
255
+
256
+ ```bash
257
+ # Initialize a new project
258
+ task-master init
259
+
260
+ # Parse a PRD and generate tasks
261
+ task-master parse-prd your-prd.txt
262
+
263
+ # List all tasks
264
+ task-master list
265
+
266
+ # Show the next task to work on
267
+ task-master next
268
+
269
+ # Show specific task(s) - supports comma-separated IDs
270
+ task-master show 1,3,5
271
+
272
+ # Research fresh information with project context
273
+ task-master research "What are the latest best practices for JWT authentication?"
274
+
275
+ # Move tasks between tags (cross-tag movement)
276
+ task-master move --from=5 --from-tag=backlog --to-tag=in-progress
277
+ task-master move --from=5,6,7 --from-tag=backlog --to-tag=done --with-dependencies
278
+ task-master move --from=5 --from-tag=backlog --to-tag=in-progress --ignore-dependencies
279
+
280
+ # Add rules after initialization
281
+ task-master rules add windsurf,roo,vscode
282
+ ```
283
+
284
+ ## Tool Loading Configuration
285
+
286
+ ### Optimizing MCP Tool Loading
287
+
288
+ Task Master's MCP server supports selective tool loading to reduce context window usage. By default, all 36 tools are loaded (~21,000 tokens) to maintain backward compatibility with existing installations.
289
+
290
+ You can optimize performance by configuring the `TASK_MASTER_TOOLS` environment variable:
291
+
292
+ ### Available Modes
293
+
294
+ | Mode | Tools | Context Usage | Use Case |
295
+ |------|-------|--------------|----------|
296
+ | `all` (default) | 36 | ~21,000 tokens | Complete feature set - all tools available |
297
+ | `standard` | 15 | ~10,000 tokens | Common task management operations |
298
+ | `core` (or `lean`) | 7 | ~5,000 tokens | Essential daily development workflow |
299
+ | `custom` | Variable | Variable | Comma-separated list of specific tools |
300
+
301
+ ### Configuration Methods
302
+
303
+ #### Method 1: Environment Variable in MCP Configuration
304
+
305
+ Add `TASK_MASTER_TOOLS` to your MCP configuration file's `env` section:
306
+
307
+ ```jsonc
308
+ {
309
+ "mcpServers": { // or "servers" for VS Code
310
+ "task-master-ai": {
311
+ "command": "npx",
312
+ "args": ["-y", "task-master-ai"],
313
+ "env": {
314
+ "TASK_MASTER_TOOLS": "standard", // Options: "all", "standard", "core", "lean", or comma-separated list
315
+ "ANTHROPIC_API_KEY": "your-key-here",
316
+ // ... other API keys
317
+ }
318
+ }
319
+ }
320
+ }
321
+ ```
322
+
323
+ #### Method 2: Claude Code CLI (One-Time Setup)
324
+
325
+ For Claude Code users, you can set the mode during installation:
326
+
327
+ ```bash
328
+ # Core mode example (~70% token reduction)
329
+ claude mcp add task-master-ai --scope user \
330
+ --env TASK_MASTER_TOOLS="core" \
331
+ -- npx -y task-master-ai@latest
332
+
333
+ # Custom tools example
334
+ claude mcp add task-master-ai --scope user \
335
+ --env TASK_MASTER_TOOLS="get_tasks,next_task,set_task_status" \
336
+ -- npx -y task-master-ai@latest
337
+ ```
338
+
339
+ ### Tool Sets Details
340
+
341
+ **Core Tools (7):** `get_tasks`, `next_task`, `get_task`, `set_task_status`, `update_subtask`, `parse_prd`, `expand_task`
342
+
343
+ **Standard Tools (15):** All core tools plus `initialize_project`, `analyze_project_complexity`, `expand_all`, `add_subtask`, `remove_task`, `generate`, `add_task`, `complexity_report`
344
+
345
+ **All Tools (36):** Complete set including project setup, task management, analysis, dependencies, tags, research, and more
346
+
347
+ ### Recommendations
348
+
349
+ - **New users**: Start with `"standard"` mode for a good balance
350
+ - **Large projects**: Use `"core"` mode to minimize token usage
351
+ - **Complex workflows**: Use `"all"` mode or custom selection
352
+ - **Backward compatibility**: If not specified, defaults to `"all"` mode
353
+
354
+ ## Claude Code Support
355
+
356
+ Task Master now supports Claude models through the Claude Code CLI, which requires no API key:
357
+
358
+ - **Models**: `claude-code/opus` and `claude-code/sonnet`
359
+ - **Requirements**: Claude Code CLI installed
360
+ - **Benefits**: No API key needed, uses your local Claude instance
361
+
362
+ [Learn more about Claude Code setup](docs/examples/claude-code-usage.md)
363
+
364
+ ## Troubleshooting
365
+
366
+ ### If `task-master init` doesn't respond
367
+
368
+ Try running it with Node directly:
369
+
370
+ ```bash
371
+ node node_modules/claude-task-master/scripts/init.js
372
+ ```
373
+
374
+ Or clone the repository and run:
375
+
376
+ ```bash
377
+ git clone https://github.com/eyaltoledano/claude-task-master.git
378
+ cd claude-task-master
379
+ node scripts/init.js
380
+ ```
381
+
382
+ ## Join Our Team
383
+
384
+ <a href="https://tryhamster.com" target="_blank">
385
+ <img src="./images/hamster-hiring.png" alt="Join Hamster's founding team" />
386
+ </a>
387
+
388
+ ## Contributors
389
+
390
+ <a href="https://github.com/eyaltoledano/claude-task-master/graphs/contributors">
391
+ <img src="https://contrib.rocks/image?repo=eyaltoledano/claude-task-master" alt="Task Master project contributors" />
392
+ </a>
393
+
394
+ ## Star History
395
+
396
+ [![Star History Chart](https://api.star-history.com/svg?repos=eyaltoledano/claude-task-master&type=Timeline)](https://www.star-history.com/#eyaltoledano/claude-task-master&Timeline)
397
+
398
+ ## Licensing
399
+
400
+ Task Master is licensed under the MIT License with Commons Clause. This means you can:
401
+
402
+ ✅ **Allowed**:
403
+
404
+ - Use Task Master for any purpose (personal, commercial, academic)
405
+ - Modify the code
406
+ - Distribute copies
407
+ - Create and sell products built using Task Master
408
+
409
+ ❌ **Not Allowed**:
410
+
411
+ - Sell Task Master itself
412
+ - Offer Task Master as a hosted service
413
+ - Create competing products based on Task Master
414
+
415
+ See the [LICENSE](LICENSE) file for the complete license text and [licensing details](docs/licensing.md) for more information.
@@ -0,0 +1,7 @@
1
+ import{A as e,C as t,D as n,Et as r,F as i,H as a,I as o,J as s,M as c,N as l,P as u,S as d,T as f,_ as p,a as m,b as h,d as g,ht as ee,j as _,l as v,o as te,p as y,s as ne,ut as b,v as x,yt as S}from"./config-manager-cjltSxIS.js";import{n as C,t as w}from"./sentry-CBAZ4LSk.js";import{createRequire as T}from"node:module";import{promises as E}from"fs";import{join as D}from"path";import{homedir as O}from"os";import{execSync as k,spawn as A}from"child_process";import*as j from"ai";import{jsonrepair as M}from"jsonrepair";import{EnvHttpProxyAgent as N}from"undici";import{createAnthropic as P}from"@ai-sdk/anthropic";import{createPerplexity as F}from"@ai-sdk/perplexity";import{createGoogleGenerativeAI as re}from"@ai-sdk/google";import{createOpenAI as ie}from"@ai-sdk/openai";import{createXai as ae}from"@ai-sdk/xai";import{createGroq as oe}from"@ai-sdk/groq";import{createOpenRouter as se}from"@openrouter/ai-sdk-provider";import{createOllama as ce}from"ollama-ai-provider-v2";import{createAmazonBedrock as le}from"@ai-sdk/amazon-bedrock";import{fromNodeProviderChain as ue}from"@aws-sdk/credential-providers";import{createAzure as de}from"@ai-sdk/azure";import{createVertex as fe}from"@ai-sdk/google-vertex";import{createClaudeCode as pe}from"ai-sdk-provider-claude-code";import{createGeminiProvider as me}from"ai-sdk-provider-gemini-cli";import{APICallError as I,LoadAPIKeyError as he,NoSuchModelError as L}from"@ai-sdk/provider";import{generateId as R}from"@ai-sdk/provider-utils";import{parse as ge}from"jsonc-parser";import{createCodexCli as _e}from"ai-sdk-provider-codex-cli";import{createOpenAICompatible as ve}from"@ai-sdk/openai-compatible";var ye=Object.defineProperty,be=e=>{let t={};for(var n in e)ye(t,n,{get:e[n],enumerable:!0});return t},z=T(import.meta.url);let B=null;var V=class e{constructor(){this._providers=new Map,this._initialized=!1}static getInstance(){return B||=new e,B}initialize(){return this._initialized||=!0,this}registerProvider(e,t,n={}){if(!e||typeof 
e!=`string`)throw Error(`Provider name must be a non-empty string`);if(!t)throw Error(`Provider instance is required`);if(typeof t.generateText!=`function`||typeof t.streamText!=`function`||typeof t.generateObject!=`function`)throw Error(`Provider must implement BaseAIProvider interface`);return this._providers.set(e,{instance:t,options:n,registeredAt:new Date}),this}hasProvider(e){return this._providers.has(e)}getProvider(e){let t=this._providers.get(e);return t?t.instance:null}getAllProviders(){return new Map(this._providers)}unregisterProvider(e){return this._providers.has(e)?(this._providers.delete(e),!0):!1}reset(){this._providers.clear(),this._initialized=!1}};V.getInstance().initialize();var H=V;const{JSONParseError:xe,NoObjectGeneratedError:Se,generateObject:Ce,generateText:we,streamObject:Te,streamText:Ee,zodSchema:De}=j,U=j.jsonSchema,Oe=new Set([`minimum`,`maximum`,`exclusiveMinimum`,`exclusiveMaximum`]),ke=[`additionalProperties`,`contains`,`if`,`then`,`else`,`not`,`propertyNames`],Ae=[`allOf`,`anyOf`,`oneOf`,`prefixItems`],je=[`definitions`,`$defs`,`dependentSchemas`,`patternProperties`,`properties`],Me=e=>e?Array.isArray(e)?e.includes(`integer`):e===`integer`:!1,W=e=>{if(!e||typeof e!=`object`)return e;if(Array.isArray(e))return e.map(W);let t={...e};if(Me(t.type))for(let e of Oe)e in t&&delete t[e];for(let e of ke)t[e]&&(t[e]=W(t[e]));for(let e of Ae)Array.isArray(t[e])&&(t[e]=t[e].map(W));for(let e of je)if(t[e]&&typeof t[e]==`object`){let n={};for(let[r,i]of Object.entries(t[e]))n[r]=W(i);t[e]=n}return t.items&&=W(t.items),t},G=e=>{let t=De(e);if(!t||typeof t!=`object`||!t.jsonSchema)return t;let n=W(t.jsonSchema);return typeof U==`function`?U(n,{validate:t.validate}):{...t,jsonSchema:n}};var K=class e{constructor(){if(this.constructor===e)throw Error(`BaseAIProvider cannot be instantiated directly`);this.name=this.constructor.name,this._proxyAgent=null,this.needsExplicitJsonSchema=!1,this.supportsTemperature=!0}validateAuth(e){if(!e.apiKey)throw 
Error(`${this.name} API key is required`)}createProxyFetch(){this._projectRoot||=b();let e=this._projectRoot;if(a(null,e))return this._proxyAgent||=new N,(e,t={})=>fetch(e,{...t,dispatcher:this._proxyAgent})}validateParams(e){if(this.validateAuth(e),!e.modelId)throw Error(`${this.name} Model ID is required`);this.validateOptionalParams(e)}validateOptionalParams(e){if(e.temperature!==void 0&&(e.temperature<0||e.temperature>1))throw Error(`Temperature must be between 0 and 1`);if(e.maxTokens!==void 0){let t=Number(e.maxTokens);if(!Number.isFinite(t)||t<=0)throw Error(`maxTokens must be a finite number greater than 0`)}}validateMessages(e){if(!e||!Array.isArray(e)||e.length===0)throw Error(`Invalid or empty messages array provided`);for(let t of e)if(!t.role||!t.content)throw Error(`Invalid message format. Each message must have role and content`)}handleError(e,t){let n=t.message||`Unknown error occurred`;throw S(`error`,`${this.name} ${e} failed: ${n}`,{error:t}),Error(`${this.name} API error during ${e}: ${n}`)}getClient(e){throw Error(`getClient must be implemented by provider`)}isRequiredApiKey(){return!0}getRequiredApiKeyName(){throw Error(`getRequiredApiKeyName must be implemented by provider`)}prepareTokenParam(e,t){return t===void 0?{}:{maxOutputTokens:Math.floor(Number(t))}}async generateText(e){try{this.validateParams(e),this.validateMessages(e.messages),S(`debug`,`Generating ${this.name} text with model: ${e.modelId}`);let t=await this.getClient(e),n=e.commandName||`unknown`,r=w(`${this.name}.${e.modelId}.${n}.generateText`,{command:n,outputType:e.outputType,tag:e.tag,projectHash:C(e.projectRoot),userId:e.userId,briefId:e.briefId}),i=await we({model:t(e.modelId),messages:e.messages,...this.prepareTokenParam(e.modelId,e.maxTokens),...this.supportsTemperature&&e.temperature!==void 0?{temperature:e.temperature}:{},...r&&{experimental_telemetry:r}});S(`debug`,`${this.name} generateText completed successfully for model: ${e.modelId}`);let 
a=i.usage?.inputTokens??i.usage?.promptTokens??0,o=i.usage?.outputTokens??i.usage?.completionTokens??0,s=i.usage?.totalTokens??a+o;return{text:i.text,usage:{inputTokens:a,outputTokens:o,totalTokens:s}}}catch(e){this.handleError(`text generation`,e)}}async streamText(e){try{this.validateParams(e),this.validateMessages(e.messages),S(`debug`,`Streaming ${this.name} text with model: ${e.modelId}`);let t=await this.getClient(e),n=e.commandName||`unknown`,r=w(`${this.name}.${e.modelId}.${n}.streamText`,{command:n,outputType:e.outputType,tag:e.tag,projectHash:C(e.projectRoot),userId:e.userId,briefId:e.briefId}),i=await Ee({model:t(e.modelId),messages:e.messages,...this.prepareTokenParam(e.modelId,e.maxTokens),...this.supportsTemperature&&e.temperature!==void 0?{temperature:e.temperature}:{},...r&&{experimental_telemetry:r},...e.experimental_transform&&{experimental_transform:e.experimental_transform}});return S(`debug`,`${this.name} streamText initiated successfully for model: ${e.modelId}`),i}catch(e){this.handleError(`text streaming`,e)}}async streamObject(e){try{if(this.validateParams(e),this.validateMessages(e.messages),!e.schema)throw Error(`Schema is required for object streaming`);S(`debug`,`Streaming ${this.name} object with model: ${e.modelId}`);let t=await this.getClient(e),n=e.commandName||`unknown`,r=w(`${this.name}.${e.modelId}.${n}.streamObject`,{command:n,outputType:e.outputType,tag:e.tag,projectHash:C(e.projectRoot),userId:e.userId,briefId:e.briefId}),i=G(e.schema),a=await Te({model:t(e.modelId),messages:e.messages,schema:i,mode:e.mode||`auto`,maxOutputTokens:e.maxTokens,...this.supportsTemperature&&e.temperature!==void 0?{temperature:e.temperature}:{},...r&&{experimental_telemetry:r}});return S(`debug`,`${this.name} streamObject initiated successfully for model: ${e.modelId}`),a}catch(e){this.handleError(`object streaming`,e)}}async generateObject(e){try{if(this.validateParams(e),this.validateMessages(e.messages),!e.schema)throw Error(`Schema is required 
for object generation`);if(!e.objectName)throw Error(`Object name is required for object generation`);S(`debug`,`Generating ${this.name} object ('${e.objectName}') with model: ${e.modelId}`);let t=await this.getClient(e),n=e.commandName||`unknown`,r=w(`${this.name}.${e.modelId}.${n}.generateObject.${e.objectName}`,{command:n,outputType:e.outputType,tag:e.tag,projectHash:C(e.projectRoot),userId:e.userId,briefId:e.briefId}),i=G(e.schema),a=await Ce({model:t(e.modelId),messages:e.messages,schema:i,mode:this.needsExplicitJsonSchema?`json`:`auto`,schemaName:e.objectName,schemaDescription:`Generate a valid JSON object for ${e.objectName}`,maxTokens:e.maxTokens,...this.supportsTemperature&&e.temperature!==void 0?{temperature:e.temperature}:{},...r&&{experimental_telemetry:r}});S(`debug`,`${this.name} generateObject completed successfully for model: ${e.modelId}`);let o=a.usage?.inputTokens??a.usage?.promptTokens??0,s=a.usage?.outputTokens??a.usage?.completionTokens??0,c=a.usage?.totalTokens??o+s;return{object:a.object,usage:{inputTokens:o,outputTokens:s,totalTokens:c}}}catch(e){if(Se.isInstance(e)&&e.cause instanceof xe&&e.cause.text){S(`warn`,`${this.name} generated malformed JSON, attempting to repair...`);try{let t=M(e.cause.text),n=JSON.parse(t);return S(`info`,`Successfully repaired ${this.name} JSON output`),{object:n,usage:{inputTokens:e.usage?.promptTokens||e.usage?.inputTokens||0,outputTokens:e.usage?.completionTokens||e.usage?.outputTokens||0,totalTokens:e.usage?.totalTokens||0}}}catch(e){S(`error`,`Failed to repair ${this.name} JSON: ${e.message}`)}}this.handleError(`object generation`,e)}}},Ne=class extends K{constructor(){super(),this.name=`Anthropic`}getRequiredApiKeyName(){return`ANTHROPIC_API_KEY`}getClient(e){try{let{apiKey:t,baseURL:n}=e,r=this.createProxyFetch();return P({apiKey:t,...n&&{baseURL:n},headers:{"anthropic-beta":`output-128k-2025-02-19`},...r&&{fetch:r}})}catch(e){this.handleError(`client initialization`,e)}}},Pe=class extends 
K{constructor(){super(),this.name=`Perplexity`}getRequiredApiKeyName(){return`PERPLEXITY_API_KEY`}getClient(e){try{let{apiKey:t,baseURL:n}=e,r=this.createProxyFetch();return F({apiKey:t,baseURL:n||`https://api.perplexity.ai`,...r&&{fetch:r}})}catch(e){this.handleError(`client initialization`,e)}}async generateObject(e){return super.generateObject({...e,mode:`json`})}},Fe=class extends K{constructor(){super(),this.name=`Google`}getRequiredApiKeyName(){return`GOOGLE_API_KEY`}getClient(e){try{let{apiKey:t,baseURL:n}=e,r=this.createProxyFetch();return re({apiKey:t,...n&&{baseURL:n},...r&&{fetch:r}})}catch(e){this.handleError(`client initialization`,e)}}},Ie=class extends K{constructor(){super(),this.name=`OpenAI`}getRequiredApiKeyName(){return`OPENAI_API_KEY`}getClient(e){try{let{apiKey:t,baseURL:n}=e,r=this.createProxyFetch();return ie({apiKey:t,...n&&{baseURL:n},...r&&{fetch:r}})}catch(e){this.handleError(`client initialization`,e)}}},Le=class extends K{constructor(){super(),this.name=`xAI`}getRequiredApiKeyName(){return`XAI_API_KEY`}getClient(e){try{let{apiKey:t,baseURL:n}=e;return ae({apiKey:t,baseURL:n||`https://api.x.ai/v1`})}catch(e){this.handleError(`client initialization`,e)}}},Re=class extends K{constructor(){super(),this.name=`Groq`}getRequiredApiKeyName(){return`GROQ_API_KEY`}getClient(e){try{let{apiKey:t,baseURL:n}=e;return oe({apiKey:t,...n&&{baseURL:n}})}catch(e){this.handleError(`client initialization`,e)}}},ze=class extends K{constructor(){super(),this.name=`OpenRouter`}getRequiredApiKeyName(){return`OPENROUTER_API_KEY`}getClient(e){try{let{apiKey:t,baseURL:n}=e;return se({apiKey:t,...n&&{baseURL:n}})}catch(e){this.handleError(`client initialization`,e)}}},Be=class extends K{constructor(){super(),this.name=`Ollama`}validateAuth(e){}getClient(e){try{let{baseURL:t}=e;return ce({...t&&{baseURL:t}})}catch(e){this.handleError(`client initialization`,e)}}isRequiredApiKey(){return!1}getRequiredApiKeyName(){return`OLLAMA_API_KEY`}},Ve=class extends 
K{constructor(){super(),this.name=`Bedrock`}isRequiredApiKey(){return!1}getRequiredApiKeyName(){return`AWS_ACCESS_KEY_ID`}validateAuth(e){}getClient(e){try{let e=ue(),t=this.createProxyFetch();return le({credentialProvider:e,...t&&{fetch:t}})}catch(e){this.handleError(`client initialization`,e)}}},He=class extends K{constructor(){super(),this.name=`Azure OpenAI`}getRequiredApiKeyName(){return`AZURE_OPENAI_API_KEY`}validateAuth(e){if(!e.apiKey)throw Error(`Azure API key is required`);if(!e.baseURL)throw Error(`Azure endpoint URL is required. Set it in .taskmasterconfig global.azureBaseURL or models.[role].baseURL`)}normalizeBaseURL(e){if(!e)return e;try{let t=new URL(e),n=t.pathname.replace(/\/+$/,``);return n.endsWith(`/openai`)||(n=`${n}/openai`),t.pathname=n,t.toString()}catch{let t=e.replace(/\/+$/,``);return t.endsWith(`/openai`)?t:`${t}/openai`}}getClient(e){try{let{apiKey:t,baseURL:n}=e,r=this.normalizeBaseURL(n),i=this.createProxyFetch();return de({apiKey:t,baseURL:r,...i&&{fetch:i}})}catch(e){this.handleError(`client initialization`,e)}}},q=class extends Error{constructor(e){super(e),this.name=`VertexAuthError`,this.code=`vertex_auth_error`}},J=class extends Error{constructor(e){super(e),this.name=`VertexConfigError`,this.code=`vertex_config_error`}},Ue=class extends Error{constructor(e,t){super(e),this.name=`VertexApiError`,this.code=`vertex_api_error`,this.statusCode=t}},We=class extends K{constructor(){super(),this.name=`Google Vertex AI`}getRequiredApiKeyName(){return`GOOGLE_API_KEY`}isRequiredApiKey(){return!1}isAuthenticationRequired(){return!0}isValidCredential(e){return e?typeof e==`string`?e.trim().length>0:typeof e==`object`:!1}validateAuth(e){let{apiKey:t,projectId:n,location:r,credentials:i}=e,a=this.isValidCredential(t),o=this.isValidCredential(i);if(!a&&!o)throw new q(`Vertex AI requires authentication. Provide one of the following:
2
+ • GOOGLE_API_KEY environment variable (typical for API-based auth), OR
3
+ • GOOGLE_APPLICATION_CREDENTIALS pointing to a service account JSON file (recommended for production)`);if(!n||typeof n==`string`&&n.trim().length===0)throw new J(`Google Cloud project ID is required for Vertex AI. Set VERTEX_PROJECT_ID environment variable.`);if(!r||typeof r==`string`&&r.trim().length===0)throw new J(`Google Cloud location is required for Vertex AI. Set VERTEX_LOCATION environment variable (e.g., "us-central1").`)}getClient(e){try{let{apiKey:t,projectId:n,location:r,credentials:i,baseURL:a}=e,o=this.createProxyFetch(),s={};return t?s.googleAuthOptions={...i,apiKey:t}:i&&(s.googleAuthOptions=i),fe({...s,project:n,location:r,...a&&{baseURL:a},...o&&{fetch:o}})}catch(e){this.handleError(`client initialization`,e)}}handleError(e,t){if(S(`error`,`Vertex AI ${e} error:`,t),t.name===`VertexAuthError`||t.name===`VertexConfigError`||t.name===`VertexApiError`)throw t;if(t.response){let e=t.response.status,n=t.response.data?.error?.message||t.message;throw e===401||e===403?new q(`Authentication failed: ${n}`):e===400?new J(`Invalid request: ${n}`):new Ue(`API error (${e}): ${n}`,e)}throw Error(`Vertex AI ${e} failed: ${t.message}`)}};let Y=!1;var Ge=class extends K{constructor(){super(),this.name=`Claude Code`,this.supportedModels=l(`claude-code`),this.supportedModels.length===0&&S(`warn`,`No supported models found for claude-code provider. Check supported-models.json configuration.`),this.needsExplicitJsonSchema=!0,this.supportsTemperature=!1}getRequiredApiKeyName(){return`CLAUDE_CODE_API_KEY`}isRequiredApiKey(){return!1}validateAuth(e){if(process.env.NODE_ENV!==`test`&&!Y&&!process.env.CLAUDE_CODE_OAUTH_TOKEN)try{k(`claude --version`,{stdio:`pipe`,timeout:1e3})}catch{S(`warn`,`Claude Code CLI not detected. 
Install it with: npm install -g @anthropic-ai/claude-code`)}finally{Y=!0}}getClient(e={}){try{let t=v(e.commandName)||{},n=process.env.ANTHROPIC_API_KEY,r=process.env.CLAUDE_CODE_API_KEY;try{return r?process.env.ANTHROPIC_API_KEY=r:n&&delete process.env.ANTHROPIC_API_KEY,pe({defaultSettings:{systemPrompt:{type:`preset`,preset:`claude_code`},settingSources:[`user`,`project`,`local`],...t}})}finally{n?process.env.ANTHROPIC_API_KEY=n:delete process.env.ANTHROPIC_API_KEY}}catch(e){let t=String(e?.message||``);if(e?.code===`ENOENT`||/claude/i.test(t)){let t=Error(`Claude Code CLI not available. Please install Claude Code CLI first. Original error: ${e.message}`);t.cause=e,this.handleError(`Claude Code CLI initialization`,t)}else this.handleError(`client initialization`,e)}}getSupportedModels(){return this.supportedModels}isModelSupported(e){return e?this.supportedModels.includes(String(e).toLowerCase()):!1}},Ke=class extends K{constructor(){super(),this.name=`Gemini CLI`,this.supportsTemperature=!1}validateAuth(e){}async getClient(e){try{let t={};return t=e.apiKey&&e.apiKey!==`gemini-cli-no-key-required`?{authType:`api-key`,apiKey:e.apiKey}:{authType:`oauth-personal`},e.baseURL&&(t.baseURL=e.baseURL),me(t)}catch(e){this.handleError(`client initialization`,e)}}getRequiredApiKeyName(){return`GEMINI_API_KEY`}isRequiredApiKey(){return!1}};function X({message:e,code:t,exitCode:n,stderr:r,stdout:i,promptExcerpt:a,isRetryable:o=!1}){return new I({message:e,isRetryable:o,url:`grok-cli://command`,requestBodyValues:a?{prompt:a}:void 0,data:{code:t,exitCode:n,stderr:r,stdout:i,promptExcerpt:a}})}function Z({message:e}){return new he({message:e||`Authentication failed. 
Please ensure Grok CLI is properly configured with API key.`})}function qe({message:e,promptExcerpt:t,timeoutMs:n}){return new I({message:e,isRetryable:!0,url:`grok-cli://command`,requestBodyValues:t?{prompt:t}:void 0,data:{code:`TIMEOUT`,promptExcerpt:t,timeoutMs:n}})}function Je({message:e}){return new I({message:e||`Grok CLI is not installed or not found in PATH. Please install with: npm install -g @vibe-kit/grok-cli`,isRetryable:!1,url:`grok-cli://installation`,requestBodyValues:void 0})}function Ye(e){let t=e.trim(),n=/```(?:json)?\s*([\s\S]*?)\s*```/i.exec(t);n&&(t=n[1]);let r=/^\s*(?:const|let|var)\s+\w+\s*=\s*([\s\S]*)/i.exec(t);r&&(t=r[1],t.trim().endsWith(`;`)&&(t=t.trim().slice(0,-1)));let i=t.indexOf(`{`),a=t.indexOf(`[`);if(i===-1&&a===-1)return e;let o=a===-1?i:i===-1?a:Math.min(i,a);t=t.slice(o);let s=e=>{let t=[];try{let n=ge(e,t,{allowTrailingComma:!0});if(t.length===0)return JSON.stringify(n,null,2)}catch{}},c=s(t);if(c!==void 0)return c;let l=t[0],u=l===`{`?`}`:`]`,d=[],f=0,p=!1,m=!1;for(let e=0;e<t.length;e++){let n=t[e];if(m){m=!1;continue}if(n===`\\`){m=!0;continue}if(n===`"`&&!p){p=!0;continue}if(n===`"`&&p){p=!1;continue}p||(n===l?f++:n===u&&(f--,f===0&&d.push(e+1)))}for(let e=d.length-1;e>=0;e--){let n=s(t.slice(0,d[e]));if(n!==void 0)return n}let h=Math.max(0,t.length-1e3);for(let e=t.length-1;e>h;e--){let n=s(t.slice(0,e));if(n!==void 0)return n}return e}function Xe(e){return e.map(e=>{let t=``;return typeof e.content==`string`?t=e.content:Array.isArray(e.content)?t=e.content.filter(e=>e.type===`text`).map(e=>e.text||``).join(`
4
+ `):e.content&&typeof e.content==`object`&&(t=e.content.text||JSON.stringify(e.content)),{role:e.role,content:t.trim()}})}function Ze(e){try{let t=e.trim().split(`
5
+ `).filter(e=>e.trim()),n=[];for(let e of t)try{let t=JSON.parse(e);n.push(t)}catch{continue}let r=n.filter(e=>e.role===`assistant`).pop();return r&&r.content?{text:r.content,usage:r.usage?{promptTokens:r.usage.prompt_tokens||0,completionTokens:r.usage.completion_tokens||0,totalTokens:r.usage.total_tokens||0}:void 0}:{text:e.trim(),usage:void 0}}catch{return{text:e.trim(),usage:void 0}}}function Qe(e){return Xe(e).map(e=>{switch(e.role){case`system`:return`System: ${e.content}`;case`user`:return`User: ${e.content}`;case`assistant`:return`Assistant: ${e.content}`;default:return`${e.role}: ${e.content}`}}).join(`
6
+
7
+ `)}function $e(e){return typeof e!=`string`&&(e=String(e)),`'`+e.replace(/'/g,`'\\''`)+`'`}var et=class{specificationVersion=`v2`;defaultObjectGenerationMode=`json`;supportsImageUrls=!1;supportsStructuredOutputs=!1;supportedUrls={};modelId;settings;constructor(e){if(this.modelId=e.id,this.settings=e.settings??{},!this.modelId||typeof this.modelId!=`string`||this.modelId.trim()===``)throw new L({modelId:this.modelId,modelType:`languageModel`})}get provider(){return`grok-cli`}async checkGrokCliInstallation(){return new Promise(e=>{let t=A(`grok`,[`--version`],{stdio:`pipe`});t.on(`error`,()=>e(!1)),t.on(`exit`,t=>e(t===0))})}async getApiKey(){if(this.settings.apiKey)return this.settings.apiKey;if(process.env.GROK_CLI_API_KEY)return process.env.GROK_CLI_API_KEY;try{let e=D(O(),`.grok`,`user-settings.json`),t=await E.readFile(e,`utf8`);return JSON.parse(t).apiKey||null}catch{return null}}async executeGrokCli(e,t={}){let n=12e4;this.modelId.includes(`grok-4`)&&(n=6e5);let r=t.timeout??this.settings.timeout??n;return new Promise((n,i)=>{let a=A(`grok`,e,{stdio:`pipe`,cwd:this.settings.workingDirectory||process.cwd(),env:t.apiKey===void 0?process.env:{...process.env,GROK_CLI_API_KEY:t.apiKey}}),o=``,s=``,c;r>0&&(c=setTimeout(()=>{a.kill(`SIGTERM`),i(qe({message:`Grok CLI command timed out after ${r}ms`,timeoutMs:r,promptExcerpt:e.join(` `).substring(0,200)}))},r)),a.stdout?.on(`data`,e=>{let t=e.toString();o+=t}),a.stderr?.on(`data`,e=>{let t=e.toString();s+=t}),a.on(`error`,e=>{c&&clearTimeout(c),e.code===`ENOENT`?i(Je({})):i(X({message:`Failed to execute Grok CLI: ${e.message}`,code:e.code,stderr:e.message,isRetryable:!1}))}),a.on(`exit`,e=>{c&&clearTimeout(c),n({stdout:o.trim(),stderr:s.trim(),exitCode:e||0})})})}generateAllWarnings(e,t){let n=[],r=[];if(e.temperature!==void 0&&r.push(`temperature`),e.topP!==void 0&&r.push(`topP`),e.topK!==void 0&&r.push(`topK`),e.presencePenalty!==void 0&&r.push(`presencePenalty`),e.frequencyPenalty!==void 
0&&r.push(`frequencyPenalty`),e.stopSequences!==void 0&&e.stopSequences.length>0&&r.push(`stopSequences`),e.seed!==void 0&&r.push(`seed`),r.length>0)for(let e of r)n.push({type:`unsupported-setting`,setting:e,details:`Grok CLI does not support the ${e} parameter. It will be ignored.`});return(!this.modelId||this.modelId.trim()===``)&&n.push({type:`other`,message:`Model ID is empty or invalid`}),(!t||t.trim()===``)&&n.push({type:`other`,message:`Prompt is empty`}),n}async doGenerate(e){if(e.abortSignal?.aborted)throw e.abortSignal.reason||Error(`Request aborted`);if(!await this.checkGrokCliInstallation())throw Je({});let t=await this.getApiKey();if(!t)throw Z({message:`Grok CLI API key not found. Set GROK_CLI_API_KEY environment variable or configure grok-cli.`});let n=Qe(e.prompt),r=this.generateAllWarnings(e,n),i=[`--prompt`,$e(n)];this.modelId&&this.modelId!==`default`&&i.push(`--model`,this.modelId),this.settings.baseURL&&i.push(`--base-url`,this.settings.baseURL),this.settings.workingDirectory&&i.push(`--directory`,this.settings.workingDirectory);try{let a=await this.executeGrokCli(i,{apiKey:t});if(a.exitCode!==0)throw a.stderr.toLowerCase().includes(`unauthorized`)||a.stderr.toLowerCase().includes(`authentication`)?Z({message:`Grok CLI authentication failed: ${a.stderr}`}):X({message:`Grok CLI failed with exit code ${a.exitCode}: ${a.stderr||`Unknown error`}`,exitCode:a.exitCode,stderr:a.stderr,stdout:a.stdout,promptExcerpt:n.substring(0,200),isRetryable:!1});let o=Ze(a.stdout),s=o.text||``;return(e=>!!e&&typeof e==`object`&&`mode`in e&&e.mode?.type===`object-json`)(e)&&s&&(s=Ye(s)),{content:[{type:`text`,text:s||``}],usage:o.usage?{inputTokens:o.usage.promptTokens,outputTokens:o.usage.completionTokens,totalTokens:o.usage.totalTokens}:{inputTokens:0,outputTokens:0,totalTokens:0},finishReason:`stop`,rawCall:{rawPrompt:n,rawSettings:i},warnings:r,response:{id:R(),timestamp:new 
Date,modelId:this.modelId},request:{body:n},providerMetadata:{"grok-cli":{exitCode:a.exitCode,...a.stderr&&{stderr:a.stderr}}}}}catch(e){throw e.name===`APICallError`||e.name===`LoadAPIKeyError`?e:X({message:`Grok CLI execution failed: ${e.message}`,code:e.code,promptExcerpt:n.substring(0,200),isRetryable:!1})}}async doStream(e){let t=Qe(e.prompt),n=this.generateAllWarnings(e,t);return{stream:new ReadableStream({start:async t=>{let r;try{if(e.abortSignal?.aborted)throw e.abortSignal.reason||Error(`Request aborted`);e.abortSignal&&(r=()=>{t.enqueue({type:`error`,error:e.abortSignal?.reason||Error(`Request aborted`)}),t.close()},e.abortSignal.addEventListener(`abort`,r,{once:!0})),t.enqueue({type:`stream-start`,warnings:n});let i=await this.doGenerate(e);t.enqueue({type:`response-metadata`,id:i.response.id,timestamp:i.response.timestamp,modelId:i.response.modelId});let a=i.content||[],o=a.length>0&&a[0].type===`text`?a[0].text:``,s;o.length>0&&(s=R(),t.enqueue({type:`text-start`,id:s}));for(let n=0;n<o.length;n+=50){if(e.abortSignal?.aborted)throw e.abortSignal.reason||Error(`Request aborted`);let r=o.slice(n,n+50);t.enqueue({type:`text-delta`,id:s,delta:r}),await new Promise(e=>setTimeout(e,20))}s&&t.enqueue({type:`text-end`,id:s}),t.enqueue({type:`finish`,finishReason:i.finishReason,usage:i.usage,providerMetadata:i.providerMetadata}),t.close()}catch(e){t.enqueue({type:`error`,error:e}),t.close()}finally{e.abortSignal&&r&&e.abortSignal.removeEventListener(`abort`,r)}},cancel:()=>{}}),request:{body:t}}}};function tt(e={}){let t=(t,n={})=>new et({id:t,settings:{...e.defaultSettings,...n}}),n=function(e,n){if(new.target)throw Error(`The Grok CLI model function cannot be called with the new keyword.`);return t(e,n)};return n.languageModel=t,n.chat=t,n.textEmbeddingModel=e=>{throw new L({modelId:e,modelType:`textEmbeddingModel`})},n.imageModel=e=>{throw new L({modelId:e,modelType:`imageModel`})},n}tt();var nt=class extends K{constructor(){super(),this.name=`Grok 
CLI`,this.needsExplicitJsonSchema=!0,this.supportsTemperature=!1}getRequiredApiKeyName(){return`GROK_CLI_API_KEY`}isRequiredApiKey(){return!1}validateAuth(e){}getClient(e){try{let{apiKey:t,baseURL:n,workingDirectory:r,timeout:i,commandName:a}=e,o=h(a);return tt({defaultSettings:{apiKey:t,baseURL:n,workingDirectory:r||o.workingDirectory,timeout:i||o.timeout,defaultModel:o.defaultModel}})}catch(e){this.handleError(`client initialization`,e)}}};const rt={"gpt-5.1":[`none`,`low`,`medium`,`high`],"gpt-5.1-codex-max":[`none`,`low`,`medium`,`high`,`xhigh`],"gpt-5.2":[`none`,`low`,`medium`,`high`,`xhigh`],"gpt-5.3-codex":[`none`,`low`,`medium`,`high`,`xhigh`],"gpt-5.2-pro":[`medium`,`high`,`xhigh`],"gpt-5":[`none`,`low`,`medium`,`high`,`xhigh`]},it=[`none`,`low`,`medium`,`high`],at=[`none`,`low`,`medium`,`high`,`xhigh`];var ot=class extends K{constructor(){super(),this.name=`Codex CLI`,this.needsExplicitJsonSchema=!1,this.supportsTemperature=!1,this.supportedModels=l(`codex-cli`),this.supportedModels.length===0&&S(`warn`,`No supported models found for codex-cli provider. Check supported-models.json configuration.`),this._codexCliChecked=!1,this._codexCliAvailable=null}isRequiredApiKey(){return!1}getRequiredApiKeyName(){return`OPENAI_CODEX_API_KEY`}validateAuth(){if(process.env.NODE_ENV!==`test`&&!this._codexCliChecked)try{k(`codex --version`,{stdio:`pipe`,timeout:1e3}),this._codexCliAvailable=!0}catch{this._codexCliAvailable=!1,S(`warn`,`Codex CLI not detected. Install with: npm i -g @openai/codex or enable fallback with allowNpx.`)}finally{this._codexCliChecked=!0}}_getValidatedReasoningEffort(e,t){let n=rt[e]||it,r=n.reduce((e,t)=>at.indexOf(t)>at.indexOf(e)?t:e,n[0]);return t?n.includes(t)?t:(S(`warn`,`Reasoning effort '${t}' not supported by ${e}. Using '${r}' instead.`),r):(S(`debug`,`No reasoning effort specified for ${e}. 
Using '${r}'.`),r)}getClient(e={}){try{let t=g(e.commandName)||{},n=this._getValidatedReasoningEffort(e.modelId,t.reasoningEffort);return _e({defaultSettings:{...t,reasoningEffort:n,...e.apiKey?{env:{...t.env||{},OPENAI_API_KEY:e.apiKey}}:{}}})}catch(e){let t=String(e?.message||``);if(e?.code===`ENOENT`||/codex/i.test(t)){let t=Error(`Codex CLI not available. Please install Codex CLI first. Original error: ${e.message}`);t.cause=e,this.handleError(`Codex CLI initialization`,t)}else this.handleError(`client initialization`,e)}}},Q=class extends K{constructor(e){if(super(),!e.name)throw Error(`Provider name is required`);if(!e.apiKeyEnvVar)throw Error(`API key environment variable name is required`);this.name=e.name,this.apiKeyEnvVar=e.apiKeyEnvVar,this.requiresApiKey=e.requiresApiKey!==!1,this.defaultBaseURL=e.defaultBaseURL,this.getBaseURLFromParams=e.getBaseURL,this.supportsStructuredOutputs=e.supportsStructuredOutputs}getRequiredApiKeyName(){return this.apiKeyEnvVar}isRequiredApiKey(){return this.requiresApiKey}validateAuth(e){if(this.requiresApiKey&&!e.apiKey)throw Error(`${this.name} API key is required`)}getBaseURL(e){return e.baseURL?e.baseURL:this.getBaseURLFromParams?this.getBaseURLFromParams(e):this.defaultBaseURL}getClient(e){try{let{apiKey:t}=e,n=this.createProxyFetch(),r=this.getBaseURL(e),i={name:this.name.toLowerCase().replace(/[^a-z0-9]/g,`-`)};return this.requiresApiKey&&t&&(i.apiKey=t),r&&(i.baseURL=r),this.supportsStructuredOutputs!==void 0&&(i.supportsStructuredOutputs=this.supportsStructuredOutputs),n&&(i.fetch=n),ve(i)}catch(e){this.handleError(`client initialization`,e)}}},st=class extends Q{constructor(){super({name:`Z.ai`,apiKeyEnvVar:`ZAI_API_KEY`,requiresApiKey:!0,defaultBaseURL:`https://api.z.ai/api/paas/v4/`,supportsStructuredOutputs:!0})}prepareTokenParam(){return{}}findArrayPropertyInSchema(e){try{let t=e._zod.def;if(!(t?.type===`object`||t?.typeName===`ZodObject`))return null;let n=t.shape;if(typeof n==`function`&&(n=n()),!n||typeof 
n!=`object`)return null;for(let[e,t]of Object.entries(n)){let n=t._zod.def;if(n?.type===`array`||n?.typeName===`ZodArray`)return e}return null}catch(e){return console.warn(`Failed to introspect Zod schema:`,e.message),null}}async generateObject(e){let t=await super.generateObject(e);if(Array.isArray(t.object)){let n=this.findArrayPropertyInSchema(e.schema);return n?{...t,object:{[n]:t.object}}:(console.warn(`GLM returned a bare array for '${e.objectName}' but could not determine wrapper property from schema. Using objectName as fallback.`),{...t,object:{[e.objectName]:t.object}})}return t}},ct=class extends st{constructor(){super(),this.name=`Z.ai (Coding Plan)`,this.defaultBaseURL=`https://api.z.ai/api/coding/paas/v4/`}},lt=class extends Q{constructor(){super({name:`LM Studio`,apiKeyEnvVar:`LMSTUDIO_API_KEY`,requiresApiKey:!1,defaultBaseURL:`http://localhost:1234/v1`,supportsStructuredOutputs:!0})}};const ut={anthropic:new Ne,perplexity:new Pe,google:new Fe,zai:new st,"zai-coding":new ct,lmstudio:new lt,openai:new Ie,xai:new Le,groq:new Re,openrouter:new ze,ollama:new Be,"openai-compatible":new Q({name:`OpenAI Compatible`,apiKeyEnvVar:`OPENAI_COMPATIBLE_API_KEY`,requiresApiKey:!0}),bedrock:new Ve,azure:new He,vertex:new We,"claude-code":new Ge,"codex-cli":new ot,"gemini-cli":new Ke,"grok-cli":new nt};function dt(e){if(ut[e])return ut[e];let t=H.getInstance();return t.hasProvider(e)?(S(`debug`,`Provider "${e}" found in dynamic registry`),t.getProvider(e)):null}function ft(e,t){let n={inputCost:0,outputCost:0,currency:`USD`,isUnknown:!1};if(!s||!s[e])return S(`warn`,`Provider "${e}" not found in MODEL_MAP. Cannot determine cost for model ${t}.`),{...n,isUnknown:!0};let r=s[e].find(e=>e.id===t);if(!r)return S(`debug`,`Model "${t}" not found under provider "${e}". Assuming unknown cost.`),{...n,isUnknown:!0};if(r.cost_per_1m_tokens===null)return S(`debug`,`Cost data is null for model "${t}" under provider "${e}". 
Pricing unknown.`),{...n,isUnknown:!0};if(r.cost_per_1m_tokens===void 0)return S(`debug`,`Cost data not found for model "${t}" under provider "${e}". Pricing unknown.`),{...n,isUnknown:!0};let i=r.cost_per_1m_tokens;return{inputCost:i.input||0,outputCost:i.output||0,currency:i.currency||`USD`,isUnknown:!1}}function pt(e,t,n,r){let i=(e||0)/1e6*n+(t||0)/1e6*r;return parseFloat(i.toFixed(6))}function mt(e){let t={currentTag:`master`,availableTags:[`master`]};try{return e?{currentTag:ee(e)||`master`,availableTags:ht(e)}:t}catch(e){return y()&&S(`debug`,`Error getting tag information: ${e.message}`),t}}function ht(e){let t=[`master`];try{let n=z(`path`),r=z(`fs`),i=n.join(e,`.taskmaster`,`tasks`,`tasks.json`);if(!r.existsSync(i))return t;let a=JSON.parse(r.readFileSync(i,`utf8`));if(!a||typeof a!=`object`)return t;let o=Object.keys(a).filter(e=>gt(a[e]));return o.length>0?o:t}catch(e){return y()&&S(`debug`,`Could not read tasks file for available tags: ${e.message}`),t}}function gt(e){return e&&typeof e==`object`&&Array.isArray(e.tasks)}function _t(e){let t=e.message?.toLowerCase()||``;return t.includes(`rate limit`)||t.includes(`overloaded`)||t.includes(`service temporarily unavailable`)||t.includes(`timeout`)||t.includes(`network error`)||e.status===429||e.status>=500}function vt(e){try{if(e?.data?.error?.message)return e.data.error.message;if(e?.error?.message)return e.error.message;if(typeof e?.responseBody==`string`)try{let t=JSON.parse(e.responseBody);if(t?.error?.message)return t.error.message}catch{}return typeof e?.message==`string`&&e.message?e.message:typeof e==`string`?e:`An unknown AI service error occurred.`}catch{return`Failed to extract error message.`}}function yt(n,r){return{main:{provider:t(r),modelId:d(r)},research:{provider:_(r),modelId:e(r)},fallback:{provider:x(r),modelId:p(r)}}[n]||null}function bt(e,t){let 
n=o(e)||r(`VERTEX_PROJECT_ID`,t,e),a=i(e)||r(`VERTEX_LOCATION`,t,e)||`us-central1`,s=r(`GOOGLE_APPLICATION_CREDENTIALS`,t,e);S(`debug`,`Using Vertex AI configuration: Project ID=${n}, Location=${a}`);let c=s?{keyFile:s}:void 0;return{projectId:n,location:a,...c&&{credentials:c}}}function xt(e,t,n=null){let i=dt(e);if(!i)throw Error(`Unknown provider '${e}' for API key resolution.`);let a=i.getRequiredApiKeyName();if(a===null)return null;let o=r(a,t,n);if(!i.isRequiredApiKey())return o||null;if(!o)throw Error(`Required API key ${a} for provider '${e}' is not set in environment, session, or .env file.`);return o}async function St(e,t,n,r,i,a){let o=0,s=t;for(;o<=2;)try{y()&&S(`info`,`Attempt ${o+1}/3 calling ${s} (Provider: ${r}, Model: ${i}, Role: ${a})`);let c=await e[t](n);return y()&&S(`info`,`${s} succeeded for role ${a} (Provider: ${r}) on attempt ${o+1}`),c}catch(e){if(S(`warn`,`Attempt ${o+1} failed for role ${a} (${s} / ${r}): ${e.message}`),_t(e)&&o<2){o++;let e=1e3*2**(o-1);S(`info`,`Something went wrong on the provider side. Retrying in ${e/1e3}s...`),await new Promise(t=>setTimeout(t,e))}else throw S(`error`,`Something went wrong on the provider side. Max retries reached for role ${a} (${s} / ${r}).`),e}throw Error(`Exhausted all retries for role ${a} (${s} / ${r})`)}async function $(e,t){let{role:r,session:i,projectRoot:a,systemPrompt:o,prompt:s,schema:l,objectName:d,commandName:p,outputType:h,experimental_transform:g,...ee}=t;y()&&S(`info`,`${e}Service called`,{role:r,commandName:p,outputType:h,projectRoot:a});let _=a||b(),v=u(_),x=i?.user?.id||i?.userId,C=i?.context?.briefId||i?.briefId,w;r===`main`?w=[`main`,`fallback`,`research`]:r===`research`?w=[`research`,`fallback`,`main`]:r===`fallback`?w=[`fallback`,`main`,`research`]:(S(`warn`,`Unknown initial role: ${r}. 
Defaulting to main -> fallback -> research sequence.`),w=[`main`,`fallback`,`research`]);let T=null,E=`AI service call failed for all configured roles.`;for(let t of w){let r,u,y,b,w,D,O,k=null;try{S(`debug`,`New AI service call with role: ${t}`);let E=yt(t,_);if(!E){S(`error`,`Unknown role encountered in _unifiedServiceRunner: ${t}`),T||=Error(`Unknown AI role specified: ${t}`);continue}if(r=E.provider,u=E.modelId,!r||!u){S(`warn`,`Skipping role '${t}': Provider or Model ID not configured.`),T||=Error(`Configuration missing for role '${t}'. Provider: ${r}, Model: ${u}`);continue}if(w=dt(r?.toLowerCase()),!w){S(`warn`,`Skipping role '${t}': Provider '${r}' not supported.`),T||=Error(`Unsupported provider configured: ${r}`);continue}D=te(t,_),r?.toLowerCase()===`azure`&&!D?(D=m(_),S(`debug`,`Using global Azure base URL: ${D}`)):r?.toLowerCase()===`ollama`&&!D?(D=f(_),S(`debug`,`Using global Ollama base URL: ${D}`)):r?.toLowerCase()===`bedrock`&&!D&&(D=ne(_),S(`debug`,`Using global Bedrock base URL: ${D}`)),b=n(t,_),y=xt(r?.toLowerCase(),i,_);let A={};r?.toLowerCase()===`vertex`&&(A=bt(_,i));let j=[],M=`${o} \n\n Always respond in ${c(_)}.`;if(j.push({role:`system`,content:M.trim()}),s)j.push({role:`user`,content:s});else throw Error(`User prompt content is missing.`);let N={apiKey:y,modelId:u,maxTokens:b.maxTokens,temperature:b.temperature,messages:j,...D&&{baseURL:D},...(e===`generateObject`||e===`streamObject`)&&{schema:l,objectName:d},...p&&{commandName:p},...h&&{outputType:h},...a&&{projectRoot:a},...x&&{userId:x},...C&&{briefId:C},...g&&{experimental_transform:g},...A,...ee};if(O=await St(w,e,N,r,u,t),v&&O&&O.usage)try{k=await Dt({userId:v,commandName:p,providerName:r,modelId:u,inputTokens:O.usage.inputTokens,outputTokens:O.usage.outputTokens,outputType:h})}catch{}else v&&O&&!O.usage&&S(`warn`,`Cannot log telemetry for ${p} (${r}/${u}): AI result missing 'usage' data. 
(May be expected for streams)`);let P;e===`generateText`?P=O.text:e===`generateObject`?P=O.object:(e===`streamText`||e===`streamObject`||S(`error`,`Unknown serviceType in _unifiedServiceRunner: ${e}`),P=O);let F=mt(_);return{mainResult:P,telemetryData:k,tagInfo:F,providerName:r,modelId:u}}catch(n){let i=vt(n);if(S(`error`,`Service call failed for role ${t} (Provider: ${r||`unknown`}, Model: ${u||`unknown`}): ${i}`),T=n,E=i,e===`generateObject`){let e=i.toLowerCase();if(e.includes(`no endpoints found that support tool use`)||e.includes(`does not support tool_use`)||e.includes(`tool use is not supported`)||e.includes(`tools are not supported`)||e.includes(`function calling is not supported`)||e.includes(`tool use is not supported`)){let e=`Model '${u||`unknown`}' via provider '${r||`unknown`}' does not support the 'tool use' required by generateObjectService. Please configure a model that supports tool/function calling for the '${t}' role, or use generateTextService if structured output is not strictly required.`;throw S(`error`,`[Tool Support Error] ${e}`),Error(e)}}}}throw S(`error`,`All roles in the sequence [${w.join(`, `)}] failed.`),Error(E)}async function Ct(e){return $(`generateText`,{outputType:`cli`,...e})}async function wt(e){return $(`streamText`,{outputType:`cli`,...e})}async function Tt(e){let t={outputType:`cli`,...e};if(!t.schema)throw Error(`streamObjectService requires a schema parameter`);return $(`streamObject`,t)}async function Et(e){return $(`generateObject`,{objectName:`generated_object`,maxRetries:3,outputType:`cli`,...e})}async function Dt({userId:e,commandName:t,providerName:n,modelId:r,inputTokens:i,outputTokens:a,outputType:o}){try{let o=new Date().toISOString(),s=(i||0)+(a||0),{inputCost:c,outputCost:l,currency:u,isUnknown:d}=ft(n,r),f=pt(i,a,c,l),p={timestamp:o,userId:e,commandName:t,modelUsed:r,providerName:n,inputTokens:i||0,outputTokens:a||0,totalTokens:s,totalCost:f,currency:u,isUnknownCost:d};return y()&&S(`info`,`AI Usage 
Telemetry:`,p),p}catch(e){return S(`error`,`Failed to log AI usage telemetry: ${e.message}`,{error:e}),null}}export{wt as a,be as c,Tt as i,z as l,Ct as n,K as o,Dt as r,H as s,Et as t};
@@ -0,0 +1 @@
1
+ import{a as e,i as t,n,r,t as i}from"./ai-services-unified-BgdcS4fE.js";import"./config-manager-cjltSxIS.js";import"./git-utils-DllbRE35.js";import"./sentry-CBAZ4LSk.js";export{i as generateObjectService,n as generateTextService,r as logAiUsage,t as streamObjectService,e as streamTextService};