@hhsw2015/task-master-ai 0.43.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +4072 -0
- package/LICENSE +25 -0
- package/README-task-master.md +648 -0
- package/README.md +415 -0
- package/dist/ai-services-unified-BgdcS4fE.js +7 -0
- package/dist/ai-services-unified-DVAKOPK0.js +1 -0
- package/dist/assets/.windsurfrules +524 -0
- package/dist/assets/AGENTS.md +435 -0
- package/dist/assets/GEMINI.md +110 -0
- package/dist/assets/claude/TM_COMMANDS_GUIDE.md +147 -0
- package/dist/assets/config.json +34 -0
- package/dist/assets/env.example +12 -0
- package/dist/assets/example_prd.txt +47 -0
- package/dist/assets/example_prd_rpg.txt +511 -0
- package/dist/assets/gitignore +25 -0
- package/dist/assets/hamster-art.txt +49 -0
- package/dist/assets/kiro-hooks/tm-code-change-task-tracker.kiro.hook +23 -0
- package/dist/assets/kiro-hooks/tm-complexity-analyzer.kiro.hook +16 -0
- package/dist/assets/kiro-hooks/tm-daily-standup-assistant.kiro.hook +13 -0
- package/dist/assets/kiro-hooks/tm-git-commit-task-linker.kiro.hook +13 -0
- package/dist/assets/kiro-hooks/tm-pr-readiness-checker.kiro.hook +13 -0
- package/dist/assets/kiro-hooks/tm-task-dependency-auto-progression.kiro.hook +17 -0
- package/dist/assets/kiro-hooks/tm-test-success-task-completer.kiro.hook +23 -0
- package/dist/assets/roocode/.roo/rules-architect/architect-rules +93 -0
- package/dist/assets/roocode/.roo/rules-ask/ask-rules +89 -0
- package/dist/assets/roocode/.roo/rules-code/code-rules +61 -0
- package/dist/assets/roocode/.roo/rules-debug/debug-rules +68 -0
- package/dist/assets/roocode/.roo/rules-orchestrator/orchestrator-rules +181 -0
- package/dist/assets/roocode/.roo/rules-test/test-rules +61 -0
- package/dist/assets/roocode/.roomodes +63 -0
- package/dist/assets/rules/cursor_rules.mdc +53 -0
- package/dist/assets/rules/dev_workflow.mdc +424 -0
- package/dist/assets/rules/hamster.mdc +173 -0
- package/dist/assets/rules/self_improve.mdc +72 -0
- package/dist/assets/rules/taskmaster.mdc +573 -0
- package/dist/assets/rules/taskmaster_hooks_workflow.mdc +59 -0
- package/dist/assets/scripts_README.md +445 -0
- package/dist/commands-D7m4KWx1.js +329 -0
- package/dist/config-manager-CvbfYtIR.js +1 -0
- package/dist/config-manager-cjltSxIS.js +270 -0
- package/dist/dependency-manager-CyOxi5uo.js +1078 -0
- package/dist/git-utils-DllbRE35.js +1 -0
- package/dist/git-utils-PBP1PRVP.js +1 -0
- package/dist/mcp-server.js +44 -0
- package/dist/profiles-DcD-JxPM.js +3528 -0
- package/dist/research-DN4RyyJY.js +1 -0
- package/dist/response-language-C5AwQSfD.js +1 -0
- package/dist/response-language-LzM2RD6-.js +1 -0
- package/dist/sentry-CBAZ4LSk.js +1 -0
- package/dist/tag-management-6HOtYZMj.js +1 -0
- package/dist/task-manager-BtFURFe0.js +1 -0
- package/dist/task-master.js +2 -0
- package/dist/update-subtask-by-id-DiWMqGfw.js +1 -0
- package/dist/update-task-by-id-eyL-PNVX.js +1 -0
- package/dist/utils-CGk8TL6x.js +1 -0
- package/index.js +160 -0
- package/package.json +183 -0
|
@@ -0,0 +1 @@
|
|
|
1
|
+
// Git helper utilities for task-master: repository detection, current-branch
// lookup, and branch-name sanitization for tag generation.
//
// Export map (minified public names): i=isGitRepository, n=getCurrentBranch,
// o=sanitizeBranchNameForTag, a=isValidBranchForTag, r=insideGitWorkTree,
// t=checkAndAutoSwitchGitTagSync (no-op stub in this build).
import e from "fs";
import t from "path";
import { exec as n, execSync as r } from "child_process";
import { promisify as i } from "util";

// Promise-returning wrapper around child_process.exec.
const a = i(n);

/**
 * Determine whether `projectRoot` lives inside a git repository.
 * @param {string} projectRoot - Directory to check; required.
 * @returns {Promise<boolean>} true when `git rev-parse --git-dir` succeeds.
 * @throws {Error} if projectRoot is falsy.
 */
async function o(projectRoot) {
	if (!projectRoot) {
		throw Error(`projectRoot is required for isGitRepository`);
	}
	try {
		await a(`git rev-parse --git-dir`, { cwd: projectRoot });
		return true;
	} catch {
		// Not a git repository (or git is unavailable) — treat as false.
		return false;
	}
}

/**
 * Get the currently checked-out branch name for `projectRoot`.
 * Note: in detached-HEAD state git reports the literal string "HEAD".
 * @param {string} projectRoot - Directory to query; required.
 * @returns {Promise<string|null>} Trimmed branch name, or null on git failure.
 * @throws {Error} if projectRoot is falsy.
 */
async function s(projectRoot) {
	if (!projectRoot) {
		throw Error(`projectRoot is required for getCurrentBranch`);
	}
	try {
		const { stdout } = await a(`git rev-parse --abbrev-ref HEAD`, {
			cwd: projectRoot
		});
		return stdout.trim();
	} catch {
		return null;
	}
}

/**
 * Sanitize a branch name into a tag-safe slug.
 * Non [a-zA-Z0-9_-] characters become dashes, dash runs collapse, leading and
 * trailing dashes are trimmed, the result is lowercased and capped at 50 chars.
 * @param {string} branchName - Raw branch name.
 * @returns {string} Sanitized slug, or "unknown-branch" for invalid input.
 */
function c(branchName) {
	if (!branchName || typeof branchName !== "string") {
		return `unknown-branch`;
	}
	return branchName
		.replace(/[^a-zA-Z0-9_-]/g, `-`) // non-alphanumerics → dashes
		.replace(/^-+|-+$/g, ``) // trim leading/trailing dashes
		.replace(/-+/g, `-`) // collapse dash runs
		.toLowerCase()
		.substring(0, 50);
}

/**
 * Check whether a branch name may be used as a tag source.
 * Reserved branches (main/master/develop/dev) and detached HEAD are rejected,
 * as is any name that sanitizes to an empty or unknown slug.
 * @param {string} branchName - Raw branch name.
 * @returns {boolean} true when the branch is eligible for tag creation.
 */
function l(branchName) {
	if (!branchName || typeof branchName !== "string") {
		return false;
	}
	// BUGFIX: the probe is lowercased, so the reserved entry must be lowercase
	// too — the original uppercase "HEAD" could never match, letting a
	// detached-HEAD "branch" slip through.
	const reserved = [`main`, `master`, `develop`, `dev`, `head`];
	if (reserved.includes(branchName.toLowerCase())) {
		return false;
	}
	const slug = c(branchName);
	return slug.length > 0 && slug !== `unknown-branch`;
}

/**
 * Placeholder for git-tag auto-switching; intentionally a no-op in this build.
 * @param {*} _projectRoot - Unused.
 * @param {*} _options - Unused.
 */
function u(_projectRoot, _options) {}

/**
 * Synchronously test whether the current working directory is inside a git
 * work tree.
 * @returns {boolean} true when `git rev-parse --is-inside-work-tree` succeeds.
 */
function d() {
	try {
		r(`git rev-parse --is-inside-work-tree`, {
			stdio: `ignore`,
			cwd: process.cwd()
		});
		return true;
	} catch {
		return false;
	}
}

export { l as a, o as i, s as n, c as o, d as r, u as t };
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
// Public barrel for the git utilities bundle: re-exports the minified
// internal names from git-utils under their readable public API names.
// (checkAndAutoSwitchGitTagSync is a no-op stub in this build.)
export {
	t as checkAndAutoSwitchGitTagSync,
	n as getCurrentBranch,
	r as insideGitWorkTree,
	i as isGitRepository,
	a as isValidBranchForTag,
	o as sanitizeBranchNameForTag
} from "./git-utils-DllbRE35.js";
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
import{o as e,s as t}from"./ai-services-unified-BgdcS4fE.js";import{$t as n,An as r,Dt as i,Ht as a,Kt as o,Mt as s,Nt as c,Qt as l,Sn as u,Tn as d,Tt as f,fn as p,ht as m,in as h,ln as g,m as _,ot as v,pn as y,rn as b,st as x,tn as S,tt as C,vn as ee,vt as w,wn as te,wt as T,x as ne,zt as re}from"./config-manager-cjltSxIS.js";import"./git-utils-DllbRE35.js";import{r as ie}from"./sentry-CBAZ4LSk.js";import{$ as ae,A as oe,B as E,C as se,D as ce,F as le,G as ue,H as de,I as fe,J as pe,M as me,N as he,O as ge,P as _e,Q as ve,R as D,S as ye,U as be,V as xe,W as Se,X as Ce,Y as we,Z as Te,at as Ee,ct as De,dt as Oe,gt as ke,ht as Ae,i as je,it as Me,j as Ne,lt as Pe,n as Fe,pt as Ie,r as Le,rt as Re,st as ze,t as Be,ut as Ve,w as He,z as Ue}from"./dependency-manager-CyOxi5uo.js";import{t as We}from"./response-language-C5AwQSfD.js";import{a as Ge,c as Ke,f as qe,g as Je,h as Ye,l as O,m as k,p as Xe,v as A}from"./profiles-DcD-JxPM.js";import j from"node:path";import M from"chalk";import N from"fs";import P from"path";import Ze from"os";import F from"node:fs";import{z as I}from"zod";import{fileURLToPath as Qe}from"url";import L from"dotenv";import*as R from"@sentry/node";import{FastMCP as $e}from"fastmcp";const z={debug:0,info:1,warn:2,error:3,success:4},et=z[ne().toLowerCase()]??z.info;function B(e,...t){if(w())return;let n={debug:M.gray(`[DEBUG]`),info:M.blue(`[INFO]`),warn:M.yellow(`[WARN]`),error:M.red(`[ERROR]`),success:M.green(`[SUCCESS]`)};if(z[e]!==void 0&&z[e]>=et){let r=n[e]||``,i=t;try{switch(e){case`error`:i=t.map(e=>typeof e==`string`?M.red(e):e);break;case`warn`:i=t.map(e=>typeof e==`string`?M.yellow(e):e);break;case`success`:i=t.map(e=>typeof e==`string`?M.green(e):e);break;case`info`:i=t.map(e=>typeof e==`string`?M.blue(e):e);break;case`debug`:i=t.map(e=>typeof e==`string`?M.gray(e):e);break}}catch(e){console.error(`Internal Logger Error applying chalk color:`,e),i=t}console.error(r,...i)}}function tt(){let 
e=e=>(...t)=>B(e,...t);return{debug:e(`debug`),info:e(`info`),warn:e(`warn`),error:e(`error`),success:e(`success`),log:B}}var V=tt(),H=class extends Error{constructor(e,t={}){super(e),this.name=`MCPError`,this.code=t.code,this.cause=t.cause,this.mcpResponse=t.mcpResponse}},U=class extends H{constructor(e,t={}){super(e,t),this.name=`MCPSessionError`}},nt=class extends H{constructor(e,t={}){super(e,t),this.name=`MCPSamplingError`}};function W(e){if(e instanceof H)return e;let t=e.message||`Unknown MCP error`,n=e;return t.includes(`session`)||t.includes(`connection`)?new U(t,{cause:n,code:`SESSION_ERROR`}):t.includes(`sampling`)||t.includes(`timeout`)?new nt(t,{cause:n,code:`SAMPLING_ERROR`}):t.includes(`capabilities`)||t.includes(`not supported`)?new U(t,{cause:n,code:`CAPABILITY_ERROR`}):new H(t,{cause:n,code:`UNKNOWN_ERROR`})}function rt(e){let t=e.trim();t=t.replace(/^```json\s*/gm,``),t=t.replace(/^```\s*/gm,``),t=t.replace(/```\s*$/gm,``),t=t.replace(/^const\s+\w+\s*=\s*/,``),t=t.replace(/^let\s+\w+\s*=\s*/,``),t=t.replace(/^var\s+\w+\s*=\s*/,``),t=t.replace(/;?\s*$/,``),t=t.replace(/^.*?(?=\{|\[)/s,``),t.split(`
|
|
3
|
+
`);let n=-1,r=0,i=!1,a=!1;for(let e=0;e<t.length;e++){let o=t[e];if(a){a=!1;continue}if(o===`\\`){a=!0;continue}if(o===`"`&&!a){i=!i;continue}if(!i){if(o===`{`||o===`[`)r++;else if((o===`}`||o===`]`)&&(r--,r===0)){n=e;break}}}if(n>-1&&(t=t.substring(0,n+1)),n===-1){let e=t.match(/{[\s\S]*}/),n=t.match(/\[[\s\S]*\]/);e?t=e[0]:n&&(t=n[0])}try{return JSON.parse(t),t}catch{try{let e=t.replace(/([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g,`$1"$2":`).replace(/'/g,`"`).replace(/,\s*([}\]])/g,`$1`);return JSON.parse(e),e}catch{return e}}}function G(e){let t=[],n=``;for(let r of e)r.role===`system`?n=K(r.content):(r.role===`user`||r.role===`assistant`)&&t.push({role:r.role,content:{type:`text`,text:K(r.content)}});return{messages:t,systemPrompt:n}}function it(e){let t=``,n=null,r=`stop`,i=[];return typeof e==`string`?t=e:e.content?(t=K(e.content),n=e.usage,r=e.finishReason||`stop`):e.text?(t=e.text,n=e.usage,r=e.finishReason||`stop`):(t=JSON.stringify(e),i.push(`Unexpected MCP response format, used JSON fallback`)),{text:t,usage:n,finishReason:r,warnings:i}}function K(e){return typeof e==`string`?e:Array.isArray(e)?e.map(e=>typeof e==`string`?e:e.type===`text`&&e.text||e.text?e.text:``).filter(e=>e.length>0).join(` `):e&&typeof e==`object`&&(e.type===`text`&&e.text||e.text)?e.text:String(e||``)}function at(e,t=`result`){try{let n=q(e);return`
|
|
4
|
+
CRITICAL JSON GENERATION INSTRUCTIONS:
|
|
5
|
+
|
|
6
|
+
You must respond with ONLY valid JSON that matches this exact structure for "${t}":
|
|
7
|
+
|
|
8
|
+
${JSON.stringify(n,null,2)}
|
|
9
|
+
|
|
10
|
+
STRICT REQUIREMENTS:
|
|
11
|
+
1. Response must start with { and end with }
|
|
12
|
+
2. Use double quotes for all strings and property names
|
|
13
|
+
3. Do not include any text before or after the JSON
|
|
14
|
+
4. Do not wrap in markdown code blocks
|
|
15
|
+
5. Do not include explanations or comments
|
|
16
|
+
6. Follow the exact property names and types shown above
|
|
17
|
+
7. All required fields must be present
|
|
18
|
+
|
|
19
|
+
Begin your response immediately with the opening brace {`}catch{return`
|
|
20
|
+
CRITICAL JSON GENERATION INSTRUCTIONS:
|
|
21
|
+
|
|
22
|
+
You must respond with ONLY valid JSON for "${t}".
|
|
23
|
+
|
|
24
|
+
STRICT REQUIREMENTS:
|
|
25
|
+
1. Response must start with { and end with }
|
|
26
|
+
2. Use double quotes for all strings and property names
|
|
27
|
+
3. Do not include any text before or after the JSON
|
|
28
|
+
4. Do not wrap in markdown code blocks
|
|
29
|
+
5. Do not include explanations or comments
|
|
30
|
+
|
|
31
|
+
Begin your response immediately with the opening brace {`}}function q(e){if(!e||e._def===void 0)return{};let t=e._def;switch(t.typeName){case`ZodObject`:let e={},n=t.shape();for(let[t,r]of Object.entries(n))e[t]=q(r);return e;case`ZodString`:if(t.checks){let e=t.checks.find(e=>e.kind===`min`),n=t.checks.find(e=>e.kind===`max`);if(e&&n)return`<string between `+e.value+`-`+n.value+` characters>`;if(e)return`<string with at least `+e.value+` characters>`;if(n)return`<string up to `+n.value+` characters>`}return`<string>`;case`ZodNumber`:if(t.checks){let e=t.checks.find(e=>e.kind===`int`),n=t.checks.find(e=>e.kind===`min`),r=t.checks.find(e=>e.kind===`max`);if(e&&n&&n.value>0)return`<positive integer>`;if(e)return`<integer>`;if(n||r)return`<number`+(n?` >= `+n.value:``)+(r?` <= `+r.value:``)+`>`}return`<number>`;case`ZodBoolean`:return`<boolean>`;case`ZodArray`:return[q(t.type)];case`ZodOptional`:return q(t.innerType);case`ZodNullable`:return q(t.innerType);case`ZodEnum`:return t.values[0]||`enum_value`;case`ZodLiteral`:return t.value;case`ZodUnion`:return t.options&&t.options.length>0?q(t.options[0]):`union_value`;case`ZodRecord`:return{key:q(t.valueType)};default:return`<${t.typeName||`unknown`}>`}}function ot(e,t){let n=[...e],r=n.findIndex(e=>e.role===`system`);if(r>=0){let e=n[r].content;n[r]={...n[r],content:e+`
|
|
32
|
+
|
|
33
|
+
`+t}}else n.unshift({role:`system`,content:t});return n}var st=class{specificationVersion=`v1`;defaultObjectGenerationMode=`json`;supportsImageUrls=!1;supportsStructuredOutputs=!0;constructor(e){this.session=e.session,this.modelId=e.modelId,this.settings=e.settings||{},this.provider=`mcp-ai-sdk`,this.maxTokens=this.settings.maxTokens,this.temperature=this.settings.temperature,this.validateSession()}validateSession(){if(!this.session?.clientCapabilities?.sampling)throw new H(`MCP session must have client sampling capabilities`)}async doGenerate(e){try{let{messages:t,systemPrompt:n}=G(e.prompt),r=await this.session.requestSampling({messages:t,systemPrompt:n,temperature:this.settings.temperature,maxTokens:this.settings.maxTokens,includeContext:`thisServer`},{timeout:24e4}),i=it(r);return{text:i.text,finishReason:i.finishReason||`stop`,usage:{promptTokens:i.usage?.inputTokens||0,completionTokens:i.usage?.outputTokens||0,totalTokens:(i.usage?.inputTokens||0)+(i.usage?.outputTokens||0)},rawResponse:r,warnings:i.warnings}}catch(e){throw W(e)}}async doGenerateObject(e){try{let{schema:t,mode:n=`json`,...r}=e;if(!t)throw new H(`Schema is required for object generation`);let i=at(t,r.objectName||`generated_object`),{messages:a,systemPrompt:o}=G(ot(e.prompt,i)),s=await this.session.requestSampling({messages:a,systemPrompt:o,temperature:this.settings.temperature,maxTokens:this.settings.maxTokens,includeContext:`thisServer`},{timeout:24e4}),c=it(s),l=rt(c.text),u;try{u=JSON.parse(l)}catch(e){throw new H(`Failed to parse JSON response: ${e.message}. Response: ${c.text.substring(0,200)}...`)}try{return{object:t.parse(u),finishReason:c.finishReason||`stop`,usage:{promptTokens:c.usage?.inputTokens||0,completionTokens:c.usage?.outputTokens||0,totalTokens:(c.usage?.inputTokens||0)+(c.usage?.outputTokens||0)},rawResponse:s,warnings:c.warnings}}catch(e){throw new H(`Generated object does not match schema: ${e.message}. 
Generated: ${JSON.stringify(u,null,2)}`)}}catch(e){throw W(e)}}async doStream(e){try{let t=await this.doGenerate(e);return this.simulateStreaming(t)}catch(e){throw W(e)}}async*simulateStreaming(e){let t=e.text,n=Math.max(1,Math.floor(t.length/10));for(let e=0;e<t.length;e+=n){let r=t.slice(e,e+n);e+n,t.length,yield{type:`text-delta`,textDelta:r},await new Promise(e=>setTimeout(e,50))}yield{type:`finish`,finishReason:e.finishReason,usage:e.usage}}};function ct(e={}){if(!e.session)throw Error(`MCP provider requires session object`);let t=function(t,n={}){if(new.target)throw Error(`The MCP model function cannot be called with the new keyword.`);return new st({session:e.session,modelId:t||`claude-3-5-sonnet-20241022`,settings:{temperature:n.temperature,maxTokens:n.maxTokens,...e.defaultSettings,...n}})};return t.languageModel=(e,n)=>t(e,n),t.chat=(e,n)=>t(e,n),t}var lt=class extends e{constructor(){super(),this.name=`mcp`,this.session=null}getRequiredApiKeyName(){return`MCP_API_KEY`}isRequiredApiKey(){return!1}validateAuth(e){if(!this.session)throw Error(`MCP Provider requires active MCP session`);if(!this.session.clientCapabilities?.sampling)throw Error(`MCP session must have client sampling capabilities`)}getClient(e){try{return ct({session:this.session,defaultSettings:{temperature:e.temperature,maxTokens:e.maxTokens}})}catch(e){this.handleError(`client initialization`,e)}}setSession(e){this.session=e,e?this.logger?.debug(`Updated MCP Provider session`):this.logger?.warn(`Set null session on MCP Provider`)}hasValidSession(){return!!(this.session&&this.session.clientCapabilities?.sampling)}};function ut(){return{version:d||`unknown`,name:te||`task-master-ai`}}function J(e,t,n){t||=ut();let r=`Error: ${e}
|
|
34
|
+
Version: ${t.version}
|
|
35
|
+
Name: ${t.name}`;return n&&(r+=`
|
|
36
|
+
Current Tag: ${n.currentTag}`),{content:[{type:`text`,text:r}],isError:!0}}function dt(e,t){if(typeof e!=`function`){t?.debug?.(`reportProgress not available - operation will run without progress updates`);return}return e}function ft(e){try{let t=j.join(e,`.taskmaster`,`state.json`);return F.existsSync(t)&&JSON.parse(F.readFileSync(t,`utf-8`)).currentTag||`master`}catch{return null}}async function Y(e){let{result:t,log:n,errorPrefix:r=`API error`,projectRoot:i,tag:a}=e,o=ut(),s=a===void 0?i?ft(i):null:a;if(!t.success){let e=t.error?.message||`Unknown ${r}`;n?.error?.(`${r}: ${e}`);let i=`Error: ${e}\nVersion: ${o.version}\nName: ${o.name}`;return s&&(i+=`\nCurrent Tag: ${s}`),{content:[{type:`text`,text:i}],isError:!0}}n?.info?.(`Successfully completed operation`);let c={data:t.data,version:o};return s&&(c.tag=s),{content:[{type:`text`,text:JSON.stringify(c,null,2)}]}}function X(e){if(!e)return process.cwd();try{let t=e;try{t=decodeURIComponent(t)}catch{}return(t.startsWith(`file:///`)||t.startsWith(`file://`))&&(t=t.slice(7)),t.startsWith(`/`)&&/[A-Za-z]:/.test(t.substring(1,3))&&(t=t.substring(1)),t=t.replace(/\\/g,`/`),j.resolve(t)}catch{return j.resolve(e)}}function pt(e){try{return e?.roots?.[0]?.uri?X(e.roots[0].uri):e?.roots?.roots?.[0]?.uri?X(e.roots.roots[0].uri):null}catch{return null}}function Z(e){return async(t,n)=>{let{log:r,session:i}=n,a=null,o=`unknown`;try{if(process.env.TASK_MASTER_PROJECT_ROOT){let e=process.env.TASK_MASTER_PROJECT_ROOT;a=j.isAbsolute(e)?e:j.resolve(process.cwd(),e),o=`TASK_MASTER_PROJECT_ROOT environment variable`,r?.info?.(`Using project root from ${o}: ${a}`)}else if(i?.env?.TASK_MASTER_PROJECT_ROOT){let e=i.env.TASK_MASTER_PROJECT_ROOT;a=j.isAbsolute(e)?e:j.resolve(process.cwd(),e),o=`TASK_MASTER_PROJECT_ROOT session environment variable`,r?.info?.(`Using project root from ${o}: ${a}`)}else if(t.projectRoot)a=X(t.projectRoot),o=`args.projectRoot`,r?.info?.(`Using project root from ${o}: ${a}`);else{let 
e=pt(i);e&&(a=e,o=`session`,r?.info?.(`Using project root from ${o}: ${a}`))}return a?await e({...t,projectRoot:a},n):(r?.error?.(`Could not determine project root from environment, args, or session.`),Y({result:{success:!1,error:{message:`Could not determine project root. Please provide projectRoot argument or ensure TASK_MASTER_PROJECT_ROOT environment variable is set.`}}}))}catch(e){return r?.error?.(`Error within withNormalizedProjectRoot HOF (Normalized Root: ${a}): ${e.message}`),e.stack&&r?.debug&&r.debug(e.stack),Y({result:{success:!1,error:{message:`Operation failed: ${e.message}`}}})}}}function Q(e,t){return Z(async(n,i)=>{let a=j.join(n.projectRoot,`.env`);F.existsSync(a)&&L.config({path:a});let o=await g({projectPath:n.projectRoot,loggerConfig:{mcpMode:!0,logCallback:i.log}});if(r.includes(e)){let t=await o.auth.guardCommand(e,o.tasks.getStorageType());if(t.isBlocked){let e=`You're working on the ${t.briefName} Brief in Hamster so this command is managed for you. This command is only available for local file storage. Log out with 'tm auth logout' to use local commands.`;return i.log.info(e),Y({result:{success:!1,error:{message:e}},log:i.log,projectRoot:n.projectRoot})}}return t(n,{log:i.log,session:i.session,tmCore:o})})}function mt(e,t){if(!e)return{parsedMetadata:null};if(process.env.TASK_MASTER_ALLOW_METADATA_UPDATES!==`true`)return{parsedMetadata:null,error:t(`Metadata updates are disabled. Set TASK_MASTER_ALLOW_METADATA_UPDATES=true in your MCP server environment to enable metadata modifications.`)};try{let n=JSON.parse(e);return typeof n!=`object`||!n||Array.isArray(n)?{parsedMetadata:null,error:t(`Invalid metadata: must be a JSON object (not null or array)`)}:{parsedMetadata:n}}catch(e){return{parsedMetadata:null,error:t(`Invalid metadata JSON: ${e instanceof Error?e.message:`Unknown parse error`}. Provide a valid JSON object string.`)}}}const ht=I.object({taskId:S.describe(`Main task ID to start workflow for (e.g., "1", "2", "HAM-123"). 
Subtask IDs (e.g., "2.3", "1.1") are not allowed.`),projectRoot:I.string().describe(`Absolute path to the project root directory`),maxAttempts:I.number().optional().default(3).describe(`Maximum attempts per subtask (default: 3)`),force:I.boolean().optional().default(!1).describe(`Force start even if workflow state exists`)});function gt(e){e.addTool({name:`autopilot_start`,description:`Initialize and start a new TDD workflow for a task. Creates a git branch and sets up the workflow state machine.`,parameters:ht,annotations:{title:`Start Autopilot Workflow`,destructiveHint:!0},execute:Q(`autopilot-start`,async(e,{log:t,tmCore:n})=>{let{taskId:r,projectRoot:i,maxAttempts:a,force:o}=e,s=h(r);try{t.info(`Starting autopilot workflow for task ${s} in ${i}`);let e=n.config.getActiveTag(),r=n.auth.getContext()?.orgSlug,c=await n.tasks.get(s);if(!c||!c.task)return Y({result:{success:!1,error:{message:`Task ${s} not found`}},log:t,projectRoot:i});let l=c.task;if(!l.subtasks||l.subtasks.length===0)return Y({result:{success:!1,error:{message:`Task ${s} has no subtasks. Please use expand_task (with id="${s}") to create subtasks first. For improved results, consider running analyze_complexity before expanding the task.`}},log:t,projectRoot:i});if(await n.workflow.hasWorkflow()&&!o)return t.warn(`Workflow state already exists`),Y({result:{success:!1,error:{message:`Workflow already in progress. Use force=true to override or resume the existing workflow. 
Suggestion: Use autopilot_resume to continue the existing workflow`}},log:t,projectRoot:i});let u=await n.workflow.start({taskId:s,taskTitle:l.title,subtasks:l.subtasks.map(e=>({id:e.id,title:e.title,status:e.status,maxAttempts:a})),maxAttempts:a,force:o,tag:e,orgSlug:r});t.info(`Workflow started successfully for task ${s}`);let d=n.workflow.getNextAction();return Y({result:{success:!0,data:{message:`Workflow started for task ${s}`,taskId:s,branchName:u.branchName,phase:u.phase,tddPhase:u.tddPhase,progress:u.progress,currentSubtask:u.currentSubtask,nextAction:d.action,nextSteps:d.nextSteps}},log:t,projectRoot:i})}catch(e){return t.error(`Error in autopilot-start: ${e.message}`),e.stack&&t.debug(e.stack),Y({result:{success:!1,error:{message:`Failed to start workflow: ${e.message}`}},log:t,projectRoot:i})}})})}const _t=I.object({projectRoot:I.string().describe(`Absolute path to the project root directory`)});function vt(e){e.addTool({name:`autopilot_resume`,description:`Resume a previously started TDD workflow from saved state. Restores the workflow state machine and continues from where it left off.`,parameters:_t,annotations:{title:`Resume Autopilot Workflow`,readOnlyHint:!0},execute:Q(`autopilot-resume`,async(e,{log:t,tmCore:n})=>{let{projectRoot:r}=e;try{if(t.info(`Resuming autopilot workflow in ${r}`),!await n.workflow.hasWorkflow())return Y({result:{success:!1,error:{message:`No workflow state found. 
Start a new workflow with autopilot_start`}},log:t,projectRoot:r});let e=await n.workflow.resume(),i=n.workflow.getNextAction();return t.info(`Workflow resumed successfully for task ${e.taskId}`),Y({result:{success:!0,data:{message:`Workflow resumed`,...e,nextAction:i.action,actionDescription:i.description,nextSteps:i.nextSteps}},log:t,projectRoot:r})}catch(e){return t.error(`Error in autopilot-resume: ${e.message}`),e.stack&&t.debug(e.stack),Y({result:{success:!1,error:{message:`Failed to resume workflow: ${e.message}`}},log:t,projectRoot:r})}})})}const yt=I.object({projectRoot:I.string().describe(`Absolute path to the project root directory`)});function bt(e){e.addTool({name:`autopilot_next`,description:`Get the next action to perform in the TDD workflow. Returns detailed context about what needs to be done next, including the current phase, subtask, and expected actions.`,parameters:yt,annotations:{title:`Get Next Autopilot Action`,readOnlyHint:!0},execute:Q(`autopilot-next`,async(e,{log:t,tmCore:n})=>{let{projectRoot:r}=e;try{if(t.info(`Getting next action for workflow in ${r}`),!await n.workflow.hasWorkflow())return Y({result:{success:!1,error:{message:`No active workflow found. 
Start a workflow with autopilot_start`}},log:t,projectRoot:r});await n.workflow.resume();let e=n.workflow.getNextAction(),i=n.workflow.getStatus();return t.info(`Next action determined: ${e.action}`),Y({result:{success:!0,data:{action:e.action,actionDescription:e.description,...i,nextSteps:e.nextSteps}},log:t,projectRoot:r})}catch(e){return t.error(`Error in autopilot-next: ${e.message}`),e.stack&&t.debug(e.stack),Y({result:{success:!1,error:{message:`Failed to get next action: ${e.message}`}},log:t,projectRoot:r})}})})}const xt=I.object({projectRoot:I.string().describe(`Absolute path to the project root directory`)});function St(e){e.addTool({name:`autopilot_status`,description:`Get comprehensive workflow status including current phase, progress, subtask details, and activity history.`,parameters:xt,annotations:{title:`Get Autopilot Status`,readOnlyHint:!0},execute:Q(`autopilot-status`,async(e,{log:t,tmCore:n})=>{let{projectRoot:r}=e;try{if(t.info(`Getting workflow status for ${r}`),!await n.workflow.hasWorkflow())return Y({result:{success:!1,error:{message:`No active workflow found. 
Start a workflow with autopilot_start`}},log:t,projectRoot:r});await n.workflow.resume();let e=n.workflow.getStatus();return t.info(`Workflow status retrieved for task ${e.taskId}`),Y({result:{success:!0,data:e},log:t,projectRoot:r})}catch(e){return t.error(`Error in autopilot-status: ${e.message}`),e.stack&&t.debug(e.stack),Y({result:{success:!1,error:{message:`Failed to get workflow status: ${e.message}`}},log:t,projectRoot:r})}})})}const Ct=I.object({projectRoot:I.string().describe(`Absolute path to the project root directory`),testResults:I.object({total:I.number().describe(`Total number of tests`),passed:I.number().describe(`Number of passing tests`),failed:I.number().describe(`Number of failing tests`),skipped:I.number().optional().describe(`Number of skipped tests`)}).describe(`Test results from running the test suite`)});function wt(e){e.addTool({name:`autopilot_complete_phase`,description:`Complete the current TDD phase (RED or GREEN) with test result validation. RED phase: expects failures (if 0 failures, feature is already implemented and subtask auto-completes). GREEN phase: expects all tests passing. For COMMIT phase, use autopilot_commit instead.`,parameters:Ct,annotations:{title:`Complete Autopilot Phase`,destructiveHint:!0},execute:Q(`autopilot-complete-phase`,async(e,{log:t,tmCore:n})=>{let{projectRoot:r,testResults:i}=e;try{if(t.info(`Completing current phase in workflow for ${r}`),!await n.workflow.hasWorkflow())return Y({result:{success:!1,error:{message:`No active workflow found. Start a workflow with autopilot_start`}},log:t,projectRoot:r});await n.workflow.resume();let e=n.workflow.getStatus();if(!e.tddPhase)return Y({result:{success:!1,error:{message:`Cannot complete phase: not in a TDD phase (current phase: ${e.phase})`}},log:t,projectRoot:r});if(e.tddPhase===`COMMIT`)return Y({result:{success:!1,error:{message:`Cannot complete COMMIT phase with this tool. 
Use autopilot_commit instead`}},log:t,projectRoot:r});let a=e.tddPhase,o={total:i.total,passed:i.passed,failed:i.failed,skipped:i.skipped??0,phase:a},s=await n.workflow.completePhase(o),c=n.workflow.getNextAction();return t.info(`Phase completed. New phase: ${s.tddPhase||s.phase}`),Y({result:{success:!0,data:{message:`Phase completed. Transitioned to ${s.tddPhase||s.phase}`,...s,nextAction:c.action,actionDescription:c.description,nextSteps:c.nextSteps}},log:t,projectRoot:r})}catch(e){return t.error(`Error in autopilot-complete: ${e.message}`),e.stack&&t.debug(e.stack),Y({result:{success:!1,error:{message:`Failed to complete phase: ${e.message}`}},log:t,projectRoot:r})}})})}const Tt=I.object({projectRoot:I.string().describe(`Absolute path to the project root directory`),files:I.array(I.string()).optional().describe(`Specific files to stage (relative to project root). If not provided, stages all changes.`),customMessage:I.string().optional().describe(`Custom commit message to use instead of auto-generated message`)});function Et(e){e.addTool({name:`autopilot_commit`,description:`Create a git commit with automatic staging, message generation, and metadata embedding. Generates appropriate commit messages based on subtask context and TDD phase.`,parameters:Tt,annotations:{title:`Commit Autopilot Changes`,destructiveHint:!0},execute:Q(`autopilot-commit`,async(e,{log:t,tmCore:n})=>{let{projectRoot:r,files:i,customMessage:a}=e;try{if(t.info(`Creating commit for workflow in ${r}`),!await n.workflow.hasWorkflow())return Y({result:{success:!1,error:{message:`No active workflow found. Start a workflow with autopilot_start`}},log:t,projectRoot:r});await n.workflow.resume();let e=n.workflow.getStatus(),o=n.workflow.getContext();if(e.tddPhase!==`COMMIT`)return t.warn(`Not in COMMIT phase (currently in ${e.tddPhase})`),Y({result:{success:!1,error:{message:`Cannot commit: currently in ${e.tddPhase} phase. 
Complete the ${e.tddPhase} phase first using autopilot_complete_phase`}},log:t,projectRoot:r});if(!e.currentSubtask)return Y({result:{success:!1,error:{message:`No active subtask to commit`}},log:t,projectRoot:r});let s=new y(r);try{i&&i.length>0?(await s.stageFiles(i),t.info(`Staged ${i.length} files`)):(await s.stageFiles([`.`]),t.info(`Staged all changes`))}catch(e){return t.error(`Failed to stage files: ${e.message}`),Y({result:{success:!1,error:{message:`Failed to stage files: ${e.message}`}},log:t,projectRoot:r})}if(!await s.hasStagedChanges())return t.warn(`No staged changes to commit`),Y({result:{success:!1,error:{message:`No staged changes to commit. Make code changes before committing`}},log:t,projectRoot:r});let c=await s.getStatus(),l;if(a)l=a,t.info(`Using custom commit message`);else{let n=new p,r={type:e.tddPhase===`COMMIT`?`feat`:`test`,description:e.currentSubtask.title,changedFiles:c.staged,taskId:e.taskId,phase:e.tddPhase,testsPassing:o.lastTestResults?.passed,testsFailing:o.lastTestResults?.failed};l=n.generateMessage(r),t.info(`Generated commit message automatically`)}try{await s.createCommit(l),t.info(`Commit created successfully`)}catch(e){return t.error(`Failed to create commit: ${e.message}`),Y({result:{success:!1,error:{message:`Failed to create commit: ${e.message}`}},log:t,projectRoot:r})}let u=await s.getLastCommit(),d=await n.workflow.commit();t.info(`Commit completed. 
Current phase: ${d.tddPhase||d.phase}`);let f=d.phase===`COMPLETE`,m=n.workflow.getNextAction();return Y({result:{success:!0,data:{message:f?`Workflow completed successfully`:`Commit created and workflow advanced`,commitSha:u.sha,commitMessage:l,...d,isComplete:f,nextAction:m.action,nextSteps:m.nextSteps}},log:t,projectRoot:r})}catch(e){return t.error(`Error in autopilot-commit: ${e.message}`),e.stack&&t.debug(e.stack),Y({result:{success:!1,error:{message:`Failed to commit: ${e.message}`}},log:t,projectRoot:r})}})})}const Dt=I.object({projectRoot:I.string().describe(`Absolute path to the project root directory`)});function Ot(e){e.addTool({name:`autopilot_finalize`,description:`Finalize and complete the workflow. Validates that all changes are committed and working tree is clean before marking workflow as complete.`,parameters:Dt,annotations:{title:`Finalize Autopilot Workflow`,destructiveHint:!0},execute:Q(`autopilot-finalize`,async(e,{log:t,tmCore:n})=>{let{projectRoot:r}=e;try{if(t.info(`Finalizing workflow in ${r}`),!await n.workflow.hasWorkflow())return Y({result:{success:!1,error:{message:`No active workflow found. Start a workflow with autopilot_start`}},log:t,projectRoot:r});await n.workflow.resume();let e=n.workflow.getStatus();if(e.phase!==`FINALIZE`)return Y({result:{success:!1,error:{message:`Cannot finalize: workflow is in ${e.phase} phase. 
Complete all subtasks first.`}},log:t,projectRoot:r});let i=await n.workflow.finalize();t.info(`Workflow finalized successfully`);let a=n.workflow.getNextAction();return Y({result:{success:!0,data:{message:`Workflow completed successfully`,...i,nextAction:a.action,nextSteps:a.nextSteps}},log:t,projectRoot:r})}catch(e){return t.error(`Error in autopilot-finalize: ${e.message}`),e.stack&&t.debug(e.stack),Y({result:{success:!1,error:{message:`Failed to finalize workflow: ${e.message}`}},log:t,projectRoot:r})}})})}const kt=I.object({projectRoot:I.string().describe(`Absolute path to the project root directory`)});function At(e){e.addTool({name:`autopilot_abort`,description:`Abort the current TDD workflow and clean up workflow state. This will remove the workflow state file but will NOT delete the git branch or any code changes.`,parameters:kt,annotations:{title:`Abort Autopilot Workflow`,destructiveHint:!0},execute:Q(`autopilot-abort`,async(e,{log:t,tmCore:n})=>{let{projectRoot:r}=e;try{if(t.info(`Aborting autopilot workflow in ${r}`),!await n.workflow.hasWorkflow())return t.warn(`No active workflow to abort`),Y({result:{success:!0,data:{message:`No active workflow to abort`,hadWorkflow:!1}},log:t,projectRoot:r});await n.workflow.resume();let e=n.workflow.getStatus();return await n.workflow.abort(),t.info(`Workflow state deleted`),Y({result:{success:!0,data:{message:`Workflow aborted`,hadWorkflow:!0,taskId:e.taskId,branchName:e.branchName,note:`Git branch and code changes were preserved. You can manually clean them up if needed.`}},log:t,projectRoot:r})}catch(e){return t.error(`Error in autopilot-abort: ${e.message}`),e.stack&&t.debug(e.stack),Y({result:{success:!1,error:{message:`Failed to abort workflow: ${e.message}`}},log:t,projectRoot:r})}})})}const jt=I.object({projectRoot:I.string().describe(`The directory of the project. 
Must be an absolute path.`),status:I.string().optional().describe(`Filter tasks by status (e.g., 'pending', 'done') or multiple statuses separated by commas (e.g., 'blocked,deferred')`),withSubtasks:I.boolean().optional().describe(`Include subtasks nested within their parent tasks in the response`),tag:I.string().optional().describe(`Tag context to operate on`)});function Mt(e){e.addTool({name:`get_tasks`,description:`Get all tasks from Task Master, optionally filtering by status and including subtasks.`,parameters:jt,annotations:{title:`Get Tasks`,readOnlyHint:!0},execute:Q(`get-tasks`,async(e,{log:t,tmCore:n})=>{let{projectRoot:r,status:i,withSubtasks:a,tag:o}=e;try{t.info(`Getting tasks from ${r}${i?` with status filter: ${i}`:``}${o?` for tag: ${o}`:``}`);let e=i&&i!==`all`?{status:i.split(`,`).map(e=>e.trim())}:void 0,s=await n.tasks.list({tag:o,filter:e,includeSubtasks:a});t.info(`Retrieved ${s.tasks?.length||0} tasks (${s.filtered} filtered, ${s.total} total)`);let c=s.tasks??[],l=s.total,u=c.reduce((e,t)=>(e[t.status]=(e[t.status]||0)+1,e),{}),d=l>0?(u.done||0)/l*100:0,f=c.reduce((e,t)=>(t.subtasks?.forEach(t=>{e.total++,e[t.status]=(e[t.status]||0)+1}),e),{total:0}),p=f.total>0?(f.done||0)/f.total*100:0;return Y({result:{success:!0,data:{tasks:c,filter:i||`all`,stats:{total:l,completed:u.done||0,inProgress:u[`in-progress`]||0,pending:u.pending||0,blocked:u.blocked||0,deferred:u.deferred||0,cancelled:u.cancelled||0,review:u.review||0,completionPercentage:d,subtasks:{total:f.total,completed:f.done||0,inProgress:f[`in-progress`]||0,pending:f.pending||0,blocked:f.blocked||0,deferred:f.deferred||0,cancelled:f.cancelled||0,completionPercentage:p}}}},log:t,projectRoot:r,tag:s.tag})}catch(e){return t.error(`Error in get-tasks: ${e.message}`),e.stack&&t.debug(e.stack),Y({result:{success:!1,error:{message:`Failed to get tasks: ${e.message}`}},log:t,projectRoot:r})}})})}const Nt=I.object({id:n.describe(`Task ID(s) to get (can be comma-separated for multiple 
tasks)`),status:I.string().optional().describe(`Filter subtasks by status (e.g., 'pending', 'done')`),projectRoot:I.string().describe(`Absolute path to the project root directory (Optional, usually from session)`),tag:I.string().optional().describe(`Tag context to operate on`)});function Pt(e){e.addTool({name:`get_task`,description:`Get detailed information about a specific task`,parameters:Nt,annotations:{title:`Get Task`,readOnlyHint:!0},execute:Q(`get-task`,async(e,{log:t,tmCore:n})=>{let{id:r,status:i,projectRoot:a,tag:o}=e;try{t.info(`Getting task details for ID: ${r}${i?` (filtering subtasks by status: ${i})`:``} in root: ${a}`);let e=l(r),s=await Promise.all(e.map(e=>n.tasks.get(e,o))),c=[];for(let e of s)if(e.task)if(i&&e.task.subtasks){let t=i.split(`,`).map(e=>e.trim().toLowerCase()),n=e.task.subtasks.filter(e=>t.includes(String(e.status).toLowerCase()));c.push({...e.task,subtasks:n})}else c.push(e.task);return c.length===0?(t.warn(`No tasks found for ID(s): ${r}`),Y({result:{success:!1,error:{message:`No tasks found for ID(s): ${r}`}},log:t,projectRoot:a})):(t.info(`Successfully retrieved ${c.length} task(s) for ID(s): ${r}`),Y({result:{success:!0,data:e.length===1?c[0]:c},log:t,projectRoot:a,tag:o}))}catch(e){return t.error(`Error in get-task: ${e.message}`),e.stack&&t.debug(e.stack),Y({result:{success:!1,error:{message:`Failed to get task: ${e.message}`}},log:t,projectRoot:a})}})})}const Ft=I.object({output:I.string().optional().describe(`Output directory for generated files (default: same directory as tasks file)`),projectRoot:I.string().describe(`The directory of the project. Must be an absolute path.`),tag:I.string().optional().describe(`Tag context to operate on`)});function It(e){e.addTool({name:`generate`,description:`Generates individual task files in tasks/ directory based on tasks.json. 
Only works with local file storage.`,parameters:Ft,annotations:{title:`Generate Task Files`,destructiveHint:!0},execute:Q(`generate`,async(e,{log:t,tmCore:n})=>{let{projectRoot:r,tag:i,output:a}=e;try{t.info(`Generating task files with args: ${JSON.stringify(e)}`);let o=a?j.resolve(r,a):void 0,s=await n.tasks.generateTaskFiles({tag:i,outputDir:o});return s.success?(t.info(`Successfully generated ${s.count} task files in ${s.directory}`),s.orphanedFilesRemoved>0&&t.info(`Removed ${s.orphanedFilesRemoved} orphaned task files`)):t.error(`Failed to generate task files: ${s.error||`Unknown error`}`),Y({result:{success:s.success,data:s.success?{message:`Successfully generated ${s.count} task file(s)`,count:s.count,directory:s.directory,orphanedFilesRemoved:s.orphanedFilesRemoved}:void 0,error:s.success?void 0:{message:s.error||`Unknown error`}},log:t,projectRoot:r,tag:i})}catch(e){return t.error(`Error in generate tool: ${e.message}`),e.stack&&t.debug(e.stack),Y({result:{success:!1,error:{message:`Failed to generate task files: ${e.message}`}},log:t,projectRoot:r})}})})}const Lt=I.object({id:n.describe(`Task ID or subtask ID (e.g., '15', '15.2'). Can be comma-separated to update multiple tasks/subtasks at once.`),status:I.enum(ee).describe(`New status to set (e.g., 'pending', 'done', 'in-progress', 'review', 'deferred', 'cancelled').`),projectRoot:I.string().describe(`The directory of the project. 
Must be an absolute path.`),tag:I.string().optional().describe(`Optional tag context to operate on`)});function Rt(e){e.addTool({name:`set_task_status`,description:`Set the status of one or more tasks or subtasks.`,parameters:Lt,annotations:{title:`Set Task Status`,destructiveHint:!0},execute:Q(`set-task-status`,async(e,{log:t,tmCore:n})=>{let{id:r,status:i,projectRoot:a,tag:o}=e;try{t.info(`Setting status of task(s) ${r} to: ${i}${o?` in tag: ${o}`:` in current tag`}`);let e=l(r),s=[];for(let r of e){let e=await n.tasks.updateStatus(r,i,o);s.push(e),t.info(`Updated task ${r}: ${e.oldStatus} → ${e.newStatus}`)}return t.info(`Successfully updated status for ${s.length} task(s) to "${i}"`),Y({result:{success:!0,data:{message:`Successfully updated ${s.length} task(s) to "${i}"`,tasks:s}},log:t,projectRoot:a,tag:o})}catch(e){let n=e instanceof Error?e:Error(String(e));return t.error(`Error in set-task-status: ${n.message}`),n.stack&&t.debug(n.stack),Y({result:{success:!1,error:{message:`Failed to set task status: ${n.message}`}},log:t,projectRoot:a,tag:o})}})})}async function zt(e,t){let{tasksJsonPath:n,id:r,dependsOn:i,tag:a,projectRoot:o}=e;try{if(t.info(`Adding dependency with args: ${JSON.stringify(e)}`),!n)return t.error(`addDependencyDirect called without tasksJsonPath`),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};if(!r)return{success:!1,error:{code:`INPUT_VALIDATION_ERROR`,message:`Task ID (id) is required`}};if(!i)return{success:!1,error:{code:`INPUT_VALIDATION_ERROR`,message:`Dependency ID (dependsOn) is required`}};let s=n,c=r&&r.includes&&r.includes(`.`)?r:parseInt(r,10),l=i&&i.includes&&i.includes(`.`)?i:parseInt(i,10);return t.info(`Adding dependency: task ${c} will depend on ${l}`),x(),await Be(s,c,l,{projectRoot:o,tag:a}),v(),{success:!0,data:{message:`Successfully added dependency: Task ${c} now depends on ${l}`,taskId:c,dependencyId:l}}}catch(e){return v(),t.error(`Error in addDependencyDirect: 
${e.message}`),{success:!1,error:{code:`CORE_FUNCTION_ERROR`,message:e.message}}}}async function Bt(e,t){let{tasksJsonPath:n,id:r,taskId:i,title:a,description:o,details:s,status:c,dependencies:l,skipGenerate:u,projectRoot:d,tag:f}=e;try{if(t.info(`Adding subtask with args: ${JSON.stringify(e)}`),!n)return t.error(`addSubtaskDirect called without tasksJsonPath`),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};if(!r)return{success:!1,error:{code:`INPUT_VALIDATION_ERROR`,message:`Parent task ID is required`}};if(!i&&!a)return{success:!1,error:{code:`INPUT_VALIDATION_ERROR`,message:`Either taskId or title must be provided`}};let p=n,m=[];l&&(m=l.split(`,`).map(e=>e.includes(`.`)?e.trim():parseInt(e.trim(),10)));let h=i?parseInt(i,10):null,g=parseInt(r,10),_=!u;x();let y={projectRoot:d,tag:f};if(h){t.info(`Converting task ${h} to a subtask of ${g}`);let e=await ae(p,g,h,null,_,y);return v(),{success:!0,data:{message:`Task ${h} successfully converted to a subtask of task ${g}`,subtask:e}}}else{t.info(`Creating new subtask for parent task ${g}`);let e=await ae(p,g,null,{title:a,description:o||``,details:s||``,status:c||`pending`,dependencies:m},_,y);return v(),{success:!0,data:{message:`New subtask ${g}.${e.id} successfully created`,subtask:e}}}}catch(e){return v(),t.error(`Error in addSubtaskDirect: ${e.message}`),{success:!1,error:{code:`CORE_FUNCTION_ERROR`,message:e.message}}}}async function Vt(e,t,n={}){let{tasksJsonPath:r,name:i,copyFromCurrent:a=!1,copyFromTag:o,fromBranch:s=!1,description:c,projectRoot:l}=e,{session:u}=n;x();let d=D(t);try{if(!r)return t.error(`addTagDirect called without tasksJsonPath`),v(),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};if(s){t.info(`Creating tag from current git branch`);let e=await import(`./git-utils-PBP1PRVP.js`);if(!await e.isGitRepository(l))return t.error(`Not in a git repository`),v(),{success:!1,error:{code:`NOT_GIT_REPO`,message:`Not in a git repository. 
Cannot use fromBranch option.`}};let n=await e.getCurrentBranch(l);if(!n)return t.error(`Could not determine current git branch`),v(),{success:!1,error:{code:`NO_CURRENT_BRANCH`,message:`Could not determine current git branch.`}};let i=await Pe(r,n,{copyFromCurrent:a,copyFromTag:o,description:c||`Tag created from git branch "${n}"`},{session:u,mcpLog:d,projectRoot:l},`json`);return v(),{success:!0,data:{branchName:i.branchName,tagName:i.tagName,created:i.created,mappingUpdated:i.mappingUpdated,message:`Successfully created tag "${i.tagName}" from git branch "${i.branchName}"`}}}else{if(!i||typeof i!=`string`)return t.error(`Missing required parameter: name`),v(),{success:!1,error:{code:`MISSING_PARAMETER`,message:`Tag name is required and must be a string`}};t.info(`Creating new tag: ${i}`);let e=await De(r,i,{copyFromCurrent:a,copyFromTag:o,description:c},{session:u,mcpLog:d,projectRoot:l},`json`);return v(),{success:!0,data:{tagName:e.tagName,created:e.created,tasksCopied:e.tasksCopied,sourceTag:e.sourceTag,description:e.description,message:`Successfully created tag "${e.tagName}"`}}}}catch(e){return v(),t.error(`Error in addTagDirect: ${e.message}`),{success:!1,error:{code:e.code||`ADD_TAG_ERROR`,message:e.message}}}}async function Ht(e,t,n={}){let{tasksJsonPath:r,prompt:i,dependencies:a,priority:o,research:s,projectRoot:c,tag:l}=e,{session:u}=n;x();let d=D(t);try{if(!r)return t.error(`addTaskDirect called without tasksJsonPath`),v(),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};let n=r,f=e.title&&e.description;if(!e.prompt&&!f)return t.error(`Missing required parameters: either prompt or title+description must be provided`),v(),{success:!1,error:{code:`MISSING_PARAMETER`,message:`Either the prompt parameter or both title and description parameters are required for adding a task`}};let 
p=Array.isArray(a)?a:a?String(a).split(`,`).map(e=>parseInt(e.trim(),10)):[],m=o||`medium`,h=null,g,_,y;if(f){h={title:e.title,description:e.description,details:e.details||``,testStrategy:e.testStrategy||``},t.info(`Adding new task manually with title: "${e.title}", dependencies: [${p.join(`, `)}], priority: ${o}`);let r=await ve(n,null,p,m,{session:u,mcpLog:d,projectRoot:c,commandName:`add-task`,outputType:`mcp`,tag:l},`json`,h,!1);g=r.newTaskId,_=r.telemetryData,y=r.tagInfo}else{t.info(`Adding new task with prompt: "${i}", dependencies: [${p.join(`, `)}], priority: ${m}, research: ${s}`);let e=await ve(n,i,p,m,{session:u,mcpLog:d,projectRoot:c,commandName:`add-task`,outputType:`mcp`,tag:l},`json`,null,s);g=e.newTaskId,_=e.telemetryData,y=e.tagInfo}return v(),{success:!0,data:{taskId:g,message:`Successfully added new task #${g}`,telemetryData:_,tagInfo:y}}}catch(e){return v(),t.error(`Error in addTaskDirect: ${e.message}`),{success:!1,error:{code:e.code||`ADD_TASK_ERROR`,message:e.message}}}}async function Ut(e,t,n={}){let{session:r}=n,{tasksJsonPath:i,outputPath:a,threshold:o,research:s,projectRoot:c,ids:l,from:u,to:d,tag:f}=e,p=D(t);try{if(t.info(`Analyzing task complexity with args: ${JSON.stringify(e)}`),!i)return t.error(`analyzeTaskComplexityDirect called without tasksJsonPath`),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};if(!a)return t.error(`analyzeTaskComplexityDirect called without outputPath`),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`outputPath is required`}};let n=i,m=a;if(t.info(`Analyzing task complexity from: ${n}`),t.info(`Output report will be saved to: ${m}`),l)t.info(`Analyzing specific task IDs: ${l}`);else if(u||d){let e=u===void 0?`first`:u,n=d===void 0?`last`:d;t.info(`Analyzing tasks in range: ${e} to ${n}`)}s&&t.info(`Using research role for complexity analysis`);let h={file:i,output:a,threshold:o,research:s===!0,projectRoot:c,id:l,from:u,to:d,tag:f},g=w();g||x();let _;try{_=await 
Te(h,{session:r,mcpLog:p,commandName:`analyze-complexity`,outputType:`mcp`,projectRoot:c,tag:f}),_.report}catch(e){return t.error(`Error in analyzeTaskComplexity core function: ${e.message}`),!g&&w()&&v(),{success:!1,error:{code:`ANALYZE_CORE_ERROR`,message:`Error running core complexity analysis: ${e.message}`}}}finally{!g&&w()&&v()}if(!N.existsSync(m))return{success:!1,error:{code:`ANALYZE_REPORT_MISSING`,message:`Analysis completed but no report file was created at the expected path.`}};if(!_||!_.report||typeof _.report!=`object`)return t.error(`Core analysis function returned an invalid or undefined response.`),{success:!1,error:{code:`INVALID_CORE_RESPONSE`,message:`Core analysis function returned an invalid response.`}};try{let e=Array.isArray(_.report.complexityAnalysis)?_.report.complexityAnalysis:[],t=e.filter(e=>e.complexityScore>=8).length,n=e.filter(e=>e.complexityScore>=5&&e.complexityScore<8).length,r=e.filter(e=>e.complexityScore<5).length;return{success:!0,data:{message:`Task complexity analysis complete. 
Report saved to ${a}`,reportPath:a,reportSummary:{taskCount:e.length,highComplexityTasks:t,mediumComplexityTasks:n,lowComplexityTasks:r},fullReport:_.report,telemetryData:_.telemetryData,tagInfo:_.tagInfo}}}catch(e){return t.error(`Internal error processing report data: ${e.message}`),{success:!1,error:{code:`REPORT_PROCESS_ERROR`,message:`Internal error processing complexity report: ${e.message}`}}}}catch(e){return w()&&v(),t.error(`Error in analyzeTaskComplexityDirect setup: ${e.message}`),{success:!1,error:{code:`DIRECT_FUNCTION_SETUP_ERROR`,message:e.message}}}}async function Wt(e,t){let{tasksJsonPath:n,id:r,all:i,tag:a,projectRoot:o}=e;try{if(t.info(`Clearing subtasks with args: ${JSON.stringify(e)}`),!n)return t.error(`clearSubtasksDirect called without tasksJsonPath`),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};if(!r&&!i)return{success:!1,error:{code:`INPUT_VALIDATION_ERROR`,message:`Either task IDs with id parameter or all parameter must be provided`}};let s=n;if(!N.existsSync(s))return{success:!1,error:{code:`FILE_NOT_FOUND_ERROR`,message:`Tasks file not found at ${s}`}};let c,l=f(s,o,a);if(!l||!l.tasks)return{success:!1,error:{code:`INPUT_VALIDATION_ERROR`,message:`No tasks found in tasks file: ${s}`}};let u=l.tag||a,d=l.tasks;if(i){if(t.info(`Clearing subtasks from all tasks in tag '${u}'`),d.length===0)return{success:!1,error:{code:`INPUT_VALIDATION_ERROR`,message:`No tasks found in tag context '${u}'`}};c=d.map(e=>e.id).join(`,`)}else c=r;t.info(`Clearing subtasks from tasks: ${c} in tag '${u}'`),x(),Ce(s,c,{projectRoot:o,tag:u}),v();let p=f(s,o,u),m=c.split(`,`).map(e=>parseInt(e.trim(),10)),h=m.length,g=p.tasks||[],_=m.map(e=>{let t=g.find(t=>t.id===e);return t?{id:e,title:t.title}:{id:e,title:`Task not found`}});return{success:!0,data:{message:`Successfully cleared subtasks from ${h} task(s) in tag '${u}'`,tasksCleared:_,tag:u}}}catch(e){return v(),t.error(`Error in clearSubtasksDirect: 
${e.message}`),{success:!1,error:{code:`CORE_FUNCTION_ERROR`,message:e.message}}}}async function Gt(e,t){let{reportPath:n}=e;try{if(t.info(`Getting complexity report with args: ${JSON.stringify(e)}`),!n)return t.error(`complexityReportDirect called without reportPath`),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`reportPath is required`}};t.info(`Looking for complexity report at: ${n}`),`${n}`;let r=async()=>{try{x();let e=T(n);return v(),e?{success:!0,data:{report:e,reportPath:n}}:(t.warn(`No complexity report found at ${n}`),{success:!1,error:{code:`FILE_NOT_FOUND_ERROR`,message:`No complexity report found at ${n}. Run 'analyze-complexity' first.`}})}catch(e){return v(),t.error(`Error reading complexity report: ${e.message}`),{success:!1,error:{code:`READ_ERROR`,message:e.message}}}};try{let e=await r();return t.info(`complexityReportDirect completed`),e}catch(e){return v(),t.error(`Unexpected error during complexityReport: ${e.message}`),{success:!1,error:{code:`UNEXPECTED_ERROR`,message:e.message}}}}catch(e){return v(),t.error(`Error in complexityReportDirect: ${e.message}`),{success:!1,error:{code:`UNEXPECTED_ERROR`,message:e.message}}}}async function Kt(e,t,n={}){let{tasksJsonPath:r,sourceName:i,targetName:a,description:o,projectRoot:s}=e,{session:c}=n;x();let l=D(t);try{if(!r)return t.error(`copyTagDirect called without tasksJsonPath`),v(),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};if(!i||typeof i!=`string`)return t.error(`Missing required parameter: sourceName`),v(),{success:!1,error:{code:`MISSING_PARAMETER`,message:`Source tag name is required and must be a string`}};if(!a||typeof a!=`string`)return t.error(`Missing required parameter: targetName`),v(),{success:!1,error:{code:`MISSING_PARAMETER`,message:`Target tag name is required and must be a string`}};t.info(`Copying tag from "${i}" to "${a}"`);let e=await ze(r,i,a,{description:o},{session:c,mcpLog:l,projectRoot:s},`json`);return 
v(),{success:!0,data:{sourceName:e.sourceName,targetName:e.targetName,copied:e.copied,tasksCopied:e.tasksCopied,description:e.description,message:`Successfully copied tag from "${e.sourceName}" to "${e.targetName}"`}}}catch(e){return v(),t.error(`Error in copyTagDirect: ${e.message}`),{success:!1,error:{code:e.code||`COPY_TAG_ERROR`,message:e.message}}}}async function qt(e,t,n={}){let{tasksJsonPath:r,name:i,yes:a=!1,projectRoot:o}=e,{session:s}=n;x();let c=D(t);try{if(!r)return t.error(`deleteTagDirect called without tasksJsonPath`),v(),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};if(!i||typeof i!=`string`)return t.error(`Missing required parameter: name`),v(),{success:!1,error:{code:`MISSING_PARAMETER`,message:`Tag name is required and must be a string`}};t.info(`Deleting tag: ${i}`);let e=await Ve(r,i,{yes:a},{session:s,mcpLog:c,projectRoot:o},`json`);return v(),{success:!0,data:{tagName:e.tagName,deleted:e.deleted,tasksDeleted:e.tasksDeleted,wasCurrentTag:e.wasCurrentTag,switchedToMaster:e.switchedToMaster,message:`Successfully deleted tag "${e.tagName}"`}}}catch(e){return v(),t.error(`Error in deleteTagDirect: ${e.message}`),{success:!1,error:{code:e.code||`DELETE_TAG_ERROR`,message:e.message}}}}async function Jt(e,t,n={}){let{session:r}=n,{tasksJsonPath:i,num:a,research:o,prompt:s,force:c,projectRoot:l,tag:u,complexityReportPath:d}=e,f=D(t),p=d||C(null,{projectRoot:l,tag:u},t);if(t.info(`Expand all tasks will use complexity report at: ${p}`),!i)return t.error(`expandAllTasksDirect called without tasksJsonPath`),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};x();try{t.info(`Calling core expandAllTasks with args: ${JSON.stringify({num:a,research:o,prompt:s,force:c,projectRoot:l,tag:u})}`);let e=await pe(i,a?parseInt(a,10):void 0,o===!0,s||``,c===!0,{session:r,mcpLog:f,projectRoot:l,tag:u,complexityReportPath:p},`json`);return{success:!0,data:{message:`Expand all operation completed. 
Expanded: ${e.expandedCount}, Failed: ${e.failedCount}, Skipped: ${e.skippedCount}`,details:{expandedCount:e.expandedCount,failedCount:e.failedCount,skippedCount:e.skippedCount,tasksToExpand:e.tasksToExpand},telemetryData:e.telemetryData}}}catch(e){return t.error(`Error during core expandAllTasks execution: ${e.message}`),{success:!1,error:{code:`CORE_FUNCTION_ERROR`,message:e.message}}}finally{v()}}async function Yt(e,t,n={}){let{session:r}=n,{tasksJsonPath:i,id:a,num:o,research:c,prompt:l,force:u,projectRoot:d,tag:p,complexityReportPath:m}=e;if(t.info(`Session data in expandTaskDirect: ${JSON.stringify({hasSession:!!r,sessionKeys:r?Object.keys(r):[],roots:r?.roots,rootsStr:JSON.stringify(r?.roots)})}`),!i)return t.error(`expandTaskDirect called without tasksJsonPath`),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};let h=i;t.info(`[expandTaskDirect] Using tasksPath: ${h}`);let g=a?parseInt(a,10):null;if(!g)return t.error(`Task ID is required`),{success:!1,error:{code:`INPUT_VALIDATION_ERROR`,message:`Task ID is required`}};let _=o?parseInt(o,10):void 0,y=c===!0,b=l||``,S=u===!0;try{t.info(`[expandTaskDirect] Expanding task ${g} into ${_||`default`} subtasks. Research: ${y}, Force: ${S}`),t.info(`[expandTaskDirect] Attempting to read JSON from: ${h}`);let e=f(h,d);if(t.info(`[expandTaskDirect] Result of readJSON: ${e?`Data read successfully`:`readJSON returned null or undefined`}`),!e||!e.tasks)return t.error(`[expandTaskDirect] readJSON failed or returned invalid data for path: ${h}`),{success:!1,error:{code:`INVALID_TASKS_FILE`,message:`No valid tasks found in ${h}. 
readJSON returned: ${JSON.stringify(e)}`}};t.info(`[expandTaskDirect] Searching for task ID ${g} in data`);let n=e.tasks.find(e=>e.id===g);if(t.info(`[expandTaskDirect] Task found: ${n?`Yes`:`No`}`),!n)return{success:!1,error:{code:`TASK_NOT_FOUND`,message:`Task with ID ${g} not found`}};if(n.status===`done`||n.status===`completed`)return{success:!1,error:{code:`TASK_COMPLETED`,message:`Task ${g} is already marked as ${n.status} and cannot be expanded`}};let i=n.subtasks&&n.subtasks.length>0;if(i&&!S)return t.info(`Task ${g} already has ${n.subtasks.length} subtasks. Use --force to overwrite.`),{success:!0,data:{message:`Task ${g} already has subtasks. Expansion skipped.`,task:n,subtasksAdded:0,hasExistingSubtasks:i}};i&&S&&(t.info(`Force flag set. Clearing existing subtasks for task ${g}.`),n.subtasks=[]),JSON.parse(JSON.stringify(n));let a=n.subtasks?n.subtasks.length:0;n.subtasks||=[],s(h,e,d,p);let o=D(t),c;try{c=w(),c||x();let e=await we(h,g,_,y,b,{complexityReportPath:m,mcpLog:o,session:r,projectRoot:d,commandName:`expand-task`,outputType:`mcp`,tag:p},S);!c&&w()&&v();let n=f(h,d).tasks.find(e=>e.id===g),s=n.subtasks?n.subtasks.length-a:0;return t.info(`Successfully expanded task ${g} with ${s} new subtasks`),{success:!0,data:{task:e.task,subtasksAdded:s,hasExistingSubtasks:i,telemetryData:e.telemetryData,tagInfo:e.tagInfo}}}catch(e){return!c&&w()&&v(),t.error(`Error expanding task: ${e.message}`),{success:!1,error:{code:`CORE_FUNCTION_ERROR`,message:e.message||`Failed to expand task`}}}}catch(e){return t.error(`Error expanding task: ${e.message}`),{success:!1,error:{code:`CORE_FUNCTION_ERROR`,message:e.message||`Failed to expand task`}}}}async function Xt(e,t){let{tasksJsonPath:n,projectRoot:r,tag:i}=e;try{if(t.info(`Fixing invalid dependencies in tasks: ${n}`),!n)return t.error(`fixDependenciesDirect called without tasksJsonPath`),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};let e=n;return N.existsSync(e)?(x(),await 
Fe(e,{projectRoot:r,tag:i}),v(),{success:!0,data:{message:`Dependencies fixed successfully`,tasksPath:e,tag:i||`master`}}):{success:!1,error:{code:`FILE_NOT_FOUND`,message:`Tasks file not found at ${e}`}}}catch(e){return v(),t.error(`Error fixing dependencies: ${e.message}`),{success:!1,error:{code:`FIX_DEPENDENCIES_ERROR`,message:e.message}}}}async function Zt(e,t,n={}){let{session:r}=n,i=Ze.homedir();t.info(`Args received in direct function: ${JSON.stringify(e)}`);let a=e.projectRoot;if(!a||typeof a!=`string`||a===`/`||a===i)return t.error(`Invalid target directory received from tool layer: '${a}'`),{success:!1,error:{code:`INVALID_TARGET_DIRECTORY`,message:`Cannot initialize project: Invalid target directory '${a}' received. Please ensure a valid workspace/folder is open or specified.`,details:`Received args.projectRoot: ${e.projectRoot}`}};t.info(`Validated target directory for initialization: ${a}`);let o=process.cwd(),s,c=!1,l=null;t.info(`Temporarily changing CWD to ${a} for initialization.`),process.chdir(a),x();try{let n={addAliases:e.addAliases,initGit:e.initGit,storeTasksInGit:e.storeTasksInGit,skipInstall:e.skipInstall,yes:!0};Array.isArray(e.rules)&&e.rules.length>0?(n.rules=e.rules,n.rulesExplicitlyProvided=!0,t.info(`Including rules: ${e.rules.join(`, `)}`)):(n.rules=[`cursor`],n.rulesExplicitlyProvided=!0,t.info(`No rule profiles specified, defaulting to: Cursor`)),t.info(`Initializing project with options: ${JSON.stringify(n)}`),s={message:`Project initialized successfully.`,next_step:`Now that the project is initialized, the next step is to create the tasks by parsing a PRD. This will create the tasks folder and the initial task files (tasks folder will be created when parse-prd is run). The parse-prd tool will require a prd.txt file as input (typically found in .taskmaster/docs/ directory). 
You can create a prd.txt file by asking the user about their idea, and then using the .taskmaster/templates/example_prd.txt file as a template to generate a prd.txt file in .taskmaster/docs/. You may skip all of this if the user already has a prd.txt file. You can THEN use the parse-prd tool to create the tasks. So: step 1 after initialization is to create a prd.txt file in .taskmaster/docs/prd.txt or confirm the user already has one. Step 2 is to use the parse-prd tool to create the tasks. Do not bother looking for tasks after initialization, just use the parse-prd tool to create the tasks after creating a prd.txt from which to parse the tasks. You do NOT need to reinitialize the project to parse-prd.`,...await qe(n)},c=!0,t.info(`Project initialization completed successfully in ${a}.`)}catch(e){t.error(`Core initializeProject failed: ${e.message}`),l={code:`INITIALIZATION_FAILED`,message:`Core project initialization failed: ${e.message}`,details:e.stack},c=!1}finally{v(),t.info(`Restoring original CWD: ${o}`),process.chdir(o)}return c?{success:!0,data:s}:{success:!1,error:l}}async function Qt(e,t,n={}){let{tasksJsonPath:r,showMetadata:i=!1,projectRoot:a}=e,{session:o}=n;x();let s=D(t);try{if(!r)return t.error(`listTagsDirect called without tasksJsonPath`),v(),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};t.info(`Listing all tags`);let e=await Ie(r,{showMetadata:i},{session:o,mcpLog:s,projectRoot:a},`json`),n=e.tags.map(e=>{let t=e.tasks||[],n=t.reduce((e,t)=>{let n=t.status||`pending`;return e[n]=(e[n]||0)+1,e},{}),r=t.reduce((e,t)=>(t.subtasks&&t.subtasks.length>0&&(e.totalSubtasks+=t.subtasks.length,t.subtasks.forEach(t=>{let n=t.status||`pending`;e.subtasksByStatus[n]=(e.subtasksByStatus[n]||0)+1})),e),{totalSubtasks:0,subtasksByStatus:{}});return{name:e.name,isCurrent:e.isCurrent,taskCount:t.length,completedTasks:e.completedTasks,statusBreakdown:n,subtaskCounts:r,created:e.created,description:e.description}});return 
v(),{success:!0,data:{tags:n,currentTag:e.currentTag,totalTags:e.totalTags,message:`Found ${e.totalTags} tag(s)`}}}catch(e){return v(),t.error(`Error in listTagsDirect: ${e.message}`),{success:!1,error:{code:e.code||`LIST_TAGS_ERROR`,message:e.message}}}}const $t=[`main`,`research`,`fallback`];function en(e){return u.find(t=>e[t])}async function tn(e,t){for(let n of $t){let r=`set${n.charAt(0).toUpperCase()+n.slice(1)}`;if(e[r]){let i=en(e);return await Ee(n,e[r],{...t,providerHint:i,...e.baseURL&&{baseURL:e.baseURL}})}}return null}async function nn(e,t,n={}){let{session:r}=n,{projectRoot:i}=e,a=D(t);if(t.info(`Executing models_direct with args: ${JSON.stringify(e)}`),t.info(`Using project root: ${i}`),u.filter(t=>e[t]).length>1)return t.error(`Error: Cannot use multiple custom provider flags simultaneously.`),{success:!1,error:{code:`INVALID_ARGS`,message:`Cannot use multiple custom provider flags simultaneously. Choose only one: openrouter, ollama, bedrock, azure, vertex, or openai-compatible.`}};try{x();try{return e.listAvailableModels===!0?await Re({session:r,mcpLog:a,projectRoot:i}):await tn(e,{session:r,mcpLog:a,projectRoot:i})||await Me({session:r,mcpLog:a,projectRoot:i})}finally{v()}}catch(e){return t.error(`Error in models_direct: ${e.message}`),{success:!1,error:{code:`DIRECT_FUNCTION_ERROR`,message:e.message,details:e.stack}}}}async function rn(e,t,n={}){let{session:r}=n,{projectRoot:i}=e;if(t.info(`moveTaskCrossTagDirect called with args: ${JSON.stringify(e)}`),!e.sourceIds)return{success:!1,error:{message:`Source IDs are required`,code:`MISSING_SOURCE_IDS`}};if(!e.sourceTag)return{success:!1,error:{message:`Source tag is required for cross-tag moves`,code:`MISSING_SOURCE_TAG`}};if(!e.targetTag)return{success:!1,error:{message:`Target tag is required for cross-tag moves`,code:`MISSING_TARGET_TAG`}};if(e.sourceTag===e.targetTag)return{success:!1,error:{message:`Source and target tags are the same 
("${e.sourceTag}")`,code:`SAME_SOURCE_TARGET_TAG`,suggestions:[`Use different tags for cross-tag moves`,`Use within-tag move: task-master move --from=<id> --to=<id> --tag=<tag>`,`Check available tags: task-master tags`]}};try{let n=e.tasksJsonPath||e.file;if(!n){if(!e.projectRoot)return{success:!1,error:{message:`Project root is required if tasksJsonPath is not provided`,code:`MISSING_PROJECT_ROOT`}};n=E(e,t)}x();try{let t=e.sourceIds.split(`,`).map(e=>e.trim()),r={withDependencies:e.withDependencies||!1,ignoreDependencies:e.ignoreDependencies||!1};return{success:!0,data:{...await le(n,t,e.sourceTag,e.targetTag,r,{projectRoot:i}),message:`Successfully moved ${t.length} task(s) from "${e.sourceTag}" to "${e.targetTag}"`,moveOptions:r,sourceTag:e.sourceTag,targetTag:e.targetTag}}}finally{v()}}catch(e){t.error(`Failed to move tasks between tags: ${e.message}`),t.error(`Error code: ${e.code}, Error name: ${e.name}`);let n=`MOVE_TASK_CROSS_TAG_ERROR`,r=[];return e.code===`CROSS_TAG_DEPENDENCY_CONFLICTS`?(n=`CROSS_TAG_DEPENDENCY_CONFLICT`,r=[`Use --with-dependencies to move dependent tasks together`,`Use --ignore-dependencies to break cross-tag dependencies`,`Run task-master validate-dependencies to check for issues`,`Move dependencies first, then move the main task`]):e.code===`CANNOT_MOVE_SUBTASK`?(n=`SUBTASK_MOVE_RESTRICTION`,r=[`Promote subtask to full task first: task-master remove-subtask --id=<subtaskId> --convert`,`Move the parent task with all subtasks using --with-dependencies`]):e.code===`TASK_NOT_FOUND`||e.code===`INVALID_SOURCE_TAG`||e.code===`INVALID_TARGET_TAG`?(n=`TAG_OR_TASK_NOT_FOUND`,r=[`Check available tags: task-master tags`,`Verify task IDs exist: task-master list`,`Check task details: task-master show <id>`]):e.message.includes(`cross-tag dependency conflicts`)?(n=`CROSS_TAG_DEPENDENCY_CONFLICT`,r=[`Use --with-dependencies to move dependent tasks together`,`Use --ignore-dependencies to break cross-tag dependencies`,`Run task-master 
validate-dependencies to check for issues`,`Move dependencies first, then move the main task`]):e.message.includes(`Cannot move subtask`)?(n=`SUBTASK_MOVE_RESTRICTION`,r=[`Promote subtask to full task first: task-master remove-subtask --id=<subtaskId> --convert`,`Move the parent task with all subtasks using --with-dependencies`]):e.message.includes(`not found`)?(n=`TAG_OR_TASK_NOT_FOUND`,r=[`Check available tags: task-master tags`,`Verify task IDs exist: task-master list`,`Check task details: task-master show <id>`]):(e.code===`TASK_ALREADY_EXISTS`||e.message?.includes(`already exists in target tag`))&&(n=`TASK_ALREADY_EXISTS`,r=[`Choose a different target tag without conflicting IDs`,`Move a different set of IDs (avoid existing ones)`,`If needed, move within-tag to a new ID first, then cross-tag move`]),{success:!1,error:{message:e.message,code:n,suggestions:r}}}}async function an(e,t,n={}){let{session:r}=n,{projectRoot:i,tag:a}=e;if(!e.sourceId)return{success:!1,error:{message:`Source ID is required`,code:`MISSING_SOURCE_ID`}};if(!e.destinationId)return{success:!1,error:{message:`Destination ID is required`,code:`MISSING_DESTINATION_ID`}};try{let n=e.tasksJsonPath||e.file;if(!n){if(!e.projectRoot)return{success:!1,error:{message:`Project root is required if tasksJsonPath is not provided`,code:`MISSING_PROJECT_ROOT`}};n=E(e,t)}x();let r=e.generateFiles!==!1,o=await fe(n,e.sourceId,e.destinationId,r,{projectRoot:i,tag:a});return v(),{success:!0,data:{...o,message:`Successfully moved task/subtask ${e.sourceId} to ${e.destinationId}`}}}catch(e){return v(),t.error(`Failed to move task: ${e.message}`),{success:!1,error:{message:e.message,code:`MOVE_TASK_ERROR`}}}}async function on(e,t,n={}){let{tasksJsonPath:r,reportPath:i,projectRoot:a,tag:o}=e,{session:s}=n;if(!r)return t.error(`nextTaskDirect called without tasksJsonPath`),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};let c=async()=>{try{x(),t.info(`Finding next task from 
${r}`);let e=f(r,a,o);if(!e||!e.tasks)return v(),{success:!1,error:{code:`INVALID_TASKS_FILE`,message:`No valid tasks found in ${r}`}};let n=T(i),s=ke(e.tasks,n);if(!s)return t.info(`No eligible next task found. All tasks are either completed or have unsatisfied dependencies`),{success:!0,data:{message:`No eligible next task found. All tasks are either completed or have unsatisfied dependencies`,nextTask:null}};let c=typeof s.id==`string`&&s.id.includes(`.`),l=c?`subtask`:`task`,u=c?`Subtasks can be updated with timestamped details as you implement them. This is useful for tracking progress, marking milestones and insights (of successful or successive falures in attempting to implement the subtask). Research can be used when updating the subtask to collect up-to-date information, and can be helpful to solve a repeating problem the agent is unable to solve. It is a good idea to get-task the parent task to collect the overall context of the task, and to get-task the subtask to collect the specific details of the subtask.`:`Tasks can be updated to reflect a change in the direction of the task, or to reformulate the task per your prompt. Research can be used when updating the task to collect up-to-date information. It is best to update subtasks as you work on them, and to update the task for more high-level changes that may affect pending subtasks or the general direction of the task.`;return v(),t.info(`Successfully found next task ${s.id}: ${s.title}. 
Is subtask: ${c}`),{success:!0,data:{nextTask:s,isSubtask:c,nextSteps:`When ready to work on the ${l}, use set-status to set the status to "in progress" ${u}`}}}catch(e){return v(),t.error(`Error finding next task: ${e.message}`),{success:!1,error:{code:`CORE_FUNCTION_ERROR`,message:e.message||`Failed to find next task`}}}};try{let e=await c();return t.info(`nextTaskDirect completed.`),e}catch(e){return t.error(`Unexpected error during nextTask: ${e.message}`),{success:!1,error:{code:`UNEXPECTED_ERROR`,message:e.message}}}}async function sn(e,t,n={}){let{session:r,reportProgress:i}=n,{input:a,output:s,numTasks:c,force:l,append:u,research:d,projectRoot:f,tag:p}=e,m=D(t);if(!f)return m.error(`parsePRDDirect requires a projectRoot argument.`),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`projectRoot is required.`}};let h;if(a)try{h=be({input:a,projectRoot:f},r)}catch(e){return m.error(`Error resolving PRD path: ${e.message}`),{success:!1,error:{code:`FILE_NOT_FOUND`,message:e.message}}}else return m.error(`parsePRDDirect called without input path`),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`Input path is required`}};let g=s?P.isAbsolute(s)?s:P.resolve(f,s):Se(o,e)||P.resolve(f,o);if(!N.existsSync(h)){let e=`Input PRD file not found at resolved path: ${h}`;return m.error(e),{success:!1,error:{code:`FILE_NOT_FOUND`,message:e}}}let y=P.dirname(g);try{N.existsSync(y)||(m.info(`Creating output directory: ${y}`),N.mkdirSync(y,{recursive:!0}))}catch(e){let t=`Failed to create output directory ${y}: ${e.message}`;return m.error(t),{success:!1,error:{code:`DIRECTORY_CREATE_FAILED`,message:t}}}let b=_(f);c&&(b=typeof c==`string`?parseInt(c,10):c,(Number.isNaN(b)||b<0)&&(b=_(f),m.warn(`Invalid numTasks value: ${c}. Using default: ${b}`))),u&&(m.info(`Append mode enabled.`),l&&m.warn(`Both --force and --append flags were provided. --force takes precedence; append mode will be ignored.`)),d&&m.info(`Research mode enabled. 
Using Perplexity AI for enhanced PRD analysis.`),m.info(`Parsing PRD via direct function. Input: ${h}, Output: ${g}, NumTasks: ${b}, Force: ${l}, Append: ${u}, Research: ${d}, ProjectRoot: ${f}`);let S=w();S||x();try{let e=await _e(h,g,b,{session:r,mcpLog:m,projectRoot:f,tag:p,force:l,append:u,research:d,reportProgress:i,commandName:`parse-prd`,outputType:`mcp`},`json`);if(e&&e.success){let t=`Successfully parsed PRD and generated tasks in ${e.tasksPath}`;return m.success(t),{success:!0,data:{message:t,outputPath:e.tasksPath,telemetryData:e.telemetryData,tagInfo:e.tagInfo}}}else return m.error(`Core parsePRD function did not return a successful structure.`),{success:!1,error:{code:`CORE_FUNCTION_ERROR`,message:e?.message||`Core function failed to parse PRD or returned unexpected result.`}}}catch(e){return m.error(`Error executing core parsePRD: ${e.message}`),{success:!1,error:{code:`PARSE_PRD_CORE_ERROR`,message:e.message||`Unknown error parsing PRD`}}}finally{!S&&w()&&v()}}async function cn(e,t){let{tasksJsonPath:n,id:r,dependsOn:i,projectRoot:a,tag:o}=e;try{if(t.info(`Removing dependency with args: ${JSON.stringify(e)}`),!n)return t.error(`removeDependencyDirect called without tasksJsonPath`),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};if(!r)return{success:!1,error:{code:`INPUT_VALIDATION_ERROR`,message:`Task ID (id) is required`}};if(!i)return{success:!1,error:{code:`INPUT_VALIDATION_ERROR`,message:`Dependency ID (dependsOn) is required`}};let s=n,c=r&&r.includes&&r.includes(`.`)?r:parseInt(r,10),l=i&&i.includes&&i.includes(`.`)?i:parseInt(i,10);return t.info(`Removing dependency: task ${c} no longer depends on ${l}`),x(),await Le(s,c,l,{projectRoot:a,tag:o}),v(),{success:!0,data:{message:`Successfully removed dependency: Task ${c} no longer depends on ${l}`,taskId:c,dependencyId:l}}}catch(e){return v(),t.error(`Error in removeDependencyDirect: 
${e.message}`),{success:!1,error:{code:`CORE_FUNCTION_ERROR`,message:e.message}}}}async function ln(e,t){let{tasksJsonPath:n,id:r,convert:i,skipGenerate:a,projectRoot:o,tag:s}=e;try{if(x(),t.info(`Removing subtask with args: ${JSON.stringify(e)}`),!n)return t.error(`removeSubtaskDirect called without tasksJsonPath`),v(),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};if(!r)return v(),{success:!1,error:{code:`INPUT_VALIDATION_ERROR`,message:`Subtask ID is required and must be in format "parentId.subtaskId"`}};if(!r.includes(`.`))return v(),{success:!1,error:{code:`INPUT_VALIDATION_ERROR`,message:`Invalid subtask ID format: ${r}. Expected format: "parentId.subtaskId"`}};let c=n,l=i===!0,u=!a;t.info(`Removing subtask ${r} (convertToTask: ${l}, generateFiles: ${u})`);let d=await he(c,r,l,u,{projectRoot:o,tag:s});return v(),l&&d?{success:!0,data:{message:`Subtask ${r} successfully converted to task #${d.id}`,task:d}}:{success:!0,data:{message:`Subtask ${r} successfully removed`}}}catch(e){return v(),t.error(`Error in removeSubtaskDirect: ${e.message}`),{success:!1,error:{code:`CORE_FUNCTION_ERROR`,message:e.message}}}}async function un(e,t,n={}){let{tasksJsonPath:r,id:i,projectRoot:a,tag:o}=e,{session:s}=n;try{if(!r)return t.error(`removeTaskDirect called without tasksJsonPath`),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};if(!i)return t.error(`Task ID is required`),{success:!1,error:{code:`INPUT_VALIDATION_ERROR`,message:`Task ID is required`}};let e=i.split(`,`).map(e=>e.trim());t.info(`Removing ${e.length} task(s) with ID(s): ${e.join(`, `)} from ${r}${o?` in tag '${o}'`:``}`);let n=f(r,a,o);if(!n||!n.tasks)return{success:!1,error:{code:`INVALID_TASKS_FILE`,message:`No valid tasks found in ${r}${o?` for tag '${o}'`:``}`}};let s=e.filter(e=>!me(n.tasks,e));if(s.length>0)return{success:!1,error:{code:`INVALID_TASK_ID`,message:`The following tasks were not found${o?` in tag '${o}'`:``}: ${s.join(`, 
`)}`}};x();try{let n=await Ne(r,i,{projectRoot:a,tag:o});return n.success?(t.info(`Successfully removed ${n.removedTasks.length} task(s)`),{success:!0,data:{totalTasks:e.length,successful:n.removedTasks.length,failed:e.length-n.removedTasks.length,removedTasks:n.removedTasks,message:n.message,tasksPath:r,tag:o}}):{success:!1,error:{code:`REMOVE_TASK_ERROR`,message:n.error||`Failed to remove tasks`}}}finally{v()}}catch(e){return v(),t.error(`Unexpected error in removeTaskDirect: ${e.message}`),{success:!1,error:{code:`UNEXPECTED_ERROR`,message:e.message}}}}async function dn(e,t,n={}){let{tasksJsonPath:r,oldName:i,newName:a,projectRoot:o}=e,{session:s}=n;x();let c=D(t);try{if(!r)return t.error(`renameTagDirect called without tasksJsonPath`),v(),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};if(!i||typeof i!=`string`)return t.error(`Missing required parameter: oldName`),v(),{success:!1,error:{code:`MISSING_PARAMETER`,message:`Old tag name is required and must be a string`}};if(!a||typeof a!=`string`)return t.error(`Missing required parameter: newName`),v(),{success:!1,error:{code:`MISSING_PARAMETER`,message:`New tag name is required and must be a string`}};t.info(`Renaming tag from "${i}" to "${a}"`);let e=await Oe(r,i,a,{},{session:s,mcpLog:c,projectRoot:o},`json`);return v(),{success:!0,data:{oldName:e.oldName,newName:e.newName,renamed:e.renamed,taskCount:e.taskCount,wasCurrentTag:e.wasCurrentTag,message:`Successfully renamed tag from "${e.oldName}" to "${e.newName}"`}}}catch(e){return v(),t.error(`Error in renameTagDirect: ${e.message}`),{success:!1,error:{code:e.code||`RENAME_TAG_ERROR`,message:e.message}}}}async function fn(e,t,n={}){let{query:r,taskIds:i,filePaths:a,customContext:o,includeProjectTree:s=!1,detailLevel:c=`medium`,saveTo:l,saveToFile:u=!1,projectRoot:d,tag:f}=e,{session:p}=n;x();let m=D(t);try{if(!r||typeof r!=`string`||r.trim().length===0)return t.error(`Missing or invalid required parameter: 
query`),v(),{success:!1,error:{code:`MISSING_PARAMETER`,message:`The query parameter is required and must be a non-empty string`}};let e=i?i.split(`,`).map(e=>e.trim()).filter(e=>e.length>0):[],n=a?a.split(`,`).map(e=>e.trim()).filter(e=>e.length>0):[],h=[`low`,`medium`,`high`];if(!h.includes(c))return t.error(`Invalid detail level: ${c}`),v(),{success:!1,error:{code:`INVALID_PARAMETER`,message:`Detail level must be one of: ${h.join(`, `)}`}};t.info(`Performing research query: "${r.substring(0,100)}${r.length>100?`...`:``}", taskIds: [${e.join(`, `)}], filePaths: [${n.join(`, `)}], detailLevel: ${c}, includeProjectTree: ${s}, projectRoot: ${d}`);let g={taskIds:e,filePaths:n,customContext:o||``,includeProjectTree:s,detailLevel:c,projectRoot:d,tag:f,saveToFile:u},_={session:p,mcpLog:m,commandName:`research`,outputType:`mcp`},y=await oe(r.trim(),g,_,`json`,!1);if(l)try{let e=l.includes(`.`),n=`## Research Query: ${r.trim()}
|
|
37
|
+
|
|
38
|
+
**Detail Level:** ${y.detailLevel}
|
|
39
|
+
**Context Size:** ${y.contextSize} characters
|
|
40
|
+
**Timestamp:** ${new Date().toLocaleDateString()} ${new Date().toLocaleTimeString()}
|
|
41
|
+
|
|
42
|
+
### Results
|
|
43
|
+
|
|
44
|
+
${y.result}`;if(e){let{updateSubtaskById:e}=await import(`./update-subtask-by-id-DiWMqGfw.js`);await e(P.join(d,`.taskmaster`,`tasks`,`tasks.json`),l,n,!1,{session:p,mcpLog:m,commandName:`research-save`,outputType:`mcp`,projectRoot:d,tag:f},`json`),t.info(`Research saved to subtask ${l}`)}else{let e=(await import(`./update-task-by-id-eyL-PNVX.js`)).default,r=parseInt(l,10);await e(P.join(d,`.taskmaster`,`tasks`,`tasks.json`),r,n,!1,{session:p,mcpLog:m,commandName:`research-save`,outputType:`mcp`,projectRoot:d,tag:f},`json`,!0),t.info(`Research saved to task ${l}`)}}catch(e){t.warn(`Error saving research to task/subtask: ${e.message}`)}return v(),{success:!0,data:{query:y.query,result:y.result,contextSize:y.contextSize,contextTokens:y.contextTokens,tokenBreakdown:y.tokenBreakdown,systemPromptTokens:y.systemPromptTokens,userPromptTokens:y.userPromptTokens,totalInputTokens:y.totalInputTokens,detailLevel:y.detailLevel,telemetryData:y.telemetryData,tagInfo:y.tagInfo,savedFilePath:y.savedFilePath}}}catch(e){return v(),t.error(`Error in researchDirect: ${e.message}`),{success:!1,error:{code:e.code||`RESEARCH_ERROR`,message:e.message}}}}async function pn(e,t,n={}){let{tasksJsonPath:r,id:i,strength:a=`regular`,prompt:o,research:s=!1,projectRoot:c,tag:l}=e,{session:u}=n;x();let d=D(t);try{if(!r)return t.error(`scopeDownDirect called without tasksJsonPath`),v(),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};if(!i)return t.error(`Missing required parameter: id`),v(),{success:!1,error:{code:`MISSING_PARAMETER`,message:`The id parameter is required for scoping down tasks`}};let e=i.split(`,`).map(e=>parseInt(e.trim(),10));t.info(`Scoping down tasks: ${e.join(`, `)}, strength: ${a}, research: ${s}`);let n=await ce(r,e,a,o,{session:u,mcpLog:d,projectRoot:c,commandName:`scope-down`,outputType:`mcp`,tag:l,research:s},`json`);return v(),{success:!0,data:{updatedTasks:n.updatedTasks,tasksUpdated:n.updatedTasks.length,message:`Successfully scoped down 
${n.updatedTasks.length} task(s)`,telemetryData:n.telemetryData}}}catch(e){return v(),t.error(`Error in scopeDownDirect: ${e.message}`),{success:!1,error:{code:e.code||`SCOPE_DOWN_ERROR`,message:e.message}}}}async function mn(e,t,n={}){let{tasksJsonPath:r,id:i,strength:a=`regular`,prompt:o,research:s=!1,projectRoot:c,tag:l}=e,{session:u}=n;x();let d=D(t);try{if(!r)return t.error(`scopeUpDirect called without tasksJsonPath`),v(),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};if(!i)return t.error(`Missing required parameter: id`),v(),{success:!1,error:{code:`MISSING_PARAMETER`,message:`The id parameter is required for scoping up tasks`}};let e=i.split(`,`).map(e=>parseInt(e.trim(),10));t.info(`Scoping up tasks: ${e.join(`, `)}, strength: ${a}, research: ${s}`);let n=await ge(r,e,a,o,{session:u,mcpLog:d,projectRoot:c,commandName:`scope-up`,outputType:`mcp`,tag:l,research:s},`json`);return v(),{success:!0,data:{updatedTasks:n.updatedTasks,tasksUpdated:n.updatedTasks.length,message:`Successfully scoped up ${n.updatedTasks.length} task(s)`,telemetryData:n.telemetryData}}}catch(e){return v(),t.error(`Error in scopeUpDirect: ${e.message}`),{success:!1,error:{code:e.code||`SCOPE_UP_ERROR`,message:e.message}}}}async function hn(e,t,n={}){let{session:r}=n,{tasksJsonPath:i,id:a,prompt:o,research:s,metadata:c,projectRoot:l,tag:u}=e,d=D(t);try{if(d.info(`Updating subtask by ID via direct function. ID: ${a}, ProjectRoot: ${l}`),!i){let e=`tasksJsonPath is required but was not provided.`;return d.error(e),{success:!1,error:{code:`MISSING_ARGUMENT`,message:e}}}if(!a||typeof a!=`string`||!a.trim()){let e=`Subtask ID cannot be empty.`;return d.error(e),{success:!1,error:{code:`INVALID_SUBTASK_ID`,message:e}}}if(!o&&!c){let e=`No prompt or metadata specified. 
Please provide information to append or metadata to update.`;return d.error(e),{success:!1,error:{code:`MISSING_PROMPT`,message:e}}}let e=String(a).trim(),n=i,f=s===!0;t.info(`Updating subtask with ID ${e} with prompt "${o||`(metadata-only)`}" and research: ${f}`);let p=w();p||x();try{let t=await He(n,e,o,f,{mcpLog:d,session:r,projectRoot:l,tag:u,commandName:`update-subtask`,outputType:`mcp`,metadata:c},`json`);if(!t||t.updatedSubtask===null){let e=`Subtask ${a} or its parent task not found.`;return d.error(e),{success:!1,error:{code:`SUBTASK_NOT_FOUND`,message:e}}}let i=e.split(`.`)[0],s=`Successfully updated subtask with ID ${e}`;return d.success(s),{success:!0,data:{message:`Successfully updated subtask with ID ${e}`,subtaskId:e,parentId:i,subtask:t.updatedSubtask,tasksPath:n,useResearch:f,telemetryData:t.telemetryData,tagInfo:t.tagInfo}}}catch(e){return d.error(`Error updating subtask by ID: ${e.message}`),{success:!1,error:{code:`UPDATE_SUBTASK_CORE_ERROR`,message:e.message||`Unknown error updating subtask`}}}finally{!p&&w()&&v()}}catch(e){return d.error(`Setup error in updateSubtaskByIdDirect: ${e.message}`),w()&&v(),{success:!1,error:{code:`DIRECT_FUNCTION_SETUP_ERROR`,message:e.message||`Unknown setup error`}}}}async function gn(e,t,n={}){let{session:r}=n,{tasksJsonPath:i,id:a,prompt:o,research:s,append:c,metadata:l,projectRoot:u,tag:d}=e,f=D(t);try{if(f.info(`Updating task by ID via direct function. ID: ${a}, ProjectRoot: ${u}`),!a){let e=`No task ID specified. Please provide a task ID to update.`;return f.error(e),{success:!1,error:{code:`INPUT_VALIDATION_ERROR`,message:e}}}if(!o&&!l){let e=`No prompt or metadata specified. Please provide a prompt with new information or metadata for the task update.`;return f.error(e),{success:!1,error:{code:`INPUT_VALIDATION_ERROR`,message:e}}}let t;if(typeof a==`string`)t=a;else if(typeof a==`number`)t=String(a);else{let e=`Invalid task ID type: ${typeof a}. 
Task ID must be a string or number.`;return f.error(e),{success:!1,error:{code:`INPUT_VALIDATION_ERROR`,message:e}}}let n=i||E({projectRoot:u,file:e.file},f);if(!n){let e=`tasks.json path could not be resolved.`;return f.error(e),{success:!1,error:{code:`INPUT_VALIDATION_ERROR`,message:e}}}let p=s===!0;f.info(`Updating task with ID ${t} with prompt "${o||`(metadata-only)`}" and research: ${p}`);let m=w();m||x();try{let e=await se(n,t,o,p,{mcpLog:f,session:r,projectRoot:u,tag:d,commandName:`update-task`,outputType:`mcp`,metadata:l},`json`,c||!1);if(!e||e.updatedTask===null){let n=`Task ${t} was not updated (likely already completed).`;return f.info(n),{success:!0,data:{message:n,taskId:t,updated:!1,telemetryData:e?.telemetryData,tagInfo:e?.tagInfo}}}let i=`Successfully updated task with ID ${t} based on the prompt`;return f.info(i),{success:!0,data:{message:i,taskId:t,tasksPath:n,useResearch:p,updated:!0,updatedTask:e.updatedTask,telemetryData:e.telemetryData,tagInfo:e.tagInfo}}}catch(e){return f.error(`Error updating task by ID: ${e.message}`),{success:!1,error:{code:`UPDATE_TASK_CORE_ERROR`,message:e.message||`Unknown error updating task`}}}finally{!m&&w()&&v()}}catch(e){return f.error(`Setup error in updateTaskByIdDirect: ${e.message}`),w()&&v(),{success:!1,error:{code:`DIRECT_FUNCTION_SETUP_ERROR`,message:e.message||`Unknown setup error`}}}}async function _n(e,t,n={}){let{session:r}=n,{from:i,prompt:a,research:o,tasksJsonPath:s,projectRoot:c,tag:l}=e,u=D(t);if(!c)return u.error(`updateTasksDirect requires a projectRoot argument.`),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`projectRoot is required.`}};if(!i)return u.error(`updateTasksDirect called without from ID`),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`Starting task ID (from) is required`}};if(!a)return u.error(`updateTasksDirect called without prompt`),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`Update prompt is required`}};u.info(`Updating tasks via direct function. 
From: ${i}, Research: ${o}, File: ${s}, ProjectRoot: ${c}`),x();try{let e=await ye(s,i,a,o,{session:r,mcpLog:u,projectRoot:c,tag:l},`json`);return e&&e.success&&Array.isArray(e.updatedTasks)?(u.success(`Successfully updated ${e.updatedTasks.length} tasks.`),{success:!0,data:{message:`Successfully updated ${e.updatedTasks.length} tasks.`,tasksPath:s,updatedCount:e.updatedTasks.length,telemetryData:e.telemetryData,tagInfo:e.tagInfo}}):(u.error(`Core updateTasks function did not return a successful structure.`),{success:!1,error:{code:`CORE_FUNCTION_ERROR`,message:e?.message||`Core function failed to update tasks or returned unexpected result.`}})}catch(e){return u.error(`Error executing core updateTasks: ${e.message}`),{success:!1,error:{code:`UPDATE_TASKS_CORE_ERROR`,message:e.message||`Unknown error updating tasks`}}}finally{v()}}async function vn(e,t,n={}){let{tasksJsonPath:r,name:i,projectRoot:a}=e,{session:o}=n;x();let s=D(t);try{if(!r)return t.error(`useTagDirect called without tasksJsonPath`),v(),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};if(!i||typeof i!=`string`)return t.error(`Missing required parameter: name`),v(),{success:!1,error:{code:`MISSING_PARAMETER`,message:`Tag name is required and must be a string`}};t.info(`Switching to tag: ${i}`);let e=await Ae(r,i,{},{session:o,mcpLog:s,projectRoot:a},`json`);return v(),{success:!0,data:{tagName:e.currentTag,switched:e.switched,previousTag:e.previousTag,taskCount:e.taskCount,message:`Successfully switched to tag "${e.currentTag}"`}}}catch(e){return v(),t.error(`Error in useTagDirect: ${e.message}`),{success:!1,error:{code:e.code||`USE_TAG_ERROR`,message:e.message}}}}async function yn(e,t){let{tasksJsonPath:n,projectRoot:r,tag:i}=e;if(!n)return t.error(`validateDependenciesDirect called without tasksJsonPath`),{success:!1,error:{code:`MISSING_ARGUMENT`,message:`tasksJsonPath is required`}};try{t.info(`Validating dependencies in tasks: ${n}`);let e=n;return 
N.existsSync(e)?(x(),await je(e,{projectRoot:r,tag:i}),v(),{success:!0,data:{message:`Dependencies validated successfully`,tasksPath:e}}):{success:!1,error:{code:`FILE_NOT_FOUND`,message:`Tasks file not found at ${e}`}}}catch(e){return v(),t.error(`Error validating dependencies: ${e.message}`),{success:!1,error:{code:`VALIDATION_ERROR`,message:e.message}}}}function bn(e){e.addTool({name:`add_dependency`,description:`Add a dependency relationship between two tasks`,parameters:I.object({id:I.string().describe(`ID of task that will depend on another task`),dependsOn:I.string().describe(`ID of task that will become a dependency`),file:I.string().optional().describe(`Absolute path to the tasks file (default: tasks/tasks.json)`),projectRoot:I.string().describe(`The directory of the project. Must be an absolute path.`),tag:I.string().optional().describe(`Tag context to operate on`)}),annotations:{title:`Add Dependency`,destructiveHint:!0},execute:Q(`add-dependency`,async(e,{log:t,session:n})=>{try{t.info(`Adding dependency for task ${e.id} to depend on ${e.dependsOn}`);let n=i({projectRoot:e.projectRoot,tag:e.tag}),r;try{r=E({projectRoot:e.projectRoot,file:e.file},t)}catch(e){return t.error(`Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}let a=await zt({tasksJsonPath:r,id:e.id,dependsOn:e.dependsOn,projectRoot:e.projectRoot,tag:n},t);return a.success?t.info(`Successfully added dependency: ${a.data.message}`):t.error(`Failed to add dependency: ${a.error.message}`),Y({result:a,log:t,errorPrefix:`Error adding dependency`,projectRoot:e.projectRoot,tag:n})}catch(e){return t.error(`Error in addDependency tool: ${e.message}`),J(e.message)}})})}function xn(e){e.addTool({name:`add_subtask`,description:`Add a subtask to an existing task`,parameters:I.object({id:I.string().describe(`Parent task ID (required)`),taskId:I.string().optional().describe(`Existing task ID to convert to subtask`),title:I.string().optional().describe(`Title for the new 
subtask (when creating a new subtask)`),description:I.string().optional().describe(`Description for the new subtask`),details:I.string().optional().describe(`Implementation details for the new subtask`),status:I.string().optional().describe(`Status for the new subtask (default: 'pending')`),dependencies:I.string().optional().describe(`Comma-separated list of dependency IDs for the new subtask`),file:I.string().optional().describe(`Absolute path to the tasks file (default: tasks/tasks.json)`),skipGenerate:I.boolean().optional().describe(`Skip regenerating task files`),projectRoot:I.string().describe(`The directory of the project. Must be an absolute path.`),tag:I.string().optional().describe(`Tag context to operate on`)}),annotations:{title:`Add Subtask`,destructiveHint:!0},execute:Z(async(e,{log:t,session:n})=>{try{let r=i({projectRoot:e.projectRoot,tag:e.tag});t.info(`Adding subtask with args: ${JSON.stringify(e)}`);let a;try{a=E({projectRoot:e.projectRoot,file:e.file},t)}catch(e){return t.error(`Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}let o=await Bt({tasksJsonPath:a,id:e.id,taskId:e.taskId,title:e.title,description:e.description,details:e.details,status:e.status,dependencies:e.dependencies,skipGenerate:e.skipGenerate,projectRoot:e.projectRoot,tag:r},t,{session:n});return o.success?t.info(`Subtask added successfully: ${o.data.message}`):t.error(`Failed to add subtask: ${o.error.message}`),Y({result:o,log:t,errorPrefix:`Error adding subtask`,projectRoot:e.projectRoot})}catch(e){return t.error(`Error in addSubtask tool: ${e.message}`),J(e.message)}})})}function Sn(e){e.addTool({name:`add_tag`,description:`Create a new tag for organizing tasks in different contexts`,parameters:I.object({name:I.string().describe(`Name of the new tag to create`),copyFromCurrent:I.boolean().optional().describe(`Whether to copy tasks from the current tag (default: false)`),copyFromTag:I.string().optional().describe(`Specific tag to copy tasks 
from`),fromBranch:I.boolean().optional().describe(`Create tag name from current git branch (ignores name parameter)`),description:I.string().optional().describe(`Optional description for the tag`),file:I.string().optional().describe(`Path to the tasks file (default: tasks/tasks.json)`),projectRoot:I.string().describe(`The directory of the project. Must be an absolute path.`)}),annotations:{title:`Add Tag`,destructiveHint:!1},execute:Z(async(e,{log:t,session:n})=>{try{t.info(`Starting add-tag with args: ${JSON.stringify(e)}`);let r;try{r=E({projectRoot:e.projectRoot,file:e.file},t)}catch(e){return t.error(`Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}return Y({result:await Vt({tasksJsonPath:r,name:e.name,copyFromCurrent:e.copyFromCurrent,copyFromTag:e.copyFromTag,fromBranch:e.fromBranch,description:e.description,projectRoot:e.projectRoot},t,{session:n}),log:t,errorPrefix:`Error creating tag`,projectRoot:e.projectRoot})}catch(e){return t.error(`Error in add-tag tool: ${e.message}`),J(e.message)}})})}function Cn(e){e.addTool({name:`add_task`,description:`Add a new task using AI`,parameters:I.object({prompt:I.string().optional().describe(`Description of the task to add (required if not using manual fields)`),title:I.string().optional().describe(`Task title (for manual task creation)`),description:I.string().optional().describe(`Task description (for manual task creation)`),details:I.string().optional().describe(`Implementation details (for manual task creation)`),testStrategy:I.string().optional().describe(`Test strategy (for manual task creation)`),dependencies:I.string().optional().describe(`Comma-separated list of task IDs this task depends on`),priority:I.string().optional().describe(`Task priority (high, medium, low)`),file:I.string().optional().describe(`Path to the tasks file (default: tasks/tasks.json)`),projectRoot:I.string().describe(`The directory of the project. 
Must be an absolute path.`),tag:I.string().optional().describe(`Tag context to operate on`),research:I.boolean().optional().describe(`Whether to use research capabilities for task creation`)}),annotations:{title:`Add Task`,destructiveHint:!1},execute:Z(async(e,{log:t,session:n})=>{try{t.info(`Starting add-task with args: ${JSON.stringify(e)}`);let r=i({projectRoot:e.projectRoot,tag:e.tag}),a;try{a=E({projectRoot:e.projectRoot,file:e.file},t)}catch(e){return t.error(`Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}return Y({result:await Ht({tasksJsonPath:a,prompt:e.prompt,title:e.title,description:e.description,details:e.details,testStrategy:e.testStrategy,dependencies:e.dependencies,priority:e.priority,research:e.research,projectRoot:e.projectRoot,tag:r},t,{session:n}),log:t,errorPrefix:`Error adding task`,projectRoot:e.projectRoot})}catch(e){return t.error(`Error in add-task tool: ${e.message}`),J(e.message)}})})}function wn(e){e.addTool({name:`analyze_project_complexity`,description:`Analyze task complexity and generate expansion recommendations.`,parameters:I.object({threshold:I.coerce.number().int().min(1).max(10).optional().default(5).describe(`Complexity score threshold (1-10) to recommend expansion.`),research:I.boolean().optional().default(!1).describe(`Use Perplexity AI for research-backed analysis.`),output:I.string().optional().describe(`Output file path relative to project root (default: ${c}).`),file:I.string().optional().describe(`Path to the tasks file relative to project root (default: tasks/tasks.json).`),ids:I.string().optional().describe(`Comma-separated list of task IDs to analyze specifically (e.g., "1,3,5").`),from:I.coerce.number().int().positive().optional().describe(`Starting task ID in a range to analyze.`),to:I.coerce.number().int().positive().optional().describe(`Ending task ID in a range to analyze.`),projectRoot:I.string().describe(`The directory of the project. 
Must be an absolute path.`),tag:I.string().optional().describe(`Tag context to operate on`)}),annotations:{title:`Analyze Project Complexity`,destructiveHint:!0},execute:Z(async(e,{log:t,session:n})=>{let r=`analyze_project_complexity`;try{t.info(`Executing ${r} tool with args: ${JSON.stringify(e)}`);let a=i({projectRoot:e.projectRoot,tag:e.tag}),o;try{o=E({projectRoot:e.projectRoot,file:e.file},t),t.info(`${r}: Resolved tasks path: ${o}`)}catch(n){return t.error(`${r}: Error finding tasks.json: ${n.message}`),J(`Failed to find tasks.json within project root '${e.projectRoot}': ${n.message}`)}let s=C(e.output,{projectRoot:e.projectRoot,tag:a},t);t.info(`${r}: Report output path: ${s}`);let c=P.dirname(s);try{N.existsSync(c)||(N.mkdirSync(c,{recursive:!0}),t.info(`${r}: Created output directory: ${c}`))}catch(e){return t.error(`${r}: Failed to create output directory ${c}: ${e.message}`),J(`Failed to create output directory: ${e.message}`)}let l=await Ut({tasksJsonPath:o,outputPath:s,threshold:e.threshold,research:e.research,projectRoot:e.projectRoot,tag:a,ids:e.ids,from:e.from,to:e.to},t,{session:n});return t.info(`${r}: Direct function result: success=${l.success}`),Y({result:l,log:t,errorPrefix:`Error analyzing task complexity`,projectRoot:e.projectRoot})}catch(e){return t.error(`Critical error in ${r} tool execute: ${e.message}`),J(`Internal tool error (${r}): ${e.message}`)}})})}function Tn(e){e.addTool({name:`clear_subtasks`,description:`Clear subtasks from specified tasks`,parameters:I.object({id:I.string().optional().describe(`Task IDs (comma-separated) to clear subtasks from`),all:I.boolean().optional().describe(`Clear subtasks from all tasks`),file:I.string().optional().describe(`Absolute path to the tasks file (default: tasks/tasks.json)`),projectRoot:I.string().describe(`The directory of the project. 
Must be an absolute path.`),tag:I.string().optional().describe(`Tag context to operate on`)}).refine(e=>e.id||e.all,{message:`Either 'id' or 'all' parameter must be provided`,path:[`id`,`all`]}),annotations:{title:`Clear Subtasks`,destructiveHint:!0},execute:Q(`clear-subtasks`,async(e,t)=>{try{t.log.info(`Clearing subtasks with args: ${JSON.stringify(e)}`);let n=i({projectRoot:e.projectRoot,tag:e.tag}),r;try{r=E({projectRoot:e.projectRoot,file:e.file},t.log)}catch(e){return t.log.error(`Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}let a=await Wt({tasksJsonPath:r,id:e.id,all:e.all,projectRoot:e.projectRoot,tag:n},t.log,{session:t.session});return a.success?t.log.info(`Subtasks cleared successfully: ${a.data.message}`):t.log.error(`Failed to clear subtasks: ${a.error.message}`),Y({result:a,log:t.log,errorPrefix:`Error clearing subtasks`,projectRoot:e.projectRoot})}catch(e){return t.log.error(`Error in clearSubtasks tool: ${e.message}`),J(e.message)}})})}function En(e){e.addTool({name:`complexity_report`,description:`Display the complexity analysis report in a readable format`,parameters:I.object({file:I.string().optional().describe(`Path to the report file (default: ${c})`),projectRoot:I.string().describe(`The directory of the project. Must be an absolute path.`)}),annotations:{title:`Complexity Report`,readOnlyHint:!0},execute:Z(async(e,{log:t,session:n})=>{try{t.info(`Getting complexity report with args: ${JSON.stringify(e)}`);let n=m(e.projectRoot),r=Ue({projectRoot:e.projectRoot,complexityReport:e.file,tag:n},t);if(t.info(`Reading complexity report from path: `,r),!r)return J(`No complexity report found. 
Run task-master analyze-complexity first.`);let i=await Gt({reportPath:r},t);return i.success?t.info(`Successfully retrieved complexity report`):t.error(`Failed to retrieve complexity report: ${i.error.message}`),Y({result:i,log:t,errorPrefix:`Error retrieving complexity report`,projectRoot:e.projectRoot})}catch(e){return t.error(`Error in complexity-report tool: ${e.message}`),J(`Failed to retrieve complexity report: ${e.message}`)}})})}function Dn(e){e.addTool({name:`copy_tag`,description:`Copy an existing tag to create a new tag with all tasks and metadata`,parameters:I.object({sourceName:I.string().describe(`Name of the source tag to copy from`),targetName:I.string().describe(`Name of the new tag to create`),description:I.string().optional().describe(`Optional description for the new tag`),file:I.string().optional().describe(`Path to the tasks file (default: tasks/tasks.json)`),projectRoot:I.string().describe(`The directory of the project. Must be an absolute path.`)}),annotations:{title:`Copy Tag`,destructiveHint:!1},execute:Z(async(e,{log:t,session:n})=>{try{t.info(`Starting copy-tag with args: ${JSON.stringify(e)}`);let r;try{r=E({projectRoot:e.projectRoot,file:e.file},t)}catch(e){return t.error(`Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}return Y({result:await Kt({tasksJsonPath:r,sourceName:e.sourceName,targetName:e.targetName,description:e.description,projectRoot:e.projectRoot},t,{session:n}),log:t,errorPrefix:`Error copying tag`,projectRoot:e.projectRoot})}catch(e){return t.error(`Error in copy-tag tool: ${e.message}`),J(e.message)}})})}function On(e){e.addTool({name:`delete_tag`,description:`Delete an existing tag and all its tasks`,parameters:I.object({name:I.string().describe(`Name of the tag to delete`),yes:I.boolean().optional().describe(`Skip confirmation prompts (default: true for MCP)`),file:I.string().optional().describe(`Path to the tasks file (default: 
tasks/tasks.json)`),projectRoot:I.string().describe(`The directory of the project. Must be an absolute path.`)}),annotations:{title:`Delete Tag`,destructiveHint:!0},execute:Z(async(e,{log:t,session:n})=>{try{t.info(`Starting delete-tag with args: ${JSON.stringify(e)}`);let r;try{r=E({projectRoot:e.projectRoot,file:e.file},t)}catch(e){return t.error(`Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}return Y({result:await qt({tasksJsonPath:r,name:e.name,yes:e.yes===void 0?!0:e.yes,projectRoot:e.projectRoot},t,{session:n}),log:t,errorPrefix:`Error deleting tag`,projectRoot:e.projectRoot})}catch(e){return t.error(`Error in delete-tag tool: ${e.message}`),J(e.message)}})})}function kn(e){e.addTool({name:`expand_all`,description:`Expand all pending tasks into subtasks based on complexity or defaults`,parameters:I.object({num:I.string().optional().describe(`Target number of subtasks per task (uses complexity/defaults otherwise)`),research:I.boolean().optional().describe(`Enable research-backed subtask generation (e.g., using Perplexity)`),prompt:I.string().optional().describe(`Additional context to guide subtask generation for all tasks`),force:I.boolean().optional().describe(`Force regeneration of subtasks for tasks that already have them`),file:I.string().optional().describe(`Absolute path to the tasks file in the /tasks folder inside the project root (default: tasks/tasks.json)`),projectRoot:I.string().optional().describe(`Absolute path to the project root directory (derived from session if possible)`),tag:I.string().optional().describe(`Tag context to operate on`)}),annotations:{title:`Expand All Tasks`,destructiveHint:!0},execute:Z(async(e,{log:t,session:n})=>{try{t.info(`Tool expand_all execution started with args: ${JSON.stringify(e)}`);let r=i({projectRoot:e.projectRoot,tag:e.tag}),a;try{a=E({projectRoot:e.projectRoot,file:e.file},t),t.info(`Resolved tasks.json path: ${a}`)}catch(e){return t.error(`Error finding tasks.json: 
${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}let o=xe(null,{projectRoot:e.projectRoot,tag:r},t);return t.info(`Using complexity report path: ${o}`),Y({result:await Jt({tasksJsonPath:a,num:e.num,research:e.research,prompt:e.prompt,force:e.force,projectRoot:e.projectRoot,tag:r,complexityReportPath:o},t,{session:n}),log:t,errorPrefix:`Error expanding all tasks`,projectRoot:e.projectRoot})}catch(e){return t.error(`Unexpected error in expand_all tool execute: ${e.message}`),e.stack&&t.error(e.stack),J(`An unexpected error occurred: ${e.message}`)}})})}function An(e){e.addTool({name:`expand_task`,description:`Expand a task into subtasks for detailed implementation`,parameters:I.object({id:I.string().describe(`ID of task to expand`),num:I.string().optional().describe(`Number of subtasks to generate`),research:I.boolean().optional().default(!1).describe(`Use research role for generation`),prompt:I.string().optional().describe(`Additional context for subtask generation`),file:I.string().optional().describe(`Path to the tasks file relative to project root (e.g., tasks/tasks.json)`),projectRoot:I.string().describe(`The directory of the project. 
Must be an absolute path.`),force:I.boolean().optional().default(!1).describe(`Force expansion even if subtasks exist`),tag:I.string().optional().describe(`Tag context to operate on`)}),annotations:{title:`Expand Task`,destructiveHint:!0},execute:Z(async(e,{log:t,session:n})=>{try{t.info(`Starting expand-task with args: ${JSON.stringify(e)}`);let r=i({projectRoot:e.projectRoot,tag:e.tag}),a;try{a=E({projectRoot:e.projectRoot,file:e.file},t)}catch(e){return t.error(`Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}let o=Ue({...e,tag:r},t);return Y({result:await Yt({tasksJsonPath:a,id:e.id,num:e.num,research:e.research,prompt:e.prompt,force:e.force,complexityReportPath:o,projectRoot:e.projectRoot,tag:r},t,{session:n}),log:t,errorPrefix:`Error expanding task`,projectRoot:e.projectRoot})}catch(e){return t.error(`Error in expand-task tool: ${e.message}`),J(e.message)}})})}function jn(e){e.addTool({name:`fix_dependencies`,description:`Fix invalid dependencies in tasks automatically`,parameters:I.object({file:I.string().optional().describe(`Absolute path to the tasks file`),projectRoot:I.string().describe(`The directory of the project. 
Must be an absolute path.`),tag:I.string().optional().describe(`Tag context to operate on`)}),annotations:{title:`Fix Dependencies`,destructiveHint:!0},execute:Q(`fix-dependencies`,async(e,t)=>{try{t.log.info(`Fixing dependencies with args: ${JSON.stringify(e)}`);let n=i({projectRoot:e.projectRoot,tag:e.tag}),r;try{r=E({projectRoot:e.projectRoot,file:e.file},t.log)}catch(e){return t.log.error(`Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}let a=await Xt({tasksJsonPath:r,projectRoot:e.projectRoot,tag:n},t.log);return a.success?t.log.info(`Successfully fixed dependencies: ${a.data.message}`):t.log.error(`Failed to fix dependencies: ${a.error.message}`),Y({result:a,log:t.log,errorPrefix:`Error fixing dependencies`,projectRoot:e.projectRoot})}catch(e){return t.log.error(`Error in fixDependencies tool: ${e.message}`),J(e.message)}})})}function Mn(e){e.addTool({name:`initialize_project`,description:`Initializes a new Task Master project structure by calling the core initialization logic. Creates necessary folders and configuration files for Task Master in the current directory.`,parameters:I.object({skipInstall:I.boolean().optional().default(!1).describe(`Skip installing dependencies automatically. Never do this unless you are sure the project is already installed.`),addAliases:I.boolean().optional().default(!0).describe(`Add shell aliases (tm, taskmaster, hamster, ham) to shell config file.`),initGit:I.boolean().optional().default(!0).describe(`Initialize Git repository in project root.`),storeTasksInGit:I.boolean().optional().default(!0).describe(`Store tasks in Git (tasks.json and tasks/ directory).`),yes:I.boolean().optional().default(!0).describe(`Skip prompts and use default values. Always set to true for MCP tools.`),projectRoot:I.string().describe(`The root directory for the project. ALWAYS SET THIS TO THE PROJECT ROOT DIRECTORY. 
IF NOT SET, THE TOOL WILL NOT WORK.`),rules:I.array(I.enum(A)).optional().describe(`List of rule profiles to include at initialization. If omitted, defaults to Cursor profile only. Available options: ${A.join(`, `)}`)}),annotations:{title:`Initialize Project`,destructiveHint:!0},execute:Z(async(e,t)=>{let{log:n}=t,r=t.session;try{return n.info(`Executing initialize_project tool with args: ${JSON.stringify(e)}`),Y({result:await Zt(e,n,{session:r}),log:n,errorPrefix:`Initialization failed`,projectRoot:e.projectRoot})}catch(e){let t=`Project initialization tool failed: ${e.message||`Unknown error`}`;return n.error(t,e),J(t,{details:e.stack})}})})}function Nn(e){e.addTool({name:`list_tags`,description:`List all available tags with task counts and metadata`,parameters:I.object({showMetadata:I.boolean().optional().describe(`Whether to include metadata in the output (default: false)`),file:I.string().optional().describe(`Path to the tasks file (default: tasks/tasks.json)`),projectRoot:I.string().describe(`The directory of the project. Must be an absolute path.`)}),annotations:{title:`List Tags`,readOnlyHint:!0},execute:Z(async(e,{log:t,session:n})=>{try{t.info(`Starting list-tags with args: ${JSON.stringify(e)}`);let r;try{r=E({projectRoot:e.projectRoot,file:e.file},t)}catch(e){return t.error(`Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}return Y({result:await Qt({tasksJsonPath:r,showMetadata:e.showMetadata,projectRoot:e.projectRoot},t,{session:n}),log:t,errorPrefix:`Error listing tags`,projectRoot:e.projectRoot})}catch(e){return t.error(`Error in list-tags tool: ${e.message}`),J(e.message)}})})}function Pn(e){e.addTool({name:`models`,description:`Get information about available AI models or set model configurations. 
Run without arguments to get the current model configuration and API key status for the selected model providers.`,parameters:I.object({setMain:I.string().optional().describe(`Set the primary model for task generation/updates. Model provider API key is required in the MCP config ENV.`),setResearch:I.string().optional().describe(`Set the model for research-backed operations. Model provider API key is required in the MCP config ENV.`),setFallback:I.string().optional().describe(`Set the model to use if the primary fails. Model provider API key is required in the MCP config ENV.`),listAvailableModels:I.boolean().optional().describe(`List all available models not currently in use. Input/output costs values are in dollars (3 is $3.00).`),projectRoot:I.string().describe(`The directory of the project. Must be an absolute path.`),openrouter:I.boolean().optional().describe(`Indicates the set model ID is a custom OpenRouter model.`),ollama:I.boolean().optional().describe(`Indicates the set model ID is a custom Ollama model.`),bedrock:I.boolean().optional().describe(`Indicates the set model ID is a custom AWS Bedrock model.`),azure:I.boolean().optional().describe(`Indicates the set model ID is a custom Azure OpenAI model.`),vertex:I.boolean().optional().describe(`Indicates the set model ID is a custom Google Vertex AI model.`),"openai-compatible":I.boolean().optional().describe(`Indicates the set model ID is a custom OpenAI-compatible model. 
Requires baseURL parameter.`),baseURL:I.string().optional().describe(`Custom base URL for providers that support it (e.g., https://api.example.com/v1).`)}),annotations:{title:`Models`,destructiveHint:!0},execute:Q(`models`,async(e,t)=>{try{return t.log.info(`Starting models tool with args: ${JSON.stringify(e)}`),Y({result:await nn({...e,projectRoot:e.projectRoot},t.log,{session:t.session}),log:t.log,errorPrefix:`Error managing models`,projectRoot:e.projectRoot})}catch(e){return t.log.error(`Error in models tool: ${e.message}`),J(e.message)}})})}function Fn(e){e.addTool({name:`move_task`,description:`Move a task or subtask to a new position`,parameters:I.object({from:I.string().describe(`ID of the task/subtask to move (e.g., "5" or "5.2"). Can be comma-separated to move multiple tasks (e.g., "5,6,7")`),to:I.string().optional().describe(`ID of the destination (e.g., "7" or "7.3"). Required for within-tag moves. For cross-tag moves, if omitted, task will be moved to the target tag maintaining its ID`),file:I.string().optional().describe(`Custom path to tasks.json file`),projectRoot:I.string().describe(`Root directory of the project (typically derived from session)`),tag:I.string().optional().describe(`Tag context to operate on`),fromTag:I.string().optional().describe(`Source tag for cross-tag moves`),toTag:I.string().optional().describe(`Target tag for cross-tag moves`),withDependencies:I.boolean().optional().describe(`Move dependent tasks along with main task`),ignoreDependencies:I.boolean().optional().describe(`Break cross-tag dependencies during move`)}),annotations:{title:`Move Task`,destructiveHint:!0},execute:Z(async(e,{log:t,session:n})=>{try{if(e.fromTag&&e.toTag&&e.fromTag!==e.toTag){if(!e.from)return J(`Source IDs are required for cross-tag moves`,`MISSING_SOURCE_IDS`);e.to&&t.warn(`The "to" parameter is not used for cross-tag moves and will be ignored. 
Tasks retain their original IDs in the target tag.`);let r=e.file;return r||=E(e,t),Y({result:await rn({sourceIds:e.from,sourceTag:e.fromTag,targetTag:e.toTag,withDependencies:e.withDependencies||!1,ignoreDependencies:e.ignoreDependencies||!1,tasksJsonPath:r,projectRoot:e.projectRoot},t,{session:n}),log:t,errorPrefix:`Error moving tasks between tags`,projectRoot:e.projectRoot})}else{if(!e.to)return J(`Destination ID is required for within-tag moves`,`MISSING_DESTINATION_ID`);let r=i({projectRoot:e.projectRoot,tag:e.tag}),a=e.file;a||=E(e,t);let o=e.from.split(`,`).map(e=>e.trim()),s=e.to.split(`,`).map(e=>e.trim());if(o.length!==s.length){if(o.length>1){let i=[],c=[];for(let l=0;l<o.length;l++){let u=o[l],d=s[l];if(u===d){t.info(`Skipping ${u} -> ${d} (same ID)`),c.push({fromId:u,toId:d,reason:`same ID`});continue}let f=l===o.length-1,p=await an({sourceId:u,destinationId:d,tasksJsonPath:a,projectRoot:e.projectRoot,tag:r,generateFiles:f},t,{session:n});p.success?i.push(p.data):t.error(`Failed to move ${u} to ${d}: ${p.error.message}`)}return Y({result:{success:!0,data:{moves:i,skipped:c.length>0?c:void 0,message:`Successfully moved ${i.length} tasks${c.length>0?`, skipped ${c.length}`:``}`}},log:t,errorPrefix:`Error moving multiple tasks`,projectRoot:e.projectRoot})}return Y({result:{success:!0,data:{moves:results,skippedMoves,message:`Successfully moved ${results.length} tasks${skippedMoves.length>0?`, skipped ${skippedMoves.length} moves`:``}`}},log:t,errorPrefix:`Error moving multiple tasks`,projectRoot:e.projectRoot})}else return Y({result:await an({sourceId:e.from,destinationId:e.to,tasksJsonPath:a,projectRoot:e.projectRoot,tag:r,generateFiles:!0},t,{session:n}),log:t,errorPrefix:`Error moving task`,projectRoot:e.projectRoot})}}catch(e){return J(`Failed to move task: ${e.message}`,`MOVE_TASK_ERROR`)}})})}function In(e){e.addTool({name:`next_task`,description:`Find the next task to work on based on dependencies and 
status`,parameters:I.object({file:I.string().optional().describe(`Absolute path to the tasks file`),complexityReport:I.string().optional().describe(`Path to the complexity report file (relative to project root or absolute)`),projectRoot:I.string().describe(`The directory of the project. Must be an absolute path.`),tag:I.string().optional().describe(`Tag context to operate on`)}),annotations:{title:`Next Task`,readOnlyHint:!0},execute:Z(async(e,{log:t,session:n})=>{try{t.info(`Finding next task with args: ${JSON.stringify(e)}`);let r=i({projectRoot:e.projectRoot,tag:e.tag}),a;try{a=ue(e,n)}catch(e){return t.error(`Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}let o;try{o=de({...e,tag:r},n)}catch(e){t.error(`Error finding complexity report: ${e.message}`),o=null}let s=await on({tasksJsonPath:a,reportPath:o,projectRoot:e.projectRoot,tag:r},t,{session:n});return t.info(`Next task result: ${s.success?`found`:`none`}`),Y({result:s,log:t,errorPrefix:`Error finding next task`,projectRoot:e.projectRoot})}catch(e){return t.error(`Error finding next task: ${e.message}`),J(e.message)}})})}function Ln(e){e.addTool({name:`parse_prd`,description:`Parse a Product Requirements Document (PRD) text file to automatically generate initial tasks. Reinitializing the project is not necessary to run this tool. It is recommended to run parse-prd after initializing the project and creating/importing a prd.txt file in the project root's ${a} directory.`,parameters:I.object({input:I.string().optional().default(re).describe(`Absolute path to the PRD document file (.txt, .md, etc.)`),projectRoot:I.string().describe(`The directory of the project. Must be an absolute path.`),tag:I.string().optional().describe(`Tag context to operate on`),output:I.string().optional().describe(`Output path for tasks.json file (default: ${o})`),numTasks:I.string().optional().describe(`Approximate number of top-level tasks to generate (default: 10). 
As the agent, if you have enough information, ensure to enter a number of tasks that would logically scale with project complexity. Setting to 0 will allow Taskmaster to determine the appropriate number of tasks based on the complexity of the PRD. Avoid entering numbers above 50 due to context window limitations.`),force:I.boolean().optional().default(!1).describe(`Overwrite existing output file without prompting.`),research:I.boolean().optional().describe(`Enable Taskmaster to use the research role for potentially more informed task generation. Requires appropriate API key.`),append:I.boolean().optional().describe(`Append generated tasks to existing file.`)}),annotations:{title:`Parse PRD`,destructiveHint:!0},execute:Z(async(e,{log:t,session:n,reportProgress:r})=>{try{let a=i({projectRoot:e.projectRoot,tag:e.tag}),o=dt(r,t);return Y({result:await sn({...e,tag:a},t,{session:n,reportProgress:o}),log:t,errorPrefix:`Error parsing PRD`,projectRoot:e.projectRoot})}catch(e){return t.error(`Error in parse_prd: ${e.message}`),J(`Failed to parse PRD: ${e.message}`)}})})}function Rn(e){e.addTool({name:`remove_dependency`,description:`Remove a dependency from a task`,parameters:I.object({id:I.string().describe(`Task ID to remove dependency from`),dependsOn:I.string().describe(`Task ID to remove as a dependency`),file:I.string().optional().describe(`Absolute path to the tasks file (default: tasks/tasks.json)`),projectRoot:I.string().describe(`The directory of the project. 
Must be an absolute path.`),tag:I.string().optional().describe(`Tag context to operate on`)}),annotations:{title:`Remove Dependency`,destructiveHint:!0},execute:Q(`remove-dependency`,async(e,t)=>{try{let n=i({projectRoot:e.projectRoot,tag:e.tag});t.log.info(`Removing dependency for task ${e.id} from ${e.dependsOn} with args: ${JSON.stringify(e)}`);let r;try{r=E({projectRoot:e.projectRoot,file:e.file},t.log)}catch(e){return t.log.error(`Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}let a=await cn({tasksJsonPath:r,id:e.id,dependsOn:e.dependsOn,projectRoot:e.projectRoot,tag:n},t.log);return a.success?t.log.info(`Successfully removed dependency: ${a.data.message}`):t.log.error(`Failed to remove dependency: ${a.error.message}`),Y({result:a,log:t.log,errorPrefix:`Error removing dependency`,projectRoot:e.projectRoot})}catch(e){return t.log.error(`Error in removeDependency tool: ${e.message}`),J(e.message)}})})}function zn(e){e.addTool({name:`remove_subtask`,description:`Remove a subtask from its parent task`,parameters:I.object({id:I.string().describe(`Subtask ID to remove in format 'parentId.subtaskId' (required)`),convert:I.boolean().optional().describe(`Convert the subtask to a standalone task instead of deleting it`),file:I.string().optional().describe(`Absolute path to the tasks file (default: tasks/tasks.json)`),skipGenerate:I.boolean().optional().describe(`Skip regenerating task files`),projectRoot:I.string().describe(`The directory of the project. 
Must be an absolute path.`),tag:I.string().optional().describe(`Tag context to operate on`)}),annotations:{title:`Remove Subtask`,destructiveHint:!0},execute:Z(async(e,{log:t,session:n})=>{try{let r=i({projectRoot:e.projectRoot,tag:e.tag});t.info(`Removing subtask with args: ${JSON.stringify(e)}`);let a;try{a=E({projectRoot:e.projectRoot,file:e.file},t)}catch(e){return t.error(`Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}let o=await ln({tasksJsonPath:a,id:e.id,convert:e.convert,skipGenerate:e.skipGenerate,projectRoot:e.projectRoot,tag:r},t,{session:n});return o.success?t.info(`Subtask removed successfully: ${o.data.message}`):t.error(`Failed to remove subtask: ${o.error.message}`),Y({result:o,log:t,errorPrefix:`Error removing subtask`,projectRoot:e.projectRoot})}catch(e){return t.error(`Error in removeSubtask tool: ${e.message}`),J(e.message)}})})}function Bn(e){e.addTool({name:`remove_task`,description:`Remove a task or subtask permanently from the tasks list`,parameters:I.object({id:I.string().describe(`ID of the task or subtask to remove (e.g., '5' or '5.2'). Can be comma-separated to update multiple tasks/subtasks at once.`),file:I.string().optional().describe(`Absolute path to the tasks file`),projectRoot:I.string().describe(`The directory of the project. Must be an absolute path.`),confirm:I.boolean().optional().describe(`Whether to skip confirmation prompt (default: false)`),tag:I.string().optional().describe(`Specify which tag context to operate on. 
Defaults to the current active tag.`)}),annotations:{title:`Remove Task`,destructiveHint:!0},execute:Z(async(e,{log:t,session:n})=>{try{t.info(`Removing task(s) with ID(s): ${e.id}`);let r=i({projectRoot:e.projectRoot,tag:e.tag}),a;try{a=E({projectRoot:e.projectRoot,file:e.file},t)}catch(e){return t.error(`Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}t.info(`Using tasks file path: ${a}`);let o=await un({tasksJsonPath:a,id:e.id,projectRoot:e.projectRoot,tag:r},t,{session:n});return o.success?t.info(`Successfully removed task: ${e.id}`):t.error(`Failed to remove task: ${o.error.message}`),Y({result:o,log:t,errorPrefix:`Error removing task`,projectRoot:e.projectRoot})}catch(e){return t.error(`Error in remove-task tool: ${e.message}`),J(`Failed to remove task: ${e.message}`)}})})}function Vn(e){e.addTool({name:`rename_tag`,description:`Rename an existing tag`,parameters:I.object({oldName:I.string().describe(`Current name of the tag to rename`),newName:I.string().describe(`New name for the tag`),file:I.string().optional().describe(`Path to the tasks file (default: tasks/tasks.json)`),projectRoot:I.string().describe(`The directory of the project. 
Must be an absolute path.`)}),annotations:{title:`Rename Tag`,destructiveHint:!0},execute:Z(async(e,{log:t,session:n})=>{try{t.info(`Starting rename-tag with args: ${JSON.stringify(e)}`);let r;try{r=E({projectRoot:e.projectRoot,file:e.file},t)}catch(e){return t.error(`Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}return Y({result:await dn({tasksJsonPath:r,oldName:e.oldName,newName:e.newName,projectRoot:e.projectRoot},t,{session:n}),log:t,errorPrefix:`Error renaming tag`,projectRoot:e.projectRoot})}catch(e){return t.error(`Error in rename-tag tool: ${e.message}`),J(e.message)}})})}function Hn(e){e.addTool({name:`research`,description:`Perform AI-powered research queries with project context`,parameters:I.object({query:I.string().describe(`Research query/prompt (required)`),taskIds:I.string().optional().describe(`Comma-separated list of task/subtask IDs for context (e.g., "15,16.2,17")`),filePaths:I.string().optional().describe(`Comma-separated list of file paths for context (e.g., "src/api.js,docs/readme.md")`),customContext:I.string().optional().describe(`Additional custom context text to include in the research`),includeProjectTree:I.boolean().optional().describe(`Include project file tree structure in context (default: false)`),detailLevel:I.enum([`low`,`medium`,`high`]).optional().describe(`Detail level for the research response (default: medium)`),saveTo:I.string().optional().describe(`Automatically save research results to specified task/subtask ID (e.g., "15" or "15.2")`),saveToFile:I.boolean().optional().describe(`Save research results to .taskmaster/docs/research/ directory (default: false)`),projectRoot:I.string().describe(`The directory of the project. 
Must be an absolute path.`),tag:I.string().optional().describe(`Tag context to operate on`)}),annotations:{title:`Research`,destructiveHint:!0,openWorldHint:!0},execute:Z(async(e,{log:t,session:n})=>{try{let r=i({projectRoot:e.projectRoot,tag:e.tag});return t.info(`Starting research with query: "${e.query.substring(0,100)}${e.query.length>100?`...`:``}"`),Y({result:await fn({query:e.query,taskIds:e.taskIds,filePaths:e.filePaths,customContext:e.customContext,includeProjectTree:e.includeProjectTree||!1,detailLevel:e.detailLevel||`medium`,saveTo:e.saveTo,saveToFile:e.saveToFile||!1,projectRoot:e.projectRoot,tag:r},t,{session:n}),log:t,errorPrefix:`Error performing research`,projectRoot:e.projectRoot})}catch(e){return t.error(`Error in research tool: ${e.message}`),J(e.message)}})})}async function Un(e,t,n={}){let{projectRoot:r,language:i}=e,a=D(t);t.info(`Executing response-language_direct with args: ${JSON.stringify(e)}`),t.info(`Using project root: ${r}`);try{return x(),We(i,{mcpLog:a,projectRoot:r})}catch(e){return{success:!1,error:{code:`DIRECT_FUNCTION_ERROR`,message:e.message,details:e.stack}}}finally{v()}}function Wn(e){e.addTool({name:`response-language`,description:`Get or set the response language for the project`,parameters:I.object({projectRoot:I.string().describe(`The root directory for the project. ALWAYS SET THIS TO THE PROJECT ROOT DIRECTORY. IF NOT SET, THE TOOL WILL NOT WORK.`),language:I.string().describe(`The new response language to set. 
like "中文" "English" or "español".`)}),annotations:{title:`Response Language`,destructiveHint:!0},execute:Z(async(e,{log:t,session:n})=>{try{return t.info(`Executing response-language tool with args: ${JSON.stringify(e)}`),Y({result:await Un({...e,projectRoot:e.projectRoot},t,{session:n}),log:t,errorPrefix:`Error setting response language`,projectRoot:e.projectRoot})}catch(e){return t.error(`Error in response-language tool: ${e.message}`),J(e.message)}})})}async function Gn(e,t,n={}){x();try{let{action:t,profiles:n,projectRoot:r,yes:i,force:a}=e;if(!t||!Array.isArray(n)||n.length===0||!r)return{success:!1,error:{code:`MISSING_ARGUMENT`,message:`action, profiles, and projectRoot are required.`}};let o=[],s=[];if(t===O.REMOVE){if(!a&&Ke(r,n)){let e=Ge(r);return e.filter(e=>!n.includes(e)),{success:!1,error:{code:`CRITICAL_REMOVAL_BLOCKED`,message:`CRITICAL: This operation would remove ALL remaining rule profiles (${n.join(`, `)}), leaving your project with no rules configurations. This could significantly impact functionality. Currently installed profiles: ${e.join(`, `)}. If you're certain you want to proceed, set force: true or use the CLI with --force flag.`}}}for(let e of n){if(!Ye(e)){o.push({profileName:e,success:!1,error:`The requested rule profile for '${e}' is unavailable. 
Supported profiles are: ${A.join(`, `)}.`});continue}let t=Je(r,k(e));o.push(t)}let e=o.filter(e=>e.success).map(e=>e.profileName),t=o.filter(e=>e.skipped).map(e=>e.profileName),i=o.filter(e=>e.error&&!e.success&&!e.skipped),s=o.filter(e=>e.notice),c=``;return e.length>0&&(c+=`Successfully removed Task Master rules: ${e.join(`, `)}.`),t.length>0&&(c+=`Skipped (default or protected): ${t.join(`, `)}.`),i.length>0&&(c+=i.map(e=>`Error removing ${e.profileName}: ${e.error}`).join(` `)),s.length>0&&(c+=` Notices: ${s.map(e=>`${e.profileName} - ${e.notice}`).join(`; `)}.`),v(),{success:i.length===0,data:{summary:c,results:o}}}else if(t===O.ADD){for(let e of n){if(!Ye(e)){s.push({profileName:e,success:!1,error:`Profile not found: static import missing for '${e}'. Valid profiles: ${A.join(`, `)}`});continue}let t=k(e),{success:n,failed:i}=Xe(r,t),a=t.rulesDir,o=P.join(r,a),c=t.profileDir,l=t.mcpConfig!==!1,u=l&&t.mcpConfigPath?P.join(r,t.mcpConfigPath):null,d=l&&u?N.existsSync(u):void 0,f=N.existsSync(o),p=N.existsSync(P.join(r,c)),m=i>0?`${i} rule files failed to convert.`:null,h={profileName:e,mcpConfigCreated:d,rulesDirCreated:f,profileFolderCreated:p,skipped:!1,error:m,success:(l?d:!0)&&f&&n>0&&!m};s.push(h)}let e=s.filter(e=>e.success).map(e=>e.profileName),t=s.filter(e=>e.error&&!e.success),i=``;return e.length>0&&(i+=`Successfully added rules: ${e.join(`, `)}.`),t.length>0&&(i+=t.map(e=>` Error adding ${e.profileName}: ${e.error}`).join(` `)),v(),{success:t.length===0,data:{summary:i,results:s}}}else return v(),{success:!1,error:{code:`INVALID_ACTION`,message:`Unknown action. 
Use "${O.ADD}" or "${O.REMOVE}".`}}}catch(e){return v(),t.error(`[rulesDirect] Error: ${e.message}`),{success:!1,error:{code:e.code||`RULES_ERROR`,message:e.message}}}}function Kn(e){e.addTool({name:`rules`,description:`Add or remove rule profiles from the project.`,parameters:I.object({action:I.enum([`add`,`remove`]).describe(`Whether to add or remove rule profiles.`),profiles:I.array(I.enum(A)).min(1).describe(`List of rule profiles to add or remove (e.g., [\"cursor\", \"roo\"]). Available options: ${A.join(`, `)}`),projectRoot:I.string().describe(`The root directory of the project. Must be an absolute path.`),force:I.boolean().optional().default(!1).describe(`DANGEROUS: Force removal even if it would leave no rule profiles. Only use if you are absolutely certain.`)}),annotations:{title:`Rules`,destructiveHint:!0},execute:Z(async(e,{log:t,session:n})=>{try{return t.info(`[rules tool] Executing action: ${e.action} for profiles: ${e.profiles.join(`, `)} in ${e.projectRoot}`),Y({result:await Gn(e,t,{session:n}),log:t,projectRoot:e.projectRoot})}catch(e){return t.error(`[rules tool] Error: ${e.message}`),J(e.message,{details:e.stack})}})})}function qn(e){e.addTool({name:`scope_down_task`,description:`Decrease the complexity of one or more tasks using AI`,parameters:I.object({id:I.string().describe(`Comma-separated list of task IDs to scope down (e.g., "1,3,5")`),strength:I.string().optional().describe(`Strength level: light, regular, or heavy (default: regular)`),prompt:I.string().optional().describe(`Custom prompt for specific scoping adjustments`),file:I.string().optional().describe(`Path to the tasks file (default: tasks/tasks.json)`),projectRoot:I.string().describe(`The directory of the project. 
Must be an absolute path.`),tag:I.string().optional().describe(`Tag context to operate on`),research:I.boolean().optional().describe(`Whether to use research capabilities for scoping`)}),annotations:{title:`Scope Down Task`,destructiveHint:!0},execute:Z(async(e,{log:t,session:n})=>{try{t.info(`Starting scope-down with args: ${JSON.stringify(e)}`);let r=i({projectRoot:e.projectRoot,tag:e.tag}),a;try{a=E({projectRoot:e.projectRoot,file:e.file},t)}catch(e){return t.error(`Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}return Y({result:await pn({tasksJsonPath:a,id:e.id,strength:e.strength,prompt:e.prompt,research:e.research,projectRoot:e.projectRoot,tag:r},t,{session:n}),log:t,errorPrefix:`Error scoping down task`,projectRoot:e.projectRoot})}catch(e){return t.error(`Error in scope-down tool: ${e.message}`),J(e.message)}})})}
// NOTE(review): this file is minified bundler output. The comments below were added for review
// purposes only; every code token (including all template-literal strings and the literal
// newlines inside them) is unchanged from the original.
// Registers the `scope_up_task` MCP tool: increases the complexity of one or more tasks via AI.
// Resolves the tag context (i), locates tasks.json (E), delegates to the direct function mn,
// and normalizes the success/error envelope via Y (result wrapper) and J (error response).
function Jn(e){e.addTool({name:`scope_up_task`,description:`Increase the complexity of one or more tasks using AI`,parameters:I.object({id:I.string().describe(`Comma-separated list of task IDs to scope up (e.g., "1,3,5")`),strength:I.string().optional().describe(`Strength level: light, regular, or heavy (default: regular)`),prompt:I.string().optional().describe(`Custom prompt for specific scoping adjustments`),file:I.string().optional().describe(`Path to the tasks file (default: tasks/tasks.json)`),projectRoot:I.string().describe(`The directory of the project. 
Must be an absolute path.`),tag:I.string().optional().describe(`Tag context to operate on`),research:I.boolean().optional().describe(`Whether to use research capabilities for scoping`)}),annotations:{title:`Scope Up Task`,destructiveHint:!0},execute:Z(async(e,{log:t,session:n})=>{try{t.info(`Starting scope-up with args: ${JSON.stringify(e)}`);let r=i({projectRoot:e.projectRoot,tag:e.tag}),a;try{a=E({projectRoot:e.projectRoot,file:e.file},t)}catch(e){return t.error(`Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}return Y({result:await mn({tasksJsonPath:a,id:e.id,strength:e.strength,prompt:e.prompt,research:e.research,projectRoot:e.projectRoot,tag:r},t,{session:n}),log:t,errorPrefix:`Error scoping up task`,projectRoot:e.projectRoot})}catch(e){return t.error(`Error in scope-up tool: ${e.message}`),J(e.message)}})})}
// Registers the `update_subtask` MCP tool: appends timestamped information and/or merges
// metadata into a subtask ("parentId.subtaskId" id format). Metadata is parsed from a JSON
// string via mt; at least one of `prompt` or parsed metadata is required. Delegates to hn.
function Yn(e){e.addTool({name:`update_subtask`,description:`Appends timestamped information to a specific subtask without replacing existing content. If you just want to update the subtask status, use set_task_status instead.`,parameters:I.object({id:b.describe(`ID of the subtask to update in format "parentId.subtaskId" (e.g., "5.2"). Parent ID is the ID of the task that contains the subtask.`),prompt:I.string().optional().describe(`Information to add to the subtask. Required unless only updating metadata.`),research:I.boolean().optional().describe(`Use Perplexity AI for research-backed updates`),metadata:I.string().optional().describe(`JSON string of metadata to merge into subtask metadata. Example: '{"ticketId": "JIRA-456", "reviewed": true}'. Requires TASK_MASTER_ALLOW_METADATA_UPDATES=true in MCP environment.`),file:I.string().optional().describe(`Absolute path to the tasks file`),projectRoot:I.string().describe(`The directory of the project. 
Must be an absolute path.`),tag:I.string().optional().describe(`Tag context to operate on`)}),annotations:{title:`Update Subtask`,destructiveHint:!0},execute:Z(async(e,{log:t,session:n})=>{let r=`update_subtask`;try{let a=i({projectRoot:e.projectRoot,tag:e.tag});t.info(`Updating subtask with args: ${JSON.stringify(e)}`);let o;try{o=E({projectRoot:e.projectRoot,file:e.file},t)}catch(e){return t.error(`${r}: Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}let s=mt(e.metadata,J);if(s.error)return s.error;let c=s.parsedMetadata;if(!e.prompt&&!c)return J(`Either prompt or metadata must be provided for update-subtask`);let l=await hn({tasksJsonPath:o,id:e.id,prompt:e.prompt,research:e.research,metadata:c,projectRoot:e.projectRoot,tag:a},t,{session:n});return l.success?t.info(`Successfully updated subtask with ID ${e.id}`):t.error(`Failed to update subtask: ${l.error?.message||`Unknown error`}`),Y({result:l,log:t,errorPrefix:`Error updating subtask`,projectRoot:e.projectRoot})}catch(e){return t.error(`Critical error in ${r} tool execute: ${e.message}`),J(`Internal tool error (${r}): ${e.message}`)}})})}
// Registers the `update_task` MCP tool: updates a single task by ID from a prompt and/or
// metadata (optionally append-only via `append`). Same prompt-or-metadata requirement as
// update_subtask; delegates to gn.
function Xn(e){e.addTool({name:`update_task`,description:`Updates a single task by ID with new information or context provided in the prompt.`,parameters:I.object({id:I.string().describe(`ID of the task (e.g., '15') to update. Subtasks are supported using the update-subtask tool.`),prompt:I.string().optional().describe(`New information or context to incorporate into the task. Required unless only updating metadata.`),research:I.boolean().optional().describe(`Use Perplexity AI for research-backed updates`),append:I.boolean().optional().describe(`Append timestamped information to task details instead of full update`),metadata:I.string().optional().describe(`JSON string of metadata to merge into task metadata. Example: '{"githubIssue": 42, "sprint": "Q1-S3"}'. 
Requires TASK_MASTER_ALLOW_METADATA_UPDATES=true in MCP environment.`),file:I.string().optional().describe(`Absolute path to the tasks file`),projectRoot:I.string().describe(`The directory of the project. Must be an absolute path.`),tag:I.string().optional().describe(`Tag context to operate on`)}),annotations:{title:`Update Task`,destructiveHint:!0},execute:Z(async(e,{log:t,session:n})=>{let r=`update_task`;try{let a=i({projectRoot:e.projectRoot,tag:e.tag});t.info(`Executing ${r} tool with args: ${JSON.stringify(e)}`);let o;try{o=E({projectRoot:e.projectRoot,file:e.file},t),t.info(`${r}: Resolved tasks path: ${o}`)}catch(e){return t.error(`${r}: Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}let s=mt(e.metadata,J);if(s.error)return s.error;let c=s.parsedMetadata;if(!e.prompt&&!c)return J(`Either prompt or metadata must be provided for update-task`);let l=await gn({tasksJsonPath:o,id:e.id,prompt:e.prompt,research:e.research,append:e.append,metadata:c,projectRoot:e.projectRoot,tag:a},t,{session:n});return t.info(`${r}: Direct function result: success=${l.success}`),Y({result:l,log:t,errorPrefix:`Error updating task`,projectRoot:e.projectRoot})}catch(e){return t.error(`Critical error in ${r} tool execute: ${e.message}`),J(`Internal tool error (${r}): ${e.message}`)}})})}
// Registers the bulk `update` MCP tool: updates all tasks with ID >= `from` based on a prompt.
// Delegates to _n. (The inner `let u` result deliberately shadows the destructured `tag` u.)
function Zn(e){e.addTool({name:`update`,description:`Update multiple upcoming tasks (with ID >= 'from' ID) based on new context or changes provided in the prompt. Use 'update_task' instead for a single specific task or 'update_subtask' for subtasks.`,parameters:I.object({from:I.string().describe(`Task ID from which to start updating (inclusive). 
IMPORTANT: This tool uses 'from', not 'id'`),prompt:I.string().describe(`Explanation of changes or new context to apply`),research:I.boolean().optional().describe(`Use Perplexity AI for research-backed updates`),file:I.string().optional().describe(`Path to the tasks file relative to project root`),projectRoot:I.string().optional().describe(`The directory of the project. (Optional, usually from session)`),tag:I.string().optional().describe(`Tag context to operate on`)}),annotations:{title:`Update Tasks`,destructiveHint:!0},execute:Z(async(e,{log:t,session:n})=>{let r=`update`,{from:a,prompt:o,research:s,file:c,projectRoot:l,tag:u}=e,d=i({projectRoot:e.projectRoot,tag:e.tag});try{t.info(`Executing ${r} tool with normalized root: ${l}`);let i;try{i=E({projectRoot:l,file:c},t),t.info(`${r}: Resolved tasks path: ${i}`)}catch(e){return t.error(`${r}: Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json within project root '${l}': ${e.message}`)}let u=await _n({tasksJsonPath:i,from:a,prompt:o,research:s,projectRoot:l,tag:d},t,{session:n});return t.info(`${r}: Direct function result: success=${u.success}`),Y({result:u,log:t,errorPrefix:`Error updating tasks`,projectRoot:e.projectRoot})}catch(e){return t.error(`Critical error in ${r} tool execute: ${e.message}`),J(`Internal tool error (${r}): ${e.message}`)}})})}
// Registers the `use_tag` MCP tool: switches the active tag context; delegates to vn.
function Qn(e){e.addTool({name:`use_tag`,description:`Switch to a different tag context for task operations`,parameters:I.object({name:I.string().describe(`Name of the tag to switch to`),file:I.string().optional().describe(`Path to the tasks file (default: tasks/tasks.json)`),projectRoot:I.string().describe(`The directory of the project. 
Must be an absolute path.`)}),annotations:{title:`Use Tag`,destructiveHint:!0},execute:Z(async(e,{log:t,session:n})=>{try{t.info(`Starting use-tag with args: ${JSON.stringify(e)}`);let r;try{r=E({projectRoot:e.projectRoot,file:e.file},t)}catch(e){return t.error(`Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}return Y({result:await vn({tasksJsonPath:r,name:e.name,projectRoot:e.projectRoot},t,{session:n}),log:t,errorPrefix:`Error switching tag`,projectRoot:e.projectRoot})}catch(e){return t.error(`Error in use-tag tool: ${e.message}`),J(e.message)}})})}
// Registers the read-only `validate_dependencies` MCP tool: checks tasks for dependency issues
// without modifying anything; delegates to yn. Note it uses the Q("validate-dependencies", ...)
// executor wrapper (and readOnlyHint) instead of the destructive Z wrapper used above.
function $n(e){e.addTool({name:`validate_dependencies`,description:`Check tasks for dependency issues (like circular references or links to non-existent tasks) without making changes.`,parameters:I.object({file:I.string().optional().describe(`Absolute path to the tasks file`),projectRoot:I.string().describe(`The directory of the project. Must be an absolute path.`),tag:I.string().optional().describe(`Tag context to operate on`)}),annotations:{title:`Validate Dependencies`,readOnlyHint:!0},execute:Q(`validate-dependencies`,async(e,{log:t,session:n})=>{try{let n=i({projectRoot:e.projectRoot,tag:e.tag});t.info(`Validating dependencies with args: ${JSON.stringify(e)}`);let r;try{r=E({projectRoot:e.projectRoot,file:e.file},t)}catch(e){return t.error(`Error finding tasks.json: ${e.message}`),J(`Failed to find tasks.json: ${e.message}`)}let a=await yn({tasksJsonPath:r,projectRoot:e.projectRoot,tag:n},t);return a.success?t.info(`Successfully validated dependencies: ${a.data.message}`):t.error(`Failed to validate dependencies: ${a.error.message}`),Y({result:a,log:t,errorPrefix:`Error validating dependencies`,projectRoot:e.projectRoot,tag:n})}catch(e){return t.error(`Error in validateDependencies tool: ${e.message}`),J(e.message)}})})}
// $ : registry mapping MCP tool name -> registration function.
// er: the "core"/"lean" tool set; tr: the "standard" set (core plus project-setup/analysis tools).
const $={initialize_project:Mn,models:Pn,rules:Kn,parse_prd:Ln,"response-language":Wn,analyze_project_complexity:wn,expand_task:An,expand_all:kn,scope_up_task:Jn,scope_down_task:qn,get_tasks:Mt,get_task:Pt,next_task:In,complexity_report:En,set_task_status:Rt,add_task:Cn,add_subtask:xn,update:Zn,update_task:Xn,update_subtask:Yn,remove_task:Bn,remove_subtask:zn,clear_subtasks:Tn,move_task:Fn,add_dependency:bn,remove_dependency:Rn,validate_dependencies:$n,fix_dependencies:jn,list_tags:Nn,add_tag:Sn,delete_tag:On,use_tag:Qn,rename_tag:Vn,copy_tag:Dn,research:Hn,autopilot_start:gt,autopilot_resume:vt,autopilot_next:bt,autopilot_status:St,autopilot_complete:wt,autopilot_commit:Et,autopilot_finalize:Ot,autopilot_abort:At,generate:It},er=[`get_tasks`,`next_task`,`get_task`,`set_task_status`,`update_subtask`,`parse_prd`,`expand_task`],tr=[...er,`initialize_project`,`analyze_project_complexity`,`expand_all`,`add_subtask`,`remove_task`,`add_task`,`complexity_report`];
// Looks up a tool registration function by exact name; returns null when unknown.
function nr(e){return $[e]||null}
// Reads the TASK_MASTER_TOOLS env var (trimmed); defaults to "core" when unset or empty.
function rr(){let e=process.env.TASK_MASTER_TOOLS;if(!e||e.trim()===``)return V.debug(`No TASK_MASTER_TOOLS env var found, defaulting to "core"`),`core`;let t=e.trim();return V.debug(`TASK_MASTER_TOOLS env var: "${t}"`),t}
// Registers tools on server e according to mode t: "all", "core"/"lean", "standard", or a
// comma-separated custom list. Custom names are matched case-insensitively, with an explicit
// response_language -> "response-language" alias and underscore/hyphen normalization in both
// directions; unknown names are warned about, and an empty resolved list falls back to all tools.
// "already registered" errors are tolerated; any unexpected failure parsing/dispatching falls
// back to registering every tool. Returns {registeredTools, failedTools, normalizedMode}.
function ir(e,t=`core`){let n=[],r=[];try{let i=t.trim(),a=[],o=i.toLowerCase();switch(o){case`all`:a=Object.keys($),V.info(`Loading all available tools`);break;case`core`:case`lean`:a=er,V.info(`Loading core tools only`);break;case`standard`:a=tr,V.info(`Loading standard tools`);break;default:let e=i.split(`,`).map(e=>e.trim()).filter(e=>e.length>0),t=new Set,n=[],r={response_language:`response-language`};for(let i of e){let e=null,a=i.toLowerCase();if(r[a]){let t=r[a];for(let n of Object.keys($))if(n.toLowerCase()===t.toLowerCase()){e=n;break}}if(!e){for(let t of Object.keys($))if(t.toLowerCase()===a){e=t;break}}if(!e){let t=a.replace(/_/g,`-`);for(let n of Object.keys($))if(n.toLowerCase()===t){e=n;break}}if(!e){let t=a.replace(/-/g,`_`);for(let n of Object.keys($))if(n.toLowerCase()===t){e=n;break}}e?(t.add(e),V.debug(`Resolved tool "${i}" to "${e}"`)):(n.push(i),V.warn(`Unknown tool specified: "${i}"`))}a=Array.from(t),n.length>0&&V.warn(`Unknown tools: ${n.join(`, `)}`),a.length===0?(V.warn(`No valid tools found in custom list. Loading all tools as fallback.`),a=Object.keys($)):V.info(`Loading ${a.length} custom tools from list (${t.size} unique after normalization)`);break}return V.info(`Registering ${a.length} MCP tools (mode: ${i})`),a.forEach(t=>{try{let i=nr(t);i?(i(e),V.debug(`Registered tool: ${t}`),n.push(t)):(V.warn(`Tool ${t} not found in registry`),r.push(t))}catch(e){e.message&&e.message.includes(`already registered`)?(V.debug(`Tool ${t} already registered, skipping`),n.push(t)):(V.error(`Failed to register tool ${t}: ${e.message}`),r.push(t))}}),V.info(`Successfully registered ${n.length}/${a.length} tools`),r.length>0&&V.warn(`Failed tools: ${r.join(`, `)}`),{registeredTools:n,failedTools:r,normalizedMode:o}}catch(t){V.error(`Error parsing TASK_MASTER_TOOLS environment variable: ${t.message}`),V.info(`Falling back to loading all tools`);let i=Object.keys($);for(let t of i){let i=nr(t);if(i)try{i(e),n.push(t)}catch(e){e.message&&e.message.includes(`already registered`)?(V.debug(`Fallback tool ${t} already registered, skipping`),n.push(t)):(V.warn(`Failed to register fallback tool '${t}': ${e.message}`),r.push(t))}else V.warn(`Tool '${t}' not found in registry`),r.push(t)}return V.info(`Successfully registered ${n.length} fallback tools`),{registeredTools:n,failedTools:r,normalizedMode:`all`}}}
// Module-level init: load env (L.config), run ie() side effect, resolve this module's file path.
L.config(),ie();const ar=Qe(import.meta.url);P.dirname(ar);
// Task Master MCP server wrapper class: constructs the underlying server ($e), optionally wraps
// its inner _mcpServer with Sentry instrumentation (best-effort; failure only logs a warning),
// registers tools on init(), and serves over a stdio transport.
var or=class{constructor(){this.options={name:`Task Master MCP Server`,version:d};let e=new $e(this.options);if(e._mcpServer&&R.wrapMcpServerWithSentry)try{e._mcpServer=R.wrapMcpServerWithSentry(e._mcpServer)}catch(e){V.warn(`Failed to wrap MCP server with Sentry: 
${e.message}`)}this.server=e,this.initialized=!1,this.init=this.init.bind(this),this.start=this.start.bind(this),this.stop=this.stop.bind(this),this.logger=V}
// Idempotent initialization: resolves the tool mode (rr) and registers tools (ir) exactly once.
async init(){if(this.initialized)return;let e=rr();this.logger.info(`Task Master MCP Server starting...`),this.logger.info(`Tool mode configuration: ${e}`);let t=ir(this.server,e);return this.logger.info(`Normalized tool mode: ${t.normalizedMode}`),this.logger.info(`Registered ${t.registeredTools.length} tools successfully`),t.registeredTools.length>0&&this.logger.debug(`Registered tools: ${t.registeredTools.join(`, `)}`),t.failedTools.length>0&&this.logger.warn(`Failed to register ${t.failedTools.length} tools: ${t.failedTools.join(`, `)}`),this.initialized=!0,this}
// Initializes if needed, wires per-session provider registration on "connect", and starts the
// server on stdio with a 120000 ms (12e4) timeout.
async start(){return this.initialized||await this.init(),this.server.on(`connect`,e=>{e.session.server.sendLoggingMessage({data:{context:e.session.context,message:`MCP Server connected: ${e.session.name}`},level:`info`}),this.registerRemoteProvider(e.session)}),await this.server.start({transportType:`stdio`,timeout:12e4}),this}
// Registers an MCP-backed provider (lt) for a session that advertises the sampling client
// capability; otherwise logs and skips.
// NOTE(review): the else branch executes only when e is falsy yet immediately dereferences
// e.server — it would throw if ever reached. Likely dead/buggy code; confirm before relying on it.
registerRemoteProvider(e){if(e){if(!e.clientCapabilities||!e.clientCapabilities.sampling){e.server.sendLoggingMessage({data:{context:e.context,message:`MCP session missing required sampling capabilities, providers not registered`},level:`info`});return}let n=new lt;n.setSession(e),t.getInstance().registerProvider(`mcp`,n),e.server.sendLoggingMessage({data:{context:e.context,message:`MCP Server connected`},level:`info`})}else e.server.sendLoggingMessage({data:{context:e.context,message:`No MCP sessions available, providers not registered`},level:`warn`})}
// Graceful shutdown; safe to call when the server was never created.
async stop(){this.server&&await this.server.stop()}};
// Process entrypoint: reload env, mark MCP mode for downstream code, install SIGINT/SIGTERM
// handlers that stop the server and exit 0, then start (exit 1 on startup failure).
L.config(),process.env.TASK_MASTER_MCP=`true`;
async function sr(){let e=new or;process.on(`SIGINT`,async()=>{await e.stop(),process.exit(0)}),process.on(`SIGTERM`,async()=>{await e.stop(),process.exit(0)});try{await e.start()}catch(e){V.error(`Failed to start MCP server: ${e.message}`),process.exit(1)}}sr();export{};
|