oh-my-opencode 3.0.0-beta.6 → 3.0.0-beta.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli/index.js CHANGED
@@ -5828,8 +5828,23 @@ var init_jsonc_parser = __esm(() => {
 });

 // src/shared/migration.ts
+var BUILTIN_AGENT_NAMES;
 var init_migration = __esm(() => {
   init_logger();
+  BUILTIN_AGENT_NAMES = new Set([
+    "Sisyphus",
+    "oracle",
+    "librarian",
+    "explore",
+    "frontend-ui-ux-engineer",
+    "document-writer",
+    "multimodal-looker",
+    "Metis (Plan Consultant)",
+    "Momus (Plan Reviewer)",
+    "Prometheus (Planner)",
+    "orchestrator-sisyphus",
+    "build"
+  ]);
 });

 // src/shared/opencode-config-dir.ts
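The new `BUILTIN_AGENT_NAMES` set gives the migration module a canonical list of the stock agents. How it is consumed is not visible in this diff; a plausible use, sketched here purely as an assumption, is keeping migration passes away from user-defined agents:

```ts
// Assumption: a migration pass that only touches stock agents.
// BUILTIN_AGENT_NAMES mirrors the set added in src/shared/migration.ts
// (abbreviated here); migrateBuiltinAgents itself is hypothetical.
const BUILTIN_AGENT_NAMES = new Set<string>([
  "Sisyphus", "oracle", "librarian", "explore", // ...and the rest of the list above
]);

function migrateBuiltinAgents(agents: Record<string, { model?: string }>): string[] {
  const migrated: string[] = [];
  for (const name of Object.keys(agents)) {
    if (!BUILTIN_AGENT_NAMES.has(name)) continue; // leave custom agents alone
    // ...rewrite only the built-in agent's settings here...
    migrated.push(name);
  }
  return migrated;
}
```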
@@ -6127,30 +6142,40 @@ function generateOmoConfig(installConfig) {
   };
   const agents = {};
   if (!installConfig.hasClaude) {
-    agents["Sisyphus"] = { model: "opencode/glm-4.7-free" };
+    agents["Sisyphus"] = {
+      model: installConfig.hasCopilot ? "github-copilot/claude-opus-4.5" : "opencode/glm-4.7-free"
+    };
   }
   agents["librarian"] = { model: "opencode/glm-4.7-free" };
   if (installConfig.hasGemini) {
     agents["explore"] = { model: "google/antigravity-gemini-3-flash" };
   } else if (installConfig.hasClaude && installConfig.isMax20) {
     agents["explore"] = { model: "anthropic/claude-haiku-4-5" };
+  } else if (installConfig.hasCopilot) {
+    agents["explore"] = { model: "github-copilot/grok-code-fast-1" };
   } else {
     agents["explore"] = { model: "opencode/glm-4.7-free" };
   }
   if (!installConfig.hasChatGPT) {
-    agents["oracle"] = {
-      model: installConfig.hasClaude ? "anthropic/claude-opus-4-5" : "opencode/glm-4.7-free"
-    };
+    const oracleFallback = installConfig.hasCopilot ? "github-copilot/gpt-5.2" : installConfig.hasClaude ? "anthropic/claude-opus-4-5" : "opencode/glm-4.7-free";
+    agents["oracle"] = { model: oracleFallback };
   }
   if (installConfig.hasGemini) {
     agents["frontend-ui-ux-engineer"] = { model: "google/antigravity-gemini-3-pro-high" };
     agents["document-writer"] = { model: "google/antigravity-gemini-3-flash" };
     agents["multimodal-looker"] = { model: "google/antigravity-gemini-3-flash" };
+  } else if (installConfig.hasClaude) {
+    agents["frontend-ui-ux-engineer"] = { model: "anthropic/claude-opus-4-5" };
+    agents["document-writer"] = { model: "anthropic/claude-opus-4-5" };
+    agents["multimodal-looker"] = { model: "anthropic/claude-opus-4-5" };
+  } else if (installConfig.hasCopilot) {
+    agents["frontend-ui-ux-engineer"] = { model: "github-copilot/gemini-3-pro-preview" };
+    agents["document-writer"] = { model: "github-copilot/gemini-3-flash-preview" };
+    agents["multimodal-looker"] = { model: "github-copilot/gemini-3-flash-preview" };
   } else {
-    const fallbackModel = installConfig.hasClaude ? "anthropic/claude-opus-4-5" : "opencode/glm-4.7-free";
-    agents["frontend-ui-ux-engineer"] = { model: fallbackModel };
-    agents["document-writer"] = { model: fallbackModel };
-    agents["multimodal-looker"] = { model: fallbackModel };
+    agents["frontend-ui-ux-engineer"] = { model: "opencode/glm-4.7-free" };
+    agents["document-writer"] = { model: "opencode/glm-4.7-free" };
+    agents["multimodal-looker"] = { model: "opencode/glm-4.7-free" };
   }
   if (Object.keys(agents).length > 0) {
     config.agents = agents;
@@ -6161,6 +6186,12 @@ function generateOmoConfig(installConfig) {
       artistry: { model: "google/gemini-3-pro-high" },
       writing: { model: "google/gemini-3-flash-high" }
     };
+  } else if (installConfig.hasCopilot) {
+    config.categories = {
+      "visual-engineering": { model: "github-copilot/gemini-3-pro-preview" },
+      artistry: { model: "github-copilot/gemini-3-pro-preview" },
+      writing: { model: "github-copilot/gemini-3-flash-preview" }
+    };
   }
   return config;
 }
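The new branches slot GitHub Copilot in as a fallback tier: for most agents the effective preference order is Gemini, then Claude (sometimes gated on `isMax20`), then Copilot, then the free GLM tier, while `oracle` prefers ChatGPT ahead of Copilot. A minimal sketch of that precedence idea — not the package's code, and it deliberately omits the per-agent gating such as `!installConfig.hasClaude`:

```ts
// Sketch of the fallback order the branches above encode: walk provider
// preferences in order and take the first available one, ending at the
// free GLM tier. pickModel and the prefs list are illustrative.
interface InstallConfig {
  hasClaude: boolean;
  isMax20: boolean;
  hasChatGPT: boolean;
  hasGemini: boolean;
  hasCopilot: boolean;
}

function pickModel(
  config: InstallConfig,
  prefs: Array<[keyof InstallConfig, string]>,
): string {
  for (const [flag, model] of prefs) {
    if (config[flag]) return model;
  }
  return "opencode/glm-4.7-free"; // last-resort free tier
}

const config: InstallConfig = {
  hasClaude: false,
  isMax20: false,
  hasChatGPT: false,
  hasGemini: false,
  hasCopilot: true,
};

// e.g. the "explore" agent per the diff (Max20 actually also requires hasClaude):
const exploreModel = pickModel(config, [
  ["hasGemini", "google/antigravity-gemini-3-flash"],
  ["isMax20", "anthropic/claude-haiku-4-5"],
  ["hasCopilot", "github-copilot/grok-code-fast-1"],
]); // => "github-copilot/grok-code-fast-1"
```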
@@ -6261,11 +6292,6 @@ async function addAuthPlugins(config) {
       plugins.push(pluginEntry);
     }
   }
-  if (config.hasChatGPT) {
-    if (!plugins.some((p2) => p2.startsWith("opencode-openai-codex-auth"))) {
-      plugins.push("opencode-openai-codex-auth");
-    }
-  }
   const newConfig = { ...existingConfig ?? {}, plugin: plugins };
   writeFileSync(path2, JSON.stringify(newConfig, null, 2) + `
`);
@@ -6336,9 +6362,6 @@ function addProviderConfig(config) {
   if (config.hasGemini) {
     providers.google = ANTIGRAVITY_PROVIDER_CONFIG.google;
   }
-  if (config.hasChatGPT) {
-    providers.openai = CODEX_PROVIDER_CONFIG.openai;
-  }
   if (Object.keys(providers).length > 0) {
     newConfig.provider = providers;
   }
@@ -6355,7 +6378,8 @@ function detectCurrentConfig() {
     hasClaude: true,
     isMax20: true,
     hasChatGPT: true,
-    hasGemini: false
+    hasGemini: false,
+    hasCopilot: false
   };
   const { format: format2, path: path2 } = detectConfigFormat();
   if (format2 === "none") {
@@ -6372,7 +6396,6 @@ function detectCurrentConfig() {
     return result;
   }
   result.hasGemini = plugins.some((p2) => p2.startsWith("opencode-antigravity-auth"));
-  result.hasChatGPT = plugins.some((p2) => p2.startsWith("opencode-openai-codex-auth"));
   const omoConfigPath = getOmoConfig();
   if (!existsSync3(omoConfigPath)) {
     return result;
@@ -6403,10 +6426,12 @@ function detectCurrentConfig() {
     } else if (agents["oracle"]?.model === "opencode/glm-4.7-free") {
       result.hasChatGPT = false;
     }
+    const hasAnyCopilotModel = Object.values(agents).some((agent) => agent?.model?.startsWith("github-copilot/"));
+    result.hasCopilot = hasAnyCopilotModel;
   } catch {}
   return result;
 }
-var OPENCODE_BINARIES, configContext = null, BUN_INSTALL_TIMEOUT_SECONDS = 60, BUN_INSTALL_TIMEOUT_MS, ANTIGRAVITY_PROVIDER_CONFIG, CODEX_PROVIDER_CONFIG;
+var OPENCODE_BINARIES, configContext = null, BUN_INSTALL_TIMEOUT_SECONDS = 60, BUN_INSTALL_TIMEOUT_MS, ANTIGRAVITY_PROVIDER_CONFIG;
 var init_config_manager = __esm(() => {
   init_shared();
   OPENCODE_BINARIES = ["opencode", "opencode-desktop"];
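Unlike Gemini, which is detected via its `opencode-antigravity-auth` plugin entry, Copilot presence is inferred from any agent already pinned to a `github-copilot/` model. A self-contained restatement of that heuristic, mirroring the two added lines above (types and the helper name are mine):

```ts
// Prefix-based Copilot detection, as added in detectCurrentConfig:
// one github-copilot/ model anywhere in the agents map flips the flag.
type AgentEntry = { model?: string } | undefined;

function detectCopilot(agents: Record<string, AgentEntry>): boolean {
  return Object.values(agents).some(
    (agent) => agent?.model?.startsWith("github-copilot/") ?? false,
  );
}

// Example: one Copilot-pinned agent is enough.
detectCopilot({
  oracle: { model: "github-copilot/gpt-5.2" },
  librarian: { model: "opencode/glm-4.7-free" },
}); // => true
```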
@@ -6438,54 +6463,6 @@ var init_config_manager = __esm(() => {
       }
     }
   };
-  CODEX_PROVIDER_CONFIG = {
-    openai: {
-      name: "OpenAI",
-      options: {
-        reasoningEffort: "medium",
-        reasoningSummary: "auto",
-        textVerbosity: "medium",
-        include: ["reasoning.encrypted_content"],
-        store: false
-      },
-      models: {
-        "gpt-5.2": {
-          name: "GPT 5.2 (OAuth)",
-          limit: { context: 272000, output: 128000 },
-          modalities: { input: ["text", "image"], output: ["text"] },
-          variants: {
-            none: { reasoningEffort: "none", reasoningSummary: "auto", textVerbosity: "medium" },
-            low: { reasoningEffort: "low", reasoningSummary: "auto", textVerbosity: "medium" },
-            medium: { reasoningEffort: "medium", reasoningSummary: "auto", textVerbosity: "medium" },
-            high: { reasoningEffort: "high", reasoningSummary: "detailed", textVerbosity: "medium" },
-            xhigh: { reasoningEffort: "xhigh", reasoningSummary: "detailed", textVerbosity: "medium" }
-          }
-        },
-        "gpt-5.2-codex": {
-          name: "GPT 5.2 Codex (OAuth)",
-          limit: { context: 272000, output: 128000 },
-          modalities: { input: ["text", "image"], output: ["text"] },
-          variants: {
-            low: { reasoningEffort: "low", reasoningSummary: "auto", textVerbosity: "medium" },
-            medium: { reasoningEffort: "medium", reasoningSummary: "auto", textVerbosity: "medium" },
-            high: { reasoningEffort: "high", reasoningSummary: "detailed", textVerbosity: "medium" },
-            xhigh: { reasoningEffort: "xhigh", reasoningSummary: "detailed", textVerbosity: "medium" }
-          }
-        },
-        "gpt-5.1-codex-max": {
-          name: "GPT 5.1 Codex Max (OAuth)",
-          limit: { context: 272000, output: 128000 },
-          modalities: { input: ["text", "image"], output: ["text"] },
-          variants: {
-            low: { reasoningEffort: "low", reasoningSummary: "detailed", textVerbosity: "medium" },
-            medium: { reasoningEffort: "medium", reasoningSummary: "detailed", textVerbosity: "medium" },
-            high: { reasoningEffort: "high", reasoningSummary: "detailed", textVerbosity: "medium" },
-            xhigh: { reasoningEffort: "xhigh", reasoningSummary: "detailed", textVerbosity: "medium" }
-          }
-        }
-      }
-    }
-  };
 });

 // src/hooks/auto-update-checker/constants.ts
@@ -7059,7 +7036,7 @@ var init_checker = __esm(() => {
 var require_package = __commonJS((exports, module) => {
   module.exports = {
     name: "oh-my-opencode",
-    version: "3.0.0-beta.6",
+    version: "3.0.0-beta.7",
     description: "The Best AI Agent Harness - Batteries-Included OpenCode Plugin with Multi-Model Orchestration, Parallel Background Agents, and Crafted LSP/AST Tools",
     main: "dist/index.js",
     types: "dist/index.d.ts",
@@ -7111,8 +7088,8 @@ var require_package = __commonJS((exports, module) => {
     "@code-yeongyu/comment-checker": "^0.6.1",
     "@modelcontextprotocol/sdk": "^1.25.1",
     "@openauthjs/openauth": "^0.4.3",
-    "@opencode-ai/plugin": "^1.1.1",
-    "@opencode-ai/sdk": "^1.1.1",
+    "@opencode-ai/plugin": "^1.1.19",
+    "@opencode-ai/sdk": "^1.1.19",
     commander: "^14.0.2",
     hono: "^4.10.4",
     "js-yaml": "^4.1.1",
@@ -7759,13 +7736,14 @@ function formatConfigSummary(config) {
   lines.push(formatProvider("Claude", config.hasClaude, claudeDetail));
   lines.push(formatProvider("ChatGPT", config.hasChatGPT));
   lines.push(formatProvider("Gemini", config.hasGemini));
+  lines.push(formatProvider("GitHub Copilot", config.hasCopilot, "fallback provider"));
   lines.push("");
   lines.push(import_picocolors2.default.dim("\u2500".repeat(40)));
   lines.push("");
   lines.push(import_picocolors2.default.bold(import_picocolors2.default.white("Agent Configuration")));
   lines.push("");
-  const sisyphusModel = config.hasClaude ? "claude-opus-4-5" : "glm-4.7-free";
-  const oracleModel = config.hasChatGPT ? "gpt-5.2" : config.hasClaude ? "claude-opus-4-5" : "glm-4.7-free";
+  const sisyphusModel = config.hasClaude ? "claude-opus-4-5" : config.hasCopilot ? "github-copilot/claude-opus-4.5" : "glm-4.7-free";
+  const oracleModel = config.hasChatGPT ? "gpt-5.2" : config.hasCopilot ? "github-copilot/gpt-5.2" : config.hasClaude ? "claude-opus-4-5" : "glm-4.7-free";
   const librarianModel = "glm-4.7-free";
   const frontendModel = config.hasGemini ? "antigravity-gemini-3-pro-high" : config.hasClaude ? "claude-opus-4-5" : "glm-4.7-free";
   lines.push(` ${SYMBOLS.bullet} Sisyphus ${SYMBOLS.arrow} ${import_picocolors2.default.cyan(sisyphusModel)}`);
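The summary ternaries mirror the generation logic in `generateOmoConfig`. Traced by hand for a hypothetical Copilot-only setup (the config values are an example, not from the package):

```ts
// Hypothetical Copilot-only config, evaluated against the ternaries above.
const config = {
  hasClaude: false,
  isMax20: false,
  hasChatGPT: false,
  hasGemini: false,
  hasCopilot: true,
};

const sisyphusModel = config.hasClaude
  ? "claude-opus-4-5"
  : config.hasCopilot
    ? "github-copilot/claude-opus-4.5"
    : "glm-4.7-free"; // => "github-copilot/claude-opus-4.5"

const oracleModel = config.hasChatGPT
  ? "gpt-5.2"
  : config.hasCopilot
    ? "github-copilot/gpt-5.2"
    : config.hasClaude
      ? "claude-opus-4-5"
      : "glm-4.7-free"; // => "github-copilot/gpt-5.2"
```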
@@ -7833,6 +7811,11 @@ function validateNonTuiArgs(args) {
   } else if (!["no", "yes"].includes(args.gemini)) {
     errors.push(`Invalid --gemini value: ${args.gemini} (expected: no, yes)`);
   }
+  if (args.copilot === undefined) {
+    errors.push("--copilot is required (values: no, yes)");
+  } else if (!["no", "yes"].includes(args.copilot)) {
+    errors.push(`Invalid --copilot value: ${args.copilot} (expected: no, yes)`);
+  }
   return { valid: errors.length === 0, errors };
 }
 function argsToConfig(args) {
@@ -7840,7 +7823,8 @@ function argsToConfig(args) {
     hasClaude: args.claude !== "no",
     isMax20: args.claude === "max20",
     hasChatGPT: args.chatgpt === "yes",
-    hasGemini: args.gemini === "yes"
+    hasGemini: args.gemini === "yes",
+    hasCopilot: args.copilot === "yes"
   };
 }
 function detectedToInitialValues(detected) {
@@ -7851,7 +7835,8 @@ function detectedToInitialValues(detected) {
   return {
     claude,
     chatgpt: detected.hasChatGPT ? "yes" : "no",
-    gemini: detected.hasGemini ? "yes" : "no"
+    gemini: detected.hasGemini ? "yes" : "no",
+    copilot: detected.hasCopilot ? "yes" : "no"
   };
 }
 async function runTuiMode(detected) {
@@ -7893,11 +7878,24 @@ async function runTuiMode(detected) {
     xe("Installation cancelled.");
     return null;
   }
+  const copilot = await ve({
+    message: "Do you have a GitHub Copilot subscription?",
+    options: [
+      { value: "no", label: "No", hint: "Only native providers will be used" },
+      { value: "yes", label: "Yes", hint: "Fallback option when native providers unavailable" }
+    ],
+    initialValue: initial.copilot
+  });
+  if (pD(copilot)) {
+    xe("Installation cancelled.");
+    return null;
+  }
   return {
     hasClaude: claude !== "no",
     isMax20: claude === "max20",
     hasChatGPT: chatgpt === "yes",
-    hasGemini: gemini === "yes"
+    hasGemini: gemini === "yes",
+    hasCopilot: copilot === "yes"
   };
 }
 async function runNonTuiInstall(args) {
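The minified `ve`/`pD`/`xe` calls follow the shape of @clack/prompts' `select`/`isCancel`/`cancel`; that mapping is an inference from the call pattern, not something the bundle confirms. De-minified under that assumption, the new TUI step would read roughly:

```ts
// De-minified sketch of the new Copilot prompt, assuming ve/pD/xe are
// @clack/prompts' select/isCancel/cancel. askCopilot is a wrapper of mine;
// in the bundle this runs inline inside runTuiMode with initial.copilot.
import { select, isCancel, cancel } from "@clack/prompts";

async function askCopilot(initialValue: "no" | "yes"): Promise<boolean | null> {
  const copilot = await select({
    message: "Do you have a GitHub Copilot subscription?",
    options: [
      { value: "no", label: "No", hint: "Only native providers will be used" },
      { value: "yes", label: "Yes", hint: "Fallback option when native providers unavailable" },
    ],
    initialValue,
  });
  if (isCancel(copilot)) {
    cancel("Installation cancelled.");
    return null; // caller aborts the install
  }
  return copilot === "yes";
}
```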
@@ -7909,7 +7907,7 @@ async function runNonTuiInstall(args) {
     console.log(` ${SYMBOLS.bullet} ${err}`);
   }
   console.log();
-  printInfo("Usage: bunx oh-my-opencode install --no-tui --claude=<no|yes|max20> --chatgpt=<no|yes> --gemini=<no|yes>");
+  printInfo("Usage: bunx oh-my-opencode install --no-tui --claude=<no|yes|max20> --chatgpt=<no|yes> --gemini=<no|yes> --copilot=<no|yes>");
   console.log();
   return 1;
 }
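Per the updated usage string and the validation above, `--copilot` is now mandatory in non-TUI mode. A complete invocation looks like this (the specific flag values are an illustrative combination):

```
bunx oh-my-opencode install --no-tui --claude=max20 --chatgpt=no --gemini=yes --copilot=yes
```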
@@ -7939,7 +7937,7 @@ async function runNonTuiInstall(args) {
     return 1;
   }
   printSuccess(`Plugin ${isUpdate ? "verified" : "added"} ${SYMBOLS.arrow} ${import_picocolors2.default.dim(pluginResult.configPath)}`);
-  if (config.hasGemini || config.hasChatGPT) {
+  if (config.hasGemini) {
     printStep(step++, totalSteps, "Adding auth plugins...");
     const authResult = await addAuthPlugins(config);
     if (!authResult.success) {
@@ -7965,23 +7963,9 @@ async function runNonTuiInstall(args) {
   }
   printSuccess(`Config written ${SYMBOLS.arrow} ${import_picocolors2.default.dim(omoResult.configPath)}`);
   printBox(formatConfigSummary(config), isUpdate ? "Updated Configuration" : "Installation Complete");
-  if (!config.hasClaude && !config.hasChatGPT && !config.hasGemini) {
+  if (!config.hasClaude && !config.hasChatGPT && !config.hasGemini && !config.hasCopilot) {
     printWarning("No model providers configured. Using opencode/glm-4.7-free as fallback.");
   }
-  if ((config.hasClaude || config.hasChatGPT || config.hasGemini) && !args.skipAuth) {
-    console.log(import_picocolors2.default.bold("Next Steps - Authenticate your providers:"));
-    console.log();
-    if (config.hasClaude) {
-      console.log(` ${SYMBOLS.arrow} ${import_picocolors2.default.dim("opencode auth login")} ${import_picocolors2.default.gray("(select Anthropic \u2192 Claude Pro/Max)")}`);
-    }
-    if (config.hasChatGPT) {
-      console.log(` ${SYMBOLS.arrow} ${import_picocolors2.default.dim("opencode auth login")} ${import_picocolors2.default.gray("(select OpenAI \u2192 ChatGPT Plus/Pro)")}`);
-    }
-    if (config.hasGemini) {
-      console.log(` ${SYMBOLS.arrow} ${import_picocolors2.default.dim("opencode auth login")} ${import_picocolors2.default.gray("(select Google \u2192 OAuth with Antigravity)")}`);
-    }
-    console.log();
-  }
   console.log(`${SYMBOLS.star} ${import_picocolors2.default.bold(import_picocolors2.default.green(isUpdate ? "Configuration updated!" : "Installation complete!"))}`);
   console.log(` Run ${import_picocolors2.default.cyan("opencode")} to start!`);
   console.log();
@@ -7993,6 +7977,13 @@ async function runNonTuiInstall(args) {
   console.log();
   console.log(import_picocolors2.default.dim("oMoMoMoMo... Enjoy!"));
   console.log();
+  if ((config.hasClaude || config.hasChatGPT || config.hasGemini || config.hasCopilot) && !args.skipAuth) {
+    printBox(`Run ${import_picocolors2.default.cyan("opencode auth login")} and select your provider:
+` + (config.hasClaude ? ` ${SYMBOLS.bullet} Anthropic ${import_picocolors2.default.gray("\u2192 Claude Pro/Max")}
+` : "") + (config.hasChatGPT ? ` ${SYMBOLS.bullet} OpenAI ${import_picocolors2.default.gray("\u2192 ChatGPT Plus/Pro")}
+` : "") + (config.hasGemini ? ` ${SYMBOLS.bullet} Google ${import_picocolors2.default.gray("\u2192 OAuth with Antigravity")}
+` : "") + (config.hasCopilot ? ` ${SYMBOLS.bullet} GitHub ${import_picocolors2.default.gray("\u2192 Copilot")}` : ""), "\uD83D\uDD10 Authenticate Your Providers");
+  }
   return 0;
 }
 async function install(args) {
@@ -8029,7 +8020,7 @@ async function install(args) {
     return 1;
   }
   s.stop(`Plugin added to ${import_picocolors2.default.cyan(pluginResult.configPath)}`);
-  if (config.hasGemini || config.hasChatGPT) {
+  if (config.hasGemini) {
     s.start("Adding auth plugins (fetching latest versions)");
     const authResult = await addAuthPlugins(config);
     if (!authResult.success) {
@@ -8055,24 +8046,10 @@ async function install(args) {
     return 1;
   }
   s.stop(`Config written to ${import_picocolors2.default.cyan(omoResult.configPath)}`);
-  if (!config.hasClaude && !config.hasChatGPT && !config.hasGemini) {
+  if (!config.hasClaude && !config.hasChatGPT && !config.hasGemini && !config.hasCopilot) {
     M2.warn("No model providers configured. Using opencode/glm-4.7-free as fallback.");
   }
   Me(formatConfigSummary(config), isUpdate ? "Updated Configuration" : "Installation Complete");
-  if ((config.hasClaude || config.hasChatGPT || config.hasGemini) && !args.skipAuth) {
-    const steps = [];
-    if (config.hasClaude) {
-      steps.push(`${import_picocolors2.default.dim("opencode auth login")} ${import_picocolors2.default.gray("(select Anthropic \u2192 Claude Pro/Max)")}`);
-    }
-    if (config.hasChatGPT) {
-      steps.push(`${import_picocolors2.default.dim("opencode auth login")} ${import_picocolors2.default.gray("(select OpenAI \u2192 ChatGPT Plus/Pro)")}`);
-    }
-    if (config.hasGemini) {
-      steps.push(`${import_picocolors2.default.dim("opencode auth login")} ${import_picocolors2.default.gray("(select Google \u2192 OAuth with Antigravity)")}`);
-    }
-    Me(steps.join(`
-`), "Next Steps - Authenticate your providers");
-  }
   M2.success(import_picocolors2.default.bold(isUpdate ? "Configuration updated!" : "Installation complete!"));
   M2.message(`Run ${import_picocolors2.default.cyan("opencode")} to start!`);
   Me(`Include ${import_picocolors2.default.cyan("ultrawork")} (or ${import_picocolors2.default.cyan("ulw")}) in your prompt.
@@ -8081,6 +8058,25 @@ async function install(args) {
   M2.message(`${import_picocolors2.default.yellow("\u2605")} If you found this helpful, consider starring the repo!`);
   M2.message(` ${import_picocolors2.default.dim("gh repo star code-yeongyu/oh-my-opencode")}`);
   Se(import_picocolors2.default.green("oMoMoMoMo... Enjoy!"));
+  if ((config.hasClaude || config.hasChatGPT || config.hasGemini || config.hasCopilot) && !args.skipAuth) {
+    const providers = [];
+    if (config.hasClaude)
+      providers.push(`Anthropic ${import_picocolors2.default.gray("\u2192 Claude Pro/Max")}`);
+    if (config.hasChatGPT)
+      providers.push(`OpenAI ${import_picocolors2.default.gray("\u2192 ChatGPT Plus/Pro")}`);
+    if (config.hasGemini)
+      providers.push(`Google ${import_picocolors2.default.gray("\u2192 OAuth with Antigravity")}`);
+    if (config.hasCopilot)
+      providers.push(`GitHub ${import_picocolors2.default.gray("\u2192 Copilot")}`);
+    console.log();
+    console.log(import_picocolors2.default.bold("\uD83D\uDD10 Authenticate Your Providers"));
+    console.log();
+    console.log(` Run ${import_picocolors2.default.cyan("opencode auth login")} and select:`);
+    for (const provider of providers) {
+      console.log(` ${SYMBOLS.bullet} ${provider}`);
+    }
+    console.log();
+  }
   return 0;
 }
 // node_modules/@opencode-ai/sdk/dist/gen/core/serverSentEvents.gen.js
@@ -22849,7 +22845,6 @@ var DynamicContextPruningConfigSchema = exports_external.object({
   "todowrite",
   "todoread",
   "lsp_rename",
-  "lsp_code_action_resolve",
   "session_read",
   "session_write",
   "session_search"
@@ -5,6 +5,7 @@ export interface InstallArgs {
   claude?: ClaudeSubscription;
   chatgpt?: BooleanArg;
   gemini?: BooleanArg;
+  copilot?: BooleanArg;
   skipAuth?: boolean;
 }
 export interface InstallConfig {
@@ -12,6 +13,7 @@ export interface InstallConfig {
   isMax20: boolean;
   hasChatGPT: boolean;
   hasGemini: boolean;
+  hasCopilot: boolean;
 }
 export interface ConfigMergeResult {
   success: boolean;
@@ -24,4 +26,5 @@ export interface DetectedConfig {
   isMax20: boolean;
   hasChatGPT: boolean;
   hasGemini: boolean;
+  hasCopilot: boolean;
 }
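With the declaration hunks applied, consumers see the new field on all three shapes. An illustrative construction (the import path is a guess, since the diff does not show which .d.ts file these hunks belong to, and the values are examples):

```ts
// Field names come from the .d.ts hunks above; module path and values are
// assumptions for illustration only.
import type { InstallArgs, InstallConfig } from "./types";

const args: InstallArgs = {
  claude: "max20",
  chatgpt: "no",
  gemini: "yes",
  copilot: "yes", // new in beta.7; required in --no-tui mode
  skipAuth: false,
};

const config: InstallConfig = {
  hasClaude: true,
  isMax20: true,
  hasChatGPT: false,
  hasGemini: true,
  hasCopilot: true, // new field
};
```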
@@ -1 +1 @@
- export declare const INIT_DEEP_TEMPLATE = "# /init-deep\n\nGenerate hierarchical AGENTS.md files. Root + complexity-scored subdirectories.\n\n## Usage\n\n```\n/init-deep # Update mode: modify existing + create new where warranted\n/init-deep --create-new # Read existing \u2192 remove all \u2192 regenerate from scratch\n/init-deep --max-depth=2 # Limit directory depth (default: 3)\n```\n\n---\n\n## Workflow (High-Level)\n\n1. **Discovery + Analysis** (concurrent)\n - Fire background explore agents immediately\n - Main session: bash structure + LSP codemap + read existing AGENTS.md\n2. **Score & Decide** - Determine AGENTS.md locations from merged findings\n3. **Generate** - Root first, then subdirs in parallel\n4. **Review** - Deduplicate, trim, validate\n\n<critical>\n**TodoWrite ALL phases. Mark in_progress \u2192 completed in real-time.**\n```\nTodoWrite([\n { id: \"discovery\", content: \"Fire explore agents + LSP codemap + read existing\", status: \"pending\", priority: \"high\" },\n { id: \"scoring\", content: \"Score directories, determine locations\", status: \"pending\", priority: \"high\" },\n { id: \"generate\", content: \"Generate AGENTS.md files (root + subdirs)\", status: \"pending\", priority: \"high\" },\n { id: \"review\", content: \"Deduplicate, validate, trim\", status: \"pending\", priority: \"medium\" }\n])\n```\n</critical>\n\n---\n\n## Phase 1: Discovery + Analysis (Concurrent)\n\n**Mark \"discovery\" as in_progress.**\n\n### Fire Background Explore Agents IMMEDIATELY\n\nDon't wait\u2014these run async while main session works.\n\n```\n// Fire all at once, collect results later\nsisyphus_task(agent=\"explore\", prompt=\"Project structure: PREDICT standard patterns for detected language \u2192 REPORT deviations only\")\nsisyphus_task(agent=\"explore\", prompt=\"Entry points: FIND main files \u2192 REPORT non-standard organization\")\nsisyphus_task(agent=\"explore\", prompt=\"Conventions: FIND config files (.eslintrc, pyproject.toml, .editorconfig) \u2192 REPORT project-specific rules\")\nsisyphus_task(agent=\"explore\", prompt=\"Anti-patterns: FIND 'DO NOT', 'NEVER', 'ALWAYS', 'DEPRECATED' comments \u2192 LIST forbidden patterns\")\nsisyphus_task(agent=\"explore\", prompt=\"Build/CI: FIND .github/workflows, Makefile \u2192 REPORT non-standard patterns\")\nsisyphus_task(agent=\"explore\", prompt=\"Test patterns: FIND test configs, test structure \u2192 REPORT unique conventions\")\n```\n\n<dynamic-agents>\n**DYNAMIC AGENT SPAWNING**: After bash analysis, spawn ADDITIONAL explore agents based on project scale:\n\n| Factor | Threshold | Additional Agents |\n|--------|-----------|-------------------|\n| **Total files** | >100 | +1 per 100 files |\n| **Total lines** | >10k | +1 per 10k lines |\n| **Directory depth** | \u22654 | +2 for deep exploration |\n| **Large files (>500 lines)** | >10 files | +1 for complexity hotspots |\n| **Monorepo** | detected | +1 per package/workspace |\n| **Multiple languages** | >1 | +1 per language |\n\n```bash\n# Measure project scale first\ntotal_files=$(find . -type f -not -path '*/node_modules/*' -not -path '*/.git/*' | wc -l)\ntotal_lines=$(find . -type f \\( -name \"*.ts\" -o -name \"*.py\" -o -name \"*.go\" \\) -not -path '*/node_modules/*' -exec wc -l {} + 2>/dev/null | tail -1 | awk '{print $1}')\nlarge_files=$(find . -type f \\( -name \"*.ts\" -o -name \"*.py\" \\) -not -path '*/node_modules/*' -exec wc -l {} + 2>/dev/null | awk '$1 > 500 {count++} END {print count+0}')\nmax_depth=$(find . 
-type d -not -path '*/node_modules/*' -not -path '*/.git/*' | awk -F/ '{print NF}' | sort -rn | head -1)\n```\n\nExample spawning:\n```\n// 500 files, 50k lines, depth 6, 15 large files \u2192 spawn 5+5+2+1 = 13 additional agents\nsisyphus_task(agent=\"explore\", prompt=\"Large file analysis: FIND files >500 lines, REPORT complexity hotspots\")\nsisyphus_task(agent=\"explore\", prompt=\"Deep modules at depth 4+: FIND hidden patterns, internal conventions\")\nsisyphus_task(agent=\"explore\", prompt=\"Cross-cutting concerns: FIND shared utilities across directories\")\n// ... more based on calculation\n```\n</dynamic-agents>\n\n### Main Session: Concurrent Analysis\n\n**While background agents run**, main session does:\n\n#### 1. Bash Structural Analysis\n```bash\n# Directory depth + file counts\nfind . -type d -not -path '*/\\.*' -not -path '*/node_modules/*' -not -path '*/venv/*' -not -path '*/dist/*' -not -path '*/build/*' | awk -F/ '{print NF-1}' | sort -n | uniq -c\n\n# Files per directory (top 30)\nfind . -type f -not -path '*/\\.*' -not -path '*/node_modules/*' | sed 's|/[^/]*$||' | sort | uniq -c | sort -rn | head -30\n\n# Code concentration by extension\nfind . -type f \\( -name \"*.py\" -o -name \"*.ts\" -o -name \"*.tsx\" -o -name \"*.js\" -o -name \"*.go\" -o -name \"*.rs\" \\) -not -path '*/node_modules/*' | sed 's|/[^/]*$||' | sort | uniq -c | sort -rn | head -20\n\n# Existing AGENTS.md / CLAUDE.md\nfind . -type f \\( -name \"AGENTS.md\" -o -name \"CLAUDE.md\" \\) -not -path '*/node_modules/*' 2>/dev/null\n```\n\n#### 2. Read Existing AGENTS.md\n```\nFor each existing file found:\n Read(filePath=file)\n Extract: key insights, conventions, anti-patterns\n Store in EXISTING_AGENTS map\n```\n\nIf `--create-new`: Read all existing first (preserve context) \u2192 then delete all \u2192 regenerate.\n\n#### 3. LSP Codemap (if available)\n```\nlsp_servers() # Check availability\n\n# Entry points (parallel)\nlsp_document_symbols(filePath=\"src/index.ts\")\nlsp_document_symbols(filePath=\"main.py\")\n\n# Key symbols (parallel)\nlsp_workspace_symbols(filePath=\".\", query=\"class\")\nlsp_workspace_symbols(filePath=\".\", query=\"interface\")\nlsp_workspace_symbols(filePath=\".\", query=\"function\")\n\n# Centrality for top exports\nlsp_find_references(filePath=\"...\", line=X, character=Y)\n```\n\n**LSP Fallback**: If unavailable, rely on explore agents + AST-grep.\n\n### Collect Background Results\n\n```\n// After main session analysis done, collect all task results\nfor each task_id: background_output(task_id=\"...\")\n```\n\n**Merge: bash + LSP + existing + explore findings. 
Mark \"discovery\" as completed.**\n\n---\n\n## Phase 2: Scoring & Location Decision\n\n**Mark \"scoring\" as in_progress.**\n\n### Scoring Matrix\n\n| Factor | Weight | High Threshold | Source |\n|--------|--------|----------------|--------|\n| File count | 3x | >20 | bash |\n| Subdir count | 2x | >5 | bash |\n| Code ratio | 2x | >70% | bash |\n| Unique patterns | 1x | Has own config | explore |\n| Module boundary | 2x | Has index.ts/__init__.py | bash |\n| Symbol density | 2x | >30 symbols | LSP |\n| Export count | 2x | >10 exports | LSP |\n| Reference centrality | 3x | >20 refs | LSP |\n\n### Decision Rules\n\n| Score | Action |\n|-------|--------|\n| **Root (.)** | ALWAYS create |\n| **>15** | Create AGENTS.md |\n| **8-15** | Create if distinct domain |\n| **<8** | Skip (parent covers) |\n\n### Output\n```\nAGENTS_LOCATIONS = [\n { path: \".\", type: \"root\" },\n { path: \"src/hooks\", score: 18, reason: \"high complexity\" },\n { path: \"src/api\", score: 12, reason: \"distinct domain\" }\n]\n```\n\n**Mark \"scoring\" as completed.**\n\n---\n\n## Phase 3: Generate AGENTS.md\n\n**Mark \"generate\" as in_progress.**\n\n### Root AGENTS.md (Full Treatment)\n\n```markdown\n# PROJECT KNOWLEDGE BASE\n\n**Generated:** {TIMESTAMP}\n**Commit:** {SHORT_SHA}\n**Branch:** {BRANCH}\n\n## OVERVIEW\n{1-2 sentences: what + core stack}\n\n## STRUCTURE\n\\`\\`\\`\n{root}/\n\u251C\u2500\u2500 {dir}/ # {non-obvious purpose only}\n\u2514\u2500\u2500 {entry}\n\\`\\`\\`\n\n## WHERE TO LOOK\n| Task | Location | Notes |\n|------|----------|-------|\n\n## CODE MAP\n{From LSP - skip if unavailable or project <10 files}\n\n| Symbol | Type | Location | Refs | Role |\n|--------|------|----------|------|------|\n\n## CONVENTIONS\n{ONLY deviations from standard}\n\n## ANTI-PATTERNS (THIS PROJECT)\n{Explicitly forbidden here}\n\n## UNIQUE STYLES\n{Project-specific}\n\n## COMMANDS\n\\`\\`\\`bash\n{dev/test/build}\n\\`\\`\\`\n\n## NOTES\n{Gotchas}\n```\n\n**Quality gates**: 50-150 lines, no generic advice, no obvious info.\n\n### Subdirectory AGENTS.md (Parallel)\n\nLaunch document-writer agents for each location:\n\n```\nfor loc in AGENTS_LOCATIONS (except root):\n sisyphus_task(agent=\"document-writer\", prompt=\\`\n Generate AGENTS.md for: ${loc.path}\n - Reason: ${loc.reason}\n - 30-80 lines max\n - NEVER repeat parent content\n - Sections: OVERVIEW (1 line), STRUCTURE (if >5 subdirs), WHERE TO LOOK, CONVENTIONS (if different), ANTI-PATTERNS\n \\`)\n```\n\n**Wait for all. 
Mark \"generate\" as completed.**\n\n---\n\n## Phase 4: Review & Deduplicate\n\n**Mark \"review\" as in_progress.**\n\nFor each generated file:\n- Remove generic advice\n- Remove parent duplicates\n- Trim to size limits\n- Verify telegraphic style\n\n**Mark \"review\" as completed.**\n\n---\n\n## Final Report\n\n```\n=== init-deep Complete ===\n\nMode: {update | create-new}\n\nFiles:\n \u2713 ./AGENTS.md (root, {N} lines)\n \u2713 ./src/hooks/AGENTS.md ({N} lines)\n\nDirs Analyzed: {N}\nAGENTS.md Created: {N}\nAGENTS.md Updated: {N}\n\nHierarchy:\n ./AGENTS.md\n \u2514\u2500\u2500 src/hooks/AGENTS.md\n```\n\n---\n\n## Anti-Patterns\n\n- **Static agent count**: MUST vary agents based on project size/depth\n- **Sequential execution**: MUST parallel (explore + LSP concurrent)\n- **Ignoring existing**: ALWAYS read existing first, even with --create-new\n- **Over-documenting**: Not every dir needs AGENTS.md\n- **Redundancy**: Child never repeats parent\n- **Generic content**: Remove anything that applies to ALL projects\n- **Verbose style**: Telegraphic or die";
+ export declare const INIT_DEEP_TEMPLATE = "# /init-deep\n\nGenerate hierarchical AGENTS.md files. Root + complexity-scored subdirectories.\n\n## Usage\n\n```\n/init-deep # Update mode: modify existing + create new where warranted\n/init-deep --create-new # Read existing \u2192 remove all \u2192 regenerate from scratch\n/init-deep --max-depth=2 # Limit directory depth (default: 3)\n```\n\n---\n\n## Workflow (High-Level)\n\n1. **Discovery + Analysis** (concurrent)\n - Fire background explore agents immediately\n - Main session: bash structure + LSP codemap + read existing AGENTS.md\n2. **Score & Decide** - Determine AGENTS.md locations from merged findings\n3. **Generate** - Root first, then subdirs in parallel\n4. **Review** - Deduplicate, trim, validate\n\n<critical>\n**TodoWrite ALL phases. Mark in_progress \u2192 completed in real-time.**\n```\nTodoWrite([\n { id: \"discovery\", content: \"Fire explore agents + LSP codemap + read existing\", status: \"pending\", priority: \"high\" },\n { id: \"scoring\", content: \"Score directories, determine locations\", status: \"pending\", priority: \"high\" },\n { id: \"generate\", content: \"Generate AGENTS.md files (root + subdirs)\", status: \"pending\", priority: \"high\" },\n { id: \"review\", content: \"Deduplicate, validate, trim\", status: \"pending\", priority: \"medium\" }\n])\n```\n</critical>\n\n---\n\n## Phase 1: Discovery + Analysis (Concurrent)\n\n**Mark \"discovery\" as in_progress.**\n\n### Fire Background Explore Agents IMMEDIATELY\n\nDon't wait\u2014these run async while main session works.\n\n```\n// Fire all at once, collect results later\nsisyphus_task(agent=\"explore\", prompt=\"Project structure: PREDICT standard patterns for detected language \u2192 REPORT deviations only\")\nsisyphus_task(agent=\"explore\", prompt=\"Entry points: FIND main files \u2192 REPORT non-standard organization\")\nsisyphus_task(agent=\"explore\", prompt=\"Conventions: FIND config files (.eslintrc, pyproject.toml, .editorconfig) \u2192 REPORT project-specific rules\")\nsisyphus_task(agent=\"explore\", prompt=\"Anti-patterns: FIND 'DO NOT', 'NEVER', 'ALWAYS', 'DEPRECATED' comments \u2192 LIST forbidden patterns\")\nsisyphus_task(agent=\"explore\", prompt=\"Build/CI: FIND .github/workflows, Makefile \u2192 REPORT non-standard patterns\")\nsisyphus_task(agent=\"explore\", prompt=\"Test patterns: FIND test configs, test structure \u2192 REPORT unique conventions\")\n```\n\n<dynamic-agents>\n**DYNAMIC AGENT SPAWNING**: After bash analysis, spawn ADDITIONAL explore agents based on project scale:\n\n| Factor | Threshold | Additional Agents |\n|--------|-----------|-------------------|\n| **Total files** | >100 | +1 per 100 files |\n| **Total lines** | >10k | +1 per 10k lines |\n| **Directory depth** | \u22654 | +2 for deep exploration |\n| **Large files (>500 lines)** | >10 files | +1 for complexity hotspots |\n| **Monorepo** | detected | +1 per package/workspace |\n| **Multiple languages** | >1 | +1 per language |\n\n```bash\n# Measure project scale first\ntotal_files=$(find . -type f -not -path '*/node_modules/*' -not -path '*/.git/*' | wc -l)\ntotal_lines=$(find . -type f \\( -name \"*.ts\" -o -name \"*.py\" -o -name \"*.go\" \\) -not -path '*/node_modules/*' -exec wc -l {} + 2>/dev/null | tail -1 | awk '{print $1}')\nlarge_files=$(find . -type f \\( -name \"*.ts\" -o -name \"*.py\" \\) -not -path '*/node_modules/*' -exec wc -l {} + 2>/dev/null | awk '$1 > 500 {count++} END {print count+0}')\nmax_depth=$(find . 
-type d -not -path '*/node_modules/*' -not -path '*/.git/*' | awk -F/ '{print NF}' | sort -rn | head -1)\n```\n\nExample spawning:\n```\n// 500 files, 50k lines, depth 6, 15 large files \u2192 spawn 5+5+2+1 = 13 additional agents\nsisyphus_task(agent=\"explore\", prompt=\"Large file analysis: FIND files >500 lines, REPORT complexity hotspots\")\nsisyphus_task(agent=\"explore\", prompt=\"Deep modules at depth 4+: FIND hidden patterns, internal conventions\")\nsisyphus_task(agent=\"explore\", prompt=\"Cross-cutting concerns: FIND shared utilities across directories\")\n// ... more based on calculation\n```\n</dynamic-agents>\n\n### Main Session: Concurrent Analysis\n\n**While background agents run**, main session does:\n\n#### 1. Bash Structural Analysis\n```bash\n# Directory depth + file counts\nfind . -type d -not -path '*/\\.*' -not -path '*/node_modules/*' -not -path '*/venv/*' -not -path '*/dist/*' -not -path '*/build/*' | awk -F/ '{print NF-1}' | sort -n | uniq -c\n\n# Files per directory (top 30)\nfind . -type f -not -path '*/\\.*' -not -path '*/node_modules/*' | sed 's|/[^/]*$||' | sort | uniq -c | sort -rn | head -30\n\n# Code concentration by extension\nfind . -type f \\( -name \"*.py\" -o -name \"*.ts\" -o -name \"*.tsx\" -o -name \"*.js\" -o -name \"*.go\" -o -name \"*.rs\" \\) -not -path '*/node_modules/*' | sed 's|/[^/]*$||' | sort | uniq -c | sort -rn | head -20\n\n# Existing AGENTS.md / CLAUDE.md\nfind . -type f \\( -name \"AGENTS.md\" -o -name \"CLAUDE.md\" \\) -not -path '*/node_modules/*' 2>/dev/null\n```\n\n#### 2. Read Existing AGENTS.md\n```\nFor each existing file found:\n Read(filePath=file)\n Extract: key insights, conventions, anti-patterns\n Store in EXISTING_AGENTS map\n```\n\nIf `--create-new`: Read all existing first (preserve context) \u2192 then delete all \u2192 regenerate.\n\n#### 3. LSP Codemap (if available)\n```\nlsp_servers() # Check availability\n\n# Entry points (parallel)\nlsp_symbols(filePath=\"src/index.ts\", scope=\"document\")\nlsp_symbols(filePath=\"main.py\", scope=\"document\")\n\n# Key symbols (parallel)\nlsp_symbols(filePath=\".\", scope=\"workspace\", query=\"class\")\nlsp_symbols(filePath=\".\", scope=\"workspace\", query=\"interface\")\nlsp_symbols(filePath=\".\", scope=\"workspace\", query=\"function\")\n\n# Centrality for top exports\nlsp_find_references(filePath=\"...\", line=X, character=Y)\n```\n\n**LSP Fallback**: If unavailable, rely on explore agents + AST-grep.\n\n### Collect Background Results\n\n```\n// After main session analysis done, collect all task results\nfor each task_id: background_output(task_id=\"...\")\n```\n\n**Merge: bash + LSP + existing + explore findings. 
Mark \"discovery\" as completed.**\n\n---\n\n## Phase 2: Scoring & Location Decision\n\n**Mark \"scoring\" as in_progress.**\n\n### Scoring Matrix\n\n| Factor | Weight | High Threshold | Source |\n|--------|--------|----------------|--------|\n| File count | 3x | >20 | bash |\n| Subdir count | 2x | >5 | bash |\n| Code ratio | 2x | >70% | bash |\n| Unique patterns | 1x | Has own config | explore |\n| Module boundary | 2x | Has index.ts/__init__.py | bash |\n| Symbol density | 2x | >30 symbols | LSP |\n| Export count | 2x | >10 exports | LSP |\n| Reference centrality | 3x | >20 refs | LSP |\n\n### Decision Rules\n\n| Score | Action |\n|-------|--------|\n| **Root (.)** | ALWAYS create |\n| **>15** | Create AGENTS.md |\n| **8-15** | Create if distinct domain |\n| **<8** | Skip (parent covers) |\n\n### Output\n```\nAGENTS_LOCATIONS = [\n { path: \".\", type: \"root\" },\n { path: \"src/hooks\", score: 18, reason: \"high complexity\" },\n { path: \"src/api\", score: 12, reason: \"distinct domain\" }\n]\n```\n\n**Mark \"scoring\" as completed.**\n\n---\n\n## Phase 3: Generate AGENTS.md\n\n**Mark \"generate\" as in_progress.**\n\n### Root AGENTS.md (Full Treatment)\n\n```markdown\n# PROJECT KNOWLEDGE BASE\n\n**Generated:** {TIMESTAMP}\n**Commit:** {SHORT_SHA}\n**Branch:** {BRANCH}\n\n## OVERVIEW\n{1-2 sentences: what + core stack}\n\n## STRUCTURE\n\\`\\`\\`\n{root}/\n\u251C\u2500\u2500 {dir}/ # {non-obvious purpose only}\n\u2514\u2500\u2500 {entry}\n\\`\\`\\`\n\n## WHERE TO LOOK\n| Task | Location | Notes |\n|------|----------|-------|\n\n## CODE MAP\n{From LSP - skip if unavailable or project <10 files}\n\n| Symbol | Type | Location | Refs | Role |\n|--------|------|----------|------|------|\n\n## CONVENTIONS\n{ONLY deviations from standard}\n\n## ANTI-PATTERNS (THIS PROJECT)\n{Explicitly forbidden here}\n\n## UNIQUE STYLES\n{Project-specific}\n\n## COMMANDS\n\\`\\`\\`bash\n{dev/test/build}\n\\`\\`\\`\n\n## NOTES\n{Gotchas}\n```\n\n**Quality gates**: 50-150 lines, no generic advice, no obvious info.\n\n### Subdirectory AGENTS.md (Parallel)\n\nLaunch document-writer agents for each location:\n\n```\nfor loc in AGENTS_LOCATIONS (except root):\n sisyphus_task(agent=\"document-writer\", prompt=\\`\n Generate AGENTS.md for: ${loc.path}\n - Reason: ${loc.reason}\n - 30-80 lines max\n - NEVER repeat parent content\n - Sections: OVERVIEW (1 line), STRUCTURE (if >5 subdirs), WHERE TO LOOK, CONVENTIONS (if different), ANTI-PATTERNS\n \\`)\n```\n\n**Wait for all. 
Mark \"generate\" as completed.**\n\n---\n\n## Phase 4: Review & Deduplicate\n\n**Mark \"review\" as in_progress.**\n\nFor each generated file:\n- Remove generic advice\n- Remove parent duplicates\n- Trim to size limits\n- Verify telegraphic style\n\n**Mark \"review\" as completed.**\n\n---\n\n## Final Report\n\n```\n=== init-deep Complete ===\n\nMode: {update | create-new}\n\nFiles:\n \u2713 ./AGENTS.md (root, {N} lines)\n \u2713 ./src/hooks/AGENTS.md ({N} lines)\n\nDirs Analyzed: {N}\nAGENTS.md Created: {N}\nAGENTS.md Updated: {N}\n\nHierarchy:\n ./AGENTS.md\n \u2514\u2500\u2500 src/hooks/AGENTS.md\n```\n\n---\n\n## Anti-Patterns\n\n- **Static agent count**: MUST vary agents based on project size/depth\n- **Sequential execution**: MUST parallel (explore + LSP concurrent)\n- **Ignoring existing**: ALWAYS read existing first, even with --create-new\n- **Over-documenting**: Not every dir needs AGENTS.md\n- **Redundancy**: Child never repeats parent\n- **Generic content**: Remove anything that applies to ALL projects\n- **Verbose style**: Telegraphic or die";
@@ -1 +1 @@
- export declare const REFACTOR_TEMPLATE = "# Intelligent Refactor Command\n\n## Usage\n```\n/refactor <refactoring-target> [--scope=<file|module|project>] [--strategy=<safe|aggressive>]\n\nArguments:\n refactoring-target: What to refactor. Can be:\n - File path: src/auth/handler.ts\n - Symbol name: \"AuthService class\"\n - Pattern: \"all functions using deprecated API\"\n - Description: \"extract validation logic into separate module\"\n\nOptions:\n --scope: Refactoring scope (default: module)\n - file: Single file only\n - module: Module/directory scope\n - project: Entire codebase\n\n --strategy: Risk tolerance (default: safe)\n - safe: Conservative, maximum test coverage required\n - aggressive: Allow broader changes with adequate coverage\n```\n\n## What This Command Does\n\nPerforms intelligent, deterministic refactoring with full codebase awareness. Unlike blind search-and-replace, this command:\n\n1. **Understands your intent** - Analyzes what you actually want to achieve\n2. **Maps the codebase** - Builds a definitive codemap before touching anything\n3. **Assesses risk** - Evaluates test coverage and determines verification strategy\n4. **Plans meticulously** - Creates a detailed plan with Plan agent\n5. **Executes precisely** - Step-by-step refactoring with LSP and AST-grep\n6. **Verifies constantly** - Runs tests after each change to ensure zero regression\n\n---\n\n# PHASE 0: INTENT GATE (MANDATORY FIRST STEP)\n\n**BEFORE ANY ACTION, classify and validate the request.**\n\n## Step 0.1: Parse Request Type\n\n| Signal | Classification | Action |\n|--------|----------------|--------|\n| Specific file/symbol | Explicit | Proceed to codebase analysis |\n| \"Refactor X to Y\" | Clear transformation | Proceed to codebase analysis |\n| \"Improve\", \"Clean up\" | Open-ended | **MUST ask**: \"What specific improvement?\" |\n| Ambiguous scope | Uncertain | **MUST ask**: \"Which modules/files?\" |\n| Missing context | Incomplete | **MUST ask**: \"What's the desired outcome?\" |\n\n## Step 0.2: Validate Understanding\n\nBefore proceeding, confirm:\n- [ ] Target is clearly identified\n- [ ] Desired outcome is understood\n- [ ] Scope is defined (file/module/project)\n- [ ] Success criteria can be articulated\n\n**If ANY of above is unclear, ASK CLARIFYING QUESTION:**\n\n```\nI want to make sure I understand the refactoring goal correctly.\n\n**What I understood**: [interpretation]\n**What I'm unsure about**: [specific ambiguity]\n\nOptions I see:\n1. [Option A] - [implications]\n2. 
[Option B] - [implications]\n\n**My recommendation**: [suggestion with reasoning]\n\nShould I proceed with [recommendation], or would you prefer differently?\n```\n\n## Step 0.3: Create Initial Todos\n\n**IMMEDIATELY after understanding the request, create todos:**\n\n```\nTodoWrite([\n {\"id\": \"phase-1\", \"content\": \"PHASE 1: Codebase Analysis - launch parallel explore agents\", \"status\": \"pending\", \"priority\": \"high\"},\n {\"id\": \"phase-2\", \"content\": \"PHASE 2: Build Codemap - map dependencies and impact zones\", \"status\": \"pending\", \"priority\": \"high\"},\n {\"id\": \"phase-3\", \"content\": \"PHASE 3: Test Assessment - analyze test coverage and verification strategy\", \"status\": \"pending\", \"priority\": \"high\"},\n {\"id\": \"phase-4\", \"content\": \"PHASE 4: Plan Generation - invoke Plan agent for detailed refactoring plan\", \"status\": \"pending\", \"priority\": \"high\"},\n {\"id\": \"phase-5\", \"content\": \"PHASE 5: Execute Refactoring - step-by-step with continuous verification\", \"status\": \"pending\", \"priority\": \"high\"},\n {\"id\": \"phase-6\", \"content\": \"PHASE 6: Final Verification - full test suite and regression check\", \"status\": \"pending\", \"priority\": \"high\"}\n])\n```\n\n---\n\n# PHASE 1: CODEBASE ANALYSIS (PARALLEL EXPLORATION)\n\n**Mark phase-1 as in_progress.**\n\n## 1.1: Launch Parallel Explore Agents (BACKGROUND)\n\nFire ALL of these simultaneously using `call_omo_agent`:\n\n```\n// Agent 1: Find the refactoring target\ncall_omo_agent(\n subagent_type=\"explore\",\n run_in_background=true,\n prompt=\"Find all occurrences and definitions of [TARGET]. \n Report: file paths, line numbers, usage patterns.\"\n)\n\n// Agent 2: Find related code\ncall_omo_agent(\n subagent_type=\"explore\", \n run_in_background=true,\n prompt=\"Find all code that imports, uses, or depends on [TARGET].\n Report: dependency chains, import graphs.\"\n)\n\n// Agent 3: Find similar patterns\ncall_omo_agent(\n subagent_type=\"explore\",\n run_in_background=true,\n prompt=\"Find similar code patterns to [TARGET] in the codebase.\n Report: analogous implementations, established conventions.\"\n)\n\n// Agent 4: Find tests\ncall_omo_agent(\n subagent_type=\"explore\",\n run_in_background=true,\n prompt=\"Find all test files related to [TARGET].\n Report: test file paths, test case names, coverage indicators.\"\n)\n\n// Agent 5: Architecture context\ncall_omo_agent(\n subagent_type=\"explore\",\n run_in_background=true,\n prompt=\"Find architectural patterns and module organization around [TARGET].\n Report: module boundaries, layer structure, design patterns in use.\"\n)\n```\n\n## 1.2: Direct Tool Exploration (WHILE AGENTS RUN)\n\nWhile background agents are running, use direct tools:\n\n### LSP Tools for Precise Analysis:\n\n```typescript\n// Get symbol information at target location\nlsp_hover(filePath, line, character) // Type info, docs, signatures\n\n// Find definition(s)\nlsp_goto_definition(filePath, line, character) // Where is it defined?\n\n// Find ALL usages across workspace\nlsp_find_references(filePath, line, character, includeDeclaration=true)\n\n// Get file structure\nlsp_document_symbols(filePath) // Hierarchical outline\n\n// Search symbols by name\nlsp_workspace_symbols(filePath, query=\"[target_symbol]\")\n\n// Get current diagnostics\nlsp_diagnostics(filePath) // Errors, warnings before we start\n```\n\n### AST-Grep for Pattern Analysis:\n\n```typescript\n// Find structural patterns\nast_grep_search(\n pattern=\"function 
$NAME($$$) { $$$ }\", // or relevant pattern\n lang=\"typescript\", // or relevant language\n paths=[\"src/\"]\n)\n\n// Preview refactoring (DRY RUN)\nast_grep_replace(\n pattern=\"[old_pattern]\",\n rewrite=\"[new_pattern]\",\n lang=\"[language]\",\n dryRun=true // ALWAYS preview first\n)\n```\n\n### Grep for Text Patterns:\n\n```\ngrep(pattern=\"[search_term]\", path=\"src/\", include=\"*.ts\")\n```\n\n## 1.3: Collect Background Results\n\n```\nbackground_output(task_id=\"[agent_1_id]\")\nbackground_output(task_id=\"[agent_2_id]\")\n...\n```\n\n**Mark phase-1 as completed after all results collected.**\n\n---\n\n# PHASE 2: BUILD CODEMAP (DEPENDENCY MAPPING)\n\n**Mark phase-2 as in_progress.**\n\n## 2.1: Construct Definitive Codemap\n\nBased on Phase 1 results, build:\n\n```\n## CODEMAP: [TARGET]\n\n### Core Files (Direct Impact)\n- `path/to/file.ts:L10-L50` - Primary definition\n- `path/to/file2.ts:L25` - Key usage\n\n### Dependency Graph\n```\n[TARGET] \n\u251C\u2500\u2500 imports from: \n\u2502 \u251C\u2500\u2500 module-a (types)\n\u2502 \u2514\u2500\u2500 module-b (utils)\n\u251C\u2500\u2500 imported by:\n\u2502 \u251C\u2500\u2500 consumer-1.ts\n\u2502 \u251C\u2500\u2500 consumer-2.ts\n\u2502 \u2514\u2500\u2500 consumer-3.ts\n\u2514\u2500\u2500 used by:\n \u251C\u2500\u2500 handler.ts (direct call)\n \u2514\u2500\u2500 service.ts (dependency injection)\n```\n\n### Impact Zones\n| Zone | Risk Level | Files Affected | Test Coverage |\n|------|------------|----------------|---------------|\n| Core | HIGH | 3 files | 85% covered |\n| Consumers | MEDIUM | 8 files | 70% covered |\n| Edge | LOW | 2 files | 50% covered |\n\n### Established Patterns\n- Pattern A: [description] - used in N places\n- Pattern B: [description] - established convention\n```\n\n## 2.2: Identify Refactoring Constraints\n\nBased on codemap:\n- **MUST follow**: [existing patterns identified]\n- **MUST NOT break**: [critical dependencies]\n- **Safe to change**: [isolated code zones]\n- **Requires migration**: [breaking changes impact]\n\n**Mark phase-2 as completed.**\n\n---\n\n# PHASE 3: TEST ASSESSMENT (VERIFICATION STRATEGY)\n\n**Mark phase-3 as in_progress.**\n\n## 3.1: Detect Test Infrastructure\n\n```bash\n# Check for test commands\ncat package.json | jq '.scripts | keys[] | select(test(\"test\"))'\n\n# Or for Python\nls -la pytest.ini pyproject.toml setup.cfg\n\n# Or for Go\nls -la *_test.go\n```\n\n## 3.2: Analyze Test Coverage\n\n```\n// Find all tests related to target\ncall_omo_agent(\n subagent_type=\"explore\",\n run_in_background=false, // Need this synchronously\n prompt=\"Analyze test coverage for [TARGET]:\n 1. Which test files cover this code?\n 2. What test cases exist?\n 3. Are there integration tests?\n 4. What edge cases are tested?\n 5. Estimated coverage percentage?\"\n)\n```\n\n## 3.3: Determine Verification Strategy\n\nBased on test analysis:\n\n| Coverage Level | Strategy |\n|----------------|----------|\n| HIGH (>80%) | Run existing tests after each step |\n| MEDIUM (50-80%) | Run tests + add safety assertions |\n| LOW (<50%) | **PAUSE**: Propose adding tests first |\n| NONE | **BLOCK**: Refuse aggressive refactoring |\n\n**If coverage is LOW or NONE, ask user:**\n\n```\nTest coverage for [TARGET] is [LEVEL].\n\n**Risk Assessment**: Refactoring without adequate tests is dangerous.\n\nOptions:\n1. Add tests first, then refactor (RECOMMENDED)\n2. Proceed with extra caution, manual verification required\n3. 
Abort refactoring\n\nWhich approach do you prefer?\n```\n\n## 3.4: Document Verification Plan\n\n```\n## VERIFICATION PLAN\n\n### Test Commands\n- Unit: `bun test` / `npm test` / `pytest` / etc.\n- Integration: [command if exists]\n- Type check: `tsc --noEmit` / `pyright` / etc.\n\n### Verification Checkpoints\nAfter each refactoring step:\n1. lsp_diagnostics \u2192 zero new errors\n2. Run test command \u2192 all pass\n3. Type check \u2192 clean\n\n### Regression Indicators\n- [Specific test that must pass]\n- [Behavior that must be preserved]\n- [API contract that must not change]\n```\n\n**Mark phase-3 as completed.**\n\n---\n\n# PHASE 4: PLAN GENERATION (PLAN AGENT)\n\n**Mark phase-4 as in_progress.**\n\n## 4.1: Invoke Plan Agent\n\n```\nTask(\n subagent_type=\"plan\",\n prompt=\"Create a detailed refactoring plan:\n\n ## Refactoring Goal\n [User's original request]\n\n ## Codemap (from Phase 2)\n [Insert codemap here]\n\n ## Test Coverage (from Phase 3)\n [Insert verification plan here]\n\n ## Constraints\n - MUST follow existing patterns: [list]\n - MUST NOT break: [critical paths]\n - MUST run tests after each step\n\n ## Requirements\n 1. Break down into atomic refactoring steps\n 2. Each step must be independently verifiable\n 3. Order steps by dependency (what must happen first)\n 4. Specify exact files and line ranges for each step\n 5. Include rollback strategy for each step\n 6. Define commit checkpoints\"\n)\n```\n\n## 4.2: Review and Validate Plan\n\nAfter receiving plan from Plan agent:\n\n1. **Verify completeness**: All identified files addressed?\n2. **Verify safety**: Each step reversible?\n3. **Verify order**: Dependencies respected?\n4. **Verify verification**: Test commands specified?\n\n## 4.3: Register Detailed Todos\n\nConvert Plan agent output into granular todos:\n\n```\nTodoWrite([\n // Each step from the plan becomes a todo\n {\"id\": \"refactor-1\", \"content\": \"Step 1: [description]\", \"status\": \"pending\", \"priority\": \"high\"},\n {\"id\": \"verify-1\", \"content\": \"Verify Step 1: run tests\", \"status\": \"pending\", \"priority\": \"high\"},\n {\"id\": \"refactor-2\", \"content\": \"Step 2: [description]\", \"status\": \"pending\", \"priority\": \"medium\"},\n {\"id\": \"verify-2\", \"content\": \"Verify Step 2: run tests\", \"status\": \"pending\", \"priority\": \"medium\"},\n // ... continue for all steps\n])\n```\n\n**Mark phase-4 as completed.**\n\n---\n\n# PHASE 5: EXECUTE REFACTORING (DETERMINISTIC EXECUTION)\n\n**Mark phase-5 as in_progress.**\n\n## 5.1: Execution Protocol\n\nFor EACH refactoring step:\n\n### Pre-Step\n1. Mark step todo as `in_progress`\n2. Read current file state\n3. Verify lsp_diagnostics is baseline\n\n### Execute Step\nUse appropriate tool:\n\n**For Symbol Renames:**\n```typescript\nlsp_prepare_rename(filePath, line, character) // Validate rename is possible\nlsp_rename(filePath, line, character, newName) // Execute rename\n```\n\n**For Pattern Transformations:**\n```typescript\n// Preview first\nast_grep_replace(pattern, rewrite, lang, dryRun=true)\n\n// If preview looks good, execute\nast_grep_replace(pattern, rewrite, lang, dryRun=false)\n```\n\n**For Structural Changes:**\n```typescript\n// Use Edit tool for precise changes\nedit(filePath, oldString, newString)\n```\n\n### Post-Step Verification (MANDATORY)\n\n```typescript\n// 1. Check diagnostics\nlsp_diagnostics(filePath) // Must be clean or same as baseline\n\n// 2. Run tests\nbash(\"bun test\") // Or appropriate test command\n\n// 3. 
Type check\nbash(\"tsc --noEmit\") // Or appropriate type check\n```\n\n### Step Completion\n1. If verification passes \u2192 Mark step todo as `completed`\n2. If verification fails \u2192 **STOP AND FIX**\n\n## 5.2: Failure Recovery Protocol\n\nIf ANY verification fails:\n\n1. **STOP** immediately\n2. **REVERT** the failed change\n3. **DIAGNOSE** what went wrong\n4. **OPTIONS**:\n - Fix the issue and retry\n - Skip this step (if optional)\n - Consult oracle agent for help\n - Ask user for guidance\n\n**NEVER proceed to next step with broken tests.**\n\n## 5.3: Commit Checkpoints\n\nAfter each logical group of changes:\n\n```bash\ngit add [changed-files]\ngit commit -m \"refactor(scope): description\n\n[details of what was changed and why]\"\n```\n\n**Mark phase-5 as completed when all refactoring steps done.**\n\n---\n\n# PHASE 6: FINAL VERIFICATION (REGRESSION CHECK)\n\n**Mark phase-6 as in_progress.**\n\n## 6.1: Full Test Suite\n\n```bash\n# Run complete test suite\nbun test # or npm test, pytest, go test, etc.\n```\n\n## 6.2: Type Check\n\n```bash\n# Full type check\ntsc --noEmit # or equivalent\n```\n\n## 6.3: Lint Check\n\n```bash\n# Run linter\neslint . # or equivalent\n```\n\n## 6.4: Build Verification (if applicable)\n\n```bash\n# Ensure build still works\nbun run build # or npm run build, etc.\n```\n\n## 6.5: Final Diagnostics\n\n```typescript\n// Check all changed files\nfor (file of changedFiles) {\n lsp_diagnostics(file) // Must all be clean\n}\n```\n\n## 6.6: Generate Summary\n\n```markdown\n## Refactoring Complete\n\n### What Changed\n- [List of changes made]\n\n### Files Modified\n- `path/to/file.ts` - [what changed]\n- `path/to/file2.ts` - [what changed]\n\n### Verification Results\n- Tests: PASSED (X/Y passing)\n- Type Check: CLEAN\n- Lint: CLEAN\n- Build: SUCCESS\n\n### No Regressions Detected\nAll existing tests pass. No new errors introduced.\n```\n\n**Mark phase-6 as completed.**\n\n---\n\n# CRITICAL RULES\n\n## NEVER DO\n- Skip lsp_diagnostics check after changes\n- Proceed with failing tests\n- Make changes without understanding impact\n- Use `as any`, `@ts-ignore`, `@ts-expect-error`\n- Delete tests to make them pass\n- Commit broken code\n- Refactor without understanding existing patterns\n\n## ALWAYS DO\n- Understand before changing\n- Preview before applying (ast_grep dryRun=true)\n- Verify after every change\n- Follow existing codebase patterns\n- Keep todos updated in real-time\n- Commit at logical checkpoints\n- Report issues immediately\n\n## ABORT CONDITIONS\nIf any of these occur, **STOP and consult user**:\n- Test coverage is zero for target code\n- Changes would break public API\n- Refactoring scope is unclear\n- 3 consecutive verification failures\n- User-defined constraints violated\n\n---\n\n# Tool Usage Philosophy\n\nYou already know these tools. Use them intelligently:\n\n## LSP Tools\nLeverage the full LSP toolset (`lsp_*`) for precision analysis. 
Key patterns:\n- **Understand before changing**: `lsp_hover`, `lsp_goto_definition` to grasp context\n- **Impact analysis**: `lsp_find_references` to map all usages before modification\n- **Safe refactoring**: `lsp_prepare_rename` \u2192 `lsp_rename` for symbol renames\n- **Continuous verification**: `lsp_diagnostics` after every change\n\n## AST-Grep\nUse `ast_grep_search` and `ast_grep_replace` for structural transformations.\n**Critical**: Always `dryRun=true` first, review, then execute.\n\n## Agents\n- `explore`: Parallel codebase pattern discovery\n- `plan`: Detailed refactoring plan generation\n- `oracle`: Read-only consultation for complex architectural decisions and debugging\n- `librarian`: **Use proactively** when encountering deprecated methods or library migration tasks. Query official docs and OSS examples for modern replacements.\n\n## Deprecated Code & Library Migration\nWhen you encounter deprecated methods/APIs during refactoring:\n1. Fire `librarian` to find the recommended modern alternative\n2. **DO NOT auto-upgrade to latest version** unless user explicitly requests migration\n3. If user requests library migration, use `librarian` to fetch latest API docs before making changes\n\n---\n\n**Remember: Refactoring without tests is reckless. Refactoring without understanding is destructive. This command ensures you do neither.**\n\n<user-request>\n$ARGUMENTS\n</user-request>\n";
1
+ export declare const REFACTOR_TEMPLATE = "# Intelligent Refactor Command\n\n## Usage\n```\n/refactor <refactoring-target> [--scope=<file|module|project>] [--strategy=<safe|aggressive>]\n\nArguments:\n refactoring-target: What to refactor. Can be:\n - File path: src/auth/handler.ts\n - Symbol name: \"AuthService class\"\n - Pattern: \"all functions using deprecated API\"\n - Description: \"extract validation logic into a separate module\"\n\nOptions:\n --scope: Refactoring scope (default: module)\n - file: Single file only\n - module: Module/directory scope\n - project: Entire codebase\n\n --strategy: Risk tolerance (default: safe)\n - safe: Conservative, maximum test coverage required\n - aggressive: Allow broader changes with adequate coverage\n```
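\n\nA typical invocation might look like this (the target and flags are purely illustrative):\n\n```\n/refactor \"extract validation logic from src/auth/handler.ts into a separate module\" --scope=module --strategy=safe\n```\n\n## What This Command Does\n\nPerforms intelligent, deterministic refactoring with full codebase awareness. Unlike blind search-and-replace, this command:\n\n1. **Understands your intent** - Analyzes what you actually want to achieve\n2. **Maps the codebase** - Builds a definitive codemap before touching anything\n3. **Assesses risk** - Evaluates test coverage and determines verification strategy\n4. **Plans meticulously** - Creates a detailed plan with the Plan agent\n5. **Executes precisely** - Step-by-step refactoring with LSP and AST-grep\n6. **Verifies constantly** - Runs tests after each change to ensure zero regression\n\n---\n\n# PHASE 0: INTENT GATE (MANDATORY FIRST STEP)\n\n**BEFORE ANY ACTION, classify and validate the request.**\n\n## Step 0.1: Parse Request Type\n\n| Signal | Classification | Action |\n|--------|----------------|--------|\n| Specific file/symbol | Explicit | Proceed to codebase analysis |\n| \"Refactor X to Y\" | Clear transformation | Proceed to codebase analysis |\n| \"Improve\", \"Clean up\" | Open-ended | **MUST ask**: \"What specific improvement?\" |\n| Ambiguous scope | Uncertain | **MUST ask**: \"Which modules/files?\" |\n| Missing context | Incomplete | **MUST ask**: \"What's the desired outcome?\" |\n\n## Step 0.2: Validate Understanding\n\nBefore proceeding, confirm:\n- [ ] Target is clearly identified\n- [ ] Desired outcome is understood\n- [ ] Scope is defined (file/module/project)\n- [ ] Success criteria can be articulated\n\n**If ANY of the above is unclear, ASK A CLARIFYING QUESTION:**\n\n```\nI want to make sure I understand the refactoring goal correctly.\n\n**What I understood**: [interpretation]\n**What I'm unsure about**: [specific ambiguity]\n\nOptions I see:\n1. [Option A] - [implications]\n2. 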
[Option B] - [implications]\n\n**My recommendation**: [suggestion with reasoning]\n\nShould I proceed with [recommendation], or would you prefer differently?\n```\n\n## Step 0.3: Create Initial Todos\n\n**IMMEDIATELY after understanding the request, create todos:**\n\n```\nTodoWrite([\n {\"id\": \"phase-1\", \"content\": \"PHASE 1: Codebase Analysis - launch parallel explore agents\", \"status\": \"pending\", \"priority\": \"high\"},\n {\"id\": \"phase-2\", \"content\": \"PHASE 2: Build Codemap - map dependencies and impact zones\", \"status\": \"pending\", \"priority\": \"high\"},\n {\"id\": \"phase-3\", \"content\": \"PHASE 3: Test Assessment - analyze test coverage and verification strategy\", \"status\": \"pending\", \"priority\": \"high\"},\n {\"id\": \"phase-4\", \"content\": \"PHASE 4: Plan Generation - invoke Plan agent for detailed refactoring plan\", \"status\": \"pending\", \"priority\": \"high\"},\n {\"id\": \"phase-5\", \"content\": \"PHASE 5: Execute Refactoring - step-by-step with continuous verification\", \"status\": \"pending\", \"priority\": \"high\"},\n {\"id\": \"phase-6\", \"content\": \"PHASE 6: Final Verification - full test suite and regression check\", \"status\": \"pending\", \"priority\": \"high\"}\n])\n```\n\n---\n\n# PHASE 1: CODEBASE ANALYSIS (PARALLEL EXPLORATION)\n\n**Mark phase-1 as in_progress.**\n\n## 1.1: Launch Parallel Explore Agents (BACKGROUND)\n\nFire ALL of these simultaneously using `call_omo_agent`:\n\n```\n// Agent 1: Find the refactoring target\ncall_omo_agent(\n subagent_type=\"explore\",\n run_in_background=true,\n prompt=\"Find all occurrences and definitions of [TARGET]. \n Report: file paths, line numbers, usage patterns.\"\n)\n\n// Agent 2: Find related code\ncall_omo_agent(\n subagent_type=\"explore\", \n run_in_background=true,\n prompt=\"Find all code that imports, uses, or depends on [TARGET].\n Report: dependency chains, import graphs.\"\n)\n\n// Agent 3: Find similar patterns\ncall_omo_agent(\n subagent_type=\"explore\",\n run_in_background=true,\n prompt=\"Find similar code patterns to [TARGET] in the codebase.\n Report: analogous implementations, established conventions.\"\n)\n\n// Agent 4: Find tests\ncall_omo_agent(\n subagent_type=\"explore\",\n run_in_background=true,\n prompt=\"Find all test files related to [TARGET].\n Report: test file paths, test case names, coverage indicators.\"\n)\n\n// Agent 5: Architecture context\ncall_omo_agent(\n subagent_type=\"explore\",\n run_in_background=true,\n prompt=\"Find architectural patterns and module organization around [TARGET].\n Report: module boundaries, layer structure, design patterns in use.\"\n)\n```\n\n## 1.2: Direct Tool Exploration (WHILE AGENTS RUN)\n\nWhile background agents are running, use direct tools:\n\n### LSP Tools for Precise Analysis:\n\n```typescript\n// Find definition(s)\nlsp_goto_definition(filePath, line, character) // Where is it defined?\n\n// Find ALL usages across workspace\nlsp_find_references(filePath, line, character, includeDeclaration=true)\n\n// Get file structure (scope='document') or search symbols (scope='workspace')\nlsp_symbols(filePath, scope=\"document\") // Hierarchical outline\nlsp_symbols(filePath, scope=\"workspace\", query=\"[target_symbol]\") // Search by name\n\n// Get current diagnostics\nlsp_diagnostics(filePath) // Errors, warnings before we start\n```
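\n\nOne way to make the later \"baseline\" checks concrete (a sketch; it assumes the call returns the current diagnostics list, and the file path is illustrative):\n\n```typescript\n// Snapshot the pre-refactor diagnostics once, up front\nconst baseline = lsp_diagnostics(\"src/auth/handler.ts\")\n// After every subsequent step, re-run lsp_diagnostics and compare against\n// this snapshot: any diagnostic not already in the baseline is a regression.\n```\n\n### AST-Grep for Pattern Analysis:\n\n```typescript\n// Find structural patterns\nast_grep_search(\n pattern=\"function $NAME($$$) { $$$ }\", // or relevant pattern\n 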
lang=\"typescript\", // or relevant language\n paths=[\"src/\"]\n)\n\n// Preview refactoring (DRY RUN)\nast_grep_replace(\n pattern=\"[old_pattern]\",\n rewrite=\"[new_pattern]\",\n lang=\"[language]\",\n dryRun=true // ALWAYS preview first\n)\n```\n\n### Grep for Text Patterns:\n\n```\ngrep(pattern=\"[search_term]\", path=\"src/\", include=\"*.ts\")\n```\n\n## 1.3: Collect Background Results\n\n```\nbackground_output(task_id=\"[agent_1_id]\")\nbackground_output(task_id=\"[agent_2_id]\")\n...\n```\n\n**Mark phase-1 as completed after all results collected.**\n\n---\n\n# PHASE 2: BUILD CODEMAP (DEPENDENCY MAPPING)\n\n**Mark phase-2 as in_progress.**\n\n## 2.1: Construct Definitive Codemap\n\nBased on Phase 1 results, build:\n\n```\n## CODEMAP: [TARGET]\n\n### Core Files (Direct Impact)\n- `path/to/file.ts:L10-L50` - Primary definition\n- `path/to/file2.ts:L25` - Key usage\n\n### Dependency Graph\n```\n[TARGET] \n\u251C\u2500\u2500 imports from: \n\u2502 \u251C\u2500\u2500 module-a (types)\n\u2502 \u2514\u2500\u2500 module-b (utils)\n\u251C\u2500\u2500 imported by:\n\u2502 \u251C\u2500\u2500 consumer-1.ts\n\u2502 \u251C\u2500\u2500 consumer-2.ts\n\u2502 \u2514\u2500\u2500 consumer-3.ts\n\u2514\u2500\u2500 used by:\n \u251C\u2500\u2500 handler.ts (direct call)\n \u2514\u2500\u2500 service.ts (dependency injection)\n```\n\n### Impact Zones\n| Zone | Risk Level | Files Affected | Test Coverage |\n|------|------------|----------------|---------------|\n| Core | HIGH | 3 files | 85% covered |\n| Consumers | MEDIUM | 8 files | 70% covered |\n| Edge | LOW | 2 files | 50% covered |\n\n### Established Patterns\n- Pattern A: [description] - used in N places\n- Pattern B: [description] - established convention\n```\n\n## 2.2: Identify Refactoring Constraints\n\nBased on codemap:\n- **MUST follow**: [existing patterns identified]\n- **MUST NOT break**: [critical dependencies]\n- **Safe to change**: [isolated code zones]\n- **Requires migration**: [breaking changes impact]\n\n**Mark phase-2 as completed.**\n\n---\n\n# PHASE 3: TEST ASSESSMENT (VERIFICATION STRATEGY)\n\n**Mark phase-3 as in_progress.**\n\n## 3.1: Detect Test Infrastructure\n\n```bash\n# Check for test commands\ncat package.json | jq '.scripts | keys[] | select(test(\"test\"))'\n\n# Or for Python\nls -la pytest.ini pyproject.toml setup.cfg\n\n# Or for Go\nls -la *_test.go\n```\n\n## 3.2: Analyze Test Coverage\n\n```\n// Find all tests related to target\ncall_omo_agent(\n subagent_type=\"explore\",\n run_in_background=false, // Need this synchronously\n prompt=\"Analyze test coverage for [TARGET]:\n 1. Which test files cover this code?\n 2. What test cases exist?\n 3. Are there integration tests?\n 4. What edge cases are tested?\n 5. Estimated coverage percentage?\"\n)\n```\n\n## 3.3: Determine Verification Strategy\n\nBased on test analysis:\n\n| Coverage Level | Strategy |\n|----------------|----------|\n| HIGH (>80%) | Run existing tests after each step |\n| MEDIUM (50-80%) | Run tests + add safety assertions |\n| LOW (<50%) | **PAUSE**: Propose adding tests first |\n| NONE | **BLOCK**: Refuse aggressive refactoring |\n\n**If coverage is LOW or NONE, ask user:**\n\n```\nTest coverage for [TARGET] is [LEVEL].\n\n**Risk Assessment**: Refactoring without adequate tests is dangerous.\n\nOptions:\n1. Add tests first, then refactor (RECOMMENDED)\n2. Proceed with extra caution, manual verification required\n3. 
Abort refactoring\n\nWhich approach do you prefer?\n```\n\n## 3.4: Document Verification Plan\n\n```\n## VERIFICATION PLAN\n\n### Test Commands\n- Unit: `bun test` / `npm test` / `pytest` / etc.\n- Integration: [command, if one exists]\n- Type check: `tsc --noEmit` / `pyright` / etc.\n\n### Verification Checkpoints\nAfter each refactoring step:\n1. lsp_diagnostics \u2192 zero new errors\n2. Run test command \u2192 all pass\n3. Type check \u2192 clean\n\n### Regression Indicators\n- [Specific test that must pass]\n- [Behavior that must be preserved]\n- [API contract that must not change]\n```\n\n**Mark phase-3 as completed.**\n\n---\n\n# PHASE 4: PLAN GENERATION (PLAN AGENT)\n\n**Mark phase-4 as in_progress.**\n\n## 4.1: Invoke Plan Agent\n\n```\nTask(\n subagent_type=\"plan\",\n prompt=\"Create a detailed refactoring plan:\n\n ## Refactoring Goal\n [User's original request]\n\n ## Codemap (from Phase 2)\n [Insert codemap here]\n\n ## Test Coverage (from Phase 3)\n [Insert verification plan here]\n\n ## Constraints\n - MUST follow existing patterns: [list]\n - MUST NOT break: [critical paths]\n - MUST run tests after each step\n\n ## Requirements\n 1. Break down into atomic refactoring steps\n 2. Each step must be independently verifiable\n 3. Order steps by dependency (what must happen first)\n 4. Specify exact files and line ranges for each step\n 5. Include rollback strategy for each step\n 6. Define commit checkpoints\"\n)\n```\n\n## 4.2: Review and Validate Plan\n\nAfter receiving the plan from the Plan agent:\n\n1. **Verify completeness**: All identified files addressed?\n2. **Verify safety**: Each step reversible?\n3. **Verify order**: Dependencies respected?\n4. **Verify testability**: Test commands specified?\n\n## 4.3: Register Detailed Todos\n\nConvert the Plan agent's output into granular todos:\n\n```\nTodoWrite([\n // Each step from the plan becomes a todo\n {\"id\": \"refactor-1\", \"content\": \"Step 1: [description]\", \"status\": \"pending\", \"priority\": \"high\"},\n {\"id\": \"verify-1\", \"content\": \"Verify Step 1: run tests\", \"status\": \"pending\", \"priority\": \"high\"},\n {\"id\": \"refactor-2\", \"content\": \"Step 2: [description]\", \"status\": \"pending\", \"priority\": \"medium\"},\n {\"id\": \"verify-2\", \"content\": \"Verify Step 2: run tests\", \"status\": \"pending\", \"priority\": \"medium\"},\n // ... continue for all steps\n])\n```\n\n**Mark phase-4 as completed.**\n\n---\n\n# PHASE 5: EXECUTE REFACTORING (DETERMINISTIC EXECUTION)\n\n**Mark phase-5 as in_progress.**\n\n## 5.1: Execution Protocol\n\nFor EACH refactoring step:\n\n### Pre-Step\n1. Mark the step's todo as `in_progress`\n2. Read the current file state\n3. Verify lsp_diagnostics matches the baseline\n\n### Execute Step\nUse the appropriate tool:\n\n**For Symbol Renames:**\n```typescript\nlsp_prepare_rename(filePath, line, character) // Validate rename is possible\nlsp_rename(filePath, line, character, newName) // Execute rename\n```
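\n\nA concrete call might look like this (the path and cursor position are hypothetical):\n\n```typescript\n// Rename the symbol at src/api/user.ts, line 42, column 17\nlsp_prepare_rename(\"src/api/user.ts\", 42, 17) // confirm a rename is valid here\nlsp_rename(\"src/api/user.ts\", 42, 17, \"fetchUserProfile\") // apply it workspace-wide\n```\n\n**For Pattern Transformations:**\n```typescript\n// Preview first\nast_grep_replace(pattern, rewrite, lang, dryRun=true)\n\n// If the preview looks good, execute\nast_grep_replace(pattern, rewrite, lang, dryRun=false)\n```\n\n**For Structural Changes:**\n```typescript\n// Use the Edit tool for precise changes\nedit(filePath, oldString, newString)\n```\n\n### Post-Step Verification (MANDATORY)\n\n```typescript\n// 1. Check diagnostics\nlsp_diagnostics(filePath) // Must be clean or same as baseline\n\n// 2. Run tests\nbash(\"bun test\") // Or appropriate test command\n\n// 3. 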
Type check\nbash(\"tsc --noEmit\") // Or appropriate type check\n```\n\n### Step Completion\n1. If verification passes \u2192 Mark the step's todo as `completed`\n2. If verification fails \u2192 **STOP AND FIX**\n\n## 5.2: Failure Recovery Protocol\n\nIf ANY verification fails:\n\n1. **STOP** immediately\n2. **REVERT** the failed change (see the sketch below)\n3. **DIAGNOSE** what went wrong\n4. **OPTIONS**:\n - Fix the issue and retry\n - Skip this step (if optional)\n - Consult the oracle agent for help\n - Ask the user for guidance\n\n**NEVER proceed to the next step with broken tests.**
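\n\nA minimal revert sketch, assuming git and an uncommitted change (the file path is illustrative; adapt to the actual VCS state):\n\n```bash\n# Throw away the failed edit in the affected file(s)\ngit restore path/to/changed-file.ts\n\n# If the step had already been committed, back it out instead\ngit revert HEAD\n```\n\n## 5.3: Commit Checkpoints\n\nAfter each logical group of changes:\n\n```bash\ngit add [changed-files]\ngit commit -m \"refactor(scope): description\n\n[details of what was changed and why]\"\n```\n\n**Mark phase-5 as completed when all refactoring steps are done.**\n\n---\n\n# PHASE 6: FINAL VERIFICATION (REGRESSION CHECK)\n\n**Mark phase-6 as in_progress.**\n\n## 6.1: Full Test Suite\n\n```bash\n# Run the complete test suite\nbun test # or npm test, pytest, go test, etc.\n```\n\n## 6.2: Type Check\n\n```bash\n# Full type check\ntsc --noEmit # or equivalent\n```\n\n## 6.3: Lint Check\n\n```bash\n# Run the linter\neslint . # or equivalent\n```\n\n## 6.4: Build Verification (if applicable)\n\n```bash\n# Ensure the build still works\nbun run build # or npm run build, etc.\n```\n\n## 6.5: Final Diagnostics\n\n```typescript\n// Check all changed files\nfor (const file of changedFiles) {\n lsp_diagnostics(file) // Must all be clean\n}\n```\n\n## 6.6: Generate Summary\n\n```markdown\n## Refactoring Complete\n\n### What Changed\n- [List of changes made]\n\n### Files Modified\n- `path/to/file.ts` - [what changed]\n- `path/to/file2.ts` - [what changed]\n\n### Verification Results\n- Tests: PASSED (X/Y passing)\n- Type Check: CLEAN\n- Lint: CLEAN\n- Build: SUCCESS\n\n### No Regressions Detected\nAll existing tests pass. No new errors introduced.\n```\n\n**Mark phase-6 as completed.**\n\n---\n\n# CRITICAL RULES\n\n## NEVER DO\n- Skip the lsp_diagnostics check after changes\n- Proceed with failing tests\n- Make changes without understanding impact\n- Use `as any`, `@ts-ignore`, `@ts-expect-error`\n- Delete tests to make them pass\n- Commit broken code\n- Refactor without understanding existing patterns\n\n## ALWAYS DO\n- Understand before changing\n- Preview before applying (ast_grep dryRun=true)\n- Verify after every change\n- Follow existing codebase patterns\n- Keep todos updated in real time\n- Commit at logical checkpoints\n- Report issues immediately\n\n## ABORT CONDITIONS\nIf any of these occur, **STOP and consult the user**:\n- Test coverage is zero for the target code\n- Changes would break the public API\n- The refactoring scope is unclear\n- 3 consecutive verification failures\n- User-defined constraints are violated\n\n---\n\n# Tool Usage Philosophy\n\nYou already know these tools. Use them intelligently:\n\n## LSP Tools\nLeverage the full LSP toolset (`lsp_*`) for precise analysis. 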
Key patterns:\n- **Understand before changing**: `lsp_goto_definition` to grasp context\n- **Impact analysis**: `lsp_find_references` to map all usages before modification\n- **Safe refactoring**: `lsp_prepare_rename` \u2192 `lsp_rename` for symbol renames\n- **Continuous verification**: `lsp_diagnostics` after every change\n\n## AST-Grep\nUse `ast_grep_search` and `ast_grep_replace` for structural transformations.\n**Critical**: Always `dryRun=true` first, review, then execute.\n\n## Agents\n- `explore`: Parallel codebase pattern discovery\n- `plan`: Detailed refactoring plan generation\n- `oracle`: Read-only consultation for complex architectural decisions and debugging\n- `librarian`: **Use proactively** when encountering deprecated methods or library migration tasks. Query official docs and OSS examples for modern replacements.\n\n## Deprecated Code & Library Migration\nWhen you encounter deprecated methods/APIs during refactoring:\n1. Fire `librarian` to find the recommended modern alternative (see the sketch below)\n2. **DO NOT auto-upgrade to the latest version** unless the user explicitly requests a migration\n3. If the user requests a library migration, use `librarian` to fetch the latest API docs before making changes
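\n\nSuch a query might look like the following (the call shape mirrors the explore examples above; the deprecated API is a placeholder):\n\n```\ncall_omo_agent(\n subagent_type=\"librarian\",\n run_in_background=false,\n prompt=\"Our codebase still uses [DEPRECATED_API]. What is the officially\n recommended replacement, and what does a minimal migration look like?\"\n)\n```\n\n---\n\n**Remember: Refactoring without tests is reckless. Refactoring without understanding is destructive. This command ensures you do neither.**\n\n<user-request>\n$ARGUMENTS\n</user-request>\n";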