@danya-ai/cli 0.1.2 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (134) hide show
  1. package/README.md +125 -105
  2. package/dist/REPL-YFM3WKCC.js +42 -0
  3. package/dist/{acp-OF52BP4A.js → acp-LMW4BO5A.js} +26 -26
  4. package/dist/{agentsValidate-WUX5I4FS.js → agentsValidate-5VUACF2G.js} +7 -7
  5. package/dist/{ask-5P2J7UA5.js → ask-C57WQJNZ.js} +25 -25
  6. package/dist/{autoUpdater-6ABURQZA.js → autoUpdater-KEQOIUBC.js} +3 -3
  7. package/dist/{chunk-6KD43S3G.js → chunk-2CRLMZ76.js} +4 -4
  8. package/dist/{chunk-BAYPSZHG.js → chunk-3A4ENL7W.js} +1 -1
  9. package/dist/{chunk-VMJRNHDU.js → chunk-3ONZAVOS.js} +2 -2
  10. package/dist/{chunk-PTQTKIR2.js → chunk-5M3MBCE7.js} +1 -1
  11. package/dist/{chunk-PTQTKIR2.js.map → chunk-5M3MBCE7.js.map} +1 -1
  12. package/dist/{chunk-QJMLHIGS.js → chunk-5TDBDWNG.js} +2 -2
  13. package/dist/{chunk-T6RTYOJB.js → chunk-6JHEJQWY.js} +3 -3
  14. package/dist/{chunk-ESHU3HUM.js → chunk-6LOREEJY.js} +2551 -252
  15. package/dist/chunk-6LOREEJY.js.map +7 -0
  16. package/dist/{chunk-RO73O3Q7.js → chunk-CXOM4XMN.js} +4 -4
  17. package/dist/{chunk-N74L4GAM.js → chunk-DZCV2FEW.js} +1 -1
  18. package/dist/{chunk-SSS2WVMA.js → chunk-E5BAXZSR.js} +1 -1
  19. package/dist/{chunk-KS52NNBY.js → chunk-HIH5HC5H.js} +2 -2
  20. package/dist/{chunk-2VUDETSP.js → chunk-HPSW7NNI.js} +1 -1
  21. package/dist/{chunk-6GABS3DM.js → chunk-HXH5LYLI.js} +1 -1
  22. package/dist/{chunk-6IH7H2LH.js → chunk-IZETEFF5.js} +3 -3
  23. package/dist/{chunk-H3P32G2A.js → chunk-K7QXXI4A.js} +3 -3
  24. package/dist/{chunk-JF5D7ADP.js → chunk-LHNX67NO.js} +3 -3
  25. package/dist/{chunk-CZ5UJ3RL.js → chunk-NMNFFCQ7.js} +1 -1
  26. package/dist/{chunk-BTAVLAZT.js → chunk-RRPXM25U.js} +3 -3
  27. package/dist/{chunk-3GLLMJKA.js → chunk-TWE6H65Q.js} +3 -3
  28. package/dist/{chunk-HNK7M2ZO.js → chunk-UNIJZL2G.js} +1 -1
  29. package/dist/{chunk-7I3UELIX.js → chunk-X36NKBPR.js} +2 -2
  30. package/dist/{chunk-5ONWVNJH.js → chunk-X46SRZQF.js} +1 -1
  31. package/dist/{chunk-HRXRIW33.js → chunk-X7ZDT7EX.js} +2 -2
  32. package/dist/{chunk-JNAMM7A6.js → chunk-XLA6ANZN.js} +11 -11
  33. package/dist/{chunk-M75PDOOM.js → chunk-Y5IRVMDD.js} +3 -3
  34. package/dist/{chunk-QUAOJLG6.js → chunk-YUJ45IMI.js} +1 -1
  35. package/dist/{chunk-U7ZJW3CQ.js → chunk-Z4QNIOFF.js} +2 -2
  36. package/dist/{cli-FE6CUZI3.js → cli-P4CJSCH4.js} +131 -84
  37. package/dist/cli-P4CJSCH4.js.map +7 -0
  38. package/dist/commands-Y7WI2LGN.js +46 -0
  39. package/dist/{config-JEGZHGSM.js → config-5L37WETO.js} +4 -4
  40. package/dist/{context-LEXLLT4O.js → context-SF3X335Q.js} +5 -5
  41. package/dist/{customCommands-RLUKKBRZ.js → customCommands-SPF7HJEH.js} +4 -4
  42. package/dist/{env-4PRTNVDJ.js → env-GR5OKVVR.js} +2 -2
  43. package/dist/index.js +3 -3
  44. package/dist/{kodeAgentSessionLoad-LUKUH23S.js → kodeAgentSessionLoad-L54J2WSU.js} +4 -4
  45. package/dist/{kodeAgentSessionResume-FDZ6H3PX.js → kodeAgentSessionResume-4S4ZW7WI.js} +4 -4
  46. package/dist/{kodeAgentStreamJsonSession-VE2UG5B2.js → kodeAgentStreamJsonSession-44EO542F.js} +1 -1
  47. package/dist/{kodeHooks-ZVNRQTZM.js → kodeHooks-G64RN6MW.js} +4 -4
  48. package/dist/{llm-IFU62ZT4.js → llm-23Z6UDED.js} +26 -26
  49. package/dist/{llmLazy-45EGHX2A.js → llmLazy-PKBZHKOG.js} +1 -1
  50. package/dist/{loader-TITFI6LS.js → loader-BXIFWM6O.js} +4 -4
  51. package/dist/{mcp-DE5HUK63.js → mcp-CYG2WZG5.js} +7 -7
  52. package/dist/{mentionProcessor-SAPYBDCK.js → mentionProcessor-XQ2HNO4M.js} +5 -5
  53. package/dist/{messages-BARXRPB4.js → messages-YOW6NTUG.js} +1 -1
  54. package/dist/{model-PC6MMS2S.js → model-NIOLLP6W.js} +5 -5
  55. package/dist/{openai-AVJO73FG.js → openai-G2AMXRJ2.js} +5 -5
  56. package/dist/{outputStyles-VL7EKGIQ.js → outputStyles-CZDXBWRF.js} +4 -4
  57. package/dist/{pluginRuntime-IG2H3W7C.js → pluginRuntime-ZIT4IL6O.js} +6 -6
  58. package/dist/{pluginValidation-TOBMLJ5A.js → pluginValidation-LO7TNL4T.js} +6 -6
  59. package/dist/prompts-MKPN6WZF.js +50 -0
  60. package/dist/query-MSMRQ2ET.js +50 -0
  61. package/dist/{ripgrep-HID6XW3J.js → ripgrep-XSFDNITT.js} +3 -3
  62. package/dist/{skillMarketplace-YBS7NR7Q.js → skillMarketplace-NX6XZDT4.js} +3 -3
  63. package/dist/{state-5OBXGJX6.js → state-YAYMHZAZ.js} +2 -2
  64. package/dist/{theme-U2POECGP.js → theme-RATH22A4.js} +5 -5
  65. package/dist/{toolPermissionSettings-AYX5MEOW.js → toolPermissionSettings-SFS4Z63J.js} +6 -6
  66. package/dist/tools-2J2DNXWI.js +47 -0
  67. package/dist/{userInput-RXNBLFAH.js → userInput-EQAF4OWN.js} +27 -27
  68. package/package.json +1 -1
  69. package/dist/REPL-URLRZ4T4.js +0 -42
  70. package/dist/chunk-ESHU3HUM.js.map +0 -7
  71. package/dist/cli-FE6CUZI3.js.map +0 -7
  72. package/dist/commands-6DP3QW2F.js +0 -46
  73. package/dist/prompts-JGBWXN2Z.js +0 -50
  74. package/dist/query-4DUM4QP7.js +0 -50
  75. package/dist/tools-JL434UMR.js +0 -47
  76. /package/dist/{REPL-URLRZ4T4.js.map → REPL-YFM3WKCC.js.map} +0 -0
  77. /package/dist/{acp-OF52BP4A.js.map → acp-LMW4BO5A.js.map} +0 -0
  78. /package/dist/{agentsValidate-WUX5I4FS.js.map → agentsValidate-5VUACF2G.js.map} +0 -0
  79. /package/dist/{ask-5P2J7UA5.js.map → ask-C57WQJNZ.js.map} +0 -0
  80. /package/dist/{autoUpdater-6ABURQZA.js.map → autoUpdater-KEQOIUBC.js.map} +0 -0
  81. /package/dist/{chunk-6KD43S3G.js.map → chunk-2CRLMZ76.js.map} +0 -0
  82. /package/dist/{chunk-BAYPSZHG.js.map → chunk-3A4ENL7W.js.map} +0 -0
  83. /package/dist/{chunk-VMJRNHDU.js.map → chunk-3ONZAVOS.js.map} +0 -0
  84. /package/dist/{chunk-QJMLHIGS.js.map → chunk-5TDBDWNG.js.map} +0 -0
  85. /package/dist/{chunk-T6RTYOJB.js.map → chunk-6JHEJQWY.js.map} +0 -0
  86. /package/dist/{chunk-RO73O3Q7.js.map → chunk-CXOM4XMN.js.map} +0 -0
  87. /package/dist/{chunk-N74L4GAM.js.map → chunk-DZCV2FEW.js.map} +0 -0
  88. /package/dist/{chunk-SSS2WVMA.js.map → chunk-E5BAXZSR.js.map} +0 -0
  89. /package/dist/{chunk-KS52NNBY.js.map → chunk-HIH5HC5H.js.map} +0 -0
  90. /package/dist/{chunk-2VUDETSP.js.map → chunk-HPSW7NNI.js.map} +0 -0
  91. /package/dist/{chunk-6GABS3DM.js.map → chunk-HXH5LYLI.js.map} +0 -0
  92. /package/dist/{chunk-6IH7H2LH.js.map → chunk-IZETEFF5.js.map} +0 -0
  93. /package/dist/{chunk-H3P32G2A.js.map → chunk-K7QXXI4A.js.map} +0 -0
  94. /package/dist/{chunk-JF5D7ADP.js.map → chunk-LHNX67NO.js.map} +0 -0
  95. /package/dist/{chunk-CZ5UJ3RL.js.map → chunk-NMNFFCQ7.js.map} +0 -0
  96. /package/dist/{chunk-BTAVLAZT.js.map → chunk-RRPXM25U.js.map} +0 -0
  97. /package/dist/{chunk-3GLLMJKA.js.map → chunk-TWE6H65Q.js.map} +0 -0
  98. /package/dist/{chunk-HNK7M2ZO.js.map → chunk-UNIJZL2G.js.map} +0 -0
  99. /package/dist/{chunk-7I3UELIX.js.map → chunk-X36NKBPR.js.map} +0 -0
  100. /package/dist/{chunk-5ONWVNJH.js.map → chunk-X46SRZQF.js.map} +0 -0
  101. /package/dist/{chunk-HRXRIW33.js.map → chunk-X7ZDT7EX.js.map} +0 -0
  102. /package/dist/{chunk-JNAMM7A6.js.map → chunk-XLA6ANZN.js.map} +0 -0
  103. /package/dist/{chunk-M75PDOOM.js.map → chunk-Y5IRVMDD.js.map} +0 -0
  104. /package/dist/{chunk-QUAOJLG6.js.map → chunk-YUJ45IMI.js.map} +0 -0
  105. /package/dist/{chunk-U7ZJW3CQ.js.map → chunk-Z4QNIOFF.js.map} +0 -0
  106. /package/dist/{commands-6DP3QW2F.js.map → commands-Y7WI2LGN.js.map} +0 -0
  107. /package/dist/{config-JEGZHGSM.js.map → config-5L37WETO.js.map} +0 -0
  108. /package/dist/{context-LEXLLT4O.js.map → context-SF3X335Q.js.map} +0 -0
  109. /package/dist/{customCommands-RLUKKBRZ.js.map → customCommands-SPF7HJEH.js.map} +0 -0
  110. /package/dist/{env-4PRTNVDJ.js.map → env-GR5OKVVR.js.map} +0 -0
  111. /package/dist/{kodeAgentSessionLoad-LUKUH23S.js.map → kodeAgentSessionLoad-L54J2WSU.js.map} +0 -0
  112. /package/dist/{kodeAgentSessionResume-FDZ6H3PX.js.map → kodeAgentSessionResume-4S4ZW7WI.js.map} +0 -0
  113. /package/dist/{kodeAgentStreamJsonSession-VE2UG5B2.js.map → kodeAgentStreamJsonSession-44EO542F.js.map} +0 -0
  114. /package/dist/{kodeHooks-ZVNRQTZM.js.map → kodeHooks-G64RN6MW.js.map} +0 -0
  115. /package/dist/{llm-IFU62ZT4.js.map → llm-23Z6UDED.js.map} +0 -0
  116. /package/dist/{llmLazy-45EGHX2A.js.map → llmLazy-PKBZHKOG.js.map} +0 -0
  117. /package/dist/{loader-TITFI6LS.js.map → loader-BXIFWM6O.js.map} +0 -0
  118. /package/dist/{mcp-DE5HUK63.js.map → mcp-CYG2WZG5.js.map} +0 -0
  119. /package/dist/{mentionProcessor-SAPYBDCK.js.map → mentionProcessor-XQ2HNO4M.js.map} +0 -0
  120. /package/dist/{messages-BARXRPB4.js.map → messages-YOW6NTUG.js.map} +0 -0
  121. /package/dist/{model-PC6MMS2S.js.map → model-NIOLLP6W.js.map} +0 -0
  122. /package/dist/{openai-AVJO73FG.js.map → openai-G2AMXRJ2.js.map} +0 -0
  123. /package/dist/{outputStyles-VL7EKGIQ.js.map → outputStyles-CZDXBWRF.js.map} +0 -0
  124. /package/dist/{pluginRuntime-IG2H3W7C.js.map → pluginRuntime-ZIT4IL6O.js.map} +0 -0
  125. /package/dist/{pluginValidation-TOBMLJ5A.js.map → pluginValidation-LO7TNL4T.js.map} +0 -0
  126. /package/dist/{prompts-JGBWXN2Z.js.map → prompts-MKPN6WZF.js.map} +0 -0
  127. /package/dist/{query-4DUM4QP7.js.map → query-MSMRQ2ET.js.map} +0 -0
  128. /package/dist/{ripgrep-HID6XW3J.js.map → ripgrep-XSFDNITT.js.map} +0 -0
  129. /package/dist/{skillMarketplace-YBS7NR7Q.js.map → skillMarketplace-NX6XZDT4.js.map} +0 -0
  130. /package/dist/{state-5OBXGJX6.js.map → state-YAYMHZAZ.js.map} +0 -0
  131. /package/dist/{theme-U2POECGP.js.map → theme-RATH22A4.js.map} +0 -0
  132. /package/dist/{toolPermissionSettings-AYX5MEOW.js.map → toolPermissionSettings-SFS4Z63J.js.map} +0 -0
  133. /package/dist/{tools-JL434UMR.js.map → tools-2J2DNXWI.js.map} +0 -0
  134. /package/dist/{userInput-RXNBLFAH.js.map → userInput-EQAF4OWN.js.map} +0 -0
@@ -2,7 +2,7 @@ import { createRequire as __kodeCreateRequire } from "node:module";
2
2
  const require = __kodeCreateRequire(import.meta.url);
3
3
  import {
4
4
  listDanyaAgentSessions
5
- } from "./chunk-5ONWVNJH.js";
5
+ } from "./chunk-X46SRZQF.js";
6
6
  import {
7
7
  DEFAULT_TIMEOUT_MS,
8
8
  FallbackToolUseRejectedMessage,
@@ -16,11 +16,11 @@ import {
16
16
  listMCPServers,
17
17
  loadMergedSettings,
18
18
  normalizeSandboxRuntimeConfigFromSettings
19
- } from "./chunk-RO73O3Q7.js";
19
+ } from "./chunk-CXOM4XMN.js";
20
20
  import {
21
21
  formatValidationResult,
22
22
  validatePluginOrMarketplacePath
23
- } from "./chunk-6IH7H2LH.js";
23
+ } from "./chunk-IZETEFF5.js";
24
24
  import {
25
25
  addMarketplace,
26
26
  disableSkillPlugin,
@@ -33,15 +33,15 @@ import {
33
33
  refreshMarketplaceAsync,
34
34
  removeMarketplace,
35
35
  uninstallSkillPlugin
36
- } from "./chunk-6GABS3DM.js";
36
+ } from "./chunk-HXH5LYLI.js";
37
37
  import {
38
38
  loadDanyaAgentSessionMessages
39
- } from "./chunk-HNK7M2ZO.js";
39
+ } from "./chunk-UNIJZL2G.js";
40
40
  import {
41
41
  appendSessionCustomTitleRecord,
42
42
  appendSessionJsonlFromMessage,
43
43
  appendSessionTagRecord
44
- } from "./chunk-U7ZJW3CQ.js";
44
+ } from "./chunk-Z4QNIOFF.js";
45
45
  import {
46
46
  getRequestStatus,
47
47
  setRequestStatus,
@@ -58,7 +58,7 @@ import {
58
58
  runStopHooks,
59
59
  runUserPromptSubmitHooks,
60
60
  updateHookTranscriptForMessages
61
- } from "./chunk-H3P32G2A.js";
61
+ } from "./chunk-K7QXXI4A.js";
62
62
  import {
63
63
  getDanyaAgentSessionId,
64
64
  setDanyaAgentSessionId
@@ -71,34 +71,34 @@ import {
71
71
  getOutputStyleSystemPromptAdditions,
72
72
  resolveOutputStyleName,
73
73
  setCurrentOutputStyle
74
- } from "./chunk-HRXRIW33.js";
74
+ } from "./chunk-X7ZDT7EX.js";
75
75
  import {
76
76
  fetchCustomModels,
77
77
  getModelFeatures
78
- } from "./chunk-VMJRNHDU.js";
78
+ } from "./chunk-3ONZAVOS.js";
79
79
  import {
80
80
  queryLLM,
81
81
  queryQuick,
82
82
  verifyApiKey
83
- } from "./chunk-6KD43S3G.js";
83
+ } from "./chunk-2CRLMZ76.js";
84
84
  import {
85
85
  listAllContentFiles,
86
86
  ripGrep
87
- } from "./chunk-SSS2WVMA.js";
87
+ } from "./chunk-E5BAXZSR.js";
88
88
  import {
89
89
  getCustomCommandDirectories,
90
90
  hasCustomCommands,
91
91
  loadCustomCommands,
92
92
  reloadCustomCommands
93
- } from "./chunk-KS52NNBY.js";
93
+ } from "./chunk-HIH5HC5H.js";
94
94
  import {
95
95
  loadToolPermissionContextFromDisk,
96
96
  persistToolPermissionUpdateToDisk
97
- } from "./chunk-M75PDOOM.js";
97
+ } from "./chunk-Y5IRVMDD.js";
98
98
  import {
99
99
  getSettingsFileCandidates,
100
100
  loadSettingsWithLegacyFallback
101
- } from "./chunk-2VUDETSP.js";
101
+ } from "./chunk-HPSW7NNI.js";
102
102
  import {
103
103
  applyToolPermissionContextUpdate,
104
104
  createDefaultToolPermissionContext
@@ -111,7 +111,7 @@ import {
111
111
  resetReminderSession,
112
112
  setTodos,
113
113
  systemReminderService
114
- } from "./chunk-QJMLHIGS.js";
114
+ } from "./chunk-5TDBDWNG.js";
115
115
  import {
116
116
  getSessionState
117
117
  } from "./chunk-XEYEKVFT.js";
@@ -120,7 +120,7 @@ import {
120
120
  getActiveAgents,
121
121
  getAgentByType,
122
122
  getAllAgents
123
- } from "./chunk-7I3UELIX.js";
123
+ } from "./chunk-X36NKBPR.js";
124
124
  import {
125
125
  getSessionPlugins
126
126
  } from "./chunk-2VQWLLDU.js";
@@ -154,22 +154,22 @@ import {
154
154
  processUserInput,
155
155
  reorderMessages,
156
156
  stripSystemMessages
157
- } from "./chunk-QUAOJLG6.js";
157
+ } from "./chunk-YUJ45IMI.js";
158
158
  import {
159
159
  ModelManager,
160
160
  getModelManager,
161
161
  isDefaultSlowAndCapableModel
162
- } from "./chunk-BTAVLAZT.js";
162
+ } from "./chunk-RRPXM25U.js";
163
163
  import {
164
164
  getCodeStyle,
165
165
  getContext,
166
166
  getGitState,
167
167
  getIsGit,
168
168
  getProjectDocs
169
- } from "./chunk-JF5D7ADP.js";
169
+ } from "./chunk-LHNX67NO.js";
170
170
  import {
171
171
  getTheme
172
- } from "./chunk-N74L4GAM.js";
172
+ } from "./chunk-DZCV2FEW.js";
173
173
  import {
174
174
  DEFAULT_GLOBAL_CONFIG,
175
175
  enableConfigs,
@@ -182,7 +182,7 @@ import {
182
182
  saveGlobalConfig,
183
183
  setAllPointersToModel,
184
184
  setModelPointer
185
- } from "./chunk-T6RTYOJB.js";
185
+ } from "./chunk-6JHEJQWY.js";
186
186
  import {
187
187
  AbortError
188
188
  } from "./chunk-HIIHGKXP.js";
@@ -191,7 +191,7 @@ import {
191
191
  getCurrentRequest,
192
192
  logUserFriendly,
193
193
  markPhase
194
- } from "./chunk-CZ5UJ3RL.js";
194
+ } from "./chunk-NMNFFCQ7.js";
195
195
  import {
196
196
  ASCII_LOGO,
197
197
  BunShell,
@@ -236,11 +236,11 @@ import {
236
236
  renderBashNotification,
237
237
  setActivePlanConversationKey,
238
238
  setCwd
239
- } from "./chunk-BAYPSZHG.js";
239
+ } from "./chunk-3A4ENL7W.js";
240
240
  import {
241
241
  MACRO,
242
242
  init_macros
243
- } from "./chunk-PTQTKIR2.js";
243
+ } from "./chunk-5M3MBCE7.js";
244
244
  import {
245
245
  formatTotalCost,
246
246
  getTotalAPIDuration,
@@ -250,16 +250,15 @@ import {
250
250
  } from "./chunk-LWXT5RGE.js";
251
251
  import {
252
252
  __esm,
253
- __export,
254
- __require
253
+ __export
255
254
  } from "./chunk-M3TKNAUR.js";
256
255
 
257
256
  // src/engine/detect.ts
258
- import { existsSync as existsSync15 } from "fs";
259
- import { join as join13 } from "path";
257
+ import { existsSync as existsSync8, readFileSync as readFileSync6, readdirSync as readdirSync2, statSync as statSync9 } from "fs";
258
+ import { join as join6 } from "path";
260
259
  import { globSync } from "glob";
261
- function detectEngine(projectPath) {
262
- if (existsSync15(join13(projectPath, "ProjectSettings")) && existsSync15(join13(projectPath, "Assets"))) {
260
+ function detectEngine(projectPath, depth = 0) {
261
+ if (existsSync8(join6(projectPath, "ProjectSettings")) && existsSync8(join6(projectPath, "Assets"))) {
263
262
  return "unity";
264
263
  }
265
264
  try {
@@ -269,39 +268,42 @@ function detectEngine(projectPath) {
269
268
  }
270
269
  } catch {
271
270
  }
272
- if (existsSync15(join13(projectPath, "project.godot"))) {
271
+ if (existsSync8(join6(projectPath, "project.godot"))) {
273
272
  return "godot";
274
273
  }
275
- const parentCandidates = ["client", "server", "game-client", "game-server"];
276
- for (const candidate of parentCandidates) {
277
- const subPath = join13(projectPath, candidate);
278
- if (existsSync15(subPath)) {
279
- const subEngine = detectEngine(subPath);
280
- if (subEngine) return subEngine;
274
+ if (depth < 1) {
275
+ const subCandidates = ["client", "server", "game-client", "game-server"];
276
+ for (const candidate of subCandidates) {
277
+ const subPath = join6(projectPath, candidate);
278
+ if (existsSync8(subPath)) {
279
+ const subEngine = detectEngine(subPath, depth + 1);
280
+ if (subEngine) return subEngine;
281
+ }
281
282
  }
282
283
  }
283
284
  return null;
284
285
  }
285
- function detectServerLanguage(projectPath) {
286
- if (existsSync15(join13(projectPath, "go.mod"))) {
286
+ function detectServerLanguage(projectPath, depth = 0) {
287
+ if (existsSync8(join6(projectPath, "go.mod"))) {
287
288
  return "go";
288
289
  }
289
- if (existsSync15(join13(projectPath, "Makefile"))) {
290
+ if (existsSync8(join6(projectPath, "Makefile"))) {
290
291
  try {
291
- const { readFileSync: readFileSync11 } = __require("fs");
292
- const makefile = readFileSync11(join13(projectPath, "Makefile"), "utf-8");
292
+ const makefile = readFileSync6(join6(projectPath, "Makefile"), "utf-8");
293
293
  if (makefile.includes("go build") || makefile.includes("go test")) {
294
294
  return "go";
295
295
  }
296
296
  } catch {
297
297
  }
298
298
  }
299
- if (existsSync15(join13(projectPath, "CMakeLists.txt"))) {
299
+ if (existsSync8(join6(projectPath, "CMakeLists.txt"))) {
300
300
  return "cpp";
301
301
  }
302
- const serverDir = join13(projectPath, "server");
303
- if (existsSync15(serverDir) && projectPath !== serverDir) {
304
- return detectServerLanguage(serverDir);
302
+ if (depth < 1) {
303
+ const serverDir = join6(projectPath, "server");
304
+ if (existsSync8(serverDir)) {
305
+ return detectServerLanguage(serverDir, depth + 1);
306
+ }
305
307
  }
306
308
  return null;
307
309
  }
@@ -316,7 +318,6 @@ function detectLanguages(engine, serverLanguage) {
316
318
  break;
317
319
  case "godot":
318
320
  languages.push("GDScript");
319
- languages.push("C#");
320
321
  break;
321
322
  }
322
323
  switch (serverLanguage) {
@@ -340,6 +341,46 @@ function detectProject(projectPath) {
340
341
  const languages = detectLanguages(engine, serverLanguage);
341
342
  return { engine, serverLanguage, languages };
342
343
  }
344
+ function inferRole(name2, engine, serverLanguage) {
345
+ const lower = name2.toLowerCase();
346
+ if (lower.includes("client") || lower.includes("game-client")) return "client";
347
+ if (lower.includes("server") || lower.includes("game-server")) return "server";
348
+ if (lower.includes("shared") || lower.includes("common")) return "shared";
349
+ if (engine) return "client";
350
+ if (serverLanguage) return "server";
351
+ return "unknown";
352
+ }
353
+ function detectWorkspace(rootPath) {
354
+ const subProjects = [];
355
+ try {
356
+ const entries = readdirSync2(rootPath);
357
+ for (const entry of entries) {
358
+ if (entry.startsWith(".") || entry === "node_modules" || entry === "dist" || entry === "Docs" || entry === "Tools") continue;
359
+ const subPath = join6(rootPath, entry);
360
+ try {
361
+ if (!statSync9(subPath).isDirectory()) continue;
362
+ } catch {
363
+ continue;
364
+ }
365
+ const engine = detectEngine(subPath);
366
+ const serverLanguage = detectServerLanguage(subPath);
367
+ if (engine || serverLanguage) {
368
+ subProjects.push({
369
+ name: entry,
370
+ path: subPath,
371
+ engine,
372
+ serverLanguage,
373
+ role: inferRole(entry, engine, serverLanguage)
374
+ });
375
+ }
376
+ }
377
+ } catch {
378
+ }
379
+ if (subProjects.length >= 2) {
380
+ return { type: "workspace", rootPath, subProjects };
381
+ }
382
+ return { type: "single-project", rootPath, subProjects: [] };
383
+ }
343
384
  var init_detect = __esm({
344
385
  "src/engine/detect.ts"() {
345
386
  }
@@ -351,9 +392,9 @@ init_state();
351
392
  init_product();
352
393
 
353
394
  // src/tools/system/BashTool/BashTool.tsx
354
- import { statSync as statSync12 } from "fs";
395
+ import { statSync as statSync15 } from "fs";
355
396
  import { EOL as EOL3 } from "os";
356
- import { isAbsolute as isAbsolute10, relative as relative13, resolve as resolve10 } from "path";
397
+ import { isAbsolute as isAbsolute10, relative as relative15, resolve as resolve10 } from "path";
357
398
  import * as React108 from "react";
358
399
  import { z as z16 } from "zod";
359
400
  init_product();
@@ -527,7 +568,7 @@ var getCommandSubcommandPrefix = memoize(
527
568
  var getCommandPrefix = memoize(
528
569
  async (command4, abortSignal) => {
529
570
  const { systemPrompt, userPrompt } = buildBashCommandPrefixDetectionPrompt(command4);
530
- const { API_ERROR_MESSAGE_PREFIX: API_ERROR_MESSAGE_PREFIX2, queryQuick: queryQuick2 } = await import("./llm-IFU62ZT4.js");
571
+ const { API_ERROR_MESSAGE_PREFIX: API_ERROR_MESSAGE_PREFIX2, queryQuick: queryQuick2 } = await import("./llm-23Z6UDED.js");
531
572
  const response = await queryQuick2({
532
573
  systemPrompt,
533
574
  userPrompt,
@@ -1623,11 +1664,11 @@ function isSensitiveFilePath(inputPath) {
1623
1664
  if (p.startsWith("\\\\") || p.startsWith("//")) return true;
1624
1665
  const absolutePath = resolveLikeCliPath(p);
1625
1666
  const parts = toPosixPath(absolutePath).split(POSIX_SEP);
1626
- const basename6 = parts[parts.length - 1] ?? "";
1667
+ const basename9 = parts[parts.length - 1] ?? "";
1627
1668
  for (const part of parts) {
1628
1669
  if (SENSITIVE_DIR_NAMES.has(toLower(part))) return true;
1629
1670
  }
1630
- if (basename6 && SENSITIVE_FILE_NAMES.has(toLower(basename6))) return true;
1671
+ if (basename9 && SENSITIVE_FILE_NAMES.has(toLower(basename9))) return true;
1631
1672
  return false;
1632
1673
  }
1633
1674
  function getSettingsPathsForWriteProtection(options) {
@@ -1708,13 +1749,13 @@ function isPathInWorkingDirectories(inputPath, context) {
1708
1749
  toPosixPath(resolvedCandidate)
1709
1750
  );
1710
1751
  const rootPosix = normalizeMacPrivatePrefix(toPosixPath(resolvedRoot));
1711
- const relative14 = posixRelative(
1752
+ const relative16 = posixRelative(
1712
1753
  toLower(rootPosix),
1713
1754
  toLower(candidatePosix)
1714
1755
  );
1715
- if (relative14 === "") return true;
1716
- if (hasParentTraversalSegment(relative14)) return false;
1717
- if (POSIX.isAbsolute(relative14)) return false;
1756
+ if (relative16 === "") return true;
1757
+ if (hasParentTraversalSegment(relative16)) return false;
1758
+ if (POSIX.isAbsolute(relative16)) return false;
1718
1759
  return true;
1719
1760
  });
1720
1761
  });
@@ -1817,9 +1858,9 @@ function matchPermissionRuleForPath(args) {
1817
1858
  }
1818
1859
  for (const [root, patternsMap] of grouped.entries()) {
1819
1860
  const baseRoot = root ?? getCwd();
1820
- const relative14 = posixRelative(baseRoot, targetPosix);
1821
- if (relative14.startsWith(`..${POSIX_SEP}`)) continue;
1822
- if (!relative14) continue;
1861
+ const relative16 = posixRelative(baseRoot, targetPosix);
1862
+ if (relative16.startsWith(`..${POSIX_SEP}`)) continue;
1863
+ if (!relative16) continue;
1823
1864
  const matchAll = patternsMap.get("/**")?.ruleString ?? patternsMap.get("**")?.ruleString ?? null;
1824
1865
  if (matchAll) return matchAll;
1825
1866
  const patterns = Array.from(patternsMap.keys()).map((pattern) => {
@@ -1833,7 +1874,7 @@ function matchPermissionRuleForPath(args) {
1833
1874
  return candidate;
1834
1875
  });
1835
1876
  const matcher = buildIgnoreMatcher(patterns);
1836
- const result = matcher.test(relative14);
1877
+ const result = matcher.test(relative16);
1837
1878
  if (!result.ignored || !result.rule) continue;
1838
1879
  let matched = result.rule.pattern;
1839
1880
  const matchedWithGlob = `${matched}/**`;
@@ -4596,7 +4637,7 @@ function formatParseError(error) {
4596
4637
  return error instanceof Error ? error.message : String(error);
4597
4638
  }
4598
4639
  async function defaultGateQuery(args) {
4599
- const { API_ERROR_MESSAGE_PREFIX: API_ERROR_MESSAGE_PREFIX2, queryLLM: queryLLM2 } = await import("./llm-IFU62ZT4.js");
4640
+ const { API_ERROR_MESSAGE_PREFIX: API_ERROR_MESSAGE_PREFIX2, queryLLM: queryLLM2 } = await import("./llm-23Z6UDED.js");
4600
4641
  const messages = [
4601
4642
  {
4602
4643
  type: "user",
@@ -10532,28 +10573,1893 @@ var help = {
10532
10573
  userFacingName() {
10533
10574
  return "help";
10534
10575
  }
10535
- };
10536
- var help_default = help;
10537
-
10538
- // src/ui/components/ProjectOnboarding.tsx
10539
- import * as React28 from "react";
10540
- import { OrderedList } from "@inkjs/ui";
10541
- import { Box as Box21, Text as Text24 } from "ink";
10542
- import { existsSync as existsSync8 } from "fs";
10543
- import { join as join6 } from "path";
10544
- import { homedir as homedir6 } from "os";
10576
+ };
10577
+ var help_default = help;
10578
+
10579
+ // src/ui/components/ProjectOnboarding.tsx
10580
+ import * as React28 from "react";
10581
+ import { OrderedList } from "@inkjs/ui";
10582
+ import { Box as Box21, Text as Text24 } from "ink";
10583
+ import { existsSync as existsSync13 } from "fs";
10584
+ import { join as join11 } from "path";
10585
+ import { homedir as homedir6 } from "os";
10586
+
10587
+ // src/constants/releaseNotes.ts
10588
+ var RELEASE_NOTES = {
10589
+ "0.1.178": [
10590
+ "New release notes now show you what's changed since you last launched"
10591
+ ]
10592
+ };
10593
+
10594
+ // src/ui/components/ProjectOnboarding.tsx
10595
+ import { gt } from "semver";
10596
+ init_macros();
10597
+ init_product();
10598
+
10599
+ // src/ui/screens/AutoInitHarness.ts
10600
+ import { existsSync as existsSync12 } from "fs";
10601
+ import { join as join10 } from "path";
10602
+
10603
+ // src/commands/initProject.ts
10604
+ init_detect();
10605
+ import { writeFileSync as writeFileSync6, existsSync as existsSync11 } from "fs";
10606
+ import { join as join9, basename as basename4 } from "path";
10607
+
10608
+ // src/templates/templateEngine.ts
10609
+ function renderTemplate(content, ctx) {
10610
+ return content.replace(/\{\{PROJECT_NAME\}\}/g, ctx.projectName).replace(/\{\{ENGINE\}\}/g, ctx.engine ?? "unknown").replace(/\{\{SERVER_LANG\}\}/g, ctx.serverLanguage ?? "none").replace(/\{\{CONFIG_GEN_PATH\}\}/g, ctx.configGenPath).replace(/\{\{FRAMEWORK_PATH\}\}/g, ctx.frameworkPath).replace(/\{\{PROTO_PATH\}\}/g, ctx.protoPath).replace(/\{\{ORM_PATH\}\}/g, ctx.ormPath).replace(/\{\{INSTRUCTIONS_FILE\}\}/g, ctx.instructionsFile);
10611
+ }
10612
+ function buildTemplateContext(projectName, engine, serverLanguage, instructionsFile) {
10613
+ let configGenPath = "Config/Gen/";
10614
+ let frameworkPath = "Scripts/Framework/";
10615
+ let protoPath = "Proto/";
10616
+ let ormPath = "orm/";
10617
+ if (engine === "unity") {
10618
+ configGenPath = "Assets/Scripts/Gameplay/Config/Gen/";
10619
+ frameworkPath = "Assets/Scripts/Framework/";
10620
+ protoPath = "Assets/Scripts/Proto/";
10621
+ } else if (engine === "unreal") {
10622
+ configGenPath = "Source/Generated/";
10623
+ frameworkPath = "Source/Core/";
10624
+ protoPath = "Source/Proto/";
10625
+ } else if (engine === "godot") {
10626
+ configGenPath = "scripts/generated/";
10627
+ frameworkPath = "scripts/core/";
10628
+ protoPath = "proto/";
10629
+ }
10630
+ if (serverLanguage === "go" && !engine) {
10631
+ configGenPath = "common/config/cfg_*.go";
10632
+ ormPath = "orm/(golang|redis|mongo)/";
10633
+ protoPath = "resources/proto/";
10634
+ }
10635
+ return { projectName, engine, serverLanguage, configGenPath, frameworkPath, protoPath, ormPath, instructionsFile };
10636
+ }
10637
+
10638
+ // src/templates/bundleInstaller.ts
10639
+ import { mkdirSync as mkdirSync5, writeFileSync as writeFileSync4, existsSync as existsSync9 } from "fs";
10640
+ import { join as join7, dirname as dirname6 } from "path";
10641
+ function installBundle(targetDir, bundleContent, ctx, options = {}) {
10642
+ const installed = [];
10643
+ for (const [relativePath, content] of Object.entries(bundleContent)) {
10644
+ const isTemplate = relativePath.endsWith(".tmpl");
10645
+ const finalRelPath = isTemplate ? relativePath.replace(/\.tmpl$/, "") : relativePath;
10646
+ const finalPath = join7(targetDir, finalRelPath);
10647
+ const dir = dirname6(finalPath);
10648
+ if (!existsSync9(finalPath) || options.force) {
10649
+ mkdirSync5(dir, { recursive: true });
10650
+ const rendered = isTemplate ? renderTemplate(content, ctx) : content;
10651
+ writeFileSync4(finalPath, rendered, { encoding: "utf-8", mode: relativePath.includes("hooks/") ? 493 : 420 });
10652
+ installed.push(finalRelPath);
10653
+ }
10654
+ }
10655
+ return installed;
10656
+ }
10657
+
10658
+ // src/templates/bundles/common.ts
10659
+ var CMD_AUTO_WORK = `# /auto-work <requirement>
10660
+
10661
+ Full-auto development pipeline. Walks through the entire cycle without manual intervention.
10662
+
10663
+ ## Stages
10664
+
10665
+ ### Stage 0: Classify
10666
+ Determine requirement type: bug | feature | refactor
10667
+
10668
+ ### Stage 1: Plan
10669
+ - List all files to modify with 1-line intent per file
10670
+ - If >3 tasks and parallelizable \u2192 switch to parallel mode
10671
+
10672
+ ### Stage 2: Code
10673
+ - Modify files per plan
10674
+ - After each file \u2192 compile check immediately (fail-fast)
10675
+ - After all: run /verify
10676
+ - Verify fail \u2192 fix (max 3 rounds), else abort
10677
+
10678
+ ### Stage 3: Review
10679
+ - Run /review (100-point scoring)
10680
+ - CRITICAL \u2192 fail; <80 \u2192 fail
10681
+ - Quality ratchet: score must not drop
10682
+ - Pass \u2192 write push-approved marker
10683
+
10684
+ ### Stage 4: Commit
10685
+ - git add + git commit
10686
+ - Pre-commit hook runs lint + test
10687
+ - Fail \u2192 fix and retry (max 2 times)
10688
+
10689
+ ### Stage 5: Knowledge Deposit
10690
+ - Feature \u2192 Docs/Version/<version>/<feature>/summary.md
10691
+ - Bug \u2192 Docs/Bugs/<version>/<bug-name>.md
10692
+ - New module \u2192 Docs/Engine/Business/<module>/
10693
+
10694
+ ### Stage 6: Harness Self-Evolution
10695
+ - Check for errors fixed in Stages 2-3
10696
+ - If found \u2192 update .danya/rules/ to prevent recurrence
10697
+
10698
+ ## Termination Conditions
10699
+ - Verify fail after 3 rounds (Stage 2)
10700
+ - Review score <80 after 3 rounds (Stage 3)
10701
+ - Commit fail after 2 attempts (Stage 4)
10702
+
10703
+ ## Important
10704
+ - Do NOT push. Push is manual after human review.
10705
+ - Do NOT skip stages. Each stage must complete before next.
10706
+ `;
10707
+ var CMD_AUTO_BUGFIX = `# /auto-bugfix <bug-description>
10708
+
10709
+ Autonomous bug-fix pipeline. Must reproduce before fixing.
10710
+
10711
+ ## Flow
10712
+
10713
+ ### Step 1: Reproduce
10714
+ - Analyze bug description
10715
+ - Find reproduction steps
10716
+ - Verify the bug exists (compile, run test, check logs)
10717
+ - If NOT reproducible \u2192 report and STOP
10718
+
10719
+ ### Step 2: Root Cause Analysis
10720
+ - Trace from symptom to root cause
10721
+ - Do NOT guess. Read code, check logs, add debug output if needed.
10722
+
10723
+ ### Step 3: Fix (max 5 rounds)
10724
+ - Modify code to fix root cause
10725
+ - Run /verify after each fix attempt
10726
+ - If verify fails \u2192 analyze why and try again
10727
+ - If 5 rounds exhausted \u2192 report failure
10728
+
10729
+ ### Step 4: Review + Commit
10730
+ - Run /review (must pass \u226580, no CRITICAL)
10731
+ - git commit with descriptive message
10732
+
10733
+ ### Step 5: Knowledge Deposit
10734
+ - Write to Docs/Bugs/<version>/<bug-name>.md
10735
+ - Include: reproduction steps, root cause, fix, lessons learned
10736
+
10737
+ ### Step 6: Harness Evolution
10738
+ - If this bug type isn't in known-pitfalls.md \u2192 add it
10739
+ `;
10740
+ var CMD_REVIEW = `# /review
10741
+
10742
+ Score-based code review. Quantitative, not subjective.
10743
+
10744
+ ## Pre-check
10745
+ Run /verify first. If verify fails, fix before reviewing.
10746
+
10747
+ ## Scoring System
10748
+ - Initial score: 100
10749
+ - CRITICAL: -30 each (any CRITICAL = automatic FAIL)
10750
+ - HIGH: -10 each
10751
+ - MEDIUM: -3 each
10752
+ - Pass threshold: \u226580 AND no CRITICAL
10753
+
10754
+ ## Check Categories
10755
+
10756
+ ### 1. Architecture Compliance (mechanical + AI)
10757
+ - Forbidden file edits (constitution)?
10758
+ - Cross-layer imports?
10759
+ - Package boundary violations?
10760
+
10761
+ ### 2. Coding Standards (mechanical + AI)
10762
+ - Engine-specific style violations?
10763
+ - Error handling patterns?
10764
+ - Naming conventions?
10765
+
10766
+ ### 3. Logic Review (AI only)
10767
+ - Intent clarity
10768
+ - Error propagation
10769
+ - Concurrency safety
10770
+ - Edge cases
10771
+ - Dead code
10772
+
10773
+ ### 4. Harness Completeness
10774
+ - Were errors fixed during development?
10775
+ - Did rules get updated to match?
10776
+
10777
+ ## Quality Ratchet
10778
+ Score must not drop compared to previous review. If it drops, the fix introduced regressions.
10779
+
10780
+ ## Output
10781
+ On PASS: write .danya/push-approved marker (one-time use).
10782
+ On FAIL: list all issues with severity, do NOT write marker.
10783
+ `;
10784
+ var CMD_FIX_HARNESS = `# /fix-harness [error-description]
10785
+
10786
+ Update harness rules after discovering an error pattern.
10787
+
10788
+ ## Process
10789
+
10790
+ 1. Analyze the error that occurred
10791
+ 2. Route to the correct rule file:
10792
+ - Forbidden zone violation \u2192 constitution.md
10793
+ - Coding principle violation \u2192 golden-principles.md
10794
+ - Known pitfall re-occurrence \u2192 known-pitfalls.md
10795
+ - Architecture boundary violation \u2192 architecture-boundaries.md
10796
+ - Style issue \u2192 engine-style rule file
10797
+ 3. Add a concise rule:
10798
+ - \u274C What went wrong (with example)
10799
+ - \u2705 Correct approach (with example)
10800
+ 4. If mechanically checkable \u2192 add to /verify checks
10801
+ 5. Check total rule file lines < 550 (if exceeded, consolidate)
10802
+
10803
+ ## Important
10804
+ - Only add NEW patterns not already captured
10805
+ - Keep rules minimal: one error = one rule
10806
+ - Include correct-usage example, not just prohibition
10807
+ `;
10808
+ var CMD_PLAN = `# /plan <requirement>
10809
+
10810
+ Analyze requirement and create a development plan.
10811
+
10812
+ ## Output Format
10813
+
10814
+ ### 1. Requirement Analysis
10815
+ - What needs to change and why
10816
+ - Scope assessment
10817
+
10818
+ ### 2. File Checklist
10819
+ For each file to modify:
10820
+ - File path
10821
+ - 1-line description of changes
10822
+ - Risk level (low/medium/high)
10823
+
10824
+ ### 3. Execution Order
10825
+ - Dependencies between changes
10826
+ - Which files can be modified in parallel
10827
+ - Which must be sequential
10828
+
10829
+ ### 4. Verification Strategy
10830
+ - How to verify each change works
10831
+ - Integration test approach
10832
+
10833
+ ## Rules
10834
+ - Read existing code before planning changes
10835
+ - Check architecture boundaries before proposing cross-layer changes
10836
+ - Flag any forbidden zone files that would need regeneration
10837
+ `;
10838
+ var CMD_VERIFY = `# /verify [level]
10839
+
10840
+ Mechanical verification checks. Levels: quick | build | full
10841
+
10842
+ ## quick (default)
10843
+ - Lint check
10844
+ - Syntax check (engine-specific)
10845
+
10846
+ ## build
10847
+ - Everything in quick
10848
+ - Full compilation/build
10849
+
10850
+ ## full
10851
+ - Everything in build
10852
+ - Run tests
10853
+ - Architecture boundary check
10854
+
10855
+ ## Important
10856
+ - Run this BEFORE /review
10857
+ - If verify fails, fix issues before reviewing
10858
+ - Exit with clear pass/fail and error details
10859
+ `;
10860
+ var CMD_PARALLEL_EXECUTE = `# /parallel-execute <mode> <description>
10861
+
10862
+ Wave-based parallel task execution.
10863
+
10864
+ ## Modes
10865
+ - prepare: Decompose task into sub-tasks with dependency declarations
10866
+ - execute: Run prepared tasks in parallel waves
10867
+
10868
+ ## Prepare Mode
10869
+ Create task files in .danya/exec-plans/active/:
10870
+ - task-01.md, task-02.md, etc.
10871
+ - Each has YAML frontmatter with \`depends: []\` field
10872
+ - Tasks with no dependencies \u2192 Wave 1
10873
+ - Tasks depending on Wave 1 \u2192 Wave 2, etc.
10874
+
10875
+ ## Execute Mode
10876
+ - Parse dependency DAG \u2192 compute waves
10877
+ - Wave 1: run all independent tasks in parallel (separate worktrees)
10878
+ - Collect results, merge successful tasks
10879
+ - Wave 2: run next batch
10880
+ - Continue until all waves complete
10881
+ - Run /verify full on integrated code
10882
+
10883
+ ## Rules
10884
+ - Each task must be atomic (can succeed/fail independently)
10885
+ - Failed task \u2192 rollback its worktree, don't affect others
10886
+ - Cyclic dependencies \u2192 error, re-decompose
10887
+ `;
10888
+ var RULE_KNOWN_PITFALLS = `# Known Pitfalls
10889
+
10890
+ Real errors encountered during development. Each entry prevents the same mistake.
10891
+
10892
+ _This file grows through harness self-evolution. Start empty, fill as errors occur._
10893
+ `;
10894
+ var RULE_ARCHITECTURE_BOUNDARIES = `# Architecture Boundaries
10895
+
10896
+ Dependency direction rules. Higher layers can import lower layers, not vice versa.
10897
+
10898
+ ## General Principle
10899
+ - One-way dependencies: lower layers must NOT reference higher layers
10900
+ - Cross-module communication through events/interfaces, not direct references
10901
+
10902
+ _Customize with your project's actual layer structure._
10903
+ `;
10904
+ var MEMORY_INDEX = `# Project Memory
10905
+
10906
+ Persistent domain knowledge. Updated as the agent learns about this project.
10907
+
10908
+ _Memory files are auto-loaded each session and survive context compression._
10909
+ `;
10910
+ var HOOK_CONSTITUTION_GUARD = `#!/bin/bash
10911
+ # Gate 0: GUARD \u2014 forbidden zone check. Exit 2 = block.
10912
+ INPUT=$(cat)
10913
+ FILE_PATH=$(echo "$INPUT" | sed -n 's/.*"file_path"[[:space:]]*:[[:space:]]*"\\([^"]*\\)".*/\\1/p' 2>/dev/null)
10914
+ [ -z "$FILE_PATH" ] && exit 0
10915
+ FILE_PATH=$(echo "$FILE_PATH" | sed 's/\\\\\\\\/\\//g')
10916
+ RULES=".danya/guard-rules.json"
10917
+ [ ! -f "$RULES" ] && exit 0
10918
+ while IFS= read -r p; do
10919
+ p=$(echo "$p" | tr -d '"' | tr -d ' ')
10920
+ [ -z "$p" ] && continue
10921
+ if echo "$FILE_PATH" | grep -qE "$p" 2>/dev/null; then
10922
+ echo "{\\"systemMessage\\":\\"\u274C GUARD: $FILE_PATH is in forbidden zone ($p). Edit the source data and regenerate instead.\\"}"
10923
+ exit 2
10924
+ fi
10925
+ done < <(grep '"pattern"' "$RULES" | sed 's/.*"pattern"\\s*:\\s*"//;s/".*//')
10926
+ exit 0
10927
+ `;
10928
+ var HOOK_PRE_COMMIT = `#!/bin/bash
10929
+ # Gate 3: COMMIT \u2014 pre-commit lint + test. Exit 2 = block.
10930
+ INPUT=$(cat)
10931
+ CMD=$(echo "$INPUT" | sed -n 's/.*"command"[[:space:]]*:[[:space:]]*"\\([^"]*\\)".*/\\1/p' 2>/dev/null)
10932
+ echo "$CMD" | grep -qE 'git\\s+commit' || exit 0
10933
+ if [ -f "Makefile" ]; then
10934
+ make lint > /tmp/danya-lint.log 2>&1 || { echo "\u274C Lint failed" >&2; tail -10 /tmp/danya-lint.log >&2; exit 2; }
10935
+ make test > /tmp/danya-test.log 2>&1 || { echo "\u274C Tests failed" >&2; tail -10 /tmp/danya-test.log >&2; exit 2; }
10936
+ fi
10937
+ exit 0
10938
+ `;
10939
+ var HOOK_POST_COMMIT = `#!/bin/bash
10940
+ # Gate 4: Post-commit review reminder. Always exit 0.
10941
+ echo '{"systemMessage":"\u2705 Commit done. Run /review before push (score \u226580, no CRITICAL)."}'
10942
+ exit 0
10943
+ `;
10944
+ var HOOK_PUSH_GATE = `#!/bin/bash
10945
+ # Gate 5: PUSH \u2014 check push-approved marker. Exit 2 = block.
10946
+ INPUT=$(cat)
10947
+ CMD=$(echo "$INPUT" | sed -n 's/.*"command"[[:space:]]*:[[:space:]]*"\\([^"]*\\)".*/\\1/p' 2>/dev/null)
10948
+ echo "$CMD" | grep -qE 'git[[:space:]]+push' || exit 0
10949
+ MARKER=".danya/push-approved"
10950
+ [ ! -f "$MARKER" ] && { echo "\u274C PUSH BLOCKED: run /review first" >&2; exit 2; }
10951
+ rm -f "$MARKER"
10952
+ exit 0
10953
+ `;
10954
+ var HOOK_HARNESS_EVOLUTION = `#!/bin/bash
10955
+ # PostToolUse: detect error-then-fix pattern for harness self-evolution.
10956
+ # Reads tool result, checks if a previous error was just fixed.
10957
+ # If so, injects a system message prompting the agent to update rules.
10958
+ INPUT=$(cat)
10959
+ TOOL_NAME=$(echo "$INPUT" | sed -n 's/.*"tool_name"[[:space:]]*:[[:space:]]*"\\([^"]*\\)".*/\\1/p' 2>/dev/null)
10960
+ EXIT_CODE=$(echo "$INPUT" | sed -n 's/.*"exit_code"[[:space:]]*:[[:space:]]*\\([0-9]*\\).*/\\1/p' 2>/dev/null)
10961
+
10962
+ # Track error state using project-scoped file (stable across hook invocations)
10963
+ STATE_FILE=".danya/.error-state"
10964
+
10965
+ case "$TOOL_NAME" in
10966
+ Bash)
10967
+ if [ "$EXIT_CODE" != "0" ] && [ -n "$EXIT_CODE" ]; then
10968
+ # Error occurred \u2014 record it
10969
+ echo "error" > "$STATE_FILE" 2>/dev/null
10970
+ elif [ -f "$STATE_FILE" ] && [ "$(cat "$STATE_FILE" 2>/dev/null)" = "error" ]; then
10971
+ # Previous error, now success \u2014 fix confirmed
10972
+ rm -f "$STATE_FILE"
10973
+ echo '{"systemMessage":"Error was fixed. Consider running /fix-harness to update rules and prevent this error pattern in the future."}'
10974
+ fi
10975
+ ;;
10976
+ esac
10977
+ exit 0
10978
+ `;
10979
+
10980
+ // src/templates/bundles/unity.ts
10981
+ var UNITY_RULES_CONSTITUTION = `# Forbidden Zone Constitution
10982
+
10983
+ ## Auto-Generated Code (DO NOT edit manually)
10984
+ - \`{{CONFIG_GEN_PATH}}\` \u2014 Generated config. Edit Excel/data source \u2192 run ConfigGenerate.
10985
+ - \`*_pb.cs\` \u2014 Protobuf generated. Edit .proto \u2192 regenerate.
10986
+
10987
+ ## Framework Layer (requires approval)
10988
+ - \`{{FRAMEWORK_PATH}}\` \u2014 Core framework code. Modifying without understanding impacts all systems.
10989
+
10990
+ ## Art & Resource Directories
10991
+ - \`ArtResources/\`, \`PackResources/\` \u2014 Managed by art pipeline, not code.
10992
+ - \`.unity\` scene files \u2014 Binary, merge-unfriendly. Coordinate with team.
10993
+
10994
+ ## Third-Party Plugins
10995
+ - \`Assets/Plugins/\`, \`Assets/3rd/\` \u2014 No modification unless marked with [CUSTOM_MOD].
10996
+ `;
10997
+ var UNITY_RULES_GOLDEN_PRINCIPLES = `# Golden Principles \u2014 Unity/C#
10998
+
10999
+ Non-negotiable coding rules for this project.
11000
+
11001
+ ## Logging
11002
+ - \u274C \`Debug.Log\`, \`Debug.LogWarning\`, \`Debug.LogError\`
11003
+ - \u2705 Use project logger (MLog or equivalent)
11004
+
11005
+ ## Async
11006
+ - \u274C \`System.Threading.Tasks.Task\`, \`async/await\` with Task
11007
+ - \u2705 \`UniTask\` for all async operations
11008
+
11009
+ ## Object Lifecycle
11010
+ - \u274C \`Destroy()\` on pooled objects
11011
+ - \u2705 \`ObjectPoolUtility.Return()\` or equivalent pool API
11012
+
11013
+ ## Events
11014
+ - Subscribe in \`OnEnable()\` / initialization
11015
+ - Unsubscribe in \`OnDisable()\` / cleanup
11016
+ - \u274C Unmatched Subscribe without Unsubscribe \u2192 memory leak
11017
+
11018
+ ## Architecture
11019
+ - One-way dependencies: Framework \u2190 Gameplay \u2190 Renderer \u2190 Tools
11020
+ - \u274C Lower layer referencing higher layer
11021
+ - \u2705 Cross-module communication through EventManager/interfaces
11022
+
11023
+ ## Null Safety
11024
+ - Always null-check GetComponent<T>() results
11025
+ - Use TryGetComponent<T>() where possible
11026
+ - Never assume Find() returns non-null
11027
+ `;
11028
+ var UNITY_RULES_STYLE = `# Unity C# Style Guide
11029
+
11030
+ ## Naming
11031
+ - Classes/Structs: PascalCase
11032
+ - Methods: PascalCase
11033
+ - Private fields: _camelCase with underscore prefix
11034
+ - Public properties: PascalCase
11035
+ - Constants: UPPER_SNAKE_CASE
11036
+ - Enums: PascalCase (members too)
11037
+
11038
+ ## File Organization
11039
+ - One primary class per file
11040
+ - File name matches class name
11041
+ - Namespace matches directory structure
11042
+
11043
+ ## MonoBehaviour
11044
+ - Lifecycle order: Awake \u2192 OnEnable \u2192 Start \u2192 Update \u2192 OnDisable \u2192 OnDestroy
11045
+ - Heavy init in Awake, subscriptions in OnEnable
11046
+ - Never call Destroy() in Awake or OnEnable
11047
+ `;
11048
+ var UNITY_MEMORY_ARCHITECTURE = `---
11049
+ name: architecture-layers
11050
+ description: Unity project layer structure and dependencies
11051
+ type: project
11052
+ ---
11053
+
11054
+ ## Layer Structure
11055
+
11056
+ | Layer | Responsibility | Can Reference |
11057
+ |-------|---------------|---------------|
11058
+ | Framework | Core systems, managers, utilities | Nothing above |
11059
+ | Gameplay | Game logic, features, handlers | Framework |
11060
+ | Renderer | Visual, UI, effects | Framework, Gameplay |
11061
+ | Tools | Editor tools, debug utilities | All layers |
11062
+
11063
+ _Update this with your project's actual architecture as you learn it._
11064
+ `;
11065
+ var UNITY_HOOK_SYNTAX = `#!/bin/bash
11066
+ # Gate 1: SYNTAX \u2014 post-edit C# syntax check.
11067
+ INPUT=$(cat)
11068
+ FILE_PATH=$(echo "$INPUT" | sed -n 's/.*"file_path"[[:space:]]*:[[:space:]]*"\\([^"]*\\)".*/\\1/p' 2>/dev/null)
11069
+ [ -z "$FILE_PATH" ] && exit 0
11070
+ echo "$FILE_PATH" | grep -qE '\\.cs$' || exit 0
11071
+ # If dotnet-csharp-syntax-check is available, use it
11072
+ if command -v dotnet-csharp-syntax-check &>/dev/null; then
11073
+ dotnet-csharp-syntax-check "$FILE_PATH" 2>&1 || true
11074
+ fi
11075
+ exit 0
11076
+ `;
11077
+ function getUnityBundle() {
11078
+ return {
11079
+ "rules/constitution.md.tmpl": UNITY_RULES_CONSTITUTION,
11080
+ "rules/golden-principles.md": UNITY_RULES_GOLDEN_PRINCIPLES,
11081
+ "rules/unity-csharp.md": UNITY_RULES_STYLE,
11082
+ "rules/known-pitfalls.md": RULE_KNOWN_PITFALLS,
11083
+ "rules/architecture-boundaries.md": RULE_ARCHITECTURE_BOUNDARIES,
11084
+ "commands/auto-work.md": CMD_AUTO_WORK,
11085
+ "commands/auto-bugfix.md": CMD_AUTO_BUGFIX,
11086
+ "commands/review.md": CMD_REVIEW,
11087
+ "commands/fix-harness.md": CMD_FIX_HARNESS,
11088
+ "commands/plan.md": CMD_PLAN,
11089
+ "commands/verify.md": CMD_VERIFY,
11090
+ "commands/parallel-execute.md": CMD_PARALLEL_EXECUTE,
11091
+ "memory/MEMORY.md": MEMORY_INDEX,
11092
+ "memory/architecture-layers.md": UNITY_MEMORY_ARCHITECTURE,
11093
+ "hooks/constitution-guard.sh": HOOK_CONSTITUTION_GUARD,
11094
+ "hooks/syntax-check.sh": UNITY_HOOK_SYNTAX,
11095
+ "hooks/pre-commit.sh": HOOK_PRE_COMMIT,
11096
+ "hooks/post-commit.sh": HOOK_POST_COMMIT,
11097
+ "hooks/push-gate.sh": HOOK_PUSH_GATE,
11098
+ "hooks/harness-evolution.sh": HOOK_HARNESS_EVOLUTION
11099
+ };
11100
+ }
11101
+
11102
+ // src/templates/bundles/goServer.ts
11103
+ var GO_RULES_CONSTITUTION = `# Forbidden Zone Constitution
11104
+
11105
+ ## Auto-Generated Code (DO NOT edit manually)
11106
+ - \`{{ORM_PATH}}\` \u2014 ORM generated from XML. Edit XML \u2192 run ORM generator.
11107
+ - \`{{CONFIG_GEN_PATH}}\` \u2014 Config generated from game data. Edit data source \u2192 regenerate.
11108
+ - \`*_service.go\`, \`*_client.go\` \u2014 Protobuf RPC wrappers. Edit .proto \u2192 protoc.
11109
+
11110
+ ## Git Submodules (edit in upstream repo)
11111
+ - \`base/\` \u2014 Shared base library. Edit in the base repo, then update submodule.
11112
+ - \`{{PROTO_PATH}}\` \u2014 Proto definitions. Edit in proto repo.
11113
+
11114
+ ## Important
11115
+ When you see these files, tell the user HOW to regenerate instead of editing directly.
11116
+ `;
11117
+ var GO_RULES_GOLDEN_PRINCIPLES = `# Golden Principles \u2014 Go Server
11118
+
11119
+ Non-negotiable coding rules.
11120
+
11121
+ ## Error Handling
11122
+ - \u274C \`_ = err\` (errcheck enforced)
11123
+ - \u274C \`fmt.Errorf("...")\` without %w
11124
+ - \u2705 \`fmt.Errorf("context: %w", err)\` \u2014 always wrap errors
11125
+
11126
+ ## RPC Handlers
11127
+ - \u274C Log error + return error (double reporting)
11128
+ - \u2705 RPC handler: catch error \u2192 return RpcError, no log
11129
+ - \u2705 Internal logic: log.Errorf + return err
11130
+
11131
+ ## Goroutines
11132
+ - \u274C Bare \`go func() { ... }()\`
11133
+ - \u2705 \`safego.Go(func() { ... })\` \u2014 panic recovery built-in
11134
+
11135
+ ## Atomics
11136
+ - \u274C \`sync/atomic\`
11137
+ - \u2705 \`go.uber.org/atomic\`
11138
+
11139
+ ## UUID
11140
+ - \u274C \`pborman/uuid\`
11141
+ - \u2705 \`google/uuid\`
11142
+
11143
+ ## Database Operations
11144
+ - \u274C Direct DB access from game logic
11145
+ - \u2705 All DB operations through db_server RPC
11146
+
11147
+ ## ECS (if applicable)
11148
+ - Components: data only, no logic
11149
+ - Systems: logic only, operate on components
11150
+ - \u274C Logic in components, data mutation outside systems
11151
+
11152
+ ## Workflow
11153
+ - Plan first for multi-file changes
11154
+ - Use TaskCreate for progress tracking
11155
+ - Use subagent for 5+ file searches
11156
+ `;
11157
+ var GO_RULES_STYLE = `# Go Style Guide
11158
+
11159
+ ## File Naming
11160
+ - snake_case for all .go files
11161
+ - _test.go suffix for tests
11162
+ - Group by feature, not by type
11163
+
11164
+ ## Error Handling
11165
+ - Check errors immediately after function call
11166
+ - Wrap with context: fmt.Errorf("operation: %w", err)
11167
+ - Don't ignore errors silently
11168
+
11169
+ ## Logging
11170
+ - log.Debugf \u2014 development only, verbose
11171
+ - log.Infof \u2014 normal operations
11172
+ - log.Warnf \u2014 recoverable issues
11173
+ - log.Errorf \u2014 errors that need attention
11174
+ - \u274C fmt.Println, fmt.Printf for logging
11175
+
11176
+ ## Testing
11177
+ - Table-driven tests preferred
11178
+ - go test ./... must pass before commit
11179
+ `;
11180
+ var GO_MEMORY_ARCHITECTURE = `---
11181
+ name: cluster-architecture
11182
+ description: Go microservices cluster structure
11183
+ type: project
11184
+ ---
11185
+
11186
+ ## Service Architecture
11187
+
11188
+ _Update this with your project's actual services as you learn them._
11189
+
11190
+ | Service | Role |
11191
+ |---------|------|
11192
+ | gate_server | Client connection, protocol decode |
11193
+ | logic_server | Game logic, state management |
11194
+ | scene_server | Scene/combat, ECS-based |
11195
+ | db_server | Database operations (all DB access goes here) |
11196
+
11197
+ ## Startup Order
11198
+ Services have dependency order. Check project docs or startup scripts.
11199
+
11200
+ ## RPC Call Chain
11201
+ Typical request flow: client \u2192 gate \u2192 logic \u2192 scene \u2192 db
11202
+ `;
11203
+ var GO_HOOK_VERIFY = `#!/bin/bash
11204
+ # Leveled verification for Go server.
11205
+ LEVEL=\${1:-quick}
11206
+ case "$LEVEL" in
11207
+ quick)
11208
+ go vet ./... 2>&1 || exit 1
11209
+ ;;
11210
+ build)
11211
+ go vet ./... 2>&1 || exit 1
11212
+ go build ./... 2>&1 || exit 1
11213
+ ;;
11214
+ full)
11215
+ go vet ./... 2>&1 || exit 1
11216
+ go build ./... 2>&1 || exit 1
11217
+ go test ./... 2>&1 || exit 1
11218
+ ;;
11219
+ esac
11220
+ exit 0
11221
+ `;
11222
+ function getGoServerBundle() {
11223
+ return {
11224
+ "rules/constitution.md.tmpl": GO_RULES_CONSTITUTION,
11225
+ "rules/golden-principles.md": GO_RULES_GOLDEN_PRINCIPLES,
11226
+ "rules/go-style.md": GO_RULES_STYLE,
11227
+ "rules/known-pitfalls.md": RULE_KNOWN_PITFALLS,
11228
+ "rules/architecture-boundaries.md": RULE_ARCHITECTURE_BOUNDARIES,
11229
+ "commands/auto-work.md": CMD_AUTO_WORK,
11230
+ "commands/auto-bugfix.md": CMD_AUTO_BUGFIX,
11231
+ "commands/review.md": CMD_REVIEW,
11232
+ "commands/fix-harness.md": CMD_FIX_HARNESS,
11233
+ "commands/plan.md": CMD_PLAN,
11234
+ "commands/verify.md": CMD_VERIFY,
11235
+ "commands/parallel-execute.md": CMD_PARALLEL_EXECUTE,
11236
+ "memory/MEMORY.md": MEMORY_INDEX,
11237
+ "memory/cluster-architecture.md": GO_MEMORY_ARCHITECTURE,
11238
+ "hooks/constitution-guard.sh": HOOK_CONSTITUTION_GUARD,
11239
+ "hooks/verify-server.sh": GO_HOOK_VERIFY,
11240
+ "hooks/pre-commit.sh": HOOK_PRE_COMMIT,
11241
+ "hooks/post-commit.sh": HOOK_POST_COMMIT,
11242
+ "hooks/push-gate.sh": HOOK_PUSH_GATE,
11243
+ "hooks/harness-evolution.sh": HOOK_HARNESS_EVOLUTION
11244
+ };
11245
+ }
11246
+
11247
+ // src/templates/bundles/unreal.ts
11248
+ var UE_RULES_CONSTITUTION = `# Forbidden Zone Constitution
11249
+
11250
+ ## Auto-Generated Code
11251
+ - \`{{CONFIG_GEN_PATH}}\` \u2014 Generated code. Do not edit manually.
11252
+ - \`Intermediate/\` \u2014 Build intermediates. Never commit or edit.
11253
+ - \`*.generated.h\` \u2014 UHT generated headers.
11254
+
11255
+ ## Engine Source
11256
+ - \`Engine/\` \u2014 Unreal Engine source. Modify only in engine fork.
11257
+ `;
11258
+ var UE_RULES_GOLDEN_PRINCIPLES = `# Golden Principles \u2014 Unreal C++
11259
+
11260
+ ## Memory Management
11261
+ - \u274C Raw \`new\` for UObjects
11262
+ - \u2705 \`NewObject<T>()\`, \`CreateDefaultSubobject<T>()\`
11263
+ - All UObject* references must have UPROPERTY()
11264
+
11265
+ ## Logging
11266
+ - \u274C printf, cout, std::cerr
11267
+ - \u2705 UE_LOG(LogCategory, Verbosity, TEXT("..."))
11268
+
11269
+ ## Threading
11270
+ - \u274C std::thread
11271
+ - \u2705 FRunnable, AsyncTask, FGraphEvent
11272
+
11273
+ ## Naming Conventions
11274
+ - F = Struct (FVector, FTransform)
11275
+ - U = UObject-derived (UActorComponent)
11276
+ - A = AActor-derived (ACharacter)
11277
+ - E = Enum (EMovementMode)
11278
+ - I = Interface (IInteractable)
11279
+ - b prefix for booleans (bIsActive)
11280
+
11281
+ ## Garbage Collection
11282
+ - Pointers in containers must be UPROPERTY() or AddToRoot()
11283
+ - Use TWeakObjectPtr for non-owning references
11284
+ - Never cache raw pointers to UObjects across frames
11285
+ `;
11286
+ var UE_RULES_STYLE = `# Unreal C++ Style Guide
11287
+
11288
+ ## Headers
11289
+ - #pragma once (not include guards)
11290
+ - Engine headers before project headers
11291
+ - Minimal includes in headers, forward-declare where possible
11292
+
11293
+ ## Code Organization
11294
+ - .h in Public/, .cpp in Private/
11295
+ - One class per file pair
11296
+ - Module boundaries respected
11297
+ `;
11298
+ function getUnrealBundle() {
11299
+ return {
11300
+ "rules/constitution.md.tmpl": UE_RULES_CONSTITUTION,
11301
+ "rules/golden-principles.md": UE_RULES_GOLDEN_PRINCIPLES,
11302
+ "rules/unreal-cpp.md": UE_RULES_STYLE,
11303
+ "rules/known-pitfalls.md": RULE_KNOWN_PITFALLS,
11304
+ "rules/architecture-boundaries.md": RULE_ARCHITECTURE_BOUNDARIES,
11305
+ "commands/auto-work.md": CMD_AUTO_WORK,
11306
+ "commands/auto-bugfix.md": CMD_AUTO_BUGFIX,
11307
+ "commands/review.md": CMD_REVIEW,
11308
+ "commands/fix-harness.md": CMD_FIX_HARNESS,
11309
+ "commands/plan.md": CMD_PLAN,
11310
+ "commands/verify.md": CMD_VERIFY,
11311
+ "commands/parallel-execute.md": CMD_PARALLEL_EXECUTE,
11312
+ "memory/MEMORY.md": MEMORY_INDEX,
11313
+ "hooks/constitution-guard.sh": HOOK_CONSTITUTION_GUARD,
11314
+ "hooks/pre-commit.sh": HOOK_PRE_COMMIT,
11315
+ "hooks/post-commit.sh": HOOK_POST_COMMIT,
11316
+ "hooks/push-gate.sh": HOOK_PUSH_GATE,
11317
+ "hooks/harness-evolution.sh": HOOK_HARNESS_EVOLUTION
11318
+ };
11319
+ }
11320
+
11321
+ // src/templates/bundles/godot.ts
11322
+ var GODOT_RULES_CONSTITUTION = `# Forbidden Zone Constitution
11323
+
11324
+ ## Auto-Generated Code
11325
+ - \`{{CONFIG_GEN_PATH}}\` \u2014 Generated scripts. Edit source data \u2192 regenerate.
11326
+ - \`.import/\` \u2014 Godot import cache. Never edit or commit.
11327
+
11328
+ ## Addons
11329
+ - \`addons/\` \u2014 Third-party plugins. Do not modify unless forked.
11330
+ `;
11331
+ var GODOT_RULES_GOLDEN_PRINCIPLES = `# Golden Principles \u2014 Godot/GDScript
11332
+
11333
+ ## Type Hints
11334
+ - \u2705 All function parameters and return types must have type hints
11335
+ - \u274C Untyped \`func process(delta)\`
11336
+ - \u2705 \`func _process(delta: float) -> void\`
11337
+
11338
+ ## Signals
11339
+ - Connect in \`_ready()\`
11340
+ - Disconnect in \`_exit_tree()\`
11341
+ - \u274C Unmatched connect without disconnect
11342
+
11343
+ ## Physics
11344
+ - \u274C Movement in \`_process()\`
11345
+ - \u2705 Movement in \`_physics_process()\`
11346
+
11347
+ ## Node References
11348
+ - \u274C Hardcoded paths: \`get_node("../Player/Sprite")\`
11349
+ - \u2705 @onready var + @export for configurable references
11350
+
11351
+ ## Resource Loading
11352
+ - \u274C \`load()\` at runtime for large resources
11353
+ - \u2705 \`preload()\` for small, always-needed resources
11354
+ - \u2705 \`ResourceLoader.load_threaded_request()\` for large resources
11355
+ `;
11356
+ var GODOT_RULES_STYLE = `# GDScript Style Guide
11357
+
11358
+ ## Naming
11359
+ - Classes: PascalCase
11360
+ - Functions/variables: snake_case
11361
+ - Constants: UPPER_SNAKE_CASE
11362
+ - Signals: snake_case (past tense: health_changed, item_picked_up)
11363
+ - Private: underscore prefix (_internal_method)
11364
+
11365
+ ## File Organization
11366
+ - One script per node/scene where possible
11367
+ - Autoloads for global systems
11368
+ - Class name matches file name
11369
+ `;
11370
+ function getGodotBundle() {
11371
+ return {
11372
+ "rules/constitution.md.tmpl": GODOT_RULES_CONSTITUTION,
11373
+ "rules/golden-principles.md": GODOT_RULES_GOLDEN_PRINCIPLES,
11374
+ "rules/godot-gdscript.md": GODOT_RULES_STYLE,
11375
+ "rules/known-pitfalls.md": RULE_KNOWN_PITFALLS,
11376
+ "rules/architecture-boundaries.md": RULE_ARCHITECTURE_BOUNDARIES,
11377
+ "commands/auto-work.md": CMD_AUTO_WORK,
11378
+ "commands/auto-bugfix.md": CMD_AUTO_BUGFIX,
11379
+ "commands/review.md": CMD_REVIEW,
11380
+ "commands/fix-harness.md": CMD_FIX_HARNESS,
11381
+ "commands/plan.md": CMD_PLAN,
11382
+ "commands/verify.md": CMD_VERIFY,
11383
+ "commands/parallel-execute.md": CMD_PARALLEL_EXECUTE,
11384
+ "memory/MEMORY.md": MEMORY_INDEX,
11385
+ "hooks/constitution-guard.sh": HOOK_CONSTITUTION_GUARD,
11386
+ "hooks/pre-commit.sh": HOOK_PRE_COMMIT,
11387
+ "hooks/post-commit.sh": HOOK_POST_COMMIT,
11388
+ "hooks/push-gate.sh": HOOK_PUSH_GATE,
11389
+ "hooks/harness-evolution.sh": HOOK_HARNESS_EVOLUTION
11390
+ };
11391
+ }
11392
+
11393
+ // src/templates/bundles/workspace.ts
11394
+ var WORKSPACE_MEMORY_CROSS_PROJECT = `---
11395
+ name: cross-project-protocol
11396
+ description: Cross-project protocol and data sync rules
11397
+ type: project
11398
+ ---
11399
+
11400
+ ## Cross-Project Sync Points
11401
+
11402
+ _Update as you discover sync boundaries between sub-projects._
11403
+
11404
+ ### Protobuf
11405
+ - Proto definitions are shared between client and server
11406
+ - Edit .proto \u2192 regenerate both sides
11407
+ - Check field number compatibility when adding fields
11408
+
11409
+ ### Config Tables
11410
+ - Config data flows: Data Source \u2192 Generator \u2192 Client code + Server code
11411
+ - Both sides must regenerate when schema changes
11412
+
11413
+ ### Version Compatibility
11414
+ - Client and server versions must match on protocol level
11415
+ - Breaking changes require coordinated release
11416
+ `;
11417
+ var WORKSPACE_MEMORY_PITFALLS = `---
11418
+ name: cross-project-pitfalls
11419
+ description: Common mistakes when working across sub-projects
11420
+ type: project
11421
+ ---
11422
+
11423
+ ## Cross-Project Pitfalls
11424
+
11425
+ _This file grows through harness self-evolution._
11426
+
11427
+ ### Proto Field Number Conflicts
11428
+ - \u274C Reusing deleted field numbers
11429
+ - \u2705 Reserve deleted field numbers, always use new ones
11430
+
11431
+ ### Config Schema Drift
11432
+ - \u274C Changing config format on one side only
11433
+ - \u2705 Update generator, regenerate both client and server
11434
+ `;
11435
+ function getWorkspaceBundle() {
11436
+ return {
11437
+ "commands/fix-harness.md": CMD_FIX_HARNESS,
11438
+ "commands/plan.md": CMD_PLAN,
11439
+ "memory/MEMORY.md": MEMORY_INDEX,
11440
+ "memory/cross-project-protocol.md": WORKSPACE_MEMORY_CROSS_PROJECT,
11441
+ "memory/cross-project-pitfalls.md": WORKSPACE_MEMORY_PITFALLS
11442
+ };
11443
+ }
11444
+
11445
+ // src/templates/bundles/scripts.ts
11446
+ var SCRIPT_AUTO_WORK_LOOP = `#!/bin/bash
11447
+ # auto-work-loop.sh \u2014 Shell-enforced full-auto development pipeline.
11448
+ # Each stage runs an independent danya -p call. Agent cannot skip steps.
11449
+ set -uo pipefail
11450
+
11451
+ REQUIREMENT="\${1:?Usage: auto-work-loop.sh '<requirement>'}"
11452
+ PROJECT_ROOT="$(cd "$(dirname "$0")/../.." && pwd)"
11453
+ DANYA_CMD="\${DANYA_CMD:-danya}"
11454
+ MODEL="\${MODEL:-sonnet}"
11455
+ MAX_TURNS="\${MAX_TURNS:-30}"
11456
+ MAX_REVIEW_ROUNDS=3
11457
+ MAX_FIX_ROUNDS=3
11458
+ CACHE_DIR="$PROJECT_ROOT/.danya/.cache/auto-work"
11459
+ TIMESTAMP=$(date +%Y%m%d_%H%M%S)
11460
+ LOG_DIR="$CACHE_DIR/$TIMESTAMP"
11461
+ mkdir -p "$LOG_DIR"
11462
+
11463
+ echo "========================================="
11464
+ echo " Danya Auto-Work Orchestrator"
11465
+ echo "========================================="
11466
+ echo " Requirement: $REQUIREMENT"
11467
+ echo " Project: $PROJECT_ROOT"
11468
+ echo " Logs: $LOG_DIR"
11469
+ echo "========================================="
11470
+
11471
+ run_danya() {
11472
+ local stage="$1"; local prompt="$2"; local log_file="$LOG_DIR/\${stage}.log"
11473
+ echo ""; echo ">>> Stage: $stage"
11474
+ $DANYA_CMD -p "$prompt" --model "$MODEL" --max-turns "$MAX_TURNS" \\
11475
+ --allowedTools "Edit,Write,Read,Bash,Grep,Glob" > "$log_file" 2>&1
11476
+ local ec=$?; echo " Exit: $ec | Log: $log_file"; return $ec
11477
+ }
11478
+
11479
+ check_build() { echo " [CHECK] build..."; cd "$PROJECT_ROOT" && make build > "$LOG_DIR/build.log" 2>&1; }
11480
+ check_test() { echo " [CHECK] test..."; cd "$PROJECT_ROOT" && make test > "$LOG_DIR/test.log" 2>&1; }
11481
+
11482
+ # Stage 0: Classify
11483
+ echo ""; echo "=== Stage 0: Classify ==="
11484
+ TYPE=$($DANYA_CMD -p "Classify this requirement as one word: bug / feature / refactor. Requirement: $REQUIREMENT" --model haiku --max-turns 1 2>/dev/null | tr -d '[:space:]' | tr '[:upper:]' '[:lower:]')
11485
+ case "$TYPE" in bug|fix) TYPE="bug";; feature|feat) TYPE="feature";; refactor) TYPE="refactor";; *) TYPE="feature";; esac
11486
+ echo " Type: $TYPE"
11487
+
11488
+ # Stage 0-B: Reproduce (bug only)
11489
+ if [[ "$TYPE" == "bug" ]]; then
11490
+ echo ""; echo "=== Stage 0-B: Reproduce ==="
11491
+ run_danya "reproduce" "Reproduce this bug without fixing it. Output reproduction report: $REQUIREMENT"
11492
+ if grep -qi "not reproduced\\|unable to reproduce" "$LOG_DIR/reproduce.log" 2>/dev/null; then
11493
+ echo " [END] Bug not reproduced. Pipeline terminated."; exit 0
11494
+ fi
11495
+ fi
11496
+
11497
+ # Stage 1: Plan
11498
+ echo ""; echo "=== Stage 1: Plan ==="
11499
+ run_danya "plan" "Requirement: $REQUIREMENT (type: $TYPE). List all files to modify with 1-line intent each. Do NOT write code."
11500
+
11501
+ # Stage 2: Code + Verify loop
11502
+ echo ""; echo "=== Stage 2: Code ==="
11503
+ for ((fix_round=1; fix_round<=MAX_FIX_ROUNDS; fix_round++)); do
11504
+ echo "--- Code round $fix_round/$MAX_FIX_ROUNDS ---"
11505
+ run_danya "coding-$fix_round" "Requirement: $REQUIREMENT. Execute the plan. Compile-check after each file. Follow .danya/rules/."
11506
+ if check_build && check_test; then echo " [PASS]"; break
11507
+ else
11508
+ echo " [FAIL]"
11509
+ [[ $fix_round -ge $MAX_FIX_ROUNDS ]] && { echo " [END] $MAX_FIX_ROUNDS rounds failed."; exit 1; }
11510
+ fi
11511
+ done
11512
+
11513
+ # Stage 3: Review loop
11514
+ echo ""; echo "=== Stage 3: Review ==="
11515
+ best_score=0
11516
+ for ((rr=1; rr<=MAX_REVIEW_ROUNDS; rr++)); do
11517
+ echo "--- Review round $rr/$MAX_REVIEW_ROUNDS ---"
11518
+ run_danya "review-$rr" "Run /review. Output score as: REVIEW_SCORE: <number>"
11519
+ score=$(grep -oP 'REVIEW_SCORE:\\s*\\K[0-9]+' "$LOG_DIR/review-$rr.log" 2>/dev/null || echo "0")
11520
+ echo " Score: $score (baseline: $best_score)"
11521
+ [[ "$score" -lt "$best_score" ]] && { echo " [ROLLBACK] Score dropped"; git checkout . 2>/dev/null; continue; }
11522
+ best_score=$score
11523
+ [[ "$score" -ge 80 ]] && { echo " [PASS] $score/100"; break; }
11524
+ [[ $rr -ge $MAX_REVIEW_ROUNDS ]] && { echo " [END] Score < 80 after $MAX_REVIEW_ROUNDS rounds."; exit 1; }
11525
+ run_danya "fix-review-$rr" "Review failed ($score/100). Fix all CRITICAL and HIGH issues."
11526
+ done
11527
+
11528
+ # Stage 4: Commit
11529
+ echo ""; echo "=== Stage 4: Commit ==="
11530
+ run_danya "commit" "Generate commit message and commit. Format: <type>(scope) description" || true
11531
+
11532
+ # Stage 5: Knowledge Deposit
11533
+ echo ""; echo "=== Stage 5: Knowledge Deposit ==="
11534
+ run_danya "docs" "Document this work in Docs/ (feature\u2192Version/, bug\u2192Bugs/). Only write docs, no code changes." || true
11535
+
11536
+ # Stage 6: Harness Evolution
11537
+ echo ""; echo "=== Stage 6: Harness Evolution ==="
11538
+ run_danya "harness" "Check if any compile/lint/review errors were fixed. If so, update .danya/rules/ via /fix-harness. If none, output 'No harness update needed'." || true
11539
+
11540
+ echo ""
11541
+ echo "========================================="
11542
+ echo " Auto-Work Complete"
11543
+ echo " Score: $best_score/100 | Type: $TYPE"
11544
+ echo " Logs: $LOG_DIR"
11545
+ echo "========================================="
11546
+ `;
11547
+ var SCRIPT_PARALLEL_WAVE = `#!/bin/bash
11548
+ # parallel-wave.sh \u2014 Wave-based parallel execution with independent worktrees.
11549
+ # Each task runs in its own worktree with its own danya -p instance.
11550
+ set -euo pipefail
11551
+
11552
+ TASKS_DIR="\${1:?Usage: parallel-wave.sh <tasks-dir>}"
11553
+ PROJECT_ROOT="$(cd "$(dirname "$0")/../.." && pwd)"
11554
+ WORKTREE_BASE="$PROJECT_ROOT/.worktrees"
11555
+ RESULTS_FILE="$TASKS_DIR/results.tsv"
11556
+ LOG_DIR="$TASKS_DIR/logs"
11557
+ DANYA_CMD="\${DANYA_CMD:-danya}"
11558
+ MODEL="\${MODEL:-sonnet}"
11559
+ mkdir -p "$WORKTREE_BASE" "$LOG_DIR"
11560
+ echo -e "task\\twave\\tstatus\\tduration\\tnotes" > "$RESULTS_FILE"
11561
+
11562
+ # Parse tasks
11563
+ declare -A TASK_DEPS TASK_FILES TASK_STATUS
11564
+ echo "=== Parsing tasks ==="
11565
+ for f in "$TASKS_DIR"/task-*.md; do
11566
+ [[ -f "$f" ]] || continue
11567
+ basename=$(basename "$f" .md); task_id="\${basename#task-}"
11568
+ deps=$(sed -n '/^---$/,/^---$/p' "$f" | grep "^depends:" | sed 's/depends: *\\[//;s/\\]//;s/,/ /g;s/"//g;s/ //g' || echo "")
11569
+ TASK_DEPS[$task_id]="$deps"; TASK_FILES[$task_id]="$f"; TASK_STATUS[$task_id]="pending"
11570
+ echo " task-$task_id: depends=[\${deps:-none}]"
11571
+ done
11572
+ [[ \${#TASK_FILES[@]} -eq 0 ]] && { echo "No tasks found"; exit 0; }
11573
+
11574
+ # Compute waves (topological sort)
11575
+ declare -a WAVES=()
11576
+ compute_waves() {
11577
+ local -A remaining_deps status; local all_ids=("\${!TASK_FILES[@]}")
11578
+ for id in "\${all_ids[@]}"; do remaining_deps[$id]="\${TASK_DEPS[$id]}"; status[$id]="waiting"; done
11579
+ local wave_num=0 total_done=0 total=\${#all_ids[@]}
11580
+ while [[ $total_done -lt $total ]]; do
11581
+ local wave_tasks=(); wave_num=$((wave_num + 1))
11582
+ for id in "\${all_ids[@]}"; do
11583
+ [[ "\${status[$id]}" != "waiting" ]] && continue
11584
+ local deps="\${remaining_deps[$id]}" all_met=true
11585
+ if [[ -n "$deps" ]]; then
11586
+ for dep in $deps; do [[ "\${status[$dep]:-waiting}" != "done" ]] && all_met=false && break; done
11587
+ fi
11588
+ $all_met && wave_tasks+=("$id")
11589
+ done
11590
+ [[ \${#wave_tasks[@]} -eq 0 ]] && { echo "[ERROR] Circular dependency!"; exit 1; }
11591
+ for id in "\${wave_tasks[@]}"; do status[$id]="done"; total_done=$((total_done + 1)); done
11592
+ WAVES+=("$(IFS=' '; echo "\${wave_tasks[*]}")"); echo " Wave $wave_num: \${wave_tasks[*]}"
11593
+ done
11594
+ }
11595
+ echo ""; echo "=== Computing waves ==="; compute_waves
11596
+ echo "Total: \${#WAVES[@]} wave(s), \${#TASK_FILES[@]} task(s)"
11597
+
11598
+ # Execute task in worktree
11599
+ execute_task() {
11600
+ local task_id="$1" wave_num="$2" task_file="\${TASK_FILES[$task_id]}"
11601
+ local wt_path="$WORKTREE_BASE/task-\${task_id}" branch="wt/task-\${task_id}"
11602
+ local log_file="$LOG_DIR/task-\${task_id}.log" start_time=$(date +%s)
11603
+ echo " [task-$task_id] Starting..."
11604
+ git worktree add -b "$branch" "$wt_path" HEAD >> "$log_file" 2>&1 || {
11605
+ echo -e "$task_id\\t$wave_num\\tfailed\\t0\\tworktree failed" >> "$RESULTS_FILE"; return 1; }
11606
+ local task_content=$(sed '1,/^---$/d; /^---$/,$!d; 1d' "$task_file")
11607
+ ( cd "$wt_path" && $DANYA_CMD -p "$task_content" --allowedTools "Edit,Write,Read,Bash,Grep,Glob" --max-turns 30 >> "$log_file" 2>&1 ) || true
11608
+ local build_ok=false; (cd "$wt_path" && make build >> "$log_file" 2>&1) && build_ok=true
11609
+ local duration=$(( $(date +%s) - start_time ))
11610
+ if $build_ok; then
11611
+ if git merge "$branch" --no-edit >> "$log_file" 2>&1; then
11612
+ echo " [task-$task_id] PASS (\${duration}s)"; echo -e "$task_id\\t$wave_num\\tpassed\\t$duration\\tmerged" >> "$RESULTS_FILE"
11613
+ else
11614
+ git merge --abort 2>/dev/null || true
11615
+ echo " [task-$task_id] FAIL (merge conflict)"; echo -e "$task_id\\t$wave_num\\tfailed\\t$duration\\tmerge conflict" >> "$RESULTS_FILE"
11616
+ fi
11617
+ else
11618
+ echo " [task-$task_id] FAIL (build)"; echo -e "$task_id\\t$wave_num\\tfailed\\t$duration\\tbuild failed" >> "$RESULTS_FILE"
11619
+ fi
11620
+ git worktree remove "$wt_path" --force 2>/dev/null || true; git branch -D "$branch" 2>/dev/null || true
11621
+ }
11622
+
11623
+ # Execute waves
11624
+ echo ""; echo "=== Executing waves ==="
11625
+ wave_num=0
11626
+ for wave in "\${WAVES[@]}"; do
11627
+ wave_num=$((wave_num + 1)); IFS=' ' read -ra tasks <<< "$wave"
11628
+ echo ""; echo "--- Wave $wave_num: \${tasks[*]} ---"
11629
+ for task_id in "\${tasks[@]}"; do
11630
+ for dep in \${TASK_DEPS[$task_id]}; do
11631
+ [[ "\${TASK_STATUS[$dep]}" == "failed" ]] && {
11632
+ echo " [task-$task_id] SKIP (dep failed)"
11633
+ echo -e "$task_id\\t$wave_num\\tskipped\\t0\\tdep failed" >> "$RESULTS_FILE"
11634
+ TASK_STATUS[$task_id]="failed"; }
11635
+ done
11636
+ done
11637
+ pids=()
11638
+ for task_id in "\${tasks[@]}"; do
11639
+ [[ "\${TASK_STATUS[$task_id]}" == "failed" ]] && continue
11640
+ execute_task "$task_id" "$wave_num" &; pids+=($!)
11641
+ done
11642
+ for pid in "\${pids[@]}"; do wait "$pid" 2>/dev/null || true; done
11643
+ done
11644
+
11645
+ echo ""; echo "=== Results ==="; cat "$RESULTS_FILE"
11646
+ passed=$(grep -c "passed" "$RESULTS_FILE" 2>/dev/null || echo "0")
11647
+ failed=$(grep -c "failed" "$RESULTS_FILE" 2>/dev/null || echo "0")
11648
+ echo "Summary: $passed passed, $failed failed"
11649
+ rmdir "$WORKTREE_BASE" 2>/dev/null || true
11650
+ [[ "$failed" -gt 0 ]] && exit 1; exit 0
11651
+ `;
11652
+ var SCRIPT_RED_BLUE = `#!/bin/bash
11653
+ # red-blue-loop.sh \u2014 Adversarial: red team finds bugs, blue team fixes, loop until clean.
11654
+ set -uo pipefail
11655
+
11656
+ SCOPE="\${1:-.}"
11657
+ PROJECT_ROOT="$(cd "$(dirname "$0")/../.." && pwd)"
11658
+ DANYA_CMD="\${DANYA_CMD:-danya}"
11659
+ MODEL="\${MODEL:-sonnet}"
11660
+ MAX_ROUNDS="\${MAX_ROUNDS:-5}"
11661
+ CACHE_DIR="$PROJECT_ROOT/.danya/.cache/red-blue"
11662
+ TIMESTAMP=$(date +%Y%m%d_%H%M%S)
11663
+ LOG_DIR="$CACHE_DIR/$TIMESTAMP"
11664
+ mkdir -p "$LOG_DIR"
11665
+
11666
+ echo "========================================="
11667
+ echo " Red-Blue Adversarial Loop"
11668
+ echo " Scope: $SCOPE | Max rounds: $MAX_ROUNDS"
11669
+ echo "========================================="
11670
+
11671
+ for ((round=1; round<=MAX_ROUNDS; round++)); do
11672
+ echo ""; echo "=== Round $round/$MAX_ROUNDS ==="
11673
+
11674
+ # Red Team: find bugs
11675
+ echo " [RED] Analyzing..."
11676
+ DIFF=$(cd "$PROJECT_ROOT" && git diff HEAD~1 2>/dev/null || echo "No diff available")
11677
+ $DANYA_CMD -p "You are the RED TEAM. Read .danya/agents/red-team.md for your role.
11678
+ Analyze the code in scope: $SCOPE
11679
+ Recent changes: $DIFF
11680
+ Find all bugs. Output format: BUG-N [SEVERITY]: description" \\
11681
+ --model "$MODEL" --max-turns 15 \\
11682
+ --allowedTools "Read,Grep,Glob,Bash" \\
11683
+ > "$LOG_DIR/red-$round.log" 2>&1 || true
11684
+
11685
+ # Count bugs
11686
+ bug_count=$(grep -c "^BUG-" "$LOG_DIR/red-$round.log" 2>/dev/null || echo "0")
11687
+ echo " [RED] Found $bug_count bug(s)"
11688
+
11689
+ [[ "$bug_count" -eq 0 ]] && { echo " [CLEAN] Zero bugs found. Stopping."; break; }
11690
+
11691
+ # Blue Team: fix bugs
11692
+ echo " [BLUE] Fixing..."
11693
+ $DANYA_CMD -p "You are the BLUE TEAM. Read .danya/agents/blue-team.md for your role.
11694
+ Fix bugs from the red team report:
11695
+ $(cat "$LOG_DIR/red-$round.log")
11696
+ Priority: CRITICAL > HIGH > MEDIUM. Minimal fixes only." \\
11697
+ --model "$MODEL" --max-turns 20 \\
11698
+ --allowedTools "Edit,Write,Read,Bash,Grep,Glob" \\
11699
+ > "$LOG_DIR/blue-$round.log" 2>&1 || true
11700
+
11701
+ # Build check
11702
+ echo " [CHECK] Building..."
11703
+ if (cd "$PROJECT_ROOT" && make build > "$LOG_DIR/build-$round.log" 2>&1); then
11704
+ echo " [PASS] Build OK. Committing fixes."
11705
+ (cd "$PROJECT_ROOT" && git add -A && git commit -m "<fix>(red-blue) round $round fixes" 2>/dev/null) || true
11706
+ else
11707
+ echo " [FAIL] Build failed. Reverting."
11708
+ (cd "$PROJECT_ROOT" && git checkout . 2>/dev/null) || true
11709
+ break
11710
+ fi
11711
+ done
11712
+
11713
+ # Skill Extraction
11714
+ echo ""; echo "=== Skill Extraction ==="
11715
+ $DANYA_CMD -p "You are the SKILL EXTRACTOR. Read .danya/agents/skill-extractor.md for your role.
11716
+ Analyze logs in $LOG_DIR/ (red-*.log, blue-*.log).
11717
+ Extract patterns (2+ occurrences) to .danya/rules/ and .danya/memory/." \\
11718
+ --model "$MODEL" --max-turns 10 \\
11719
+ --allowedTools "Read,Write,Grep,Glob" \\
11720
+ > "$LOG_DIR/skill-extract.log" 2>&1 || true
11721
+
11722
+ echo ""; echo "=== Red-Blue Complete ==="; echo "Logs: $LOG_DIR"
11723
+ `;
11724
+ var SCRIPT_ORCHESTRATOR = `#!/bin/bash
11725
+ # orchestrator.sh \u2014 Auto-research iteration: AI codes \u2192 verify \u2192 commit/revert \xD7 N rounds.
11726
+ set -uo pipefail
11727
+
11728
+ TASK_FILE="\${1:?Usage: orchestrator.sh <task-file.md>}"
11729
+ PROJECT_ROOT="$(cd "$(dirname "$0")/../.." && pwd)"
11730
+ DANYA_CMD="\${DANYA_CMD:-danya}"
11731
+ MODEL="\${MODEL:-sonnet}"
11732
+ MAX_ITERATIONS="\${MAX_ITERATIONS:-20}"
11733
+ CIRCUIT_BREAK="\${CIRCUIT_BREAK:-5}"
11734
+ CACHE_DIR="$PROJECT_ROOT/.danya/.cache/orchestrator"
11735
+ TIMESTAMP=$(date +%Y%m%d_%H%M%S)
11736
+ LOG_DIR="$CACHE_DIR/$TIMESTAMP"
11737
+ mkdir -p "$LOG_DIR"
11738
+
11739
+ BASELINE_FILE="$LOG_DIR/baseline.txt"
11740
+ RESULTS_FILE="$LOG_DIR/results.tsv"
11741
+ echo "0" > "$BASELINE_FILE"
11742
+ echo -e "iter\\tscore\\tbaseline\\tstatus\\ttimestamp" > "$RESULTS_FILE"
11743
+
11744
+ echo "========================================="
11745
+ echo " Danya Orchestrator (Auto-Research)"
11746
+ echo " Task: $TASK_FILE"
11747
+ echo " Max iterations: $MAX_ITERATIONS"
11748
+ echo " Circuit break: $CIRCUIT_BREAK consecutive failures"
11749
+ echo "========================================="
11750
+
11751
+ TASK_CONTENT=$(cat "$TASK_FILE")
11752
+ consecutive_failures=0
11753
+
11754
+ for ((iter=1; iter<=MAX_ITERATIONS; iter++)); do
11755
+ echo ""; echo "=== Iteration $iter/$MAX_ITERATIONS ==="
11756
+ baseline=$(cat "$BASELINE_FILE")
11757
+
11758
+ # Code
11759
+ $DANYA_CMD -p "You are a code-writer. Read .danya/agents/code-writer.md for your role.
11760
+ Task: $TASK_CONTENT
11761
+ Iteration $iter. Current baseline: $baseline/100. Improve the score." \\
11762
+ --model "$MODEL" --max-turns 20 \\
11763
+ --allowedTools "Edit,Write,Read,Bash,Grep,Glob" \\
11764
+ > "$LOG_DIR/iter-$iter.log" 2>&1 || true
11765
+
11766
+ # Verify (score 0-100)
11767
+ score=0
11768
+ if [ -f "$PROJECT_ROOT/.danya/scripts/verify-server.sh" ]; then
11769
+ score=$(bash "$PROJECT_ROOT/.danya/scripts/verify-server.sh" 2>/dev/null || echo "0")
11770
+ elif [ -f Makefile ]; then
11771
+ (cd "$PROJECT_ROOT" && make build > /dev/null 2>&1) && score=40
11772
+ (cd "$PROJECT_ROOT" && make lint > /dev/null 2>&1) && score=$((score + 20))
11773
+ (cd "$PROJECT_ROOT" && make test > /dev/null 2>&1) && score=$((score + 40))
11774
+ fi
11775
+
11776
+ echo " Score: $score (baseline: $baseline)"
11777
+
11778
+ if [[ "$score" -ge "$baseline" ]]; then
11779
+ echo " [COMMIT] Score >= baseline"
11780
+ (cd "$PROJECT_ROOT" && git add -A && git commit -m "<feat>(orchestrator) iter $iter score $score" 2>/dev/null) || true
11781
+ echo "$score" > "$BASELINE_FILE"
11782
+ consecutive_failures=0
11783
+ echo -e "$iter\\t$score\\t$score\\tpass\\t$(date +%H:%M:%S)" >> "$RESULTS_FILE"
11784
+ else
11785
+ echo " [REVERT] Score < baseline"
11786
+ (cd "$PROJECT_ROOT" && git checkout . 2>/dev/null) || true
11787
+ consecutive_failures=$((consecutive_failures + 1))
11788
+ echo -e "$iter\\t$score\\t$baseline\\tfail\\t$(date +%H:%M:%S)" >> "$RESULTS_FILE"
11789
+ fi
11790
+
11791
+ if [[ $consecutive_failures -ge $CIRCUIT_BREAK ]]; then
11792
+ echo " [CIRCUIT BREAK] $CIRCUIT_BREAK consecutive failures. Stopping."
11793
+ break
11794
+ fi
11795
+ done
11796
+
11797
+ echo ""; echo "========================================="
11798
+ echo " Orchestrator Complete"
11799
+ echo " Final baseline: $(cat "$BASELINE_FILE")/100"
11800
+ echo " Results: $RESULTS_FILE"
11801
+ echo "========================================="
11802
+ cat "$RESULTS_FILE"
11803
+ `;
11804
+ var SCRIPT_VERIFY_SERVER = `#!/bin/bash
11805
+ # verify-server.sh \u2014 Quantitative server verification (0-100 points).
11806
+ # build=40, lint=20, test=40
11807
+ set -uo pipefail
11808
+ PROJECT_ROOT="\${1:-$(pwd)}"
11809
+ score=0
11810
+ (cd "$PROJECT_ROOT" && make build > /dev/null 2>&1) && score=40 || { echo "$score"; exit 0; }
11811
+ (cd "$PROJECT_ROOT" && make lint > /dev/null 2>&1) && score=$((score + 20))
11812
+ if (cd "$PROJECT_ROOT" && make test > /dev/null 2>&1); then score=$((score + 40))
11813
+ else score=$((score + 10)); fi # partial credit
11814
+ echo "$score"
11815
+ `;
11816
+ var SCRIPT_VERIFY_CLIENT = `#!/bin/bash
11817
+ # verify-client.sh \u2014 Quantitative client verification using CSharp syntax check.
11818
+ set -uo pipefail
11819
+ PROJECT_ROOT="\${1:-$(pwd)}"
11820
+ CHECKER="$PROJECT_ROOT/.danya/tools/CSharpSyntaxChecker"
11821
+ MODIFIED_CS=$(cd "$PROJECT_ROOT" && git diff --name-only HEAD 2>/dev/null | grep '\\.cs$' || echo "")
11822
+ [[ -z "$MODIFIED_CS" ]] && { echo "100"; exit 0; }
11823
+ total=$(echo "$MODIFIED_CS" | wc -l)
11824
+ errors=0
11825
+ if [[ -x "$CHECKER" ]]; then
11826
+ for f in $MODIFIED_CS; do
11827
+ "$CHECKER" "$PROJECT_ROOT/$f" > /dev/null 2>&1 || errors=$((errors + 1))
11828
+ done
11829
+ else
11830
+ echo "80"; exit 0 # fallback if checker not available
11831
+ fi
11832
+ pass_rate=$(( (total - errors) * 100 / total ))
11833
+ echo "$pass_rate"
11834
+ `;
11835
+ var SCRIPT_CHECK_ENV = `#!/bin/bash
11836
+ # check-env.sh \u2014 Validate environment dependencies for Danya tools.
11837
+ set -uo pipefail
11838
+ ok=true
11839
+ check() { command -v "$1" > /dev/null 2>&1 && echo " [OK] $1" || { echo " [MISSING] $1 \u2014 $2"; ok=false; }; }
11840
+ echo "=== Danya Environment Check ==="
11841
+ check danya "Install: npm install -g @danya-ai/cli"
11842
+ check git "Install: https://git-scm.com"
11843
+ check make "Install: build-essential (Linux) or MinGW (Windows)"
11844
+ check python3 "Install: https://python.org"
11845
+ command -v go > /dev/null 2>&1 && echo " [OK] go" || echo " [SKIP] go (only needed for Go server projects)"
11846
+ command -v dotnet > /dev/null 2>&1 && echo " [OK] dotnet" || echo " [SKIP] dotnet (only needed for C# syntax checking)"
11847
+ echo ""
11848
+ $ok && echo "All required dependencies found." || echo "Some dependencies missing. Install them before using shell-enforced scripts."
11849
+ `;
11850
+
11851
+ // src/templates/bundles/agents.ts
11852
+ var AGENT_CODE_WRITER = `# Code Writer Agent
11853
+
11854
+ You are a focused coding agent. Your job is to modify files within the allowed scope.
11855
+
11856
+ ## Tools
11857
+ Edit, Write, Read, Bash, Grep, Glob
11858
+
11859
+ ## Rules
11860
+ 1. **Scope**: Only modify files listed in the task. Never touch files outside scope.
11861
+ 2. **Read first**: Always read a file before modifying it.
11862
+ 3. **Compile-driven**: After each file modification, run compile/build to verify.
11863
+ 4. **Minimal changes**: Make the smallest change that achieves the goal.
11864
+ 5. **No refactoring**: Don't clean up surrounding code. Only change what's needed.
11865
+ 6. **Forbidden files**: Never modify files in .danya/guard-rules.json forbidden zones.
11866
+
11867
+ ## Workflow
11868
+ 1. Read the task requirements
11869
+ 2. Read all files in scope
11870
+ 3. Plan changes (mentally, don't output a plan document)
11871
+ 4. Modify files one at a time
11872
+ 5. Compile after each file
11873
+ 6. If compile fails, fix immediately before moving to next file
11874
+ `;
11875
+ var AGENT_CODE_REVIEWER = `# Code Reviewer Agent
11876
+
11877
+ You are a read-only code review agent. You find issues but NEVER modify code.
11878
+
11879
+ ## Tools
11880
+ Read, Grep, Glob, Bash (read-only commands only)
11881
+
11882
+ ## Scoring
11883
+ Start at 100 points. Deduct for issues found:
11884
+ - CRITICAL: -30 (build failure, forbidden file change, data corruption risk, security hole)
11885
+ - HIGH: -10 (unhandled error, race condition, missing validation, wrong API usage)
11886
+ - MEDIUM: -3 (naming violation, missing log, style issue, dead code)
11887
+
11888
+ Pass threshold: >= 80 AND zero CRITICAL
11889
+
11890
+ ## What to Check
11891
+ 1. **Architecture**: forbidden file edits, cross-layer imports, dependency direction
11892
+ 2. **Coding standards**: engine-specific rules from .danya/rules/
11893
+ 3. **Logic**: error propagation, null safety, concurrency, edge cases
11894
+ 4. **Harness completeness**: were errors fixed? are rules updated?
11895
+
11896
+ ## Output Format
11897
+ \`\`\`
11898
+ ISSUE-1 [CRITICAL]: description \u2014 file:line
11899
+ ISSUE-2 [HIGH]: description \u2014 file:line
11900
+ ...
11901
+ REVIEW_SCORE: <number>
11902
+ \`\`\`
11903
+ `;
11904
+ var AGENT_RED_TEAM = `# Red Team Agent
11905
+
11906
+ You are an adversarial tester. Assume the code is WRONG until proven right.
11907
+
11908
+ ## Tools
11909
+ Read, Grep, Glob, Bash (read-only)
11910
+
11911
+ ## Focus Areas
11912
+ - **Edge cases**: nil, null, empty, zero, negative, max values, overflow
11913
+ - **Error paths**: what if the DB call fails? what if the RPC times out?
11914
+ - **Implicit assumptions**: does the code assume input is always valid?
11915
+ - **Concurrency**: race conditions, deadlocks, goroutine leaks
11916
+ - **Security**: injection, authentication bypass, privilege escalation
11917
+
11918
+ ## Rules
11919
+ 1. Never modify code. Only read and analyze.
11920
+ 2. For each bug found, describe: trigger condition + expected consequence.
11921
+ 3. Rate severity: CRITICAL / HIGH / MEDIUM.
11922
+ 4. Don't report style issues \u2014 only real bugs.
11923
+
11924
+ ## Output Format
11925
+ \`\`\`
11926
+ BUG-1 [CRITICAL]: description
11927
+ Location: file:line
11928
+ Trigger: condition that causes the bug
11929
+ Consequence: what happens when triggered
11930
+
11931
+ BUG-2 [HIGH]: description
11932
+ ...
11933
+ \`\`\`
11934
+
11935
+ If no bugs found, output: "NO BUGS FOUND \u2014 code review passed."
11936
+ `;
11937
+ var AGENT_BLUE_TEAM = `# Blue Team Agent
11938
+
11939
+ You are a defensive programmer. Fix bugs found by the Red Team.
11940
+
11941
+ ## Tools
11942
+ Edit, Write, Read, Bash, Grep, Glob
11943
+
11944
+ ## Rules
11945
+ 1. Fix bugs in priority order: CRITICAL > HIGH > MEDIUM.
11946
+ 2. **Minimal fixes**: add a nil check, not a refactor. Add a mutex, not a redesign.
11947
+ 3. **Defensive coding**: add checks, don't assume valid input.
11948
+ 4. **Verify each fix**: compile after each change.
11949
+ 5. **Skip false positives**: if a bug report is wrong, explain why and skip.
11950
+ 6. **Don't introduce new features**: only fix the bugs in the report.
11951
+
11952
+ ## Workflow
11953
+ 1. Read the Red Team report
11954
+ 2. For each bug (priority order):
11955
+ a. Read the file and understand the context
11956
+ b. Apply minimal fix
11957
+ c. Compile to verify
11958
+ 3. After all fixes: run full build + test
11959
+ `;
11960
+ var AGENT_SKILL_EXTRACTOR = `# Skill Extractor Agent
11961
+
11962
+ You are a knowledge analyst. Extract reusable patterns from development logs.
11963
+
11964
+ ## Tools
11965
+ Read, Write, Grep, Glob
11966
+
11967
+ ## What to Extract
11968
+ Analyze iteration logs and look for:
11969
+
11970
+ 1. **Repeated failure modes** \u2192 add to .danya/rules/known-pitfalls.md
11971
+ - Same error appearing 2+ times across iterations
11972
+ - Format: error description + correct approach
11973
+
11974
+ 2. **Repeated fix patterns** \u2192 add to .danya/rules/golden-principles.md
11975
+ - Same fix applied 2+ times
11976
+ - Format: principle + example
11977
+
11978
+ 3. **Domain knowledge** \u2192 write to .danya/memory/
11979
+ - Module architecture, API patterns, data flow
11980
+ - Only if discovered through iterations, not already documented
11981
+
11982
+ ## Rules
11983
+ 1. **Evidence-based**: only extract patterns with 2+ occurrences
11984
+ 2. **Concise**: one error = one rule, keep it short
11985
+ 3. **Actionable**: each rule must have a correct-usage example
11986
+ 4. **No duplicates**: check existing rules before adding
11987
+ 5. **Line limit**: keep each rule file under 550 lines
11988
+ `;
11989
+ var TEMPLATE_PROGRAM = `# Task Definition Template
11990
+
11991
+ Use this template to define a task for the orchestrator.
11992
+
11993
+ ---
11994
+
11995
+ ## Goal
11996
+ [Quantified objective. Example: "Increase test coverage from 15% to 60%"]
11997
+
11998
+ ## Modifiable Scope
11999
+ [Files/directories the AI can modify]
12000
+ - servers/logic_server/internal/slot/
12001
+ - common/slot/
12002
+
12003
+ ## Forbidden Files
12004
+ [Files the AI must never touch]
12005
+ - orm/
12006
+ - common/config/cfg_*.go
12007
+ - base/
12008
+
12009
+ ## Quantitative Metrics
12010
+ [How to score each iteration, total 100 points]
12011
+ - make build passes: 40 points
12012
+ - make lint passes: 20 points
12013
+ - make test passes: 40 points (10 partial if some tests fail)
12014
+
12015
+ ## Context
12016
+ [Background knowledge to help the AI]
12017
+ - This module handles slot machine game logic
12018
+ - RPC handlers are in internal/slot/handler.go
12019
+ - Config is auto-generated from Excel, read-only
12020
+ `;
12021
+
12022
+ // src/templates/bundles/monitor.ts
12023
+ var MONITOR_LOG_TOOL_USE = `#!/usr/bin/env python3
12024
+ """PostToolUse Hook \u2014 record every tool call to JSONL."""
12025
+ import json, sys, time
12026
+ from pathlib import Path
12027
+ DATA_DIR = Path(".danya/monitor/data"); DATA_DIR.mkdir(parents=True, exist_ok=True)
12028
+ try:
12029
+ data = json.load(sys.stdin)
12030
+ entry = {"timestamp": time.time(), "session_id": data.get("session_id",""), "tool_name": data.get("tool_name",""), "tool_input_keys": list(data.get("tool_input",{}).keys()), "cwd": data.get("cwd","")}
12031
+ with open(DATA_DIR / "tool-usage.jsonl", "a") as f: f.write(json.dumps(entry) + "\\n")
12032
+ except: pass
12033
+ `;
12034
+ var MONITOR_LOG_SESSION_END = `#!/usr/bin/env python3
12035
+ """Stop Hook \u2014 record session end to JSONL."""
12036
+ import json, sys, time
12037
+ from pathlib import Path
12038
+ DATA_DIR = Path(".danya/monitor/data"); DATA_DIR.mkdir(parents=True, exist_ok=True)
12039
+ try:
12040
+ data = json.load(sys.stdin)
12041
+ entry = {"timestamp": time.time(), "session_id": data.get("session_id",""), "cwd": data.get("cwd",""), "stop_reason": data.get("stop_hook_reason","unknown")}
12042
+ with open(DATA_DIR / "sessions.jsonl", "a") as f: f.write(json.dumps(entry) + "\\n")
12043
+ except: pass
12044
+ `;
12045
+ var MONITOR_LOG_VERIFY = `#!/usr/bin/env python3
12046
+ """Verify metrics \u2014 call with: python log-verify.py start|end <type> [result]"""
12047
+ import json, sys, time
12048
+ from pathlib import Path
12049
+ DATA_DIR = Path(".danya/monitor/data"); DATA_DIR.mkdir(parents=True, exist_ok=True)
12050
+ STATE = DATA_DIR / ".verify-state.json"
12051
+ def start(t): STATE.write_text(json.dumps({"type":t,"start_time":time.time()}))
12052
+ def end(t,r):
12053
+ if not STATE.exists(): return
12054
+ s = json.loads(STATE.read_text()); d = time.time() - s.get("start_time", time.time())
12055
+ with open(DATA_DIR / "verify-metrics.jsonl", "a") as f:
12056
+ f.write(json.dumps({"timestamp":time.time(),"type":t,"result":r,"duration_seconds":round(d,1)}) + "\\n")
12057
+ STATE.unlink(missing_ok=True)
12058
+ if len(sys.argv) >= 3:
12059
+ if sys.argv[1] == "start": start(sys.argv[2])
12060
+ elif sys.argv[1] == "end": end(sys.argv[2], sys.argv[3] if len(sys.argv)>3 else "UNKNOWN")
12061
+ `;
12062
+ var MONITOR_LOG_BUGFIX = `#!/usr/bin/env python3
12063
+ """Bugfix metrics \u2014 call with: python log-bugfix.py start|round|end <args>"""
12064
+ import json, sys, time
12065
+ from pathlib import Path
12066
+ DATA_DIR = Path(".danya/monitor/data"); DATA_DIR.mkdir(parents=True, exist_ok=True)
12067
+ STATE = DATA_DIR / ".bugfix-state.json"
12068
+ def start(desc): STATE.write_text(json.dumps({"description":desc,"start_time":time.time(),"rounds":[]}))
12069
+ def round_log(n,r):
12070
+ if not STATE.exists(): return
12071
+ s = json.loads(STATE.read_text()); s["rounds"].append({"round":int(n),"result":r,"timestamp":time.time()})
12072
+ STATE.write_text(json.dumps(s))
12073
+ def end(n,r):
12074
+ if not STATE.exists(): return
12075
+ s = json.loads(STATE.read_text()); d = time.time() - s.get("start_time", time.time())
12076
+ with open(DATA_DIR / "bugfix-metrics.jsonl", "a") as f:
12077
+ f.write(json.dumps({"timestamp":time.time(),"description":s.get("description",""),"total_rounds":int(n),"final_result":r,"duration_seconds":round(d,1),"rounds":s.get("rounds",[])}) + "\\n")
12078
+ STATE.unlink(missing_ok=True)
12079
+ if len(sys.argv) >= 3:
12080
+ a = sys.argv[1]
12081
+ if a == "start": start(sys.argv[2])
12082
+ elif a == "round": round_log(sys.argv[2], sys.argv[3] if len(sys.argv)>3 else "UNKNOWN")
12083
+ elif a == "end": end(sys.argv[2], sys.argv[3] if len(sys.argv)>3 else "UNKNOWN")
12084
+ `;
12085
+ var MONITOR_LOG_REVIEW = `#!/usr/bin/env python3
12086
+ """Review metrics \u2014 call with: python log-review.py <score> <result> [critical] [high] [medium]"""
12087
+ import json, sys, time
12088
+ from pathlib import Path
12089
+ DATA_DIR = Path(".danya/monitor/data"); DATA_DIR.mkdir(parents=True, exist_ok=True)
12090
+ if len(sys.argv) >= 3:
12091
+ entry = {"timestamp":time.time(),"score":int(sys.argv[1]),"result":sys.argv[2],"critical":int(sys.argv[3]) if len(sys.argv)>3 else 0,"high":int(sys.argv[4]) if len(sys.argv)>4 else 0,"medium":int(sys.argv[5]) if len(sys.argv)>5 else 0}
12092
+ with open(DATA_DIR / "review-metrics.jsonl", "a") as f: f.write(json.dumps(entry) + "\\n")
12093
+ `;
12094
+
12095
+ // src/templates/index.ts
12096
+ function getSharedToolsBundle() {
12097
+ return {
12098
+ // Shell-enforced scripts
12099
+ "scripts/auto-work-loop.sh": SCRIPT_AUTO_WORK_LOOP,
12100
+ "scripts/parallel-wave.sh": SCRIPT_PARALLEL_WAVE,
12101
+ "scripts/red-blue-loop.sh": SCRIPT_RED_BLUE,
12102
+ "scripts/orchestrator.sh": SCRIPT_ORCHESTRATOR,
12103
+ "scripts/verify-server.sh": SCRIPT_VERIFY_SERVER,
12104
+ "scripts/verify-client.sh": SCRIPT_VERIFY_CLIENT,
12105
+ "scripts/check-env.sh": SCRIPT_CHECK_ENV,
12106
+ // Agent role specs
12107
+ "agents/code-writer.md": AGENT_CODE_WRITER,
12108
+ "agents/code-reviewer.md": AGENT_CODE_REVIEWER,
12109
+ "agents/red-team.md": AGENT_RED_TEAM,
12110
+ "agents/blue-team.md": AGENT_BLUE_TEAM,
12111
+ "agents/skill-extractor.md": AGENT_SKILL_EXTRACTOR,
12112
+ // Task template
12113
+ "templates/program-template.md": TEMPLATE_PROGRAM,
12114
+ // Monitor data collection
12115
+ "monitor/log-tool-use.py": MONITOR_LOG_TOOL_USE,
12116
+ "monitor/log-session-end.py": MONITOR_LOG_SESSION_END,
12117
+ "monitor/log-verify.py": MONITOR_LOG_VERIFY,
12118
+ "monitor/log-bugfix.py": MONITOR_LOG_BUGFIX,
12119
+ "monitor/log-review.py": MONITOR_LOG_REVIEW
12120
+ };
12121
+ }
12122
+ function getBundleForEngine(engine, serverLanguage) {
12123
+ let engineBundle;
12124
+ if (engine === "unity") engineBundle = getUnityBundle();
12125
+ else if (engine === "unreal") engineBundle = getUnrealBundle();
12126
+ else if (engine === "godot") engineBundle = getGodotBundle();
12127
+ else if (serverLanguage === "go") engineBundle = getGoServerBundle();
12128
+ else {
12129
+ engineBundle = {
12130
+ "rules/known-pitfalls.md": RULE_KNOWN_PITFALLS,
12131
+ "rules/architecture-boundaries.md": RULE_ARCHITECTURE_BOUNDARIES,
12132
+ "commands/auto-work.md": CMD_AUTO_WORK,
12133
+ "commands/auto-bugfix.md": CMD_AUTO_BUGFIX,
12134
+ "commands/review.md": CMD_REVIEW,
12135
+ "commands/fix-harness.md": CMD_FIX_HARNESS,
12136
+ "commands/plan.md": CMD_PLAN,
12137
+ "commands/verify.md": CMD_VERIFY,
12138
+ "commands/parallel-execute.md": CMD_PARALLEL_EXECUTE,
12139
+ "memory/MEMORY.md": MEMORY_INDEX,
12140
+ "hooks/constitution-guard.sh": HOOK_CONSTITUTION_GUARD,
12141
+ "hooks/pre-commit.sh": HOOK_PRE_COMMIT,
12142
+ "hooks/post-commit.sh": HOOK_POST_COMMIT,
12143
+ "hooks/push-gate.sh": HOOK_PUSH_GATE,
12144
+ "hooks/harness-evolution.sh": HOOK_HARNESS_EVOLUTION
12145
+ };
12146
+ }
12147
+ return { ...engineBundle, ...getSharedToolsBundle() };
12148
+ }
12149
+
12150
+ // src/services/harness/consolidate.ts
12151
+ import { existsSync as existsSync10, readdirSync as readdirSync4, readFileSync as readFileSync8, writeFileSync as writeFileSync5, mkdirSync as mkdirSync6, statSync as statSync11 } from "fs";
12152
+ import { join as join8 } from "path";
12153
+ var CONSOLIDATION_DIRS = ["rules", "commands", "memory", "skills", "hooks"];
12154
+ var LEGACY_DIRS = [".claude", ".codex"];
12155
+ function consolidateLegacyIntoDanya(projectDir) {
12156
+ const danyaDir = join8(projectDir, ".danya");
12157
+ const result = { merged: [], skipped: [], sources: [] };
12158
+ for (const legacyName of LEGACY_DIRS) {
12159
+ const legacyDir = join8(projectDir, legacyName);
12160
+ if (!existsSync10(legacyDir)) continue;
12161
+ result.sources.push(legacyName);
12162
+ for (const subDir of CONSOLIDATION_DIRS) {
12163
+ const legacySub = join8(legacyDir, subDir);
12164
+ if (!existsSync10(legacySub)) continue;
12165
+ const danyaSub = join8(danyaDir, subDir);
12166
+ mkdirSync6(danyaSub, { recursive: true });
12167
+ copyMissing(legacySub, danyaSub, subDir, result);
12168
+ }
12169
+ mergeSettings(legacyDir, danyaDir, result);
12170
+ }
12171
+ return result;
12172
+ }
12173
/**
 * Recursively copy files from a legacy harness directory into the matching
 * .danya/ directory, never overwriting files that already exist on the
 * .danya side. Copies and skips are recorded on `result` as
 * "<prefix>/<entry>" relative paths. All fs errors are best-effort ignored.
 *
 * Fix: hook scripts must be executable. The original only set mode 0o755
 * when `prefix === "hooks"`, so scripts inside hooks/ sub-directories
 * (where recursion makes the prefix "hooks/<dir>") were written 0o644.
 *
 * @param {string} legacyDir - Source directory being walked.
 * @param {string} danyaDir - Destination directory (already created).
 * @param {string} prefix - Relative path prefix for reporting, e.g. "rules".
 * @param {{merged: string[], skipped: string[]}} result - Mutated in place.
 */
function copyMissing(legacyDir, danyaDir, prefix, result) {
  let entries;
  try {
    entries = readdirSync4(legacyDir);
  } catch {
    return; // unreadable source directory: nothing to do
  }
  // Anything under the hooks tree is a script and needs the exec bit.
  const isHookTree = prefix === "hooks" || prefix.startsWith("hooks/");
  for (const entry of entries) {
    const legacyPath = join8(legacyDir, entry);
    const danyaPath = join8(danyaDir, entry);
    try {
      if (statSync11(legacyPath).isDirectory()) {
        mkdirSync6(danyaPath, { recursive: true });
        copyMissing(legacyPath, danyaPath, `${prefix}/${entry}`, result);
        continue;
      }
    } catch {
      continue; // stat failed (e.g. broken symlink): skip this entry
    }
    const relPath = `${prefix}/${entry}`;
    if (existsSync10(danyaPath)) {
      result.skipped.push(relPath); // .danya version wins
    } else {
      try {
        const content = readFileSync8(legacyPath);
        writeFileSync5(danyaPath, content, {
          // 493 = 0o755 (executable hooks), 420 = 0o644 (plain files)
          mode: isHookTree ? 493 : 420
        });
        result.merged.push(relPath);
      } catch {
        // best-effort: a single unreadable/unwritable file must not abort the merge
      }
    }
  }
}
12207
/**
 * Merge hook registrations from a legacy settings.json into the
 * .danya/settings.json. Only hook events absent on the .danya side are
 * copied over; existing .danya events always win. Merged events are
 * recorded on `result.merged` as "settings.json:hooks.<event>".
 * Unparseable legacy settings abort silently; an unparseable .danya file
 * is treated as empty.
 */
function mergeSettings(legacyDir, danyaDir, result) {
  const legacyFile = join8(legacyDir, "settings.json");
  const danyaFile = join8(danyaDir, "settings.json");
  if (!existsSync10(legacyFile)) return;
  let legacyConfig;
  try {
    legacyConfig = JSON.parse(readFileSync8(legacyFile, "utf-8"));
  } catch {
    return; // unreadable legacy settings: nothing to merge
  }
  let danyaConfig = {};
  if (existsSync10(danyaFile)) {
    try {
      danyaConfig = JSON.parse(readFileSync8(danyaFile, "utf-8"));
    } catch {
      danyaConfig = {}; // corrupt target: start from scratch
    }
  }
  if (!legacyConfig.hooks) return;
  if (!danyaConfig.hooks) danyaConfig.hooks = {};
  for (const [event, handlers] of Object.entries(legacyConfig.hooks)) {
    if (danyaConfig.hooks[event]) continue; // .danya registration wins
    danyaConfig.hooks[event] = handlers;
    result.merged.push(`settings.json:hooks.${event}`);
  }
  writeFileSync5(danyaFile, JSON.stringify(danyaConfig, null, 2), "utf-8");
}
12234
+
12235
+ // src/commands/initProject.ts
12236
/**
 * Entry point for .danya/ harness initialization.
 * Detects whether `cwd2` is a multi-project workspace or a single project
 * and delegates accordingly. Refuses to re-initialize an existing .danya/
 * unless `force` is set.
 *
 * @param {string} cwd2 - Directory to initialize.
 * @param {boolean} [force=false] - Overwrite an existing .danya/ setup.
 * @returns {Promise<string>} Human-readable summary of what was done.
 */
async function initDanyaProject(cwd2, force = false) {
  const harnessDir = join9(cwd2, ".danya");
  const alreadyInitialized = existsSync11(harnessDir) && !force;
  if (alreadyInitialized) {
    return "\u26A0\uFE0F .danya/ already exists. Skipping initialization. Use --force to overwrite.";
  }
  const detected = detectWorkspace(cwd2);
  // Claude models read CLAUDE.md; everything else gets AGENTS.md.
  const instructionsFile = isClaudeModel() ? "CLAUDE.md" : "AGENTS.md";
  return detected.type === "workspace"
    ? initWorkspace(cwd2, detected.subProjects, instructionsFile, force)
    : initSingleProject(cwd2, instructionsFile, force);
}
12248
/**
 * Initialize the .danya/ harness for a single (non-workspace) project:
 * installs the engine-specific template bundle, writes guard-rules.json,
 * gate-chain.json and settings.json, folds in any legacy .claude/.codex
 * harness content, creates the instructions file, and returns a summary
 * message for the user.
 *
 * @param {string} cwd2 - Project root.
 * @param {string} instructionsFile - "CLAUDE.md" or "AGENTS.md".
 * @param {boolean} force - Passed through to installBundle to overwrite files.
 * @returns {string} Multi-line status report.
 */
function initSingleProject(cwd2, instructionsFile, force) {
  const danyaDir = join9(cwd2, ".danya");
  // NOTE(review): detectProject presumably probes the file tree for an engine
  // and server language — confirm against its definition.
  const detection = detectProject(cwd2);
  const projectName = basename4(cwd2);
  // Pick rule/command/hook templates matching the detected engine + server language.
  const bundle = getBundleForEngine(detection.engine, detection.serverLanguage);
  const ctx = buildTemplateContext(projectName, detection.engine, detection.serverLanguage, instructionsFile);
  const installed = installBundle(danyaDir, bundle, ctx, { force });
  // Forbidden-zone patterns enforced by the constitution-guard hook.
  const guardRules = generateGuardRules(detection.engine, detection.serverLanguage);
  writeFileSync6(join9(danyaDir, "guard-rules.json"), JSON.stringify(guardRules, null, 2), "utf-8");
  // Full gate chain is enabled for single projects (cf. the reduced chain in initWorkspace).
  writeFileSync6(join9(danyaDir, "gate-chain.json"), JSON.stringify({
    gates: {
      guard: { enabled: true },
      syntax: { enabled: true },
      verify: { enabled: true, default_level: "build" },
      commit: { enabled: true },
      review: { enabled: true },
      push: { enabled: true, require_review: true }
    }
  }, null, 2), "utf-8");
  // Hook registration consumed by the host tool (see generateSettings).
  writeFileSync6(join9(danyaDir, "settings.json"), JSON.stringify(generateSettings(), null, 2), "utf-8");
  // Merge legacy .claude/.codex content after install so .danya files win.
  const consolidation = consolidateLegacyIntoDanya(cwd2);
  writeInstructionsFile(cwd2, instructionsFile, detection.engine, detection.serverLanguage);
  const legacyMsg = consolidation.sources.length > 0 ? [` Legacy integrated: ${consolidation.sources.join(", ")} \u2192 .danya/ (${consolidation.merged.length} merged, ${consolidation.skipped.length} skipped)`] : [];
  return [
    "\u2705 Danya project initialized! (single-project mode)",
    "",
    `Detected: engine=${detection.engine ?? "none"}, server=${detection.serverLanguage ?? "none"}`,
    `Guard rules: ${guardRules.length} forbidden zone patterns`,
    `Harness files: ${installed.length} files installed`,
    "",
    "Created:",
    ` .danya/rules/ \u2014 ${countFiles(installed, "rules/")} constraint files`,
    ` .danya/commands/ \u2014 ${countFiles(installed, "commands/")} workflow commands`,
    ` .danya/memory/ \u2014 ${countFiles(installed, "memory/")} knowledge files`,
    ` .danya/hooks/ \u2014 ${countFiles(installed, "hooks/")} hook scripts`,
    " .danya/gate-chain.json \u2014 Gate chain configuration",
    " .danya/guard-rules.json \u2014 Forbidden zone patterns",
    " .danya/settings.json \u2014 Hook registration",
    ...legacyMsg,
    "",
    "Gate chain: Edit \u2192 Guard \u2192 Syntax \u2192 Verify \u2192 Commit \u2192 Review \u2192 Push",
    "",
    "Available commands:",
    " /auto-work <req> \u2014 Full-auto pipeline (plan\u2192code\u2192verify\u2192review\u2192commit\u2192sediment\u2192evolve)",
    " /auto-bugfix <bug> \u2014 Bug reproduction + auto-fix (max 5 rounds)",
    " /review \u2014 100-point scoring code review",
    " /fix-harness \u2014 Update rules after error pattern",
    " /plan <req> \u2014 Analysis and planning",
    " /verify [level] \u2014 Mechanical verification (quick|build|full)",
    "",
    "Next steps:",
    ` 1. Customize ${instructionsFile} with your project-specific rules`,
    " 2. Review .danya/rules/ and adjust to your project",
    " 3. Start developing: danya"
  ].join("\n");
}
12304
/**
 * Initialize the .danya/ harness for a multi-project workspace: a
 * cross-project bundle at the workspace root (with a reduced gate chain)
 * plus a full per-sub-project harness in each sub-project directory.
 *
 * @param {string} rootPath - Workspace root.
 * @param {Array} subProjects - Detected sub-projects; each has path, name,
 *   role, engine and serverLanguage fields (as read below).
 * @param {string} instructionsFile - "CLAUDE.md" or "AGENTS.md".
 * @param {boolean} force - Passed through to installBundle.
 * @returns {string} Multi-line status report.
 */
function initWorkspace(rootPath, subProjects, instructionsFile, force) {
  const rootDanyaDir = join9(rootPath, ".danya");
  const rootName = basename4(rootPath);
  const wsBundle = getWorkspaceBundle();
  // Root context carries no engine/server — it is cross-project.
  const wsCtx = buildTemplateContext(rootName, null, null, instructionsFile);
  const wsInstalled = installBundle(rootDanyaDir, wsBundle, wsCtx, { force });
  // No forbidden zones at the root: "[]" is an empty guard-rules list.
  writeFileSync6(join9(rootDanyaDir, "guard-rules.json"), "[]", "utf-8");
  // Root gate chain is reduced: no syntax/verify/commit gates here.
  writeFileSync6(join9(rootDanyaDir, "gate-chain.json"), JSON.stringify({
    gates: { guard: { enabled: true }, review: { enabled: true }, push: { enabled: true } }
  }, null, 2), "utf-8");
  const subResults = [];
  for (const sub of subProjects) {
    const subDanyaDir = join9(sub.path, ".danya");
    const bundle = getBundleForEngine(sub.engine, sub.serverLanguage);
    const ctx = buildTemplateContext(sub.name, sub.engine, sub.serverLanguage, instructionsFile);
    const installed = installBundle(subDanyaDir, bundle, ctx, { force });
    const guardRules = generateGuardRules(sub.engine, sub.serverLanguage);
    writeFileSync6(join9(subDanyaDir, "guard-rules.json"), JSON.stringify(guardRules, null, 2), "utf-8");
    // Sub-projects get the full gate chain (mirrors initSingleProject).
    writeFileSync6(join9(subDanyaDir, "gate-chain.json"), JSON.stringify({
      gates: { guard: { enabled: true }, syntax: { enabled: true }, verify: { enabled: true }, commit: { enabled: true }, review: { enabled: true }, push: { enabled: true, require_review: true } }
    }, null, 2), "utf-8");
    writeFileSync6(join9(subDanyaDir, "settings.json"), JSON.stringify(generateSettings(), null, 2), "utf-8");
    // Fold legacy .claude/.codex harness content into each sub-project.
    consolidateLegacyIntoDanya(sub.path);
    writeInstructionsFile(sub.path, instructionsFile, sub.engine, sub.serverLanguage);
    subResults.push(` ${sub.name}/ (${sub.role}): engine=${sub.engine ?? "none"}, server=${sub.serverLanguage ?? "none"}, ${installed.length} files`);
  }
  // Root-level legacy content and instructions file (engine-agnostic).
  consolidateLegacyIntoDanya(rootPath);
  writeInstructionsFile(rootPath, instructionsFile, null, null);
  return [
    "\u2705 Danya workspace initialized! (multi-project mode)",
    "",
    "Workspace structure:",
    ` ${rootName}/.danya/ \u2014 Cross-project (${wsInstalled.length} files)`,
    ...subResults,
    "",
    "Three-layer isolation:",
    " Layer 1: Workspace \u2014 cross-project rules, memory, commands",
    " Layer 2: Sub-projects \u2014 engine-specific rules, hooks, commands",
    " Layer 3: Session \u2014 git worktree isolation for parallel tasks",
    "",
    "Gate chain per sub-project: Edit \u2192 Guard \u2192 Syntax \u2192 Verify \u2192 Commit \u2192 Review \u2192 Push"
  ].join("\n");
}
12347
/**
 * Build the .danya/settings.json hook registration object.
 * PreToolUse hooks gate edits and git commit/push; PostToolUse hooks drive
 * harness evolution and monitoring; the Stop hook logs session end.
 * Timeouts are milliseconds (pre-commit gets 300s, everything else 3-5s).
 *
 * @returns {object} Settings object ready for JSON serialization.
 */
function generateSettings() {
  // Small factory for the repeated { type: "command", ... } hook shape.
  const cmd = (command, timeout) => ({ type: "command", command, timeout });
  return {
    hooks: {
      PreToolUse: [
        { matcher: "Edit|Write", hooks: [cmd("bash .danya/hooks/constitution-guard.sh", 5e3)] },
        { matcher: "Bash", commandPattern: "git\\s+commit", hooks: [cmd("bash .danya/hooks/pre-commit.sh", 3e5)] },
        { matcher: "Bash", commandPattern: "git\\s+push", hooks: [cmd("bash .danya/hooks/push-gate.sh", 5e3)] }
      ],
      PostToolUse: [
        { matcher: "Bash", hooks: [cmd("bash .danya/hooks/harness-evolution.sh", 5e3)] },
        { matcher: "Bash", commandPattern: "git\\s+commit", hooks: [cmd("bash .danya/hooks/post-commit.sh", 5e3)] },
        { matcher: "*", hooks: [cmd("python3 .danya/monitor/log-tool-use.py", 3e3)] }
      ],
      Stop: [
        { matcher: "*", hooks: [cmd("python3 .danya/monitor/log-session-end.py", 3e3)] }
      ]
    }
  };
}
12366
/**
 * Create the project instructions file (CLAUDE.md or AGENTS.md) in `dir`
 * from the generated template. Never overwrites: skipped when either
 * variant of the file already exists.
 */
function writeInstructionsFile(dir, instructionsFile, engine, serverLanguage) {
  const targetPath = join9(dir, instructionsFile);
  const siblingFile = instructionsFile === "CLAUDE.md" ? "AGENTS.md" : "CLAUDE.md";
  const alreadyPresent = existsSync11(targetPath) || existsSync11(join9(dir, siblingFile));
  if (alreadyPresent) return;
  writeFileSync6(targetPath, generateInstructionsTemplate(engine, serverLanguage), "utf-8");
}
12372
/**
 * Render the default CLAUDE.md/AGENTS.md body for a project.
 * Engine-specific build/test lines are emitted for unity/unreal/godot,
 * server lines for go; when neither is detected, placeholder commands are
 * emitted instead. Always ends with the Harness and Forbidden Zones sections.
 *
 * @param {string|null} engine - Detected game engine, or null.
 * @param {string|null} serverLanguage - Detected server language, or null.
 * @returns {string} Markdown document text.
 */
function generateInstructionsTemplate(engine, serverLanguage) {
  const engineBuildLines = new Map([
    ["unity", [
      "- Build: Unity Editor \u2192 File > Build Settings",
      "- Test: Window > General > Test Runner"
    ]],
    ["unreal", [
      "- Build: UnrealBuildTool or IDE build",
      "- Test: Automation tab in Session Frontend"
    ]],
    ["godot", [
      "- Build: godot --export-release <preset>",
      "- Test: GUT or custom test framework"
    ]]
  ]);
  const lines = ["# Project Instructions", "", "## Build & Test"];
  const forEngine = engineBuildLines.get(engine);
  if (forEngine) lines.push(...forEngine);
  if (serverLanguage === "go") {
    lines.push(
      "- Server build: make build",
      "- Server test: make test",
      "- Server lint: make lint"
    );
  }
  // Nothing detected at all: leave placeholders for the user to fill in.
  if (!engine && !serverLanguage) {
    lines.push("- Build: <your build command>", "- Test: <your test command>");
  }
  lines.push(
    "",
    "## Harness",
    "This project uses Danya harness. See .danya/ for:",
    "- rules/ \u2014 Coding constraints (auto-loaded every session)",
    "- commands/ \u2014 Workflow commands (/auto-work, /review, /fix-harness, etc.)",
    "- memory/ \u2014 Persistent domain knowledge",
    "- hooks/ \u2014 Mechanical enforcement scripts",
    "",
    "## Forbidden Zones",
    "See .danya/guard-rules.json. Hook enforced \u2014 Agent cannot bypass.",
    ""
  );
  return lines.join("\n");
}
12407
/**
 * Build the forbidden-zone rule list for .danya/guard-rules.json.
 * Each rule is { pattern, description, fix_hint } where `pattern` is a
 * path regex fragment matched by the constitution-guard hook. Engine rules
 * and server-language rules are independent and concatenate.
 *
 * @param {string|null} engine - "unity" | "unreal" | "godot" | other/null.
 * @param {string|null} serverLanguage - "go" | other/null.
 * @returns {Array<{pattern: string, description: string, fix_hint: string}>}
 */
function generateGuardRules(engine, serverLanguage) {
  const rules = [];
  switch (engine) {
    case "unity":
      rules.push(
        { pattern: "Config/Gen/", description: "Auto-generated config", fix_hint: "Edit Excel \u2192 run ConfigGenerate" },
        { pattern: "Scripts/Framework/", description: "Core framework", fix_hint: "Needs programmer approval" }
      );
      break;
    case "unreal":
      rules.push(
        { pattern: "Generated/", description: "UE generated code", fix_hint: "Regenerate via UBT" },
        { pattern: "Intermediate/", description: "Build intermediates", fix_hint: "Do not edit" }
      );
      break;
    case "godot":
      rules.push(
        { pattern: "\\.import/", description: "Godot import cache", fix_hint: "Do not edit" }
      );
      break;
    default:
      break; // unknown/no engine: no engine-specific zones
  }
  if (serverLanguage === "go") {
    rules.push(
      { pattern: "orm/(golang|redis|mongo)/", description: "ORM generated code", fix_hint: "Edit XML \u2192 make orm" },
      { pattern: "cfg_.*\\.go$", description: "Config generated code", fix_hint: "Edit data source \u2192 regenerate" },
      { pattern: ".*_pb\\.go$", description: "Protobuf generated", fix_hint: "Edit .proto \u2192 protoc" }
    );
  }
  return rules;
}
12435
/**
 * Count installed file paths that live under the given directory prefix
 * (plain string-prefix match, e.g. "rules/").
 */
function countFiles(installed, prefix) {
  let total = 0;
  for (const filePath of installed) {
    if (filePath.startsWith(prefix)) total += 1;
  }
  return total;
}
12438
/**
 * Report whether the currently configured model is a Claude model
 * (its name starts with "claude"). Any lookup failure — no model manager,
 * no current model — yields false rather than throwing.
 */
function isClaudeModel() {
  try {
    const currentModel = getModelManager().getCurrentModel();
    return Boolean(currentModel && currentModel.startsWith("claude"));
  } catch {
    return false; // treat any resolution failure as "not Claude"
  }
}
10545
12447
 
10546
- // src/constants/releaseNotes.ts
10547
- var RELEASE_NOTES = {
10548
- "0.1.178": [
10549
- "New release notes now show you what's changed since you last launched"
10550
- ]
10551
- };
12448
// src/ui/screens/AutoInitHarness.ts
// One-shot latch: auto-init runs at most once per process.
var hasRun = false;
/**
 * Best-effort harness bootstrap on first launch: initialize .danya/ for
 * `cwd2` unless it already exists. Fire-and-forget — the returned promise
 * is not awaited and all failures (sync and async) are swallowed so that
 * startup is never blocked.
 */
function autoInitHarness(cwd2) {
  if (hasRun) return;
  hasRun = true;
  if (existsSync12(join10(cwd2, ".danya"))) return;
  try {
    // Deliberately not awaited; rejections are discarded.
    initDanyaProject(cwd2, false).catch(() => {
    });
  } catch {
  }
}
10552
12461
 
10553
12462
  // src/ui/components/ProjectOnboarding.tsx
10554
- import { gt } from "semver";
10555
- init_macros();
10556
- init_product();
10557
12463
  function markProjectOnboardingComplete() {
10558
12464
  const projectConfig = getCurrentProjectConfig();
10559
12465
  if (!projectConfig.hasCompletedProjectOnboarding) {
@@ -10590,9 +12496,14 @@ function ProjectOnboarding({
10590
12496
  if (!showOnboarding && !hasReleaseNotes) {
10591
12497
  return null;
10592
12498
  }
10593
- const workspaceHasProjectGuide = existsSync8(join6(workspaceDir, PROJECT_FILE));
12499
+ const workspaceHasProjectGuide = existsSync13(join11(workspaceDir, PROJECT_FILE));
10594
12500
  const isWorkspaceDirEmpty = isDirEmpty(workspaceDir);
10595
12501
  const shouldRecommendProjectGuide = !workspaceHasProjectGuide && !isWorkspaceDirEmpty;
12502
+ React28.useEffect(() => {
12503
+ if (!isWorkspaceDirEmpty) {
12504
+ autoInitHarness(workspaceDir);
12505
+ }
12506
+ }, [workspaceDir, isWorkspaceDirEmpty]);
10596
12507
  const theme = getTheme();
10597
12508
  return /* @__PURE__ */ React28.createElement(Box21, { flexDirection: "column", gap: 1, padding: 1, paddingBottom: 0 }, showOnboarding && /* @__PURE__ */ React28.createElement(React28.Fragment, null, /* @__PURE__ */ React28.createElement(Text24, { color: theme.secondaryText }, "Tips for getting started:"), /* @__PURE__ */ React28.createElement(OrderedList, null, (() => {
10598
12509
  const items = [];
@@ -10626,31 +12537,44 @@ function ProjectOnboarding({
10626
12537
 
10627
12538
  // src/commands/init.ts
10628
12539
  init_product();
12540
+ init_state();
10629
12541
  var command2 = {
10630
12542
  type: "prompt",
10631
12543
  name: "init",
10632
- description: `Initialize a new ${PROJECT_FILE} file with codebase documentation`,
12544
+ description: `Initialize Danya harness + ${PROJECT_FILE} for this project`,
10633
12545
  isEnabled: true,
10634
12546
  isHidden: false,
10635
- progressMessage: "analyzing your codebase",
12547
+ progressMessage: "initializing harness and analyzing codebase",
10636
12548
  userFacingName() {
10637
12549
  return "init";
10638
12550
  },
10639
12551
  async getPromptForCommand(_args) {
10640
12552
  markProjectOnboardingComplete();
12553
+ let harnessResult = "";
12554
+ try {
12555
+ const force = _args.includes("--force");
12556
+ harnessResult = await initDanyaProject(getCwd(), force);
12557
+ } catch (e) {
12558
+ harnessResult = `\u26A0\uFE0F Harness init failed: ${e.message}`;
12559
+ }
10641
12560
  return [
10642
12561
  {
10643
12562
  role: "user",
10644
12563
  content: [
10645
12564
  {
10646
12565
  type: "text",
10647
- text: `Please analyze this codebase and create a ${PROJECT_FILE} file containing:
12566
+ text: `${harnessResult}
12567
+
12568
+ ---
12569
+
12570
+ Now please analyze this codebase and create a ${PROJECT_FILE} file containing:
10648
12571
  1. Build/lint/test commands - especially for running a single test
10649
12572
  2. Code style guidelines including imports, formatting, types, naming conventions, error handling, etc.
10650
12573
 
10651
12574
  The file you create will be given to agentic coding agents (such as yourself) that operate in this repository. Make it about 20 lines long.
10652
12575
  If there's already a ${PROJECT_FILE}, improve it.
10653
- If there are Cursor rules (in .cursor/rules/ or .cursorrules) or Copilot rules (in .github/copilot-instructions.md), make sure to include them.`
12576
+ If there are Cursor rules (in .cursor/rules/ or .cursorrules) or Copilot rules (in .github/copilot-instructions.md), make sure to include them.
12577
+ Also review the .danya/rules/ files and customize them based on what you learn about this codebase.`
10654
12578
  }
10655
12579
  ]
10656
12580
  }
@@ -10740,8 +12664,8 @@ function getReplStaticPrefixLength(orderedMessages, allMessages, unresolvedToolU
10740
12664
 
10741
12665
  // src/commands/messages-debug.ts
10742
12666
  init_log();
10743
- import { existsSync as existsSync9, readdirSync as readdirSync2, readFileSync as readFileSync6, statSync as statSync9 } from "fs";
10744
- import { join as join7 } from "path";
12667
+ import { existsSync as existsSync14, readdirSync as readdirSync5, readFileSync as readFileSync9, statSync as statSync12 } from "fs";
12668
+ import { join as join12 } from "path";
10745
12669
  function isDebugMode() {
10746
12670
  return process.argv.includes("--debug") || process.argv.includes("--debug-verbose");
10747
12671
  }
@@ -10772,15 +12696,15 @@ function getProgressText(message) {
10772
12696
  }
10773
12697
  function getLatestMessagesLogFile() {
10774
12698
  const dir = CACHE_PATHS.messages();
10775
- if (!existsSync9(dir)) return null;
10776
- const files = readdirSync2(dir).filter((f) => f.endsWith(".json"));
12699
+ if (!existsSync14(dir)) return null;
12700
+ const files = readdirSync5(dir).filter((f) => f.endsWith(".json"));
10777
12701
  if (files.length === 0) return null;
10778
12702
  let best = null;
10779
12703
  for (const file of files) {
10780
- const fullPath = join7(dir, file);
12704
+ const fullPath = join12(dir, file);
10781
12705
  let mtimeMs = 0;
10782
12706
  try {
10783
- mtimeMs = statSync9(fullPath).mtimeMs;
12707
+ mtimeMs = statSync12(fullPath).mtimeMs;
10784
12708
  } catch {
10785
12709
  continue;
10786
12710
  }
@@ -10891,9 +12815,9 @@ var command3 = {
10891
12815
  );
10892
12816
  const { toolUseIDs, duplicates, byID } = summarizeToolUses(normalized);
10893
12817
  const latestLog = getLatestMessagesLogFile();
10894
- const latestLogContent = latestLog && existsSync9(latestLog.path) ? (() => {
12818
+ const latestLogContent = latestLog && existsSync14(latestLog.path) ? (() => {
10895
12819
  try {
10896
- return JSON.parse(readFileSync6(latestLog.path, "utf8"));
12820
+ return JSON.parse(readFileSync9(latestLog.path, "utf8"));
10897
12821
  } catch {
10898
12822
  return null;
10899
12823
  }
@@ -11172,7 +13096,7 @@ async function createAndStoreApiKey(accessToken) {
11172
13096
  }
11173
13097
  saveGlobalConfig(config2);
11174
13098
  try {
11175
- const { resetAnthropicClient } = await import("./llm-IFU62ZT4.js");
13099
+ const { resetAnthropicClient } = await import("./llm-23Z6UDED.js");
11176
13100
  resetAnthropicClient();
11177
13101
  } catch {
11178
13102
  }
@@ -15523,7 +17447,7 @@ async function refreshPluginRuntimeFromInstalls() {
15523
17447
  const existingRoots = getSessionPlugins().map((p) => p.rootDir);
15524
17448
  const dirs = Array.from(/* @__PURE__ */ new Set([...existingRoots, ...installedRoots]));
15525
17449
  if (dirs.length === 0) return [];
15526
- const { configureSessionPlugins } = await import("./pluginRuntime-IG2H3W7C.js");
17450
+ const { configureSessionPlugins } = await import("./pluginRuntime-ZIT4IL6O.js");
15527
17451
  const { errors } = await configureSessionPlugins({ pluginDirs: dirs });
15528
17452
  return errors;
15529
17453
  }
@@ -16192,7 +18116,7 @@ async function call(onDone, context) {
16192
18116
  ModelConfig,
16193
18117
  {
16194
18118
  onClose: () => {
16195
- import("./model-PC6MMS2S.js").then(({ reloadModelManager: reloadModelManager2 }) => {
18119
+ import("./model-NIOLLP6W.js").then(({ reloadModelManager: reloadModelManager2 }) => {
16196
18120
  reloadModelManager2();
16197
18121
  triggerModelConfigChange();
16198
18122
  onDone();
@@ -16880,6 +18804,203 @@ var parallel_execute_default = {
16880
18804
  }
16881
18805
  };
16882
18806
 
18807
+ // src/commands/fix-harness.ts
18808
+ var fixHarnessCommand = {
18809
+ name: "fix-harness",
18810
+ description: "Update harness rules after an error pattern is found",
18811
+ isEnabled: true,
18812
+ isHidden: false,
18813
+ type: "prompt",
18814
+ progressMessage: "Analyzing error and updating harness rules...",
18815
+ argumentHint: "[error-description]",
18816
+ userFacingName() {
18817
+ return "fix-harness";
18818
+ },
18819
+ async getPromptForCommand(args) {
18820
+ return [
18821
+ {
18822
+ role: "user",
18823
+ content: [
18824
+ {
18825
+ type: "text",
18826
+ text: buildFixHarnessPrompt(args)
18827
+ }
18828
+ ]
18829
+ }
18830
+ ];
18831
+ }
18832
+ };
18833
+ function buildFixHarnessPrompt(errorDescription) {
18834
+ return `You are performing harness self-evolution. An error pattern was discovered during development.
18835
+
18836
+ Error description: ${errorDescription || "(Analyze recent errors in this session)"}
18837
+
18838
+ ## Process
18839
+
18840
+ 1. **Identify the error pattern**: What went wrong? What type of error is it?
18841
+
18842
+ 2. **Route to correct rule file**:
18843
+ - Forbidden zone violation \u2192 .danya/rules/constitution.md
18844
+ - Coding principle violation \u2192 .danya/rules/golden-principles.md
18845
+ - Known pitfall re-occurrence \u2192 .danya/rules/known-pitfalls.md
18846
+ - Architecture boundary violation \u2192 .danya/rules/architecture-boundaries.md
18847
+ - Style issue \u2192 engine-specific style rule file
18848
+
18849
+ 3. **Add a concise rule**:
18850
+ - \u274C What went wrong (with example)
18851
+ - \u2705 Correct approach (with example)
18852
+
18853
+ 4. **Check constraints**:
18854
+ - Is this error pattern already captured in rules? If yes, skip.
18855
+ - Total rule file lines must stay under 550. If exceeded, consolidate.
18856
+ - If mechanically checkable, note it for /verify checks.
18857
+
18858
+ 5. **Write the update**: Edit the appropriate rule file.
18859
+
18860
+ 6. **Report**: State which file was updated and what rule was added.
18861
+
18862
+ ## Important
18863
+ - Only add NEW patterns not already captured
18864
+ - Keep rules minimal: one error = one rule
18865
+ - Include correct-usage example, not just prohibition
18866
+ - Do NOT modify code files \u2014 only update .danya/rules/`;
18867
+ }
18868
+ var fix_harness_default = fixHarnessCommand;
18869
+
18870
+ // src/commands/orchestrate.ts
18871
+ var orchestrateCommand = {
18872
+ name: "orchestrate",
18873
+ description: "Auto-research iteration loop (AI codes \u2192 verify \u2192 commit/revert)",
18874
+ isEnabled: true,
18875
+ isHidden: false,
18876
+ type: "prompt",
18877
+ progressMessage: "Starting auto-research iteration...",
18878
+ argumentHint: "<task-file.md> [-n iterations]",
18879
+ userFacingName() {
18880
+ return "orchestrate";
18881
+ },
18882
+ async getPromptForCommand(args) {
18883
+ return [{
18884
+ role: "user",
18885
+ content: [{ type: "text", text: `Run the orchestrator script for autonomous iteration:
18886
+
18887
+ \`\`\`bash
18888
+ bash .danya/scripts/orchestrator.sh ${args || ".danya/templates/program-template.md"}
18889
+ \`\`\`
18890
+
18891
+ This will:
18892
+ 1. Read the task definition
18893
+ 2. Loop N iterations: AI codes \u2192 quantitative verification \u2192 commit if score \u2265 baseline, revert if not
18894
+ 3. Circuit break after 5 consecutive failures
18895
+ 4. Report final baseline score
18896
+
18897
+ If the task file doesn't exist, help the user create one using .danya/templates/program-template.md as reference.` }]
18898
+ }];
18899
+ }
18900
+ };
18901
+ var orchestrate_default = orchestrateCommand;
18902
+
18903
+ // src/commands/red-blue.ts
18904
+ var redBlueCommand = {
18905
+ name: "red-blue",
18906
+ description: "Adversarial red-blue testing loop (find bugs \u2192 fix \u2192 repeat)",
18907
+ isEnabled: true,
18908
+ isHidden: false,
18909
+ type: "prompt",
18910
+ progressMessage: "Starting red-blue adversarial loop...",
18911
+ argumentHint: "[scope-path]",
18912
+ userFacingName() {
18913
+ return "red-blue";
18914
+ },
18915
+ async getPromptForCommand(args) {
18916
+ return [{
18917
+ role: "user",
18918
+ content: [{ type: "text", text: `Run the red-blue adversarial testing loop:
18919
+
18920
+ \`\`\`bash
18921
+ bash .danya/scripts/red-blue-loop.sh ${args || "."}
18922
+ \`\`\`
18923
+
18924
+ This will:
18925
+ 1. **Red Team** (read-only): Analyze code, find all bugs (edge cases, error paths, concurrency, security)
18926
+ 2. **Blue Team** (write): Fix CRITICAL \u2192 HIGH \u2192 MEDIUM bugs with minimal changes
18927
+ 3. **Build check**: Verify fixes compile
18928
+ 4. **Loop**: Repeat until red team finds 0 bugs (max 5 rounds)
18929
+ 5. **Skill Extract**: Analyze all logs, extract patterns to .danya/rules/ and .danya/memory/
18930
+
18931
+ Agent role specs are in .danya/agents/ (red-team.md, blue-team.md, skill-extractor.md).` }]
18932
+ }];
18933
+ }
18934
+ };
18935
+ var red_blue_default = redBlueCommand;
18936
+
18937
+ // src/commands/monitor.ts
18938
+ init_state();
18939
+ import { existsSync as existsSync15, readdirSync as readdirSync6, readFileSync as readFileSync10 } from "fs";
18940
+ import { join as join13 } from "path";
18941
+ var monitorCommand = {
18942
+ name: "monitor",
18943
+ description: "View harness effectiveness metrics and data",
18944
+ isEnabled: true,
18945
+ isHidden: false,
18946
+ type: "prompt",
18947
+ progressMessage: "Analyzing harness metrics...",
18948
+ argumentHint: "[summary|tools|reviews|bugfixes|sessions] [days]",
18949
+ userFacingName() {
18950
+ return "monitor";
18951
+ },
18952
+ async getPromptForCommand(args) {
18953
+ const dataDir = join13(getCwd(), ".danya", "monitor", "data");
18954
+ const parts = args.trim().split(/\s+/);
18955
+ const metric = parts[0] || "summary";
18956
+ const days = parts[1] || "7";
18957
+ if (!existsSync15(dataDir)) {
18958
+ return [{
18959
+ role: "user",
18960
+ content: [{ type: "text", text: `No monitor data found at .danya/monitor/data/.
18961
+
18962
+ Monitor data is collected automatically via PostToolUse and Stop hooks registered in .danya/settings.json. Data will accumulate as you use Danya.
18963
+
18964
+ Available metrics: summary, tools, reviews, bugfixes, sessions
18965
+ Usage: /monitor [metric] [days]` }]
18966
+ }];
18967
+ }
18968
+ const files = readdirSync6(dataDir).filter((f) => f.endsWith(".jsonl"));
18969
+ const dataSummary = [];
18970
+ for (const file of files) {
18971
+ try {
18972
+ const lines = readFileSync10(join13(dataDir, file), "utf-8").trim().split("\n").filter(Boolean);
18973
+ dataSummary.push(`${file}: ${lines.length} entries`);
18974
+ } catch {
18975
+ }
18976
+ }
18977
+ return [{
18978
+ role: "user",
18979
+ content: [{ type: "text", text: `Analyze harness metrics from .danya/monitor/data/ for the last ${days} days.
18980
+
18981
+ Metric requested: ${metric}
18982
+
18983
+ Available data files:
18984
+ ${dataSummary.length > 0 ? dataSummary.map((s) => ` - ${s}`).join("\n") : " (no data yet)"}
18985
+
18986
+ ## Analysis Instructions
18987
+
18988
+ For **summary**: Show overview of all metrics (tool usage count, session count, avg verify time, avg review score, bug fix success rate).
18989
+
18990
+ For **tools**: Show tool usage distribution (which tools are used most).
18991
+
18992
+ For **reviews**: Show review score trends (avg, min, max, pass rate, CRITICAL count).
18993
+
18994
+ For **bugfixes**: Show bug fix efficiency (avg rounds, success rate).
18995
+
18996
+ For **sessions**: Show session count and duration.
18997
+
18998
+ Read the JSONL files, parse entries within the date range, and present a formatted summary. Each line in the JSONL files is a JSON object with a "timestamp" field (Unix epoch).` }]
18999
+ }];
19000
+ }
19001
+ };
19002
+ var monitor_default = monitorCommand;
19003
+
16883
19004
  // src/commands/rename.ts
16884
19005
  var rename = {
16885
19006
  type: "local",
@@ -18523,7 +20644,7 @@ import * as React89 from "react";
18523
20644
  // src/ui/components/permissions/file-edit-permission-request/FileEditPermissionRequest.tsx
18524
20645
  import chalk7 from "chalk";
18525
20646
  import { Box as Box55, Text as Text60, useInput as useInput19 } from "ink";
18526
- import { basename as basename2, dirname as dirname6, extname as extname7 } from "path";
20647
+ import { basename as basename5, dirname as dirname7, extname as extname8 } from "path";
18527
20648
  import React74, { useCallback as useCallback8, useMemo as useMemo13 } from "react";
18528
20649
 
18529
20650
  // src/ui/hooks/usePermissionRequestLogging.ts
@@ -18603,11 +20724,11 @@ function PermissionRequestTitle({
18603
20724
 
18604
20725
  // src/ui/components/permissions/file-edit-permission-request/FileEditToolDiff.tsx
18605
20726
  import * as React73 from "react";
18606
- import { existsSync as existsSync10, readFileSync as readFileSync7 } from "fs";
20727
+ import { existsSync as existsSync16, readFileSync as readFileSync11 } from "fs";
18607
20728
  import { useMemo as useMemo12 } from "react";
18608
20729
  import { Box as Box54, Text as Text59 } from "ink";
18609
20730
  init_state();
18610
- import { relative as relative11 } from "path";
20731
+ import { relative as relative13 } from "path";
18611
20732
  function FileEditToolDiff({
18612
20733
  file_path,
18613
20734
  new_string,
@@ -18617,7 +20738,7 @@ function FileEditToolDiff({
18617
20738
  width
18618
20739
  }) {
18619
20740
  const file = useMemo12(
18620
- () => existsSync10(file_path) ? readFileSync7(file_path, "utf8") : "",
20741
+ () => existsSync16(file_path) ? readFileSync11(file_path, "utf8") : "",
18621
20742
  [file_path]
18622
20743
  );
18623
20744
  const patch = useMemo12(
@@ -18637,7 +20758,7 @@ function FileEditToolDiff({
18637
20758
  flexDirection: "column",
18638
20759
  paddingX: 1
18639
20760
  },
18640
- /* @__PURE__ */ React73.createElement(Box54, { paddingBottom: 1 }, /* @__PURE__ */ React73.createElement(Text59, { bold: true }, verbose ? file_path : relative11(getCwd(), file_path))),
20761
+ /* @__PURE__ */ React73.createElement(Box54, { paddingBottom: 1 }, /* @__PURE__ */ React73.createElement(Text59, { bold: true }, verbose ? file_path : relative13(getCwd(), file_path))),
18641
20762
  intersperse(
18642
20763
  patch.map((_) => /* @__PURE__ */ React73.createElement(
18643
20764
  StructuredDiff,
@@ -18694,8 +20815,8 @@ function getPermissionModeCycleShortcut() {
18694
20815
 
18695
20816
  // src/ui/components/permissions/file-edit-permission-request/FileEditPermissionRequest.tsx
18696
20817
  function getOptions(args) {
18697
- const dirPath = dirname6(args.path);
18698
- const dirName = basename2(dirPath) || "this directory";
20818
+ const dirPath = dirname7(args.path);
20819
+ const dirName = basename5(dirPath) || "this directory";
18699
20820
  const options = [
18700
20821
  {
18701
20822
  label: "Yes",
@@ -18726,7 +20847,7 @@ function FileEditPermissionRequest({
18726
20847
  const modeCycleShortcut = useMemo13(() => getPermissionModeCycleShortcut(), []);
18727
20848
  const hasSessionSuggestion = (toolUseConfirm.suggestions?.length ?? 0) > 0;
18728
20849
  const isInWorkingDir = isPathInWorkingDirectories(
18729
- dirname6(file_path),
20850
+ dirname7(file_path),
18730
20851
  toolPermissionContext
18731
20852
  );
18732
20853
  const unaryEvent = useMemo13(
@@ -18836,7 +20957,7 @@ function FileEditPermissionRequest({
18836
20957
  width: columns - 12
18837
20958
  }
18838
20959
  ),
18839
- /* @__PURE__ */ React74.createElement(Box55, { flexDirection: "column" }, /* @__PURE__ */ React74.createElement(Text60, null, "Do you want to make this edit to", " ", /* @__PURE__ */ React74.createElement(Text60, { bold: true }, basename2(file_path)), "?"), /* @__PURE__ */ React74.createElement(
20960
+ /* @__PURE__ */ React74.createElement(Box55, { flexDirection: "column" }, /* @__PURE__ */ React74.createElement(Text60, null, "Do you want to make this edit to", " ", /* @__PURE__ */ React74.createElement(Text60, { bold: true }, basename5(file_path)), "?"), /* @__PURE__ */ React74.createElement(
18840
20961
  Select,
18841
20962
  {
18842
20963
  options: getOptions({
@@ -18851,7 +20972,7 @@ function FileEditPermissionRequest({
18851
20972
  );
18852
20973
  }
18853
20974
  async function extractLanguageName(file_path) {
18854
- const ext = extname7(file_path);
20975
+ const ext = extname8(file_path);
18855
20976
  if (!ext) {
18856
20977
  return "unknown";
18857
20978
  }
@@ -19217,31 +21338,31 @@ function useNotifyAfterTimeout(message, timeout = DEFAULT_INTERACTION_THRESHOLD_
19217
21338
  // src/ui/components/permissions/file-write-permission-request/FileWritePermissionRequest.tsx
19218
21339
  import { Box as Box59, Text as Text64, useInput as useInput20 } from "ink";
19219
21340
  import React78, { useCallback as useCallback9, useMemo as useMemo17 } from "react";
19220
- import { basename as basename3, dirname as dirname7, extname as extname9 } from "path";
21341
+ import { basename as basename6, dirname as dirname8, extname as extname10 } from "path";
19221
21342
  init_env();
19222
- import { existsSync as existsSync12 } from "fs";
21343
+ import { existsSync as existsSync18 } from "fs";
19223
21344
  import chalk10 from "chalk";
19224
21345
 
19225
21346
  // src/ui/components/permissions/file-write-permission-request/FileWriteToolDiff.tsx
19226
21347
  import * as React77 from "react";
19227
- import { existsSync as existsSync11, readFileSync as readFileSync8 } from "fs";
21348
+ import { existsSync as existsSync17, readFileSync as readFileSync12 } from "fs";
19228
21349
  import { useMemo as useMemo16 } from "react";
19229
21350
  import { Box as Box58, Text as Text63 } from "ink";
19230
21351
  init_state();
19231
- import { extname as extname8, relative as relative12 } from "path";
21352
+ import { extname as extname9, relative as relative14 } from "path";
19232
21353
  function FileWriteToolDiff({
19233
21354
  file_path,
19234
21355
  content,
19235
21356
  verbose,
19236
21357
  width
19237
21358
  }) {
19238
- const fileExists = useMemo16(() => existsSync11(file_path), [file_path]);
21359
+ const fileExists = useMemo16(() => existsSync17(file_path), [file_path]);
19239
21360
  const oldContent = useMemo16(() => {
19240
21361
  if (!fileExists) {
19241
21362
  return "";
19242
21363
  }
19243
21364
  const enc = detectFileEncoding(file_path);
19244
- return readFileSync8(file_path, enc);
21365
+ return readFileSync12(file_path, enc);
19245
21366
  }, [file_path, fileExists]);
19246
21367
  const hunks = useMemo16(() => {
19247
21368
  if (!fileExists) {
@@ -19262,7 +21383,7 @@ function FileWriteToolDiff({
19262
21383
  flexDirection: "column",
19263
21384
  paddingX: 1
19264
21385
  },
19265
- /* @__PURE__ */ React77.createElement(Box58, { paddingBottom: 1 }, /* @__PURE__ */ React77.createElement(Text63, { bold: true }, verbose ? file_path : relative12(getCwd(), file_path))),
21386
+ /* @__PURE__ */ React77.createElement(Box58, { paddingBottom: 1 }, /* @__PURE__ */ React77.createElement(Text63, { bold: true }, verbose ? file_path : relative14(getCwd(), file_path))),
19266
21387
  hunks ? intersperse(
19267
21388
  hunks.map((_) => /* @__PURE__ */ React77.createElement(
19268
21389
  StructuredDiff,
@@ -19278,7 +21399,7 @@ function FileWriteToolDiff({
19278
21399
  HighlightedCode,
19279
21400
  {
19280
21401
  code: content || "(No content)",
19281
- language: extname8(file_path).slice(1)
21402
+ language: extname9(file_path).slice(1)
19282
21403
  }
19283
21404
  )
19284
21405
  );
@@ -19295,18 +21416,18 @@ function FileWritePermissionRequest({
19295
21416
  const modeCycleShortcut = useMemo17(() => getPermissionModeCycleShortcut(), []);
19296
21417
  const hasSessionSuggestion = (toolUseConfirm.suggestions?.length ?? 0) > 0;
19297
21418
  const isInWorkingDir = isPathInWorkingDirectories(
19298
- dirname7(file_path),
21419
+ dirname8(file_path),
19299
21420
  toolPermissionContext
19300
21421
  );
19301
21422
  const sessionLabel = useMemo17(() => {
19302
- const dirPath = dirname7(file_path);
19303
- const dirName = basename3(dirPath) || "this directory";
21423
+ const dirPath = dirname8(file_path);
21424
+ const dirName = basename6(dirPath) || "this directory";
19304
21425
  const shortcutHint = chalk10.bold.hex(getTheme().warning)(
19305
21426
  `(${modeCycleShortcut.displayText})`
19306
21427
  );
19307
21428
  return isInWorkingDir ? `Yes, allow all edits during this session ${shortcutHint}` : `Yes, allow all edits in ${chalk10.bold(`${dirName}/`)} during this session ${shortcutHint}`;
19308
21429
  }, [file_path, isInWorkingDir, modeCycleShortcut.displayText]);
19309
- const fileExists = useMemo17(() => existsSync12(file_path), [file_path]);
21430
+ const fileExists = useMemo17(() => existsSync18(file_path), [file_path]);
19310
21431
  const unaryEvent = useMemo17(
19311
21432
  () => ({
19312
21433
  completion_type: "write_file_single",
@@ -19414,7 +21535,7 @@ function FileWritePermissionRequest({
19414
21535
  width: columns - 12
19415
21536
  }
19416
21537
  )),
19417
- /* @__PURE__ */ React78.createElement(Box59, { flexDirection: "column" }, /* @__PURE__ */ React78.createElement(Text64, null, "Do you want to ", fileExists ? "make this edit to" : "create", " ", /* @__PURE__ */ React78.createElement(Text64, { bold: true }, basename3(file_path)), "?"), /* @__PURE__ */ React78.createElement(
21538
+ /* @__PURE__ */ React78.createElement(Box59, { flexDirection: "column" }, /* @__PURE__ */ React78.createElement(Text64, null, "Do you want to ", fileExists ? "make this edit to" : "create", " ", /* @__PURE__ */ React78.createElement(Text64, { bold: true }, basename6(file_path)), "?"), /* @__PURE__ */ React78.createElement(
19418
21539
  Select,
19419
21540
  {
19420
21541
  options: [
@@ -19439,7 +21560,7 @@ function FileWritePermissionRequest({
19439
21560
  );
19440
21561
  }
19441
21562
  async function extractLanguageName2(file_path) {
19442
- const ext = extname9(file_path);
21563
+ const ext = extname10(file_path);
19443
21564
  if (!ext) {
19444
21565
  return "unknown";
19445
21566
  }
@@ -19453,8 +21574,8 @@ import React79, { useCallback as useCallback10, useMemo as useMemo18 } from "rea
19453
21574
  init_env();
19454
21575
  import chalk11 from "chalk";
19455
21576
  init_state();
19456
- import { basename as basename4, dirname as dirname8 } from "path";
19457
- import { statSync as statSync10 } from "fs";
21577
+ import { basename as basename7, dirname as dirname9 } from "path";
21578
+ import { statSync as statSync13 } from "fs";
19458
21579
  function pathArgNameForToolUse(toolUseConfirm) {
19459
21580
  switch (toolUseConfirm.tool) {
19460
21581
  case FileWriteTool:
@@ -19483,11 +21604,11 @@ function isMultiFile(toolUseConfirm) {
19483
21604
  }
19484
21605
  function pathToPermissionDirectory2(path5) {
19485
21606
  try {
19486
- const stats = statSync10(path5);
21607
+ const stats = statSync13(path5);
19487
21608
  if (stats.isDirectory()) return path5;
19488
21609
  } catch {
19489
21610
  }
19490
- return dirname8(path5);
21611
+ return dirname9(path5);
19491
21612
  }
19492
21613
  function pathFromToolUse(toolUseConfirm) {
19493
21614
  const pathArgName = pathArgNameForToolUse(toolUseConfirm);
@@ -19530,7 +21651,7 @@ function FilesystemPermissionRequest({
19530
21651
  function getDontAskAgainOptions(toolUseConfirm, path5, modeCycleShortcut, isInWorkingDir, hasSessionSuggestion) {
19531
21652
  if (!hasSessionSuggestion) return [];
19532
21653
  const permissionDirPath = pathToPermissionDirectory2(path5);
19533
- const permissionDirName = basename4(permissionDirPath) || "this directory";
21654
+ const permissionDirName = basename7(permissionDirPath) || "this directory";
19534
21655
  if (toolUseConfirm.tool.isReadOnly(toolUseConfirm.input)) {
19535
21656
  const label2 = isInWorkingDir ? "Yes, during this session" : `Yes, allow reading from ${chalk11.bold(`${permissionDirName}/`)} during this session`;
19536
21657
  return [{ label: label2, value: "yes-session" }];
@@ -20565,9 +22686,9 @@ init_planMode();
20565
22686
 
20566
22687
  // src/utils/system/externalEditor.ts
20567
22688
  import { spawn, spawnSync } from "child_process";
20568
- import { mkdtempSync, readFileSync as readFileSync9, rmSync, writeFileSync as writeFileSync4 } from "fs";
22689
+ import { mkdtempSync, readFileSync as readFileSync13, rmSync, writeFileSync as writeFileSync7 } from "fs";
20569
22690
  import { tmpdir } from "os";
20570
- import { join as join8 } from "path";
22691
+ import { join as join14 } from "path";
20571
22692
  var isWindows = process.platform === "win32";
20572
22693
  function isCommandAvailable(command4) {
20573
22694
  const checker = isWindows ? "where" : "which";
@@ -20643,9 +22764,9 @@ async function launchExternalEditor(initialText) {
20643
22764
  )
20644
22765
  };
20645
22766
  }
20646
- const dir = mkdtempSync(join8(tmpdir(), "kode-edit-"));
20647
- const filePath = join8(dir, "message.txt");
20648
- writeFileSync4(filePath, initialText, "utf-8");
22767
+ const dir = mkdtempSync(join14(tmpdir(), "kode-edit-"));
22768
+ const filePath = join14(dir, "message.txt");
22769
+ writeFileSync7(filePath, initialText, "utf-8");
20649
22770
  const wasRaw = Boolean(process.stdin.isTTY && process.stdin.isRaw);
20650
22771
  if (process.stdin.isTTY) {
20651
22772
  process.stdin.pause();
@@ -20687,7 +22808,7 @@ async function launchExternalEditor(initialText) {
20687
22808
  }
20688
22809
  restoreStdinState(wasRaw);
20689
22810
  try {
20690
- const edited = normalizeNewlines(readFileSync9(filePath, "utf-8"));
22811
+ const edited = normalizeNewlines(readFileSync13(filePath, "utf-8"));
20691
22812
  rmSync(dir, { recursive: true, force: true });
20692
22813
  return { text: edited, editorLabel: editorCommand.displayName };
20693
22814
  } catch (error) {
@@ -20752,7 +22873,7 @@ async function launchExternalEditorForFilePath(filePath) {
20752
22873
  }
20753
22874
 
20754
22875
  // src/ui/components/permissions/plan-mode-permission-request/ExitPlanModePermissionRequest.tsx
20755
- import { writeFileSync as writeFileSync5 } from "fs";
22876
+ import { writeFileSync as writeFileSync8 } from "fs";
20756
22877
  function getExitPlanModeOptions(args) {
20757
22878
  const options = [];
20758
22879
  options.push(
@@ -20838,7 +22959,7 @@ function ExitPlanModePermissionRequest({
20838
22959
  if (!planExists) {
20839
22960
  const initial = planText === planPlaceholder() ? "# Plan\n" : planText;
20840
22961
  try {
20841
- writeFileSync5(planFilePath, initial, "utf-8");
22962
+ writeFileSync8(planFilePath, initial, "utf-8");
20842
22963
  } catch {
20843
22964
  const edited = await launchExternalEditor(initial);
20844
22965
  if (edited.text !== null) {
@@ -21708,8 +23829,8 @@ function getCompletionContext(args) {
21708
23829
  }
21709
23830
 
21710
23831
  // src/utils/completion/fileSuggestions.ts
21711
- import { existsSync as existsSync13, readdirSync as readdirSync3, statSync as statSync11 } from "fs";
21712
- import { basename as basename5, dirname as dirname9, join as join9, resolve as resolve9 } from "path";
23832
+ import { existsSync as existsSync19, readdirSync as readdirSync7, statSync as statSync14 } from "fs";
23833
+ import { basename as basename8, dirname as dirname10, join as join15, resolve as resolve9 } from "path";
21713
23834
  function generateFileSuggestions(args) {
21714
23835
  const { prefix, cwd: cwd2 } = args;
21715
23836
  try {
@@ -21725,35 +23846,35 @@ function generateFileSuggestions(args) {
21725
23846
  searchPath = resolve9(cwd2, userPath);
21726
23847
  }
21727
23848
  const endsWithSlash = userPath.endsWith("/");
21728
- const searchStat = existsSync13(searchPath) ? statSync11(searchPath) : null;
23849
+ const searchStat = existsSync19(searchPath) ? statSync14(searchPath) : null;
21729
23850
  let searchDir;
21730
23851
  let nameFilter;
21731
23852
  if (endsWithSlash || searchStat?.isDirectory()) {
21732
23853
  searchDir = searchPath;
21733
23854
  nameFilter = "";
21734
23855
  } else {
21735
- searchDir = dirname9(searchPath);
21736
- nameFilter = basename5(searchPath);
23856
+ searchDir = dirname10(searchPath);
23857
+ nameFilter = basename8(searchPath);
21737
23858
  }
21738
- if (!existsSync13(searchDir)) return [];
23859
+ if (!existsSync19(searchDir)) return [];
21739
23860
  const showHidden = nameFilter.startsWith(".") || userPath.includes("/.");
21740
- const entries = readdirSync3(searchDir).filter((entry) => {
23861
+ const entries = readdirSync7(searchDir).filter((entry) => {
21741
23862
  if (!showHidden && entry.startsWith(".")) return false;
21742
23863
  if (nameFilter && !entry.toLowerCase().startsWith(nameFilter.toLowerCase()))
21743
23864
  return false;
21744
23865
  return true;
21745
23866
  }).sort((a, b) => {
21746
- const aPath = join9(searchDir, a);
21747
- const bPath = join9(searchDir, b);
21748
- const aIsDir = statSync11(aPath).isDirectory();
21749
- const bIsDir = statSync11(bPath).isDirectory();
23867
+ const aPath = join15(searchDir, a);
23868
+ const bPath = join15(searchDir, b);
23869
+ const aIsDir = statSync14(aPath).isDirectory();
23870
+ const bIsDir = statSync14(bPath).isDirectory();
21750
23871
  if (aIsDir && !bIsDir) return -1;
21751
23872
  if (!aIsDir && bIsDir) return 1;
21752
23873
  return a.toLowerCase().localeCompare(b.toLowerCase());
21753
23874
  }).slice(0, 25);
21754
23875
  return entries.map((entry) => {
21755
- const entryPath = join9(searchDir, entry);
21756
- const isDir = statSync11(entryPath).isDirectory();
23876
+ const entryPath = join15(searchDir, entry);
23877
+ const isDir = statSync14(entryPath).isDirectory();
21757
23878
  const icon = isDir ? "\u{1F4C1}" : "\u{1F4C4}";
21758
23879
  let value;
21759
23880
  if (userPath.includes("/")) {
@@ -23086,19 +25207,19 @@ function useUnifiedCompletion({
23086
25207
  if (systemCommands.length > 0 || isLoadingCommands) return;
23087
25208
  setIsLoadingCommands(true);
23088
25209
  try {
23089
- const { readdirSync: readdirSync4, statSync: statSync13 } = await import("fs");
25210
+ const { readdirSync: readdirSync8, statSync: statSync16 } = await import("fs");
23090
25211
  const pathDirs = (process.env.PATH || "").split(":").filter(Boolean);
23091
25212
  const commandSet = /* @__PURE__ */ new Set();
23092
25213
  const essentialCommands = getEssentialCommands();
23093
25214
  essentialCommands.forEach((cmd) => commandSet.add(cmd));
23094
25215
  for (const dir of pathDirs) {
23095
25216
  try {
23096
- if (readdirSync4 && statSync13) {
23097
- const entries = readdirSync4(dir);
25217
+ if (readdirSync8 && statSync16) {
25218
+ const entries = readdirSync8(dir);
23098
25219
  for (const entry of entries) {
23099
25220
  try {
23100
25221
  const fullPath = `${dir}/${entry}`;
23101
- const stats = statSync13(fullPath);
25222
+ const stats = statSync16(fullPath);
23102
25223
  if (stats.isFile() && (stats.mode & 73) !== 0) {
23103
25224
  commandSet.add(entry);
23104
25225
  }
@@ -23632,17 +25753,17 @@ function TokenWarning({ tokenUsage }) {
23632
25753
 
23633
25754
  // src/utils/commands/hashCommand.ts
23634
25755
  init_log();
23635
- import { join as join10 } from "path";
23636
- import { readFileSync as readFileSync10, writeFileSync as writeFileSync6 } from "fs";
25756
+ import { join as join16 } from "path";
25757
+ import { readFileSync as readFileSync14, writeFileSync as writeFileSync9 } from "fs";
23637
25758
  function handleHashCommand(interpreted) {
23638
25759
  try {
23639
25760
  const cwd2 = process.cwd();
23640
- const agentsPath = join10(cwd2, "AGENTS.md");
23641
- const legacyPath = join10(cwd2, "CLAUDE.md");
25761
+ const agentsPath = join16(cwd2, "AGENTS.md");
25762
+ const legacyPath = join16(cwd2, "CLAUDE.md");
23642
25763
  const filesToUpdate = [];
23643
25764
  filesToUpdate.push({ path: agentsPath, name: "AGENTS.md" });
23644
25765
  try {
23645
- readFileSync10(legacyPath, "utf-8");
25766
+ readFileSync14(legacyPath, "utf-8");
23646
25767
  filesToUpdate.push({ path: legacyPath, name: "CLAUDE.md" });
23647
25768
  } catch {
23648
25769
  }
@@ -23657,12 +25778,12 @@ _Added on ${now.toLocaleString()} ${timezone}_`;
23657
25778
  try {
23658
25779
  let existingContent = "";
23659
25780
  try {
23660
- existingContent = readFileSync10(file.path, "utf-8").trim();
25781
+ existingContent = readFileSync14(file.path, "utf-8").trim();
23661
25782
  } catch {
23662
25783
  }
23663
25784
  const separator = existingContent ? "\n\n" : "";
23664
25785
  const newContent = `${existingContent}${separator}${interpreted}${timestamp}`;
23665
- writeFileSync6(file.path, newContent, "utf-8");
25786
+ writeFileSync9(file.path, newContent, "utf-8");
23666
25787
  updatedFiles.push(file.name);
23667
25788
  } catch (error) {
23668
25789
  logError(error);
@@ -23862,7 +25983,7 @@ function useStatusLine() {
23862
25983
  // src/ui/components/PromptInput.tsx
23863
25984
  async function interpretHashCommand(input) {
23864
25985
  try {
23865
- const { queryQuick: queryQuick2 } = await import("./llm-IFU62ZT4.js");
25986
+ const { queryQuick: queryQuick2 } = await import("./llm-23Z6UDED.js");
23866
25987
  const systemPrompt = [
23867
25988
  "You're helping the user structure notes that will be added to their KODING.md file.",
23868
25989
  "Format the user's input into a well-structured note that will be useful for later reference.",
@@ -24175,7 +26296,7 @@ function PromptInput({
24175
26296
  if (messages2.length) {
24176
26297
  if (mode === "bash") {
24177
26298
  onQuery(messages2, newAbortController).then(async () => {
24178
- const { getCwd: getCwd2 } = await import("./state-5OBXGJX6.js");
26299
+ const { getCwd: getCwd2 } = await import("./state-YAYMHZAZ.js");
24179
26300
  setCurrentPwd(getCwd2());
24180
26301
  });
24181
26302
  } else {
@@ -24894,35 +27015,121 @@ async function selectAndReadFiles() {
24894
27015
 
24895
27016
  // src/utils/session/autoCompactCore.ts
24896
27017
  init_log();
24897
- async function getMainConversationContextLimit() {
24898
- try {
24899
- const modelManager = getModelManager();
24900
- const resolution = modelManager.resolveModelWithInfo("main");
24901
- const modelProfile = resolution.success ? resolution.profile : null;
24902
- if (modelProfile?.contextLength) {
24903
- return modelProfile.contextLength;
27018
+
27019
+ // src/services/compact/compact.ts
27020
+ var DEFAULT_COMPACTION_CONFIG = {
27021
+ triggerThresholdPercent: 90,
27022
+ targetPercent: 60,
27023
+ preserveRecentMessages: 4,
27024
+ enabled: true
27025
+ };
27026
+ function groupMessages(messages, preserveCount) {
27027
+ const groups = [];
27028
+ const preserveStartIdx = Math.max(0, messages.length - preserveCount);
27029
+ let currentGroup = null;
27030
+ for (let i = 0; i < messages.length; i++) {
27031
+ const msg = messages[i];
27032
+ const isPreserved = i >= preserveStartIdx;
27033
+ if (isPreserved) {
27034
+ if (currentGroup) {
27035
+ groups.push(currentGroup);
27036
+ currentGroup = null;
27037
+ }
27038
+ groups.push({
27039
+ type: "preserved",
27040
+ messages: [msg],
27041
+ totalTokens: msg.tokens
27042
+ });
27043
+ continue;
27044
+ }
27045
+ if (msg.type === "tool_result") {
27046
+ if (currentGroup?.type === "tool_use") {
27047
+ currentGroup.messages.push(msg);
27048
+ currentGroup.totalTokens += msg.tokens;
27049
+ } else {
27050
+ if (currentGroup) groups.push(currentGroup);
27051
+ currentGroup = {
27052
+ type: "tool_use",
27053
+ messages: [msg],
27054
+ totalTokens: msg.tokens
27055
+ };
27056
+ }
27057
+ } else if (msg.type === "system") {
27058
+ if (currentGroup) groups.push(currentGroup);
27059
+ groups.push({
27060
+ type: "system",
27061
+ messages: [msg],
27062
+ totalTokens: msg.tokens
27063
+ });
27064
+ currentGroup = null;
27065
+ } else {
27066
+ if (currentGroup?.type === "conversation") {
27067
+ currentGroup.messages.push(msg);
27068
+ currentGroup.totalTokens += msg.tokens;
27069
+ } else {
27070
+ if (currentGroup) groups.push(currentGroup);
27071
+ currentGroup = {
27072
+ type: msg.type === "assistant" ? "tool_use" : "conversation",
27073
+ messages: [msg],
27074
+ totalTokens: msg.tokens
27075
+ };
27076
+ }
27077
+ }
27078
+ }
27079
+ if (currentGroup) groups.push(currentGroup);
27080
+ return groups;
27081
+ }
27082
+ function calculateCompactionTarget(currentTokens, contextWindowSize, config2 = DEFAULT_COMPACTION_CONFIG) {
27083
+ const targetTokens = Math.floor(contextWindowSize * (config2.targetPercent / 100));
27084
+ return Math.max(0, currentTokens - targetTokens);
27085
+ }
27086
+ function selectGroupsForCompaction(groups, tokensToFree) {
27087
+ const toCompact = [];
27088
+ const toKeep = [];
27089
+ let freed = 0;
27090
+ for (const group of groups) {
27091
+ if (group.type === "preserved") {
27092
+ toKeep.push(group);
27093
+ continue;
27094
+ }
27095
+ if (freed < tokensToFree) {
27096
+ toCompact.push(group);
27097
+ freed += group.totalTokens;
27098
+ } else {
27099
+ toKeep.push(group);
27100
+ }
27101
+ }
27102
+ return { toCompact, toKeep };
27103
+ }
27104
+ function buildCompactionPrompt(groups) {
27105
+ const lines = [];
27106
+ for (const group of groups) {
27107
+ for (const msg of group.messages) {
27108
+ const prefix = msg.type === "user" ? "User" : msg.type === "assistant" ? "Assistant" : "System";
27109
+ const content = msg.content.length > 2e3 ? msg.content.slice(0, 2e3) + "... (truncated)" : msg.content;
27110
+ lines.push(`[${prefix}]: ${content}`);
24904
27111
  }
24905
- return 2e5;
24906
- } catch (error) {
24907
- return 2e5;
24908
27112
  }
27113
+ return lines.join("\n\n");
24909
27114
  }
24910
- var COMPRESSION_PROMPT2 = `Please provide a comprehensive summary of our conversation structured as follows:
27115
+
27116
+ // src/utils/session/autoCompactCore.ts
27117
+ var COMPRESSION_PROMPT2 = `Please provide a comprehensive summary of the following conversation history, structured as follows:
24911
27118
 
24912
27119
  ## Technical Context
24913
27120
  Development environment, tools, frameworks, and configurations in use. Programming languages, libraries, and technical constraints. File structure, directory organization, and project architecture.
24914
27121
 
24915
- ## Project Overview
27122
+ ## Project Overview
24916
27123
  Main project goals, features, and scope. Key components, modules, and their relationships. Data models, APIs, and integration patterns.
24917
27124
 
24918
27125
  ## Code Changes
24919
- Files created, modified, or analyzed during our conversation. Specific code implementations, functions, and algorithms added. Configuration changes and structural modifications.
27126
+ Files created, modified, or analyzed during the conversation. Specific code implementations, functions, and algorithms added. Configuration changes and structural modifications.
24920
27127
 
24921
27128
  ## Debugging & Issues
24922
27129
  Problems encountered and their root causes. Solutions implemented and their effectiveness. Error messages, logs, and diagnostic information.
24923
27130
 
24924
27131
  ## Current Status
24925
- What we just completed successfully. Current state of the codebase and any ongoing work. Test results, validation steps, and verification performed.
27132
+ What was most recently completed. Current state of the codebase and any ongoing work. Test results, validation steps, and verification performed.
24926
27133
 
24927
27134
  ## Pending Tasks
24928
27135
  Immediate next steps and priorities. Planned features, improvements, and refactoring. Known issues, technical debt, and areas needing attention.
@@ -24934,14 +27141,22 @@ Coding style, formatting, and organizational preferences. Communication patterns
24934
27141
  Important technical decisions made and their rationale. Alternative approaches considered and why they were rejected. Trade-offs accepted and their implications.
24935
27142
 
24936
27143
  Focus on information essential for continuing the conversation effectively, including specific details about code, files, errors, and plans.`;
24937
- async function calculateThresholds(tokenCount) {
24938
- const contextLimit = await getMainConversationContextLimit();
24939
- return calculateAutoCompactThresholds(tokenCount, contextLimit);
27144
+ async function getMainConversationContextLimit() {
27145
+ try {
27146
+ const modelManager = getModelManager();
27147
+ const resolution = modelManager.resolveModelWithInfo("main");
27148
+ const modelProfile = resolution.success ? resolution.profile : null;
27149
+ if (modelProfile?.contextLength) return modelProfile.contextLength;
27150
+ return 2e5;
27151
+ } catch {
27152
+ return 2e5;
27153
+ }
24940
27154
  }
24941
27155
  async function shouldAutoCompact(messages) {
24942
27156
  if (messages.length < 3) return false;
24943
27157
  const tokenCount = countTokens(messages);
24944
- const { isAboveAutoCompactThreshold } = await calculateThresholds(tokenCount);
27158
+ const contextLimit = await getMainConversationContextLimit();
27159
+ const { isAboveAutoCompactThreshold } = calculateAutoCompactThresholds(tokenCount, contextLimit);
24945
27160
  return isAboveAutoCompactThreshold;
24946
27161
  }
24947
27162
  async function checkAutoCompact(messages, toolUseContext) {
@@ -24950,10 +27165,7 @@ async function checkAutoCompact(messages, toolUseContext) {
24950
27165
  }
24951
27166
  try {
24952
27167
  const compactedMessages = await executeAutoCompact(messages, toolUseContext);
24953
- return {
24954
- messages: compactedMessages,
24955
- wasCompacted: true
24956
- };
27168
+ return { messages: compactedMessages, wasCompacted: true };
24957
27169
  } catch (error) {
24958
27170
  logError(error);
24959
27171
  debug.warn("AUTO_COMPACT_FAILED", {
@@ -24963,30 +27175,53 @@ async function checkAutoCompact(messages, toolUseContext) {
24963
27175
  }
24964
27176
  }
24965
27177
  async function executeAutoCompact(messages, toolUseContext) {
24966
- const summaryRequest = createUserMessage(COMPRESSION_PROMPT2);
24967
27178
  const tokenCount = countTokens(messages);
24968
- const modelManager = getModelManager();
24969
- const compactResolution = modelManager.resolveModelWithInfo("compact");
24970
- const mainResolution = modelManager.resolveModelWithInfo("main");
24971
- let compressionModelPointer = "compact";
24972
- let compressionNotice = null;
24973
- if (!compactResolution.success || !compactResolution.profile) {
24974
- compressionModelPointer = "main";
24975
- compressionNotice = compactResolution.error || "Compression model pointer 'compact' is not configured.";
24976
- } else {
24977
- const compactBudget = Math.floor(
24978
- compactResolution.profile.contextLength * 0.9
24979
- );
24980
- if (compactBudget > 0 && tokenCount > compactBudget) {
24981
- compressionModelPointer = "main";
24982
- compressionNotice = `Compression model '${compactResolution.profile.name}' does not fit current context (~${Math.round(tokenCount / 1e3)}k tokens).`;
24983
- }
27179
+ const contextLimit = await getMainConversationContextLimit();
27180
+ const compactableMessages = messagesToCompactable(messages);
27181
+ const groups = groupMessages(compactableMessages, DEFAULT_COMPACTION_CONFIG.preserveRecentMessages);
27182
+ const tokensToFree = calculateCompactionTarget(tokenCount, contextLimit);
27183
+ const { toCompact, toKeep } = selectGroupsForCompaction(groups, tokensToFree);
27184
+ if (toCompact.length === 0) {
27185
+ return executeFullCompact(messages, toolUseContext, tokenCount);
24984
27186
  }
24985
- if (compressionModelPointer === "main" && (!mainResolution.success || !mainResolution.profile)) {
24986
- throw new Error(
24987
- mainResolution.error || "Compression fallback failed: model pointer 'main' is not configured."
24988
- );
27187
+ const conversationToSummarize = buildCompactionPrompt(toCompact);
27188
+ const { modelPointer, notice } = resolveCompressionModel(tokenCount);
27189
+ const summaryPrompt = `${COMPRESSION_PROMPT2}
27190
+
27191
+ ---
27192
+
27193
+ Conversation to summarize:
27194
+
27195
+ ${conversationToSummarize}`;
27196
+ const summaryRequest = createUserMessage(summaryPrompt);
27197
+ const summaryResponse = await queryLLM(
27198
+ normalizeMessagesForAPI([summaryRequest]),
27199
+ [
27200
+ "You are a helpful AI assistant tasked with creating comprehensive conversation summaries that preserve all essential context for continuing development work."
27201
+ ],
27202
+ 0,
27203
+ [],
27204
+ toolUseContext.abortController.signal,
27205
+ {
27206
+ safeMode: false,
27207
+ model: modelPointer,
27208
+ prependCLISysprompt: true
27209
+ }
27210
+ );
27211
+ const noticeText = notice ? `Context selectively compressed (${toCompact.length} groups summarized, ${toKeep.length} preserved). ${notice}` : `Context selectively compressed (${toCompact.length} groups summarized, ${toKeep.length} preserved).`;
27212
+ const result = await finalizeSummary(summaryResponse, noticeText);
27213
+ for (const group of toKeep) {
27214
+ for (const msg of group.messages) {
27215
+ if (msg.original) {
27216
+ result.push(msg.original);
27217
+ }
27218
+ }
24989
27219
  }
27220
+ return result;
27221
+ }
27222
+ async function executeFullCompact(messages, toolUseContext, tokenCount) {
27223
+ const { modelPointer, notice } = resolveCompressionModel(tokenCount);
27224
+ const summaryRequest = createUserMessage(COMPRESSION_PROMPT2);
24990
27225
  const summaryResponse = await queryLLM(
24991
27226
  normalizeMessagesForAPI([...messages, summaryRequest]),
24992
27227
  [
@@ -24997,16 +27232,17 @@ async function executeAutoCompact(messages, toolUseContext) {
24997
27232
  toolUseContext.abortController.signal,
24998
27233
  {
24999
27234
  safeMode: false,
25000
- model: compressionModelPointer,
27235
+ model: modelPointer,
25001
27236
  prependCLISysprompt: true
25002
27237
  }
25003
27238
  );
25004
- const content = summaryResponse.message.content;
25005
- const summary = typeof content === "string" ? content : content.length > 0 && content[0]?.type === "text" ? content[0].text : null;
27239
+ const noticeText = notice ? `Context fully compressed due to token limit. ${notice}` : `Context fully compressed due to token limit.`;
27240
+ return finalizeSummary(summaryResponse, noticeText);
27241
+ }
27242
+ async function finalizeSummary(summaryResponse, noticeText) {
27243
+ const summary = extractSummaryText(summaryResponse);
25006
27244
  if (!summary) {
25007
- throw new Error(
25008
- "Failed to generate conversation summary - response did not contain valid text content"
25009
- );
27245
+ throw new Error("Failed to generate conversation summary");
25010
27246
  }
25011
27247
  summaryResponse.message.usage = {
25012
27248
  input_tokens: 0,
@@ -25015,19 +27251,14 @@ async function executeAutoCompact(messages, toolUseContext) {
25015
27251
  cache_read_input_tokens: 0
25016
27252
  };
25017
27253
  const recoveredFiles = await selectAndReadFiles();
25018
- const compactedMessages = [
25019
- createUserMessage(
25020
- compressionNotice ? `Context automatically compressed due to token limit. ${compressionNotice} Using '${compressionModelPointer}' for compression.` : `Context automatically compressed due to token limit. Using '${compressionModelPointer}' for compression.`
25021
- ),
27254
+ const result = [
27255
+ createUserMessage(noticeText),
25022
27256
  summaryResponse
25023
27257
  ];
25024
- if (recoveredFiles.length > 0) {
25025
- for (const file of recoveredFiles) {
25026
- const contentWithLines = addLineNumbers({
25027
- content: file.content,
25028
- startLine: 1
25029
- });
25030
- const recoveryMessage = createUserMessage(
27258
+ for (const file of recoveredFiles) {
27259
+ const contentWithLines = addLineNumbers({ content: file.content, startLine: 1 });
27260
+ result.push(
27261
+ createUserMessage(
25031
27262
  `**Recovered File: ${file.path}**
25032
27263
 
25033
27264
  \`\`\`
@@ -25035,15 +27266,62 @@ ${contentWithLines}
25035
27266
  \`\`\`
25036
27267
 
25037
27268
  *Automatically recovered (${file.tokens} tokens)${file.truncated ? " [truncated]" : ""}*`
25038
- );
25039
- compactedMessages.push(recoveryMessage);
25040
- }
27269
+ )
27270
+ );
25041
27271
  }
25042
27272
  getMessagesSetter()([]);
25043
27273
  getContext.cache.clear?.();
25044
27274
  getCodeStyle.cache.clear?.();
25045
27275
  resetFileFreshnessSession();
25046
- return compactedMessages;
27276
+ return result;
27277
+ }
27278
+ function resolveCompressionModel(tokenCount) {
27279
+ const modelManager = getModelManager();
27280
+ const compactResolution = modelManager.resolveModelWithInfo("compact");
27281
+ const mainResolution = modelManager.resolveModelWithInfo("main");
27282
+ let modelPointer = "compact";
27283
+ let notice = null;
27284
+ if (!compactResolution.success || !compactResolution.profile) {
27285
+ modelPointer = "main";
27286
+ notice = compactResolution.error || "Compression model 'compact' not configured.";
27287
+ } else {
27288
+ const compactBudget = Math.floor(compactResolution.profile.contextLength * 0.9);
27289
+ if (compactBudget > 0 && tokenCount > compactBudget) {
27290
+ modelPointer = "main";
27291
+ notice = `Compression model '${compactResolution.profile.name}' can't fit context (~${Math.round(tokenCount / 1e3)}k tokens).`;
27292
+ }
27293
+ }
27294
+ if (modelPointer === "main" && (!mainResolution.success || !mainResolution.profile)) {
27295
+ throw new Error(mainResolution.error || "Compression fallback failed: 'main' not configured.");
27296
+ }
27297
+ return { modelPointer, notice };
27298
+ }
27299
+ function extractSummaryText(response) {
27300
+ const content = response.message.content;
27301
+ if (typeof content === "string") return content;
27302
+ if (Array.isArray(content)) {
27303
+ for (const block of content) {
27304
+ if (block?.type === "text" && block.text) return block.text;
27305
+ }
27306
+ }
27307
+ return null;
27308
+ }
27309
+ function messagesToCompactable(messages) {
27310
+ return messages.map((msg) => {
27311
+ let content = "";
27312
+ let type2 = "user";
27313
+ if (msg.type === "user") {
27314
+ type2 = "user";
27315
+ const rawContent = msg.message?.content;
27316
+ content = typeof rawContent === "string" ? rawContent : Array.isArray(rawContent) ? rawContent.map((c) => c.text || "").join("\n") : "";
27317
+ } else if (msg.type === "assistant") {
27318
+ type2 = "assistant";
27319
+ const rawContent = msg.message?.content;
27320
+ content = typeof rawContent === "string" ? rawContent : Array.isArray(rawContent) ? rawContent.map((c) => c.text || "").join("\n") : "";
27321
+ }
27322
+ const tokens = Math.ceil(content.length * 0.25);
27323
+ return { type: type2, content, tokens, original: msg };
27324
+ });
25047
27325
  }
25048
27326
 
25049
27327
  // src/app/query.ts
@@ -26911,7 +29189,7 @@ import React102, { useCallback as useCallback17, useEffect as useEffect26, useMe
26911
29189
  import { Box as Box76, Text as Text80, useInput as useInput30 } from "ink";
26912
29190
  import figures8 from "figures";
26913
29191
  import chalk15 from "chalk";
26914
- import { join as join12 } from "path";
29192
+ import { join as join18 } from "path";
26915
29193
  import { spawn as spawn2 } from "child_process";
26916
29194
 
26917
29195
  // src/commands/agents/tooling.ts
@@ -26953,13 +29231,13 @@ async function getAvailableTools() {
26953
29231
  // src/commands/agents/storage.ts
26954
29232
  init_state();
26955
29233
  import {
26956
- existsSync as existsSync14,
26957
- mkdirSync as mkdirSync5,
29234
+ existsSync as existsSync20,
29235
+ mkdirSync as mkdirSync8,
26958
29236
  renameSync as renameSync2,
26959
29237
  unlinkSync as unlinkSync2,
26960
- writeFileSync as writeFileSync7
29238
+ writeFileSync as writeFileSync10
26961
29239
  } from "fs";
26962
- import { join as join11 } from "path";
29240
+ import { join as join17 } from "path";
26963
29241
  import { homedir as homedir7 } from "os";
26964
29242
  init_log();
26965
29243
 
@@ -26967,7 +29245,7 @@ init_log();
26967
29245
  import { randomUUID as randomUUID5 } from "crypto";
26968
29246
  init_log();
26969
29247
  async function generateAgentWithClaude(prompt) {
26970
- const { queryModel } = await import("./llm-IFU62ZT4.js");
29248
+ const { queryModel } = await import("./llm-23Z6UDED.js");
26971
29249
  const systemPrompt = `You are an expert at creating AI agent configurations. Based on the user's description, generate a specialized agent configuration.
26972
29250
 
26973
29251
  Return your response as a JSON object with exactly these fields:
@@ -27151,26 +29429,26 @@ var LEGACY_FOLDER = ".kode";
27151
29429
  var AGENTS_DIR = "agents";
27152
29430
  function getAgentDirectory(location) {
27153
29431
  if (location === "user") {
27154
- return join11(homedir7(), PRIMARY_FOLDER, AGENTS_DIR);
29432
+ return join17(homedir7(), PRIMARY_FOLDER, AGENTS_DIR);
27155
29433
  }
27156
- return join11(getCwd(), PRIMARY_FOLDER, AGENTS_DIR);
29434
+ return join17(getCwd(), PRIMARY_FOLDER, AGENTS_DIR);
27157
29435
  }
27158
29436
  function getLegacyAgentDirectory(location) {
27159
29437
  if (location === "user") {
27160
- return join11(homedir7(), LEGACY_FOLDER, AGENTS_DIR);
29438
+ return join17(homedir7(), LEGACY_FOLDER, AGENTS_DIR);
27161
29439
  }
27162
- return join11(getCwd(), LEGACY_FOLDER, AGENTS_DIR);
29440
+ return join17(getCwd(), LEGACY_FOLDER, AGENTS_DIR);
27163
29441
  }
27164
29442
  function getPrimaryAgentFilePath(location, agentType) {
27165
- return join11(getAgentDirectory(location), `${agentType}.md`);
29443
+ return join17(getAgentDirectory(location), `${agentType}.md`);
27166
29444
  }
27167
29445
  function getLegacyAgentFilePath(location, agentType) {
27168
- return join11(getLegacyAgentDirectory(location), `${agentType}.md`);
29446
+ return join17(getLegacyAgentDirectory(location), `${agentType}.md`);
27169
29447
  }
27170
29448
  function ensureDirectoryExists(location) {
27171
29449
  const dir = getAgentDirectory(location);
27172
- if (!existsSync14(dir)) {
27173
- mkdirSync5(dir, { recursive: true });
29450
+ if (!existsSync20(dir)) {
29451
+ mkdirSync8(dir, { recursive: true });
27174
29452
  }
27175
29453
  return dir;
27176
29454
  }
@@ -27178,7 +29456,7 @@ async function saveAgent(location, agentType, description2, tools, systemPrompt,
27178
29456
  ensureDirectoryExists(location);
27179
29457
  const filePath = getPrimaryAgentFilePath(location, agentType);
27180
29458
  const legacyPath = getLegacyAgentFilePath(location, agentType);
27181
- if (throwIfExists && (existsSync14(filePath) || existsSync14(legacyPath))) {
29459
+ if (throwIfExists && (existsSync20(filePath) || existsSync20(legacyPath))) {
27182
29460
  throw new Error(`Agent file already exists: ${filePath}`);
27183
29461
  }
27184
29462
  const tempFile = `${filePath}.tmp.${Date.now()}.${Math.random().toString(36).substr(2, 9)}`;
@@ -27192,8 +29470,8 @@ async function saveAgent(location, agentType, description2, tools, systemPrompt,
27192
29470
  color
27193
29471
  );
27194
29472
  try {
27195
- writeFileSync7(tempFile, content, { encoding: "utf-8", flag: "wx" });
27196
- if (throwIfExists && (existsSync14(filePath) || existsSync14(legacyPath))) {
29473
+ writeFileSync10(tempFile, content, { encoding: "utf-8", flag: "wx" });
29474
+ if (throwIfExists && (existsSync20(filePath) || existsSync20(legacyPath))) {
27197
29475
  try {
27198
29476
  unlinkSync2(tempFile);
27199
29477
  } catch {
@@ -27203,7 +29481,7 @@ async function saveAgent(location, agentType, description2, tools, systemPrompt,
27203
29481
  renameSync2(tempFile, filePath);
27204
29482
  } catch (error) {
27205
29483
  try {
27206
- if (existsSync14(tempFile)) {
29484
+ if (existsSync20(tempFile)) {
27207
29485
  unlinkSync2(tempFile);
27208
29486
  }
27209
29487
  } catch (cleanupError) {
@@ -27231,9 +29509,9 @@ async function updateAgent(agent, description2, tools, systemPrompt, color, mode
27231
29509
  const location = agent.location;
27232
29510
  const primaryPath = getPrimaryAgentFilePath(location, agent.agentType);
27233
29511
  const legacyPath = getLegacyAgentFilePath(location, agent.agentType);
27234
- const filePath = existsSync14(primaryPath) ? primaryPath : existsSync14(legacyPath) ? legacyPath : primaryPath;
29512
+ const filePath = existsSync20(primaryPath) ? primaryPath : existsSync20(legacyPath) ? legacyPath : primaryPath;
27235
29513
  ensureDirectoryExists(location);
27236
- writeFileSync7(filePath, content, { encoding: "utf-8", flag: "w" });
29514
+ writeFileSync10(filePath, content, { encoding: "utf-8", flag: "w" });
27237
29515
  }
27238
29516
  async function deleteAgent(agent) {
27239
29517
  if (agent.location === "built-in" || agent.location === "plugin") {
@@ -27242,10 +29520,10 @@ async function deleteAgent(agent) {
27242
29520
  const location = agent.location;
27243
29521
  const primaryPath = getPrimaryAgentFilePath(location, agent.agentType);
27244
29522
  const legacyPath = getLegacyAgentFilePath(location, agent.agentType);
27245
- if (existsSync14(primaryPath)) {
29523
+ if (existsSync20(primaryPath)) {
27246
29524
  unlinkSync2(primaryPath);
27247
29525
  }
27248
- if (existsSync14(legacyPath)) {
29526
+ if (existsSync20(legacyPath)) {
27249
29527
  unlinkSync2(legacyPath);
27250
29528
  }
27251
29529
  }
@@ -28261,8 +30539,8 @@ function ViewAgent(props) {
28261
30539
  if (props.agent.source === "plugin") return `Plugin: ${props.agent.baseDir ?? "Unknown"}`;
28262
30540
  const baseDir = props.agent.baseDir;
28263
30541
  const file = `${props.agent.filename ?? props.agent.agentType}.md`;
28264
- if (props.agent.source === "projectSettings") return join12(".claude", "agents", file);
28265
- if (baseDir) return join12(baseDir, file);
30542
+ if (props.agent.source === "projectSettings") return join18(".claude", "agents", file);
30543
+ if (baseDir) return join18(baseDir, file);
28266
30544
  return props.agent.source;
28267
30545
  })();
28268
30546
  const toolsSummary = () => {
@@ -28610,6 +30888,10 @@ var COMMANDS = memoize3(() => [
28610
30888
  auto_work_default,
28611
30889
  auto_bugfix_default,
28612
30890
  parallel_execute_default,
30891
+ fix_harness_default,
30892
+ orchestrate_default,
30893
+ red_blue_default,
30894
+ monitor_default,
28613
30895
  todos_default,
28614
30896
  ...isAnthropicAuthEnabled() ? [logout_default, login_default()] : [],
28615
30897
  ...INTERNAL_ONLY_COMMANDS
@@ -30229,8 +32511,8 @@ var BashTool = {
30229
32511
  const targetDir = parts[1].replace(/^['"]|['"]$/g, "");
30230
32512
  const fullTargetDir = isAbsolute10(targetDir) ? targetDir : resolve10(getCwd(), targetDir);
30231
32513
  if (!isInDirectory(
30232
- relative13(getOriginalCwd(), fullTargetDir),
30233
- relative13(getCwd(), getOriginalCwd())
32514
+ relative15(getOriginalCwd(), fullTargetDir),
32515
+ relative15(getCwd(), getOriginalCwd())
30234
32516
  )) {
30235
32517
  return {
30236
32518
  result: false,
@@ -30636,7 +32918,7 @@ ${footerParts.join(" ")}`;
30636
32918
  for (const filePath of filePaths) {
30637
32919
  const fullFilePath = isAbsolute10(filePath) ? filePath : resolve10(getCwd(), filePath);
30638
32920
  try {
30639
- readFileTimestamps[fullFilePath] = statSync12(fullFilePath).mtimeMs;
32921
+ readFileTimestamps[fullFilePath] = statSync15(fullFilePath).mtimeMs;
30640
32922
  } catch (e) {
30641
32923
  logError(e);
30642
32924
  }
@@ -31051,8 +33333,25 @@ Game project changes must pass through verification layers before being committe
31051
33333
  ## Harness Self-Evolution
31052
33334
  When your changes cause an error and you fix it:
31053
33335
  1. Analyze the root cause
31054
- 2. Determine which rule file should be updated to prevent the same class of error
31055
- 3. Add a rule with the shortest possible statement + a correct-usage example`;
33336
+ 2. Determine which rule file should be updated (.danya/rules/) to prevent the same class of error
33337
+ 3. Route to the correct file:
33338
+ - Forbidden zone violation \u2192 constitution.md
33339
+ - Coding principle violation \u2192 golden-principles.md
33340
+ - Known pitfall \u2192 known-pitfalls.md
33341
+ - Architecture boundary \u2192 architecture-boundaries.md
33342
+ 4. Add a rule: \u274C what went wrong + \u2705 correct approach
33343
+ 5. Keep total lines per rule file under 550
33344
+ The system will prompt you when it detects an error-then-fix pattern. Cooperate by writing the rule update.
33345
+ You can also manually run /fix-harness at any time.
33346
+
33347
+ ## Subagent Dispatch
33348
+ Use subagents to avoid polluting the main context with large search/analysis results:
33349
+ - Searching 5+ files \u2192 dispatch to subagent, get back a file list
33350
+ - Analyzing large compilation output \u2192 dispatch to subagent, get back a summary
33351
+ - Exploring unfamiliar codebase area \u2192 dispatch to subagent, get back structure overview
33352
+ - Checking all event subscriptions/references \u2192 dispatch to subagent, get back unmatched pairs
33353
+ Rule of thumb: if the task needs 5+ tool calls and you only need the final result, use a subagent.
33354
+ Do NOT use subagents for: single file edits, single command execution, reading 2-3 files.`;
31056
33355
  }
31057
33356
  function getExecutingWithCareSection() {
31058
33357
  return `# Executing Actions with Care