@morphllm/morphsdk 0.2.44 → 0.2.46

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (142)
  1. package/README.md +1 -1
  2. package/dist/{chunk-TVFGHXPE.js → chunk-3FTAIJBH.js} +4 -4
  3. package/dist/chunk-5JTJOQUX.js +283 -0
  4. package/dist/chunk-5JTJOQUX.js.map +1 -0
  5. package/dist/{chunk-ZRLEAPZV.js → chunk-76DJEQEP.js} +4 -4
  6. package/dist/{chunk-W3XLPMV3.js → chunk-7HS6YXA3.js} +21 -5
  7. package/dist/{chunk-W3XLPMV3.js.map → chunk-7HS6YXA3.js.map} +1 -1
  8. package/dist/chunk-7T7YOPJV.js +82 -0
  9. package/dist/chunk-7T7YOPJV.js.map +1 -0
  10. package/dist/chunk-CL45IWIU.js +105 -0
  11. package/dist/chunk-CL45IWIU.js.map +1 -0
  12. package/dist/chunk-D6OD3IST.js +70 -0
  13. package/dist/chunk-D6OD3IST.js.map +1 -0
  14. package/dist/{chunk-PEGZVGG4.js → chunk-G4AWE5A2.js} +4 -4
  15. package/dist/{chunk-OUEJ6XEO.js → chunk-GJU7UOFL.js} +4 -4
  16. package/dist/{chunk-Q7PDN7TS.js → chunk-GZMUGMOZ.js} +1 -1
  17. package/dist/{chunk-Q7PDN7TS.js.map → chunk-GZMUGMOZ.js.map} +1 -1
  18. package/dist/chunk-JYBVRF72.js +1 -0
  19. package/dist/{chunk-EYHXBQQX.js → chunk-LVY5LPEX.js} +70 -10
  20. package/dist/chunk-LVY5LPEX.js.map +1 -0
  21. package/dist/{chunk-GDR65N2J.js → chunk-OXHGFHEU.js} +53 -26
  22. package/dist/chunk-OXHGFHEU.js.map +1 -0
  23. package/dist/{chunk-VBBJGWHY.js → chunk-P2XKFWFD.js} +2 -2
  24. package/dist/chunk-PABIV7X6.js +76 -0
  25. package/dist/chunk-PABIV7X6.js.map +1 -0
  26. package/dist/{chunk-GTOXMAF2.js → chunk-SWQPIKPY.js} +44 -3
  27. package/dist/chunk-SWQPIKPY.js.map +1 -0
  28. package/dist/chunk-TJIUA27P.js +94 -0
  29. package/dist/chunk-TJIUA27P.js.map +1 -0
  30. package/dist/{chunk-O5DA5V5S.js → chunk-UBX7QYBD.js} +4 -4
  31. package/dist/{chunk-X4CQ6D3G.js → chunk-UIZT3KVJ.js} +4 -4
  32. package/dist/{chunk-UYBIKZPM.js → chunk-UXYK7WZX.js} +2 -2
  33. package/dist/chunk-WETRQJGU.js +129 -0
  34. package/dist/chunk-WETRQJGU.js.map +1 -0
  35. package/dist/client-BGctTHu9.d.ts +318 -0
  36. package/dist/client.cjs +1954 -53
  37. package/dist/client.cjs.map +1 -1
  38. package/dist/client.d.ts +14 -110
  39. package/dist/client.js +29 -4
  40. package/dist/core-DxiUwyBe.d.ts +156 -0
  41. package/dist/git/client.cjs +52 -25
  42. package/dist/git/client.cjs.map +1 -1
  43. package/dist/git/client.d.ts +17 -8
  44. package/dist/git/client.js +1 -1
  45. package/dist/git/index.cjs +52 -25
  46. package/dist/git/index.cjs.map +1 -1
  47. package/dist/git/index.d.ts +1 -1
  48. package/dist/git/index.js +2 -2
  49. package/dist/git/types.cjs.map +1 -1
  50. package/dist/git/types.d.ts +20 -2
  51. package/dist/index.cjs +2033 -55
  52. package/dist/index.cjs.map +1 -1
  53. package/dist/index.d.ts +8 -1
  54. package/dist/index.js +48 -6
  55. package/dist/tools/browser/anthropic.cjs +1 -0
  56. package/dist/tools/browser/anthropic.cjs.map +1 -1
  57. package/dist/tools/browser/anthropic.js +1 -1
  58. package/dist/tools/browser/core.cjs +69 -9
  59. package/dist/tools/browser/core.cjs.map +1 -1
  60. package/dist/tools/browser/core.js +1 -1
  61. package/dist/tools/browser/index.cjs +69 -9
  62. package/dist/tools/browser/index.cjs.map +1 -1
  63. package/dist/tools/browser/index.js +1 -1
  64. package/dist/tools/browser/openai.cjs +1 -0
  65. package/dist/tools/browser/openai.cjs.map +1 -1
  66. package/dist/tools/browser/openai.js +1 -1
  67. package/dist/tools/browser/types.cjs.map +1 -1
  68. package/dist/tools/browser/types.d.ts +2 -0
  69. package/dist/tools/browser/vercel.cjs +1 -0
  70. package/dist/tools/browser/vercel.cjs.map +1 -1
  71. package/dist/tools/browser/vercel.js +1 -1
  72. package/dist/tools/codebase_search/anthropic.js +2 -2
  73. package/dist/tools/codebase_search/index.js +9 -9
  74. package/dist/tools/codebase_search/openai.js +2 -2
  75. package/dist/tools/codebase_search/vercel.js +2 -2
  76. package/dist/tools/fastapply/anthropic.js +2 -2
  77. package/dist/tools/fastapply/index.js +7 -7
  78. package/dist/tools/fastapply/openai.js +2 -2
  79. package/dist/tools/fastapply/vercel.js +2 -2
  80. package/dist/tools/index.js +7 -7
  81. package/dist/tools/warp_grep/agent/config.cjs +80 -1
  82. package/dist/tools/warp_grep/agent/config.cjs.map +1 -1
  83. package/dist/tools/warp_grep/agent/config.js +1 -1
  84. package/dist/tools/warp_grep/agent/parser.cjs +43 -2
  85. package/dist/tools/warp_grep/agent/parser.cjs.map +1 -1
  86. package/dist/tools/warp_grep/agent/parser.js +1 -1
  87. package/dist/tools/warp_grep/agent/prompt.cjs +89 -45
  88. package/dist/tools/warp_grep/agent/prompt.cjs.map +1 -1
  89. package/dist/tools/warp_grep/agent/prompt.d.ts +1 -1
  90. package/dist/tools/warp_grep/agent/prompt.js +1 -1
  91. package/dist/tools/warp_grep/agent/runner.cjs +229 -49
  92. package/dist/tools/warp_grep/agent/runner.cjs.map +1 -1
  93. package/dist/tools/warp_grep/agent/runner.js +4 -4
  94. package/dist/tools/warp_grep/agent/types.js +0 -1
  95. package/dist/tools/warp_grep/anthropic.cjs +311 -83
  96. package/dist/tools/warp_grep/anthropic.cjs.map +1 -1
  97. package/dist/tools/warp_grep/anthropic.d.ts +75 -12
  98. package/dist/tools/warp_grep/anthropic.js +21 -8
  99. package/dist/tools/warp_grep/index.cjs +415 -126
  100. package/dist/tools/warp_grep/index.cjs.map +1 -1
  101. package/dist/tools/warp_grep/index.d.ts +17 -4
  102. package/dist/tools/warp_grep/index.js +29 -21
  103. package/dist/tools/warp_grep/openai.cjs +314 -83
  104. package/dist/tools/warp_grep/openai.cjs.map +1 -1
  105. package/dist/tools/warp_grep/openai.d.ts +73 -29
  106. package/dist/tools/warp_grep/openai.js +21 -8
  107. package/dist/tools/warp_grep/providers/command.cjs +80 -1
  108. package/dist/tools/warp_grep/providers/command.cjs.map +1 -1
  109. package/dist/tools/warp_grep/providers/command.js +2 -2
  110. package/dist/tools/warp_grep/providers/local.cjs +80 -1
  111. package/dist/tools/warp_grep/providers/local.cjs.map +1 -1
  112. package/dist/tools/warp_grep/providers/local.js +2 -2
  113. package/dist/tools/warp_grep/vercel.cjs +291 -57
  114. package/dist/tools/warp_grep/vercel.cjs.map +1 -1
  115. package/dist/tools/warp_grep/vercel.d.ts +40 -19
  116. package/dist/tools/warp_grep/vercel.js +17 -8
  117. package/package.json +1 -1
  118. package/dist/chunk-AFEPUNAO.js +0 -15
  119. package/dist/chunk-AFEPUNAO.js.map +0 -1
  120. package/dist/chunk-EYHXBQQX.js.map +0 -1
  121. package/dist/chunk-GDR65N2J.js.map +0 -1
  122. package/dist/chunk-GTOXMAF2.js.map +0 -1
  123. package/dist/chunk-HKZB23U7.js +0 -85
  124. package/dist/chunk-HKZB23U7.js.map +0 -1
  125. package/dist/chunk-IQHKEIQX.js +0 -54
  126. package/dist/chunk-IQHKEIQX.js.map +0 -1
  127. package/dist/chunk-JKFVDM62.js +0 -45
  128. package/dist/chunk-JKFVDM62.js.map +0 -1
  129. package/dist/chunk-KL4YVZRF.js +0 -57
  130. package/dist/chunk-KL4YVZRF.js.map +0 -1
  131. package/dist/chunk-SMR2T5BT.js +0 -104
  132. package/dist/chunk-SMR2T5BT.js.map +0 -1
  133. package/dist/chunk-XYPMN4A3.js +0 -1
  134. /package/dist/{chunk-TVFGHXPE.js.map → chunk-3FTAIJBH.js.map} +0 -0
  135. /package/dist/{chunk-ZRLEAPZV.js.map → chunk-76DJEQEP.js.map} +0 -0
  136. /package/dist/{chunk-PEGZVGG4.js.map → chunk-G4AWE5A2.js.map} +0 -0
  137. /package/dist/{chunk-OUEJ6XEO.js.map → chunk-GJU7UOFL.js.map} +0 -0
  138. /package/dist/{chunk-XYPMN4A3.js.map → chunk-JYBVRF72.js.map} +0 -0
  139. /package/dist/{chunk-VBBJGWHY.js.map → chunk-P2XKFWFD.js.map} +0 -0
  140. /package/dist/{chunk-O5DA5V5S.js.map → chunk-UBX7QYBD.js.map} +0 -0
  141. /package/dist/{chunk-X4CQ6D3G.js.map → chunk-UIZT3KVJ.js.map} +0 -0
  142. /package/dist/{chunk-UYBIKZPM.js.map → chunk-UXYK7WZX.js.map} +0 -0
@@ -1,22 +1,22 @@
  import "../../chunk-X2K57BH6.js";
  import {
  anthropic_exports
- } from "../../chunk-TVFGHXPE.js";
+ } from "../../chunk-3FTAIJBH.js";
  import {
  openai_exports
- } from "../../chunk-PEGZVGG4.js";
+ } from "../../chunk-G4AWE5A2.js";
  import {
  vercel_exports
- } from "../../chunk-X4CQ6D3G.js";
- import {
- EDIT_FILE_SYSTEM_PROMPT,
- EDIT_FILE_TOOL_DESCRIPTION
- } from "../../chunk-63WE2C5R.js";
+ } from "../../chunk-UIZT3KVJ.js";
  import {
  countChanges,
  executeEditFile,
  generateUdiff
  } from "../../chunk-64PMM72R.js";
+ import {
+ EDIT_FILE_SYSTEM_PROMPT,
+ EDIT_FILE_TOOL_DESCRIPTION
+ } from "../../chunk-63WE2C5R.js";
  import "../../chunk-4VWJFZVS.js";
  import "../../chunk-PZ5AY32C.js";
  export {
@@ -5,9 +5,9 @@ import {
  formatResult,
  getSystemPrompt,
  openai_default
- } from "../../chunk-PEGZVGG4.js";
- import "../../chunk-63WE2C5R.js";
+ } from "../../chunk-G4AWE5A2.js";
  import "../../chunk-64PMM72R.js";
+ import "../../chunk-63WE2C5R.js";
  import "../../chunk-4VWJFZVS.js";
  import "../../chunk-PZ5AY32C.js";
  export {
@@ -3,9 +3,9 @@ import {
  editFileTool,
  getSystemPrompt,
  vercel_default
- } from "../../chunk-X4CQ6D3G.js";
- import "../../chunk-63WE2C5R.js";
+ } from "../../chunk-UIZT3KVJ.js";
  import "../../chunk-64PMM72R.js";
+ import "../../chunk-63WE2C5R.js";
  import "../../chunk-4VWJFZVS.js";
  import "../../chunk-PZ5AY32C.js";
  export {
@@ -1,22 +1,22 @@
  import "../chunk-X2K57BH6.js";
  import {
  anthropic_exports
- } from "../chunk-TVFGHXPE.js";
+ } from "../chunk-3FTAIJBH.js";
  import {
  openai_exports
- } from "../chunk-PEGZVGG4.js";
+ } from "../chunk-G4AWE5A2.js";
  import {
  vercel_exports
- } from "../chunk-X4CQ6D3G.js";
- import {
- EDIT_FILE_SYSTEM_PROMPT,
- EDIT_FILE_TOOL_DESCRIPTION
- } from "../chunk-63WE2C5R.js";
+ } from "../chunk-UIZT3KVJ.js";
  import {
  countChanges,
  executeEditFile,
  generateUdiff
  } from "../chunk-64PMM72R.js";
+ import {
+ EDIT_FILE_SYSTEM_PROMPT,
+ EDIT_FILE_TOOL_DESCRIPTION
+ } from "../chunk-63WE2C5R.js";
  import "../chunk-4VWJFZVS.js";
  import "../chunk-PZ5AY32C.js";
  export {
@@ -30,7 +30,86 @@ var AGENT_CONFIG = {
  MAX_ROUNDS: 10,
  TIMEOUT_MS: 3e4
  };
- var DEFAULT_EXCLUDES = (process.env.MORPH_WARP_GREP_EXCLUDE || "").split(",").map((s) => s.trim()).filter(Boolean).concat(["node_modules", ".git", "dist", "build", ".cache", "venv", "target"]);
+ var BUILTIN_EXCLUDES = [
+ // Version control
+ ".git",
+ ".svn",
+ ".hg",
+ ".bzr",
+ // Dependencies
+ "node_modules",
+ "bower_components",
+ ".pnpm",
+ ".yarn",
+ "vendor",
+ "packages",
+ "Pods",
+ ".bundle",
+ // Python
+ "__pycache__",
+ ".pytest_cache",
+ ".mypy_cache",
+ ".ruff_cache",
+ ".venv",
+ "venv",
+ ".tox",
+ ".nox",
+ ".eggs",
+ "*.egg-info",
+ // Build outputs
+ "dist",
+ "build",
+ "out",
+ "output",
+ "target",
+ "_build",
+ ".next",
+ ".nuxt",
+ ".output",
+ ".vercel",
+ ".netlify",
+ // Cache directories
+ ".cache",
+ ".parcel-cache",
+ ".turbo",
+ ".nx",
+ ".gradle",
+ // IDE/Editor
+ ".idea",
+ ".vscode",
+ ".vs",
+ // Coverage
+ "coverage",
+ ".coverage",
+ "htmlcov",
+ ".nyc_output",
+ // Temporary
+ "tmp",
+ "temp",
+ ".tmp",
+ ".temp",
+ // Lock files
+ "package-lock.json",
+ "yarn.lock",
+ "pnpm-lock.yaml",
+ "bun.lockb",
+ "Cargo.lock",
+ "Gemfile.lock",
+ "poetry.lock",
+ // Binary/minified
+ "*.min.js",
+ "*.min.css",
+ "*.bundle.js",
+ "*.wasm",
+ "*.so",
+ "*.dll",
+ "*.pyc",
+ "*.map",
+ "*.js.map",
+ // Hidden directories catch-all
+ ".*"
+ ];
+ var DEFAULT_EXCLUDES = (process.env.MORPH_WARP_GREP_EXCLUDE || "").split(",").map((s) => s.trim()).filter(Boolean).concat(BUILTIN_EXCLUDES);
  var DEFAULT_MODEL = "morph-warp-grep";
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
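
Note on the exclude-list change above: 0.2.46 replaces the short inline array with a much larger BUILTIN_EXCLUDES list (used as ripgrep ignore patterns, per the source comment), while MORPH_WARP_GREP_EXCLUDE still lets callers prepend their own comma-separated patterns. A minimal sketch of how the final list is assembled, with an abbreviated built-in list and a hypothetical env value of "logs, *.snap":

    // Sketch only; BUILTIN_EXCLUDES is abbreviated here (see the full list in the diff above).
    const BUILTIN_EXCLUDES = [".git", "node_modules", "dist", "*.min.js", ".*"];

    function buildDefaultExcludes(raw: string | undefined): string[] {
      // User-supplied patterns come first, then the built-in patterns are appended.
      return (raw || "")
        .split(",")
        .map((s) => s.trim())
        .filter(Boolean)
        .concat(BUILTIN_EXCLUDES);
    }

    // With MORPH_WARP_GREP_EXCLUDE="logs, *.snap":
    // => ["logs", "*.snap", ".git", "node_modules", "dist", "*.min.js", ".*"]
    console.log(buildDefaultExcludes("logs, *.snap"));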
@@ -1 +1 @@
- {"version":3,"sources":["../../../../tools/warp_grep/agent/config.ts"],"sourcesContent":["// Agent configuration defaults for morph-warp-grep\n// Hard-coded: SDK does not expose control over rounds or timeout.\nexport const AGENT_CONFIG = {\n // Give the model freedom; failsafe cap to prevent infinite loops\n MAX_ROUNDS: 10,\n TIMEOUT_MS: 30000,\n};\n\nexport const DEFAULT_EXCLUDES = (process.env.MORPH_WARP_GREP_EXCLUDE || '')\n .split(',')\n .map(s => s.trim())\n .filter(Boolean)\n .concat(['node_modules', '.git', 'dist', 'build', '.cache', 'venv', 'target']);\n\nexport const DEFAULT_MODEL = 'morph-warp-grep';\n\n\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAEO,IAAM,eAAe;AAAA;AAAA,EAE1B,YAAY;AAAA,EACZ,YAAY;AACd;AAEO,IAAM,oBAAoB,QAAQ,IAAI,2BAA2B,IACrE,MAAM,GAAG,EACT,IAAI,OAAK,EAAE,KAAK,CAAC,EACjB,OAAO,OAAO,EACd,OAAO,CAAC,gBAAgB,QAAQ,QAAQ,SAAS,UAAU,QAAQ,QAAQ,CAAC;AAExE,IAAM,gBAAgB;","names":[]}
+ {"version":3,"sources":["../../../../tools/warp_grep/agent/config.ts"],"sourcesContent":["// Agent configuration defaults for morph-warp-grep\n// Hard-coded: SDK does not expose control over rounds or timeout.\nexport const AGENT_CONFIG = {\n // Give the model freedom; failsafe cap to prevent infinite loops\n MAX_ROUNDS: 10,\n TIMEOUT_MS: 30000,\n};\n\n/**\n * Comprehensive exclusion list for directories and files\n * These patterns are used with ripgrep's -g flag\n */\nconst BUILTIN_EXCLUDES = [\n // Version control\n '.git', '.svn', '.hg', '.bzr',\n \n // Dependencies\n 'node_modules', 'bower_components', '.pnpm', '.yarn',\n 'vendor', 'packages', 'Pods', '.bundle',\n \n // Python\n '__pycache__', '.pytest_cache', '.mypy_cache', '.ruff_cache',\n '.venv', 'venv', '.tox', '.nox', '.eggs', '*.egg-info',\n \n // Build outputs\n 'dist', 'build', 'out', 'output', 'target', '_build',\n '.next', '.nuxt', '.output', '.vercel', '.netlify',\n \n // Cache directories\n '.cache', '.parcel-cache', '.turbo', '.nx', '.gradle',\n \n // IDE/Editor\n '.idea', '.vscode', '.vs',\n \n // Coverage\n 'coverage', '.coverage', 'htmlcov', '.nyc_output',\n \n // Temporary\n 'tmp', 'temp', '.tmp', '.temp',\n \n // Lock files\n 'package-lock.json', 'yarn.lock', 'pnpm-lock.yaml', 'bun.lockb',\n 'Cargo.lock', 'Gemfile.lock', 'poetry.lock',\n \n // Binary/minified\n '*.min.js', '*.min.css', '*.bundle.js',\n '*.wasm', '*.so', '*.dll', '*.pyc',\n '*.map', '*.js.map',\n \n // Hidden directories catch-all\n '.*',\n];\n\nexport const DEFAULT_EXCLUDES = (process.env.MORPH_WARP_GREP_EXCLUDE || '')\n .split(',')\n .map(s => s.trim())\n .filter(Boolean)\n .concat(BUILTIN_EXCLUDES);\n\nexport const DEFAULT_MODEL = 'morph-warp-grep';\n\n\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAEO,IAAM,eAAe;AAAA;AAAA,EAE1B,YAAY;AAAA,EACZ,YAAY;AACd;AAMA,IAAM,mBAAmB;AAAA;AAAA,EAEvB;AAAA,EAAQ;AAAA,EAAQ;AAAA,EAAO;AAAA;AAAA,EAGvB;AAAA,EAAgB;AAAA,EAAoB;AAAA,EAAS;AAAA,EAC7C;AAAA,EAAU;AAAA,EAAY;AAAA,EAAQ;AAAA;AAAA,EAG9B;AAAA,EAAe;AAAA,EAAiB;AAAA,EAAe;AAAA,EAC/C;AAAA,EAAS;AAAA,EAAQ;AAAA,EAAQ;AAAA,EAAQ;AAAA,EAAS;AAAA;AAAA,EAG1C;AAAA,EAAQ;AAAA,EAAS;AAAA,EAAO;AAAA,EAAU;AAAA,EAAU;AAAA,EAC5C;AAAA,EAAS;AAAA,EAAS;AAAA,EAAW;AAAA,EAAW;AAAA;AAAA,EAGxC;AAAA,EAAU;AAAA,EAAiB;AAAA,EAAU;AAAA,EAAO;AAAA;AAAA,EAG5C;AAAA,EAAS;AAAA,EAAW;AAAA;AAAA,EAGpB;AAAA,EAAY;AAAA,EAAa;AAAA,EAAW;AAAA;AAAA,EAGpC;AAAA,EAAO;AAAA,EAAQ;AAAA,EAAQ;AAAA;AAAA,EAGvB;AAAA,EAAqB;AAAA,EAAa;AAAA,EAAkB;AAAA,EACpD;AAAA,EAAc;AAAA,EAAgB;AAAA;AAAA,EAG9B;AAAA,EAAY;AAAA,EAAa;AAAA,EACzB;AAAA,EAAU;AAAA,EAAQ;AAAA,EAAS;AAAA,EAC3B;AAAA,EAAS;AAAA;AAAA,EAGT;AACF;AAEO,IAAM,oBAAoB,QAAQ,IAAI,2BAA2B,IACrE,MAAM,GAAG,EACT,IAAI,OAAK,EAAE,KAAK,CAAC,EACjB,OAAO,OAAO,EACd,OAAO,gBAAgB;AAEnB,IAAM,gBAAgB;","names":[]}
@@ -2,7 +2,7 @@ import {
  AGENT_CONFIG,
  DEFAULT_EXCLUDES,
  DEFAULT_MODEL
- } from "../../../chunk-AFEPUNAO.js";
+ } from "../../../chunk-TJIUA27P.js";
  import "../../../chunk-PZ5AY32C.js";
  export {
  AGENT_CONFIG,
@@ -30,13 +30,54 @@ var LLMResponseParseError = class extends Error {
  this.name = "LLMResponseParseError";
  }
  };
+ var VALID_COMMANDS = ["analyse", "grep", "read", "finish"];
+ function preprocessText(text) {
+ let processed = text.replace(/<think>[\s\S]*?<\/think>/gi, "");
+ const openingTagRegex = /<tool_call>|<tool>/gi;
+ const closingTagRegex = /<\/tool_call>|<\/tool>/gi;
+ const openingMatches = processed.match(openingTagRegex) || [];
+ const closingMatches = processed.match(closingTagRegex) || [];
+ if (openingMatches.length > closingMatches.length) {
+ const lastClosingMatch = /<\/tool_call>|<\/tool>/gi;
+ let lastClosingIndex = -1;
+ let match;
+ while ((match = lastClosingMatch.exec(processed)) !== null) {
+ lastClosingIndex = match.index + match[0].length;
+ }
+ if (lastClosingIndex > 0) {
+ processed = processed.slice(0, lastClosingIndex);
+ }
+ }
+ const toolCallLines = [];
+ const toolTagRegex = /<tool_call>([\s\S]*?)<\/tool_call>|<tool>([\s\S]*?)<\/tool>/gi;
+ let tagMatch;
+ while ((tagMatch = toolTagRegex.exec(processed)) !== null) {
+ const content = (tagMatch[1] || tagMatch[2] || "").trim();
+ if (content) {
+ const lines = content.split(/\r?\n/).map((l) => l.trim()).filter((l) => l);
+ toolCallLines.push(...lines);
+ }
+ }
+ const allLines = processed.split(/\r?\n/).map((l) => l.trim());
+ for (const line of allLines) {
+ if (!line) continue;
+ if (line.startsWith("<")) continue;
+ const firstWord = line.split(/\s/)[0];
+ if (VALID_COMMANDS.includes(firstWord)) {
+ if (!toolCallLines.includes(line)) {
+ toolCallLines.push(line);
+ }
+ }
+ }
+ return toolCallLines;
+ }
  var LLMResponseParser = class {
  finishSpecSplitRe = /,(?=[^,\s]+:)/;
  parse(text) {
  if (typeof text !== "string") {
  throw new TypeError("Command text must be a string.");
  }
- const lines = text.split(/\r?\n/).map((l) => l.trim());
+ const lines = preprocessText(text);
  const commands = [];
  let finishAccumulator = null;
  lines.forEach((line, idx) => {
@@ -59,7 +100,7 @@ var LLMResponseParser = class {
  finishAccumulator = this.handleFinish(parts, ctx, finishAccumulator);
  break;
  default:
- throw new LLMResponseParseError(`Line ${ctx.lineNumber}: Unsupported command '${cmd}'`);
+ break;
  }
  });
  if (finishAccumulator) {
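
Note on the parser changes above: parse() now runs the model output through preprocessText(), which strips <think>...</think> blocks, pulls commands out of <tool_call>/<tool> tags (dropping anything after the last properly closed tag when a tag is left unclosed), still accepts bare command lines, and the default switch case no longer throws on unknown commands. A minimal sketch of the new behaviour; the import path is illustrative and depends on the package's exports map:

    // import { LLMResponseParser } from "@morphllm/morphsdk/dist/tools/warp_grep/agent/parser.js"; // illustrative path
    const parser = new LLMResponseParser();

    const reply = [
      "<think>Specific query; shotgun grep across likely dirs.</think>",
      "<tool_call>grep 'authenticate' src/</tool_call>",
      "<tool_call>analyse src/auth</tool_call>",
      "Sure! Here are my searches.", // stray prose: ignored in 0.2.46
    ].join("\n");

    // 0.2.44 would throw LLMResponseParseError ("Unsupported command") on the first line;
    // 0.2.46 returns just the two structured tool calls:
    const calls = parser.parse(reply);
    // => [
    //   { name: "grep", arguments: { pattern: "authenticate", path: "src/" } },
    //   { name: "analyse", arguments: { path: "src/auth", pattern: null } },
    // ]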
@@ -1 +1 @@
- {"version":3,"sources":["../../../../tools/warp_grep/agent/parser.ts"],"sourcesContent":["// Parses assistant lines into structured tool calls\nimport type { ToolCall } from './types.js';\n\nexport class LLMResponseParseError extends Error {\n constructor(message: string) {\n super(message);\n this.name = 'LLMResponseParseError';\n }\n}\n\ntype LineContext = { lineNumber: number; raw: string };\n\nexport class LLMResponseParser {\n private readonly finishSpecSplitRe = /,(?=[^,\\s]+:)/;\n\n parse(text: string): ToolCall[] {\n if (typeof text !== 'string') {\n throw new TypeError('Command text must be a string.');\n }\n const lines = text.split(/\\r?\\n/).map(l => l.trim());\n const commands: ToolCall[] = [];\n let finishAccumulator: Map<string, number[][]> | null = null;\n\n lines.forEach((line, idx) => {\n if (!line || line.startsWith('#')) return;\n const ctx: LineContext = { lineNumber: idx + 1, raw: line };\n const parts = this.splitLine(line, ctx);\n if (parts.length === 0) return;\n const cmd = parts[0];\n switch (cmd) {\n case 'analyse':\n this.handleAnalyse(parts, ctx, commands);\n break;\n case 'grep':\n this.handleGrep(parts, ctx, commands);\n break;\n case 'read':\n this.handleRead(parts, ctx, commands);\n break;\n case 'finish':\n finishAccumulator = this.handleFinish(parts, ctx, finishAccumulator);\n break;\n default:\n throw new LLMResponseParseError(`Line ${ctx.lineNumber}: Unsupported command '${cmd}'`);\n }\n });\n\n if (finishAccumulator) {\n const map = finishAccumulator as Map<string, number[][]>;\n const entries = [...map.entries()];\n const filesPayload = entries.map(([path, ranges]) => ({\n path,\n lines: [...ranges].sort((a, b) => a[0] - b[0]) as Array<[number, number]>,\n }));\n commands.push({ name: 'finish', arguments: { files: filesPayload } });\n }\n return commands;\n }\n\n private splitLine(line: string, ctx: LineContext): string[] {\n try {\n // Split by whitespace but keep quoted blocks as one\n const parts: string[] = [];\n let current = '';\n let inSingle = false;\n for (let i = 0; i < line.length; i++) {\n const ch = line[i];\n if (ch === \"'\" && line[i - 1] !== '\\\\') {\n inSingle = !inSingle;\n current += ch;\n } else if (!inSingle && /\\s/.test(ch)) {\n if (current) {\n parts.push(current);\n current = '';\n }\n } else {\n current += ch;\n }\n }\n if (current) parts.push(current);\n return parts;\n } catch {\n throw new LLMResponseParseError(`Line ${ctx.lineNumber}: Unable to parse line.`);\n }\n }\n\n private handleAnalyse(parts: string[], ctx: LineContext, commands: ToolCall[]) {\n // analyse <path> [pattern]\n if (parts.length < 2) {\n throw new LLMResponseParseError(`Line ${ctx.lineNumber}: analyse requires <path>`);\n }\n const path = parts[1];\n const pattern = parts[2]?.replace(/^\"|\"$/g, '') ?? 
null;\n commands.push({ name: 'analyse', arguments: { path, pattern } });\n }\n\n // no glob tool in MCP\n\n private handleGrep(parts: string[], ctx: LineContext, commands: ToolCall[]) {\n // grep '<pattern>' <path>\n if (parts.length < 3) {\n throw new LLMResponseParseError(`Line ${ctx.lineNumber}: grep requires '<pattern>' and <path>`);\n }\n const pat = parts[1];\n if (!pat.startsWith(\"'\") || !pat.endsWith(\"'\")) {\n throw new LLMResponseParseError(`Line ${ctx.lineNumber}: grep pattern must be single-quoted`);\n }\n commands.push({ name: 'grep', arguments: { pattern: pat.slice(1, -1), path: parts[2] } });\n }\n\n private handleRead(parts: string[], ctx: LineContext, commands: ToolCall[]) {\n // read <path>[:start-end]\n if (parts.length < 2) {\n throw new LLMResponseParseError(`Line ${ctx.lineNumber}: read requires <path> or <path>:<start-end>`);\n }\n const spec = parts[1];\n const rangeIdx = spec.indexOf(':');\n if (rangeIdx === -1) {\n commands.push({ name: 'read', arguments: { path: spec } });\n return;\n }\n const path = spec.slice(0, rangeIdx);\n const range = spec.slice(rangeIdx + 1);\n const [s, e] = range.split('-').map(v => parseInt(v, 10));\n if (!Number.isFinite(s) || !Number.isFinite(e)) {\n throw new LLMResponseParseError(`Line ${ctx.lineNumber}: invalid read range '${range}'`);\n }\n commands.push({ name: 'read', arguments: { path, start: s, end: e } });\n }\n\n private handleFinish(parts: string[], ctx: LineContext, acc: Map<string, number[][]> | null) {\n // finish file1:1-10,20-30 file2:5-7\n const map = acc ?? new Map<string, number[][]>();\n const args = parts.slice(1);\n for (const token of args) {\n const [path, rangesText] = token.split(':', 2);\n if (!path || !rangesText) {\n throw new LLMResponseParseError(`Line ${ctx.lineNumber}: invalid finish token '${token}'`);\n }\n const rangeSpecs = rangesText.split(',').filter(Boolean);\n for (const spec of rangeSpecs) {\n const [s, e] = spec.split('-').map(v => parseInt(v, 10));\n if (!Number.isFinite(s) || !Number.isFinite(e) || e < s) {\n throw new LLMResponseParseError(`Line ${ctx.lineNumber}: invalid range '${spec}'`);\n }\n const arr = map.get(path) ?? 
[];\n arr.push([s, e]);\n map.set(path, arr);\n }\n }\n return map;\n }\n}\n\n\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAGO,IAAM,wBAAN,cAAoC,MAAM;AAAA,EAC/C,YAAY,SAAiB;AAC3B,UAAM,OAAO;AACb,SAAK,OAAO;AAAA,EACd;AACF;AAIO,IAAM,oBAAN,MAAwB;AAAA,EACZ,oBAAoB;AAAA,EAErC,MAAM,MAA0B;AAC9B,QAAI,OAAO,SAAS,UAAU;AAC5B,YAAM,IAAI,UAAU,gCAAgC;AAAA,IACtD;AACA,UAAM,QAAQ,KAAK,MAAM,OAAO,EAAE,IAAI,OAAK,EAAE,KAAK,CAAC;AACnD,UAAM,WAAuB,CAAC;AAC9B,QAAI,oBAAoD;AAExD,UAAM,QAAQ,CAAC,MAAM,QAAQ;AAC3B,UAAI,CAAC,QAAQ,KAAK,WAAW,GAAG,EAAG;AACnC,YAAM,MAAmB,EAAE,YAAY,MAAM,GAAG,KAAK,KAAK;AAC1D,YAAM,QAAQ,KAAK,UAAU,MAAM,GAAG;AACtC,UAAI,MAAM,WAAW,EAAG;AACxB,YAAM,MAAM,MAAM,CAAC;AACnB,cAAQ,KAAK;AAAA,QACX,KAAK;AACH,eAAK,cAAc,OAAO,KAAK,QAAQ;AACvC;AAAA,QACF,KAAK;AACH,eAAK,WAAW,OAAO,KAAK,QAAQ;AACpC;AAAA,QACF,KAAK;AACH,eAAK,WAAW,OAAO,KAAK,QAAQ;AACpC;AAAA,QACF,KAAK;AACH,8BAAoB,KAAK,aAAa,OAAO,KAAK,iBAAiB;AACnE;AAAA,QACF;AACE,gBAAM,IAAI,sBAAsB,QAAQ,IAAI,UAAU,0BAA0B,GAAG,GAAG;AAAA,MAC1F;AAAA,IACF,CAAC;AAED,QAAI,mBAAmB;AACrB,YAAM,MAAM;AACZ,YAAM,UAAU,CAAC,GAAG,IAAI,QAAQ,CAAC;AACjC,YAAM,eAAe,QAAQ,IAAI,CAAC,CAAC,MAAM,MAAM,OAAO;AAAA,QACpD;AAAA,QACA,OAAO,CAAC,GAAG,MAAM,EAAE,KAAK,CAAC,GAAG,MAAM,EAAE,CAAC,IAAI,EAAE,CAAC,CAAC;AAAA,MAC/C,EAAE;AACF,eAAS,KAAK,EAAE,MAAM,UAAU,WAAW,EAAE,OAAO,aAAa,EAAE,CAAC;AAAA,IACtE;AACA,WAAO;AAAA,EACT;AAAA,EAEQ,UAAU,MAAc,KAA4B;AAC1D,QAAI;AAEF,YAAM,QAAkB,CAAC;AACzB,UAAI,UAAU;AACd,UAAI,WAAW;AACf,eAAS,IAAI,GAAG,IAAI,KAAK,QAAQ,KAAK;AACpC,cAAM,KAAK,KAAK,CAAC;AACjB,YAAI,OAAO,OAAO,KAAK,IAAI,CAAC,MAAM,MAAM;AACtC,qBAAW,CAAC;AACZ,qBAAW;AAAA,QACb,WAAW,CAAC,YAAY,KAAK,KAAK,EAAE,GAAG;AACrC,cAAI,SAAS;AACX,kBAAM,KAAK,OAAO;AAClB,sBAAU;AAAA,UACZ;AAAA,QACF,OAAO;AACL,qBAAW;AAAA,QACb;AAAA,MACF;AACA,UAAI,QAAS,OAAM,KAAK,OAAO;AAC/B,aAAO;AAAA,IACT,QAAQ;AACN,YAAM,IAAI,sBAAsB,QAAQ,IAAI,UAAU,yBAAyB;AAAA,IACjF;AAAA,EACF;AAAA,EAEQ,cAAc,OAAiB,KAAkB,UAAsB;AAE7E,QAAI,MAAM,SAAS,GAAG;AACpB,YAAM,IAAI,sBAAsB,QAAQ,IAAI,UAAU,2BAA2B;AAAA,IACnF;AACA,UAAM,OAAO,MAAM,CAAC;AACpB,UAAM,UAAU,MAAM,CAAC,GAAG,QAAQ,UAAU,EAAE,KAAK;AACnD,aAAS,KAAK,EAAE,MAAM,WAAW,WAAW,EAAE,MAAM,QAAQ,EAAE,CAAC;AAAA,EACjE;AAAA;AAAA,EAIQ,WAAW,OAAiB,KAAkB,UAAsB;AAE1E,QAAI,MAAM,SAAS,GAAG;AACpB,YAAM,IAAI,sBAAsB,QAAQ,IAAI,UAAU,wCAAwC;AAAA,IAChG;AACA,UAAM,MAAM,MAAM,CAAC;AACnB,QAAI,CAAC,IAAI,WAAW,GAAG,KAAK,CAAC,IAAI,SAAS,GAAG,GAAG;AAC9C,YAAM,IAAI,sBAAsB,QAAQ,IAAI,UAAU,sCAAsC;AAAA,IAC9F;AACA,aAAS,KAAK,EAAE,MAAM,QAAQ,WAAW,EAAE,SAAS,IAAI,MAAM,GAAG,EAAE,GAAG,MAAM,MAAM,CAAC,EAAE,EAAE,CAAC;AAAA,EAC1F;AAAA,EAEQ,WAAW,OAAiB,KAAkB,UAAsB;AAE1E,QAAI,MAAM,SAAS,GAAG;AACpB,YAAM,IAAI,sBAAsB,QAAQ,IAAI,UAAU,8CAA8C;AAAA,IACtG;AACA,UAAM,OAAO,MAAM,CAAC;AACpB,UAAM,WAAW,KAAK,QAAQ,GAAG;AACjC,QAAI,aAAa,IAAI;AACnB,eAAS,KAAK,EAAE,MAAM,QAAQ,WAAW,EAAE,MAAM,KAAK,EAAE,CAAC;AACzD;AAAA,IACF;AACA,UAAM,OAAO,KAAK,MAAM,GAAG,QAAQ;AACnC,UAAM,QAAQ,KAAK,MAAM,WAAW,CAAC;AACrC,UAAM,CAAC,GAAG,CAAC,IAAI,MAAM,MAAM,GAAG,EAAE,IAAI,OAAK,SAAS,GAAG,EAAE,CAAC;AACxD,QAAI,CAAC,OAAO,SAAS,CAAC,KAAK,CAAC,OAAO,SAAS,CAAC,GAAG;AAC9C,YAAM,IAAI,sBAAsB,QAAQ,IAAI,UAAU,yBAAyB,KAAK,GAAG;AAAA,IACzF;AACA,aAAS,KAAK,EAAE,MAAM,QAAQ,WAAW,EAAE,MAAM,OAAO,GAAG,KAAK,EAAE,EAAE,CAAC;AAAA,EACvE;AAAA,EAEQ,aAAa,OAAiB,KAAkB,KAAqC;AAE3F,UAAM,MAAM,OAAO,oBAAI,IAAwB;AAC/C,UAAM,OAAO,MAAM,MAAM,CAAC;AAC1B,eAAW,SAAS,MAAM;AACxB,YAAM,CAAC,MAAM,UAAU,IAAI,MAAM,MAAM,KAAK,CAAC;AAC7C,UAAI,CAAC,QAAQ,CAAC,YAAY;AACxB,cAAM,IAAI,sBAAsB,QAAQ,IAAI,UAAU,2BAA2B,KAAK,GAAG;AAAA,MAC3F;AACA,YAAM,aAAa,WAAW,MAAM,GAAG,EAAE,OAAO,OAAO;AACvD,iBAAW,QAAQ,YAAY;AAC7B,cAAM,CAAC,GAAG,CAAC,IAAI,KAAK,MAAM,GAAG,EAAE,IAAI,OAAK,SAAS,GAAG,EAAE,CAAC;AACvD,YAAI,CAAC,OAAO,SAAS,CAAC,KAAK,CAAC,OAAO,SAAS,
CAAC,KAAK,IAAI,GAAG;AACvD,gBAAM,IAAI,sBAAsB,QAAQ,IAAI,UAAU,oBAAoB,IAAI,GAAG;AAAA,QACnF;AACA,cAAM,MAAM,IAAI,IAAI,IAAI,KAAK,CAAC;AAC9B,YAAI,KAAK,CAAC,GAAG,CAAC,CAAC;AACf,YAAI,IAAI,MAAM,GAAG;AAAA,MACnB;AAAA,IACF;AACA,WAAO;AAAA,EACT;AACF;","names":[]}
+ {"version":3,"sources":["../../../../tools/warp_grep/agent/parser.ts"],"sourcesContent":["// Parses assistant lines into structured tool calls\nimport type { ToolCall } from './types.js';\n\nexport class LLMResponseParseError extends Error {\n constructor(message: string) {\n super(message);\n this.name = 'LLMResponseParseError';\n }\n}\n\ntype LineContext = { lineNumber: number; raw: string };\n\n// Valid tool command names\nconst VALID_COMMANDS = ['analyse', 'grep', 'read', 'finish'];\n\n/**\n * Preprocesses text to handle XML tags:\n * 1. Removes <think>...</think> blocks entirely\n * 2. Extracts content from <tool>...</tool> or <tool_call>...</tool_call> tags\n * 3. Passes through raw tool calls (lines starting with valid commands)\n * 4. Discards unclosed <tool...> tags\n */\nfunction preprocessText(text: string): string[] {\n // Step 1: Remove <think>...</think> blocks (including multiline)\n let processed = text.replace(/<think>[\\s\\S]*?<\\/think>/gi, '');\n \n // Step 2: Check for unclosed <tool or <tool_call tags and discard them\n // Find all opening tags and their positions\n const openingTagRegex = /<tool_call>|<tool>/gi;\n const closingTagRegex = /<\\/tool_call>|<\\/tool>/gi;\n \n // Count opening and closing tags\n const openingMatches = processed.match(openingTagRegex) || [];\n const closingMatches = processed.match(closingTagRegex) || [];\n \n // If there are more opening than closing tags, we have unclosed tags\n // In that case, only process complete tag pairs\n if (openingMatches.length > closingMatches.length) {\n // Remove any content after the last complete closing tag\n const lastClosingMatch = /<\\/tool_call>|<\\/tool>/gi;\n let lastClosingIndex = -1;\n let match;\n while ((match = lastClosingMatch.exec(processed)) !== null) {\n lastClosingIndex = match.index + match[0].length;\n }\n if (lastClosingIndex > 0) {\n processed = processed.slice(0, lastClosingIndex);\n }\n }\n \n // Step 3: Extract content from <tool_call>...</tool_call> and <tool>...</tool> tags\n const toolCallLines: string[] = [];\n const toolTagRegex = /<tool_call>([\\s\\S]*?)<\\/tool_call>|<tool>([\\s\\S]*?)<\\/tool>/gi;\n let tagMatch;\n \n while ((tagMatch = toolTagRegex.exec(processed)) !== null) {\n const content = (tagMatch[1] || tagMatch[2] || '').trim();\n if (content) {\n // Split content by newlines in case there are multiple tool calls in one tag\n const lines = content.split(/\\r?\\n/).map(l => l.trim()).filter(l => l);\n toolCallLines.push(...lines);\n }\n }\n \n // Step 4: Also extract raw tool calls (lines starting with valid commands)\n // This provides backwards compatibility\n const allLines = processed.split(/\\r?\\n/).map(l => l.trim());\n for (const line of allLines) {\n if (!line) continue;\n \n // Skip lines that are inside XML tags (already processed above)\n if (line.startsWith('<')) continue;\n \n // Check if line starts with a valid command\n const firstWord = line.split(/\\s/)[0];\n if (VALID_COMMANDS.includes(firstWord)) {\n // Avoid duplicates\n if (!toolCallLines.includes(line)) {\n toolCallLines.push(line);\n }\n }\n }\n \n return toolCallLines;\n}\n\nexport class LLMResponseParser {\n private readonly finishSpecSplitRe = /,(?=[^,\\s]+:)/;\n\n parse(text: string): ToolCall[] {\n if (typeof text !== 'string') {\n throw new TypeError('Command text must be a string.');\n }\n \n // Preprocess to handle XML tags\n const lines = preprocessText(text);\n \n const commands: ToolCall[] = [];\n let finishAccumulator: Map<string, number[][]> | null = null;\n\n 
lines.forEach((line, idx) => {\n if (!line || line.startsWith('#')) return;\n const ctx: LineContext = { lineNumber: idx + 1, raw: line };\n const parts = this.splitLine(line, ctx);\n if (parts.length === 0) return;\n const cmd = parts[0];\n switch (cmd) {\n case 'analyse':\n this.handleAnalyse(parts, ctx, commands);\n break;\n case 'grep':\n this.handleGrep(parts, ctx, commands);\n break;\n case 'read':\n this.handleRead(parts, ctx, commands);\n break;\n case 'finish':\n finishAccumulator = this.handleFinish(parts, ctx, finishAccumulator);\n break;\n default:\n // Silently ignore unknown commands after preprocessing\n // (they might be remnants of XML or other content)\n break;\n }\n });\n\n if (finishAccumulator) {\n const map = finishAccumulator as Map<string, number[][]>;\n const entries = [...map.entries()];\n const filesPayload = entries.map(([path, ranges]) => ({\n path,\n lines: [...ranges].sort((a, b) => a[0] - b[0]) as Array<[number, number]>,\n }));\n commands.push({ name: 'finish', arguments: { files: filesPayload } });\n }\n return commands;\n }\n\n private splitLine(line: string, ctx: LineContext): string[] {\n try {\n // Split by whitespace but keep quoted blocks as one\n const parts: string[] = [];\n let current = '';\n let inSingle = false;\n for (let i = 0; i < line.length; i++) {\n const ch = line[i];\n if (ch === \"'\" && line[i - 1] !== '\\\\') {\n inSingle = !inSingle;\n current += ch;\n } else if (!inSingle && /\\s/.test(ch)) {\n if (current) {\n parts.push(current);\n current = '';\n }\n } else {\n current += ch;\n }\n }\n if (current) parts.push(current);\n return parts;\n } catch {\n throw new LLMResponseParseError(`Line ${ctx.lineNumber}: Unable to parse line.`);\n }\n }\n\n private handleAnalyse(parts: string[], ctx: LineContext, commands: ToolCall[]) {\n // analyse <path> [pattern]\n if (parts.length < 2) {\n throw new LLMResponseParseError(`Line ${ctx.lineNumber}: analyse requires <path>`);\n }\n const path = parts[1];\n const pattern = parts[2]?.replace(/^\"|\"$/g, '') ?? 
null;\n commands.push({ name: 'analyse', arguments: { path, pattern } });\n }\n\n // no glob tool in MCP\n\n private handleGrep(parts: string[], ctx: LineContext, commands: ToolCall[]) {\n // grep '<pattern>' <path>\n if (parts.length < 3) {\n throw new LLMResponseParseError(`Line ${ctx.lineNumber}: grep requires '<pattern>' and <path>`);\n }\n const pat = parts[1];\n if (!pat.startsWith(\"'\") || !pat.endsWith(\"'\")) {\n throw new LLMResponseParseError(`Line ${ctx.lineNumber}: grep pattern must be single-quoted`);\n }\n commands.push({ name: 'grep', arguments: { pattern: pat.slice(1, -1), path: parts[2] } });\n }\n\n private handleRead(parts: string[], ctx: LineContext, commands: ToolCall[]) {\n // read <path>[:start-end]\n if (parts.length < 2) {\n throw new LLMResponseParseError(`Line ${ctx.lineNumber}: read requires <path> or <path>:<start-end>`);\n }\n const spec = parts[1];\n const rangeIdx = spec.indexOf(':');\n if (rangeIdx === -1) {\n commands.push({ name: 'read', arguments: { path: spec } });\n return;\n }\n const path = spec.slice(0, rangeIdx);\n const range = spec.slice(rangeIdx + 1);\n const [s, e] = range.split('-').map(v => parseInt(v, 10));\n if (!Number.isFinite(s) || !Number.isFinite(e)) {\n throw new LLMResponseParseError(`Line ${ctx.lineNumber}: invalid read range '${range}'`);\n }\n commands.push({ name: 'read', arguments: { path, start: s, end: e } });\n }\n\n private handleFinish(parts: string[], ctx: LineContext, acc: Map<string, number[][]> | null) {\n // finish file1:1-10,20-30 file2:5-7\n const map = acc ?? new Map<string, number[][]>();\n const args = parts.slice(1);\n for (const token of args) {\n const [path, rangesText] = token.split(':', 2);\n if (!path || !rangesText) {\n throw new LLMResponseParseError(`Line ${ctx.lineNumber}: invalid finish token '${token}'`);\n }\n const rangeSpecs = rangesText.split(',').filter(Boolean);\n for (const spec of rangeSpecs) {\n const [s, e] = spec.split('-').map(v => parseInt(v, 10));\n if (!Number.isFinite(s) || !Number.isFinite(e) || e < s) {\n throw new LLMResponseParseError(`Line ${ctx.lineNumber}: invalid range '${spec}'`);\n }\n const arr = map.get(path) ?? 
[];\n arr.push([s, e]);\n map.set(path, arr);\n }\n }\n return map;\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAGO,IAAM,wBAAN,cAAoC,MAAM;AAAA,EAC/C,YAAY,SAAiB;AAC3B,UAAM,OAAO;AACb,SAAK,OAAO;AAAA,EACd;AACF;AAKA,IAAM,iBAAiB,CAAC,WAAW,QAAQ,QAAQ,QAAQ;AAS3D,SAAS,eAAe,MAAwB;AAE9C,MAAI,YAAY,KAAK,QAAQ,8BAA8B,EAAE;AAI7D,QAAM,kBAAkB;AACxB,QAAM,kBAAkB;AAGxB,QAAM,iBAAiB,UAAU,MAAM,eAAe,KAAK,CAAC;AAC5D,QAAM,iBAAiB,UAAU,MAAM,eAAe,KAAK,CAAC;AAI5D,MAAI,eAAe,SAAS,eAAe,QAAQ;AAEjD,UAAM,mBAAmB;AACzB,QAAI,mBAAmB;AACvB,QAAI;AACJ,YAAQ,QAAQ,iBAAiB,KAAK,SAAS,OAAO,MAAM;AAC1D,yBAAmB,MAAM,QAAQ,MAAM,CAAC,EAAE;AAAA,IAC5C;AACA,QAAI,mBAAmB,GAAG;AACxB,kBAAY,UAAU,MAAM,GAAG,gBAAgB;AAAA,IACjD;AAAA,EACF;AAGA,QAAM,gBAA0B,CAAC;AACjC,QAAM,eAAe;AACrB,MAAI;AAEJ,UAAQ,WAAW,aAAa,KAAK,SAAS,OAAO,MAAM;AACzD,UAAM,WAAW,SAAS,CAAC,KAAK,SAAS,CAAC,KAAK,IAAI,KAAK;AACxD,QAAI,SAAS;AAEX,YAAM,QAAQ,QAAQ,MAAM,OAAO,EAAE,IAAI,OAAK,EAAE,KAAK,CAAC,EAAE,OAAO,OAAK,CAAC;AACrE,oBAAc,KAAK,GAAG,KAAK;AAAA,IAC7B;AAAA,EACF;AAIA,QAAM,WAAW,UAAU,MAAM,OAAO,EAAE,IAAI,OAAK,EAAE,KAAK,CAAC;AAC3D,aAAW,QAAQ,UAAU;AAC3B,QAAI,CAAC,KAAM;AAGX,QAAI,KAAK,WAAW,GAAG,EAAG;AAG1B,UAAM,YAAY,KAAK,MAAM,IAAI,EAAE,CAAC;AACpC,QAAI,eAAe,SAAS,SAAS,GAAG;AAEtC,UAAI,CAAC,cAAc,SAAS,IAAI,GAAG;AACjC,sBAAc,KAAK,IAAI;AAAA,MACzB;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAEO,IAAM,oBAAN,MAAwB;AAAA,EACZ,oBAAoB;AAAA,EAErC,MAAM,MAA0B;AAC9B,QAAI,OAAO,SAAS,UAAU;AAC5B,YAAM,IAAI,UAAU,gCAAgC;AAAA,IACtD;AAGA,UAAM,QAAQ,eAAe,IAAI;AAEjC,UAAM,WAAuB,CAAC;AAC9B,QAAI,oBAAoD;AAExD,UAAM,QAAQ,CAAC,MAAM,QAAQ;AAC3B,UAAI,CAAC,QAAQ,KAAK,WAAW,GAAG,EAAG;AACnC,YAAM,MAAmB,EAAE,YAAY,MAAM,GAAG,KAAK,KAAK;AAC1D,YAAM,QAAQ,KAAK,UAAU,MAAM,GAAG;AACtC,UAAI,MAAM,WAAW,EAAG;AACxB,YAAM,MAAM,MAAM,CAAC;AACnB,cAAQ,KAAK;AAAA,QACX,KAAK;AACH,eAAK,cAAc,OAAO,KAAK,QAAQ;AACvC;AAAA,QACF,KAAK;AACH,eAAK,WAAW,OAAO,KAAK,QAAQ;AACpC;AAAA,QACF,KAAK;AACH,eAAK,WAAW,OAAO,KAAK,QAAQ;AACpC;AAAA,QACF,KAAK;AACH,8BAAoB,KAAK,aAAa,OAAO,KAAK,iBAAiB;AACnE;AAAA,QACF;AAGE;AAAA,MACJ;AAAA,IACF,CAAC;AAED,QAAI,mBAAmB;AACrB,YAAM,MAAM;AACZ,YAAM,UAAU,CAAC,GAAG,IAAI,QAAQ,CAAC;AACjC,YAAM,eAAe,QAAQ,IAAI,CAAC,CAAC,MAAM,MAAM,OAAO;AAAA,QACpD;AAAA,QACA,OAAO,CAAC,GAAG,MAAM,EAAE,KAAK,CAAC,GAAG,MAAM,EAAE,CAAC,IAAI,EAAE,CAAC,CAAC;AAAA,MAC/C,EAAE;AACF,eAAS,KAAK,EAAE,MAAM,UAAU,WAAW,EAAE,OAAO,aAAa,EAAE,CAAC;AAAA,IACtE;AACA,WAAO;AAAA,EACT;AAAA,EAEQ,UAAU,MAAc,KAA4B;AAC1D,QAAI;AAEF,YAAM,QAAkB,CAAC;AACzB,UAAI,UAAU;AACd,UAAI,WAAW;AACf,eAAS,IAAI,GAAG,IAAI,KAAK,QAAQ,KAAK;AACpC,cAAM,KAAK,KAAK,CAAC;AACjB,YAAI,OAAO,OAAO,KAAK,IAAI,CAAC,MAAM,MAAM;AACtC,qBAAW,CAAC;AACZ,qBAAW;AAAA,QACb,WAAW,CAAC,YAAY,KAAK,KAAK,EAAE,GAAG;AACrC,cAAI,SAAS;AACX,kBAAM,KAAK,OAAO;AAClB,sBAAU;AAAA,UACZ;AAAA,QACF,OAAO;AACL,qBAAW;AAAA,QACb;AAAA,MACF;AACA,UAAI,QAAS,OAAM,KAAK,OAAO;AAC/B,aAAO;AAAA,IACT,QAAQ;AACN,YAAM,IAAI,sBAAsB,QAAQ,IAAI,UAAU,yBAAyB;AAAA,IACjF;AAAA,EACF;AAAA,EAEQ,cAAc,OAAiB,KAAkB,UAAsB;AAE7E,QAAI,MAAM,SAAS,GAAG;AACpB,YAAM,IAAI,sBAAsB,QAAQ,IAAI,UAAU,2BAA2B;AAAA,IACnF;AACA,UAAM,OAAO,MAAM,CAAC;AACpB,UAAM,UAAU,MAAM,CAAC,GAAG,QAAQ,UAAU,EAAE,KAAK;AACnD,aAAS,KAAK,EAAE,MAAM,WAAW,WAAW,EAAE,MAAM,QAAQ,EAAE,CAAC;AAAA,EACjE;AAAA;AAAA,EAIQ,WAAW,OAAiB,KAAkB,UAAsB;AAE1E,QAAI,MAAM,SAAS,GAAG;AACpB,YAAM,IAAI,sBAAsB,QAAQ,IAAI,UAAU,wCAAwC;AAAA,IAChG;AACA,UAAM,MAAM,MAAM,CAAC;AACnB,QAAI,CAAC,IAAI,WAAW,GAAG,KAAK,CAAC,IAAI,SAAS,GAAG,GAAG;AAC9C,YAAM,IAAI,sBAAsB,QAAQ,IAAI,UAAU,sCAAsC;AAAA,IAC9F;AACA,aAAS,KAAK,EAAE,MAAM,QAAQ,WAAW,EAAE,SAAS,IAAI,MAAM,GAAG,EAAE,GAAG,MAAM,MAAM,CAAC,EAAE,EAAE,CAAC;AAAA,EAC1F;AAAA,EAEQ,WAAW,OAAiB,KAAkB,UAAsB;AAE1E,QAAI,MAAM,SAAS,GAAG;AACpB,YAAM,IAAI,sBAAsB,QAAQ,IAAI,UAAU,8CAA8C;AAAA,IACtG;AACA,UA
AM,OAAO,MAAM,CAAC;AACpB,UAAM,WAAW,KAAK,QAAQ,GAAG;AACjC,QAAI,aAAa,IAAI;AACnB,eAAS,KAAK,EAAE,MAAM,QAAQ,WAAW,EAAE,MAAM,KAAK,EAAE,CAAC;AACzD;AAAA,IACF;AACA,UAAM,OAAO,KAAK,MAAM,GAAG,QAAQ;AACnC,UAAM,QAAQ,KAAK,MAAM,WAAW,CAAC;AACrC,UAAM,CAAC,GAAG,CAAC,IAAI,MAAM,MAAM,GAAG,EAAE,IAAI,OAAK,SAAS,GAAG,EAAE,CAAC;AACxD,QAAI,CAAC,OAAO,SAAS,CAAC,KAAK,CAAC,OAAO,SAAS,CAAC,GAAG;AAC9C,YAAM,IAAI,sBAAsB,QAAQ,IAAI,UAAU,yBAAyB,KAAK,GAAG;AAAA,IACzF;AACA,aAAS,KAAK,EAAE,MAAM,QAAQ,WAAW,EAAE,MAAM,OAAO,GAAG,KAAK,EAAE,EAAE,CAAC;AAAA,EACvE;AAAA,EAEQ,aAAa,OAAiB,KAAkB,KAAqC;AAE3F,UAAM,MAAM,OAAO,oBAAI,IAAwB;AAC/C,UAAM,OAAO,MAAM,MAAM,CAAC;AAC1B,eAAW,SAAS,MAAM;AACxB,YAAM,CAAC,MAAM,UAAU,IAAI,MAAM,MAAM,KAAK,CAAC;AAC7C,UAAI,CAAC,QAAQ,CAAC,YAAY;AACxB,cAAM,IAAI,sBAAsB,QAAQ,IAAI,UAAU,2BAA2B,KAAK,GAAG;AAAA,MAC3F;AACA,YAAM,aAAa,WAAW,MAAM,GAAG,EAAE,OAAO,OAAO;AACvD,iBAAW,QAAQ,YAAY;AAC7B,cAAM,CAAC,GAAG,CAAC,IAAI,KAAK,MAAM,GAAG,EAAE,IAAI,OAAK,SAAS,GAAG,EAAE,CAAC;AACvD,YAAI,CAAC,OAAO,SAAS,CAAC,KAAK,CAAC,OAAO,SAAS,CAAC,KAAK,IAAI,GAAG;AACvD,gBAAM,IAAI,sBAAsB,QAAQ,IAAI,UAAU,oBAAoB,IAAI,GAAG;AAAA,QACnF;AACA,cAAM,MAAM,IAAI,IAAI,IAAI,KAAK,CAAC;AAC9B,YAAI,KAAK,CAAC,GAAG,CAAC,CAAC;AACf,YAAI,IAAI,MAAM,GAAG;AAAA,MACnB;AAAA,IACF;AACA,WAAO;AAAA,EACT;AACF;","names":[]}
@@ -1,7 +1,7 @@
  import {
  LLMResponseParseError,
  LLMResponseParser
- } from "../../../chunk-GTOXMAF2.js";
+ } from "../../../chunk-SWQPIKPY.js";
  import "../../../chunk-PZ5AY32C.js";
  export {
  LLMResponseParseError,
@@ -24,80 +24,124 @@ __export(prompt_exports, {
  getSystemPrompt: () => getSystemPrompt
  });
  module.exports = __toCommonJS(prompt_exports);
- var SYSTEM_PROMPT = `You are a code search agent. Your task is to find relevant code snippets based on a search query.
+ var SYSTEM_PROMPT = `You are a code search agent. Your task is to find all relevant code for a given query.

  <workflow>
- You operate in exactly 3 rounds of tool exploration, followed by a final answer:
+ You have exactly 4 turns. The 4th turn MUST be a \`finish\` call. Each turn allows up to 8 parallel tool calls.

- 1. In each round, you can make MULTIPLE tool calls (up to 8) to search in parallel. All tool results will be returned together after each round.
- 2. After your third round of tool calls, your next turn MUST be a single call to the \`finish\` tool with all the context you have found.
- </workflow>
+ - Turn 1: Map the territory OR dive deep (based on query specificity)
+ - Turn 2-3: Refine based on findings
+ - Turn 4: MUST call \`finish\` with all relevant code locations
+ - You MAY call \`finish\` early if confident\u2014but never before at least 1 search turn.

- <tool_calling>
- You have tools at your disposal to solve the coding task. Follow these rules regarding tool calls:
+ Remember, if the task feels easy to you, it is strongly desirable to call \`finish\` early using fewer turns, but quality over speed.
+ </workflow>

- ### 1. \`analyse\` - Explore Directories
- Explore directory structure in a tree-like format.
- **Syntax:** \`analyse <path> [pattern]\`
- - \`<path>\`: Directory path to analyze (defaults to \`.\`)
- - \`[pattern]\`: Optional regex pattern to filter names
+ <tools>
+ ### \`analyse <path> [pattern]\`
+ Directory tree or file search. Shows structure of a path, optionally filtered by regex pattern.
+ - \`path\`: Required. Directory or file path (use \`.\` for repo root)
+ - \`pattern\`: Optional regex to filter results

- For example:
+ Examples:
  \`\`\`
+ analyse .
  analyse src/api
- analyse . "test"
+ analyse . ".*\\.ts$"
+ analyse src "test.*"
  \`\`\`

- ### 2. \`read\` - Read File Contents
- Read entire files or specific line ranges.
- **Syntax:** \`read <path>[:start-end]\`
- - \`<path>\`: File path to read
- - \`[:start-end]\`: Optional 1-based, inclusive line range
+ ### \`read <path>[:start-end]\`
+ Read file contents. Line range is 1-based, inclusive.
+ - Returns numbered lines for easy reference
+ - Omit range to read entire file

- For example:
+ Examples:
  \`\`\`
  read src/main.py
- read src/database/connection.py:10-50
+ read src/db/conn.py:10-50
+ read package.json:1-20
  \`\`\`

- ### 3. \`grep\` - Search with Regex
- Search for regex patterns across files using ripgrep.
- **Syntax:** \`grep '<pattern>' <path>\`
- - \`'<pattern>'\`: Regex pattern (always wrap in single quotes)
- - \`<path>\`: Directory or file to search (use \`.\` for the repo root)
+ ### \`grep '<pattern>' <path>\`
+ Ripgrep search. Finds pattern matches across files.
+ - \`'<pattern>'\`: Required. Regex pattern wrapped in single quotes
+ - \`<path>\`: Required. Directory or file to search (use \`.\` for repo root)

- For example:
+ Examples:
  \`\`\`
- grep 'create_user' .
- grep 'import.*requests' src/api
- grep 'class\\\\s+AuthService' controllers/auth.py
+ grep 'class.*Service' src/
+ grep 'def authenticate' .
+ grep 'import.*from' src/components/
+ grep 'TODO' .
  \`\`\`

- ### 4. \`finish\` - Submit Final Answer
- Submit your findings when complete.
- **Syntax:** \`finish <file1:range1,range2...> [file2:range3...]\`
- - Provide file paths with colon-separated, comma-separated line ranges
+ ### \`finish <file1:ranges> [file2:ranges ...]\`
+ Submit final answer with all relevant code locations.
+ - Include generous line ranges\u2014don't be stingy with context
+ - Ranges are comma-separated: \`file.py:10-30,50-60\`
+ - ALWAYS include import statements at the top of files (usually lines 1-20)
+ - If code spans multiple files, include ALL of them
+ - Small files can be returned in full

- For example:
+ Examples:
  \`\`\`
- finish src/api/auth.py:25-50,75-80 src/models/user.py:10-15
+ finish src/auth.py:1-15,25-50,75-80 src/models/user.py:1-10,20-45
+ finish src/index.ts:1-100
  \`\`\`
- </tool_calling>
+ </tools>

  <strategy>
- - Use the \`analyse\`, \`grep\`, and \`read\` tools to gather information about the codebase.
- - Leverage the tools smartly to make full use of their potential
- - Make parallel tool calls within each round to investigate multiple paths or files efficiently
- - Be systematic and thorough within your 3-round limit
+ **Before your first tool call, classify the query:**
+
+ | Query Type | Turn 1 Strategy | Early Finish? |
+ |------------|-----------------|---------------|
+ | **Specific** (function name, error string, unique identifier) | 8 parallel greps on likely paths | Often by turn 2 |
+ | **Conceptual** (how does X work, where is Y handled) | analyse + 2-3 broad greps | Rarely early |
+ | **Exploratory** (find all tests, list API endpoints) | analyse at multiple depths | Usually needs 3 turns |
+
+ **Parallel call patterns:**
+ - **Shotgun grep**: Same pattern, 8 different directories\u2014fast coverage
+ - **Variant grep**: 8 pattern variations (synonyms, naming conventions)\u2014catches inconsistent codebases
+ - **Funnel**: 1 analyse + 7 greps\u2014orient and search simultaneously
+ - **Deep read**: 8 reads on files you already identified\u2014gather full context fast
  </strategy>

  <output_format>
- - Only output tool calls themselves
- - Do not include explanatory text, reasoning, or commentary
- - Each tool call should be on its own line
- - After 3 rounds of exploration, call \`finish\` with all relevant code snippets you found
+ EVERY response MUST follow this exact format:
+
+ 1. First, wrap your reasoning in \`<think>...</think>\` tags containing:
+ - Query classification (specific/conceptual/exploratory)
+ - Confidence estimate (can I finish in 1-2 turns?)
+ - This turn's parallel strategy
+ - What signals would let me finish early?
+
+ 2. Then, output tool calls wrapped in \`<tool_call>...</tool_call>\` tags, one per line.
+
+ Example:
+ \`\`\`
+ <think>
+ This is a specific query about authentication. I'll grep for auth-related patterns.
+ High confidence I can finish in 2 turns if I find the auth module.
+ Strategy: Shotgun grep across likely directories.
+ </think>
+ <tool_call>grep 'authenticate' src/</tool_call>
+ <tool_call>grep 'login' src/</tool_call>
+ <tool_call>analyse src/auth</tool_call>
+ \`\`\`
+
+ No commentary outside \`<think>\`. No explanations after tool calls.
  </output_format>

+ <finishing_requirements>
+ When calling \`finish\`:
+ - Include the import section (typically lines 1-20) of each file
+ - Include all function/class definitions that are relevant
+ - Include any type definitions, interfaces, or constants used
+ - Better to over-include than leave the user missing context
+ - If unsure about boundaries, include more rather than less
+ </finishing_requirements>
+
  Begin your exploration now to find code relevant to the query.`;
  function getSystemPrompt() {
  return SYSTEM_PROMPT;
@@ -1 +1 @@
- {"version":3,"sources":["../../../../tools/warp_grep/agent/prompt.ts"],"sourcesContent":["export const SYSTEM_PROMPT = `You are a code search agent. Your task is to find relevant code snippets based on a search query.\n\n<workflow>\nYou operate in exactly 3 rounds of tool exploration, followed by a final answer:\n\n1. In each round, you can make MULTIPLE tool calls (up to 8) to search in parallel. All tool results will be returned together after each round.\n2. After your third round of tool calls, your next turn MUST be a single call to the \\`finish\\` tool with all the context you have found.\n</workflow>\n\n<tool_calling>\nYou have tools at your disposal to solve the coding task. Follow these rules regarding tool calls:\n\n### 1. \\`analyse\\` - Explore Directories\nExplore directory structure in a tree-like format.\n**Syntax:** \\`analyse <path> [pattern]\\`\n- \\`<path>\\`: Directory path to analyze (defaults to \\`.\\`)\n- \\`[pattern]\\`: Optional regex pattern to filter names\n\nFor example:\n\\`\\`\\`\nanalyse src/api\nanalyse . \"test\"\n\\`\\`\\`\n\n### 2. \\`read\\` - Read File Contents\nRead entire files or specific line ranges.\n**Syntax:** \\`read <path>[:start-end]\\`\n- \\`<path>\\`: File path to read\n- \\`[:start-end]\\`: Optional 1-based, inclusive line range\n\nFor example:\n\\`\\`\\`\nread src/main.py\nread src/database/connection.py:10-50\n\\`\\`\\`\n\n### 3. \\`grep\\` - Search with Regex\nSearch for regex patterns across files using ripgrep.\n**Syntax:** \\`grep '<pattern>' <path>\\`\n- \\`'<pattern>'\\`: Regex pattern (always wrap in single quotes)\n- \\`<path>\\`: Directory or file to search (use \\`.\\` for the repo root)\n\nFor example:\n\\`\\`\\`\ngrep 'create_user' .\ngrep 'import.*requests' src/api\ngrep 'class\\\\\\\\s+AuthService' controllers/auth.py\n\\`\\`\\`\n\n### 4. \\`finish\\` - Submit Final Answer\nSubmit your findings when complete.\n**Syntax:** \\`finish <file1:range1,range2...> [file2:range3...]\\`\n- Provide file paths with colon-separated, comma-separated line ranges\n\nFor example:\n\\`\\`\\`\nfinish src/api/auth.py:25-50,75-80 src/models/user.py:10-15\n\\`\\`\\`\n</tool_calling>\n\n<strategy>\n- Use the \\`analyse\\`, \\`grep\\`, and \\`read\\` tools to gather information about the codebase.\n- Leverage the tools smartly to make full use of their potential\n- Make parallel tool calls within each round to investigate multiple paths or files efficiently\n- Be systematic and thorough within your 3-round limit\n</strategy>\n\n<output_format>\n- Only output tool calls themselves\n- Do not include explanatory text, reasoning, or commentary\n- Each tool call should be on its own line\n- After 3 rounds of exploration, call \\`finish\\` with all relevant code snippets you found\n</output_format>\n\nBegin your exploration now to find code relevant to the query.`;\n\nexport function getSystemPrompt(): string {\n\treturn SYSTEM_PROMPT;\n}\n\n\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAO,IAAM,gBAAgB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AA4EtB,SAAS,kBAA0B;AACzC,SAAO;AACR;","names":[]}
+ {"version":3,"sources":["../../../../tools/warp_grep/agent/prompt.ts"],"sourcesContent":["export const SYSTEM_PROMPT = `You are a code search agent. Your task is to find all relevant code for a given query.\n\n<workflow>\nYou have exactly 4 turns. The 4th turn MUST be a \\`finish\\` call. Each turn allows up to 8 parallel tool calls.\n\n- Turn 1: Map the territory OR dive deep (based on query specificity)\n- Turn 2-3: Refine based on findings\n- Turn 4: MUST call \\`finish\\` with all relevant code locations\n- You MAY call \\`finish\\` early if confident—but never before at least 1 search turn.\n\nRemember, if the task feels easy to you, it is strongly desirable to call \\`finish\\` early using fewer turns, but quality over speed.\n</workflow>\n\n<tools>\n### \\`analyse <path> [pattern]\\`\nDirectory tree or file search. Shows structure of a path, optionally filtered by regex pattern.\n- \\`path\\`: Required. Directory or file path (use \\`.\\` for repo root)\n- \\`pattern\\`: Optional regex to filter results\n\nExamples:\n\\`\\`\\`\nanalyse .\nanalyse src/api\nanalyse . \".*\\\\.ts$\"\nanalyse src \"test.*\"\n\\`\\`\\`\n\n### \\`read <path>[:start-end]\\`\nRead file contents. Line range is 1-based, inclusive.\n- Returns numbered lines for easy reference\n- Omit range to read entire file\n\nExamples:\n\\`\\`\\`\nread src/main.py\nread src/db/conn.py:10-50\nread package.json:1-20\n\\`\\`\\`\n\n### \\`grep '<pattern>' <path>\\`\nRipgrep search. Finds pattern matches across files.\n- \\`'<pattern>'\\`: Required. Regex pattern wrapped in single quotes\n- \\`<path>\\`: Required. Directory or file to search (use \\`.\\` for repo root)\n\nExamples:\n\\`\\`\\`\ngrep 'class.*Service' src/\ngrep 'def authenticate' .\ngrep 'import.*from' src/components/\ngrep 'TODO' .\n\\`\\`\\`\n\n### \\`finish <file1:ranges> [file2:ranges ...]\\`\nSubmit final answer with all relevant code locations.\n- Include generous line ranges—don't be stingy with context\n- Ranges are comma-separated: \\`file.py:10-30,50-60\\`\n- ALWAYS include import statements at the top of files (usually lines 1-20)\n- If code spans multiple files, include ALL of them\n- Small files can be returned in full\n\nExamples:\n\\`\\`\\`\nfinish src/auth.py:1-15,25-50,75-80 src/models/user.py:1-10,20-45\nfinish src/index.ts:1-100\n\\`\\`\\`\n</tools>\n\n<strategy>\n**Before your first tool call, classify the query:**\n\n| Query Type | Turn 1 Strategy | Early Finish? |\n|------------|-----------------|---------------|\n| **Specific** (function name, error string, unique identifier) | 8 parallel greps on likely paths | Often by turn 2 |\n| **Conceptual** (how does X work, where is Y handled) | analyse + 2-3 broad greps | Rarely early |\n| **Exploratory** (find all tests, list API endpoints) | analyse at multiple depths | Usually needs 3 turns |\n\n**Parallel call patterns:**\n- **Shotgun grep**: Same pattern, 8 different directories—fast coverage\n- **Variant grep**: 8 pattern variations (synonyms, naming conventions)—catches inconsistent codebases\n- **Funnel**: 1 analyse + 7 greps—orient and search simultaneously\n- **Deep read**: 8 reads on files you already identified—gather full context fast\n</strategy>\n\n<output_format>\nEVERY response MUST follow this exact format:\n\n1. First, wrap your reasoning in \\`<think>...</think>\\` tags containing:\n - Query classification (specific/conceptual/exploratory)\n - Confidence estimate (can I finish in 1-2 turns?)\n - This turn's parallel strategy\n - What signals would let me finish early?\n\n2. 
Then, output tool calls wrapped in \\`<tool_call>...</tool_call>\\` tags, one per line.\n\nExample:\n\\`\\`\\`\n<think>\nThis is a specific query about authentication. I'll grep for auth-related patterns.\nHigh confidence I can finish in 2 turns if I find the auth module.\nStrategy: Shotgun grep across likely directories.\n</think>\n<tool_call>grep 'authenticate' src/</tool_call>\n<tool_call>grep 'login' src/</tool_call>\n<tool_call>analyse src/auth</tool_call>\n\\`\\`\\`\n\nNo commentary outside \\`<think>\\`. No explanations after tool calls.\n</output_format>\n\n<finishing_requirements>\nWhen calling \\`finish\\`:\n- Include the import section (typically lines 1-20) of each file\n- Include all function/class definitions that are relevant\n- Include any type definitions, interfaces, or constants used\n- Better to over-include than leave the user missing context\n- If unsure about boundaries, include more rather than less\n</finishing_requirements>\n\nBegin your exploration now to find code relevant to the query.`;\n\nexport function getSystemPrompt(): string {\n\treturn SYSTEM_PROMPT;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAO,IAAM,gBAAgB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAwHtB,SAAS,kBAA0B;AACzC,SAAO;AACR;","names":[]}
@@ -1,4 +1,4 @@
- declare const SYSTEM_PROMPT = "You are a code search agent. Your task is to find relevant code snippets based on a search query.\n\n<workflow>\nYou operate in exactly 3 rounds of tool exploration, followed by a final answer:\n\n1. In each round, you can make MULTIPLE tool calls (up to 8) to search in parallel. All tool results will be returned together after each round.\n2. After your third round of tool calls, your next turn MUST be a single call to the `finish` tool with all the context you have found.\n</workflow>\n\n<tool_calling>\nYou have tools at your disposal to solve the coding task. Follow these rules regarding tool calls:\n\n### 1. `analyse` - Explore Directories\nExplore directory structure in a tree-like format.\n**Syntax:** `analyse <path> [pattern]`\n- `<path>`: Directory path to analyze (defaults to `.`)\n- `[pattern]`: Optional regex pattern to filter names\n\nFor example:\n```\nanalyse src/api\nanalyse . \"test\"\n```\n\n### 2. `read` - Read File Contents\nRead entire files or specific line ranges.\n**Syntax:** `read <path>[:start-end]`\n- `<path>`: File path to read\n- `[:start-end]`: Optional 1-based, inclusive line range\n\nFor example:\n```\nread src/main.py\nread src/database/connection.py:10-50\n```\n\n### 3. `grep` - Search with Regex\nSearch for regex patterns across files using ripgrep.\n**Syntax:** `grep '<pattern>' <path>`\n- `'<pattern>'`: Regex pattern (always wrap in single quotes)\n- `<path>`: Directory or file to search (use `.` for the repo root)\n\nFor example:\n```\ngrep 'create_user' .\ngrep 'import.*requests' src/api\ngrep 'class\\\\s+AuthService' controllers/auth.py\n```\n\n### 4. `finish` - Submit Final Answer\nSubmit your findings when complete.\n**Syntax:** `finish <file1:range1,range2...> [file2:range3...]`\n- Provide file paths with colon-separated, comma-separated line ranges\n\nFor example:\n```\nfinish src/api/auth.py:25-50,75-80 src/models/user.py:10-15\n```\n</tool_calling>\n\n<strategy>\n- Use the `analyse`, `grep`, and `read` tools to gather information about the codebase.\n- Leverage the tools smartly to make full use of their potential\n- Make parallel tool calls within each round to investigate multiple paths or files efficiently\n- Be systematic and thorough within your 3-round limit\n</strategy>\n\n<output_format>\n- Only output tool calls themselves\n- Do not include explanatory text, reasoning, or commentary\n- Each tool call should be on its own line\n- After 3 rounds of exploration, call `finish` with all relevant code snippets you found\n</output_format>\n\nBegin your exploration now to find code relevant to the query.";
+ declare const SYSTEM_PROMPT = "You are a code search agent. Your task is to find all relevant code for a given query.\n\n<workflow>\nYou have exactly 4 turns. The 4th turn MUST be a `finish` call. Each turn allows up to 8 parallel tool calls.\n\n- Turn 1: Map the territory OR dive deep (based on query specificity)\n- Turn 2-3: Refine based on findings\n- Turn 4: MUST call `finish` with all relevant code locations\n- You MAY call `finish` early if confident\u2014but never before at least 1 search turn.\n\nRemember, if the task feels easy to you, it is strongly desirable to call `finish` early using fewer turns, but quality over speed.\n</workflow>\n\n<tools>\n### `analyse <path> [pattern]`\nDirectory tree or file search. Shows structure of a path, optionally filtered by regex pattern.\n- `path`: Required. Directory or file path (use `.` for repo root)\n- `pattern`: Optional regex to filter results\n\nExamples:\n```\nanalyse .\nanalyse src/api\nanalyse . \".*\\.ts$\"\nanalyse src \"test.*\"\n```\n\n### `read <path>[:start-end]`\nRead file contents. Line range is 1-based, inclusive.\n- Returns numbered lines for easy reference\n- Omit range to read entire file\n\nExamples:\n```\nread src/main.py\nread src/db/conn.py:10-50\nread package.json:1-20\n```\n\n### `grep '<pattern>' <path>`\nRipgrep search. Finds pattern matches across files.\n- `'<pattern>'`: Required. Regex pattern wrapped in single quotes\n- `<path>`: Required. Directory or file to search (use `.` for repo root)\n\nExamples:\n```\ngrep 'class.*Service' src/\ngrep 'def authenticate' .\ngrep 'import.*from' src/components/\ngrep 'TODO' .\n```\n\n### `finish <file1:ranges> [file2:ranges ...]`\nSubmit final answer with all relevant code locations.\n- Include generous line ranges\u2014don't be stingy with context\n- Ranges are comma-separated: `file.py:10-30,50-60`\n- ALWAYS include import statements at the top of files (usually lines 1-20)\n- If code spans multiple files, include ALL of them\n- Small files can be returned in full\n\nExamples:\n```\nfinish src/auth.py:1-15,25-50,75-80 src/models/user.py:1-10,20-45\nfinish src/index.ts:1-100\n```\n</tools>\n\n<strategy>\n**Before your first tool call, classify the query:**\n\n| Query Type | Turn 1 Strategy | Early Finish? |\n|------------|-----------------|---------------|\n| **Specific** (function name, error string, unique identifier) | 8 parallel greps on likely paths | Often by turn 2 |\n| **Conceptual** (how does X work, where is Y handled) | analyse + 2-3 broad greps | Rarely early |\n| **Exploratory** (find all tests, list API endpoints) | analyse at multiple depths | Usually needs 3 turns |\n\n**Parallel call patterns:**\n- **Shotgun grep**: Same pattern, 8 different directories\u2014fast coverage\n- **Variant grep**: 8 pattern variations (synonyms, naming conventions)\u2014catches inconsistent codebases\n- **Funnel**: 1 analyse + 7 greps\u2014orient and search simultaneously\n- **Deep read**: 8 reads on files you already identified\u2014gather full context fast\n</strategy>\n\n<output_format>\nEVERY response MUST follow this exact format:\n\n1. First, wrap your reasoning in `<think>...</think>` tags containing:\n - Query classification (specific/conceptual/exploratory)\n - Confidence estimate (can I finish in 1-2 turns?)\n - This turn's parallel strategy\n - What signals would let me finish early?\n\n2. Then, output tool calls wrapped in `<tool_call>...</tool_call>` tags, one per line.\n\nExample:\n```\n<think>\nThis is a specific query about authentication. I'll grep for auth-related patterns.\nHigh confidence I can finish in 2 turns if I find the auth module.\nStrategy: Shotgun grep across likely directories.\n</think>\n<tool_call>grep 'authenticate' src/</tool_call>\n<tool_call>grep 'login' src/</tool_call>\n<tool_call>analyse src/auth</tool_call>\n```\n\nNo commentary outside `<think>`. No explanations after tool calls.\n</output_format>\n\n<finishing_requirements>\nWhen calling `finish`:\n- Include the import section (typically lines 1-20) of each file\n- Include all function/class definitions that are relevant\n- Include any type definitions, interfaces, or constants used\n- Better to over-include than leave the user missing context\n- If unsure about boundaries, include more rather than less\n</finishing_requirements>\n\nBegin your exploration now to find code relevant to the query.";
  declare function getSystemPrompt(): string;
 
  export { SYSTEM_PROMPT, getSystemPrompt };
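The rewritten SYSTEM_PROMPT above defines a strict reply grammar for the search agent: an optional `<think>...</think>` reasoning block followed by up to 8 `<tool_call>...</tool_call>` lines per turn. The sketch below is only an illustration of how a caller might parse that format; it is not part of @morphllm/morphsdk, and the names `ParsedTurn` and `parseAgentTurn` are invented for this example.

```ts
// Minimal sketch: split an agent reply into its <think> block and its tool-call
// command strings, assuming the reply follows the format described in the prompt.
// Hypothetical helper — not an SDK export.

interface ParsedTurn {
  reasoning: string;   // contents of the <think> block, empty if absent
  toolCalls: string[]; // raw commands, e.g. "grep 'login' src/"
}

function parseAgentTurn(reply: string): ParsedTurn {
  // Optional <think>...</think> block.
  const thinkMatch = reply.match(/<think>([\s\S]*?)<\/think>/);
  const reasoning = thinkMatch ? thinkMatch[1].trim() : "";

  // Every <tool_call>...</tool_call> span (the prompt allows up to 8 per turn).
  const toolCalls: string[] = [];
  for (const m of reply.matchAll(/<tool_call>([\s\S]*?)<\/tool_call>/g)) {
    toolCalls.push(m[1].trim());
  }
  return { reasoning, toolCalls };
}

// Usage with a reply shaped like the prompt's own example:
const demo = [
  "<think>Specific query; shotgun grep.</think>",
  "<tool_call>grep 'authenticate' src/</tool_call>",
  "<tool_call>analyse src/auth</tool_call>",
].join("\n");
console.log(parseAgentTurn(demo).toolCalls.length); // 2
```

A real consumer would presumably also enforce the 8-calls-per-turn limit and dispatch each command string to the corresponding analyse/read/grep/finish handler, but those details are outside what this diff shows.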
@@ -1,7 +1,7 @@
  import {
  SYSTEM_PROMPT,
  getSystemPrompt
- } from "../../../chunk-HKZB23U7.js";
+ } from "../../../chunk-WETRQJGU.js";
  import "../../../chunk-PZ5AY32C.js";
  export {
  SYSTEM_PROMPT,