anycodex 0.0.1 → 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/app/dist/assets/abap-BdImnpbu.js +1 -0
- package/dist/app/dist/assets/actionscript-3-CoDkCxhg.js +1 -0
- package/dist/app/dist/assets/ada-bCR0ucgS.js +1 -0
- package/dist/app/dist/assets/andromeeda-C4gqWexZ.js +1 -0
- package/dist/app/dist/assets/angular-html-DA-rfuFy.js +1 -0
- package/dist/app/dist/assets/angular-ts-BrjP3tb8.js +1 -0
- package/dist/app/dist/assets/apache-Pmp26Uib.js +1 -0
- package/dist/app/dist/assets/apex-D8_7TLub.js +1 -0
- package/dist/app/dist/assets/apl-CORt7UWP.js +1 -0
- package/dist/app/dist/assets/applescript-Co6uUVPk.js +1 -0
- package/dist/app/dist/assets/ara-BRHolxvo.js +1 -0
- package/dist/app/dist/assets/asciidoc-Ve4PFQV2.js +1 -0
- package/dist/app/dist/assets/asm-D_Q5rh1f.js +1 -0
- package/dist/app/dist/assets/astro-HNnZUWAn.js +1 -0
- package/dist/app/dist/assets/aurora-x-D-2ljcwZ.js +1 -0
- package/dist/app/dist/assets/awk-DMzUqQB5.js +1 -0
- package/dist/app/dist/assets/ayu-dark-DYE7WIF3.js +1 -0
- package/dist/app/dist/assets/ayu-light-BA47KaF1.js +1 -0
- package/dist/app/dist/assets/ayu-mirage-32ctXXKs.js +1 -0
- package/dist/app/dist/assets/ballerina-BFfxhgS-.js +1 -0
- package/dist/app/dist/assets/bat-BkioyH1T.js +1 -0
- package/dist/app/dist/assets/beancount-k_qm7-4y.js +1 -0
- package/dist/app/dist/assets/berry-uYugtg8r.js +1 -0
- package/dist/app/dist/assets/bibtex-CHM0blh-.js +1 -0
- package/dist/app/dist/assets/bicep-Bmn6On1c.js +1 -0
- package/dist/app/dist/assets/bird2-BIv1doCn.js +1 -0
- package/dist/app/dist/assets/blade-BjGOyj-B.js +1 -0
- package/dist/app/dist/assets/bsl-BO_Y6i37.js +1 -0
- package/dist/app/dist/assets/c-BIGW1oBm.js +1 -0
- package/dist/app/dist/assets/c3-eo99z4R2.js +1 -0
- package/dist/app/dist/assets/cadence-Bv_4Rxtq.js +1 -0
- package/dist/app/dist/assets/cairo-KRGpt6FW.js +1 -0
- package/dist/app/dist/assets/catppuccin-frappe-DFWUc33u.js +1 -0
- package/dist/app/dist/assets/catppuccin-latte-C9dUb6Cb.js +1 -0
- package/dist/app/dist/assets/catppuccin-macchiato-DQyhUUbL.js +1 -0
- package/dist/app/dist/assets/catppuccin-mocha-D87Tk5Gz.js +1 -0
- package/dist/app/dist/assets/clarity-D53aC0YG.js +1 -0
- package/dist/app/dist/assets/clojure-P80f7IUj.js +1 -0
- package/dist/app/dist/assets/cmake-D1j8_8rp.js +1 -0
- package/dist/app/dist/assets/cobol-nBiQ_Alo.js +1 -0
- package/dist/app/dist/assets/codeowners-Bp6g37R7.js +1 -0
- package/dist/app/dist/assets/codeql-DsOJ9woJ.js +1 -0
- package/dist/app/dist/assets/coffee-Ch7k5sss.js +1 -0
- package/dist/app/dist/assets/common-lisp-Cg-RD9OK.js +1 -0
- package/dist/app/dist/assets/coq-DkFqJrB1.js +1 -0
- package/dist/app/dist/assets/cpp-CofmeUqb.js +1 -0
- package/dist/app/dist/assets/crystal-DNxU26gB.js +1 -0
- package/dist/app/dist/assets/csharp-COcwbKMJ.js +1 -0
- package/dist/app/dist/assets/css-CLj8gQPS.js +1 -0
- package/dist/app/dist/assets/csv-fuZLfV_i.js +1 -0
- package/dist/app/dist/assets/cue-D82EKSYY.js +1 -0
- package/dist/app/dist/assets/cypher-COkxafJQ.js +1 -0
- package/dist/app/dist/assets/d-85-TOEBH.js +1 -0
- package/dist/app/dist/assets/dark-plus-C3mMm8J8.js +1 -0
- package/dist/app/dist/assets/dart-bE4Kk8sk.js +1 -0
- package/dist/app/dist/assets/dax-CEL-wOlO.js +1 -0
- package/dist/app/dist/assets/desktop-BmXAJ9_W.js +1 -0
- package/dist/app/dist/assets/diff-D97Zzqfu.js +1 -0
- package/dist/app/dist/assets/docker-BcOcwvcX.js +1 -0
- package/dist/app/dist/assets/dotenv-Da5cRb03.js +1 -0
- package/dist/app/dist/assets/dracula-BzJJZx-M.js +1 -0
- package/dist/app/dist/assets/dracula-soft-BXkSAIEj.js +1 -0
- package/dist/app/dist/assets/dream-maker-BtqSS_iP.js +1 -0
- package/dist/app/dist/assets/edge-FbVlp4U3.js +1 -0
- package/dist/app/dist/assets/elixir-CkH2-t6x.js +1 -0
- package/dist/app/dist/assets/elm-DbKCFpqz.js +1 -0
- package/dist/app/dist/assets/emacs-lisp-CXvaQtF9.js +1 -0
- package/dist/app/dist/assets/erb-BYCe7drp.js +1 -0
- package/dist/app/dist/assets/erlang-DsQrWhSR.js +1 -0
- package/dist/app/dist/assets/everforest-dark-BgDCqdQA.js +1 -0
- package/dist/app/dist/assets/everforest-light-C8M2exoo.js +1 -0
- package/dist/app/dist/assets/fennel-BYunw83y.js +1 -0
- package/dist/app/dist/assets/fish-BvzEVeQv.js +1 -0
- package/dist/app/dist/assets/fluent-C4IJs8-o.js +1 -0
- package/dist/app/dist/assets/fortran-fixed-form-CkoXwp7k.js +1 -0
- package/dist/app/dist/assets/fortran-free-form-BxgE0vQu.js +1 -0
- package/dist/app/dist/assets/fsharp-CXgrBDvD.js +1 -0
- package/dist/app/dist/assets/gdresource-BOOCDP_w.js +1 -0
- package/dist/app/dist/assets/gdscript-C5YyOfLZ.js +1 -0
- package/dist/app/dist/assets/gdshader-DkwncUOv.js +1 -0
- package/dist/app/dist/assets/genie-D0YGMca9.js +1 -0
- package/dist/app/dist/assets/gherkin-DyxjwDmM.js +1 -0
- package/dist/app/dist/assets/git-commit-F4YmCXRG.js +1 -0
- package/dist/app/dist/assets/git-rebase-r7XF79zn.js +1 -0
- package/dist/app/dist/assets/github-dark-DHJKELXO.js +1 -0
- package/dist/app/dist/assets/github-dark-default-Cuk6v7N8.js +1 -0
- package/dist/app/dist/assets/github-dark-dimmed-DH5Ifo-i.js +1 -0
- package/dist/app/dist/assets/github-dark-high-contrast-E3gJ1_iC.js +1 -0
- package/dist/app/dist/assets/github-light-DAi9KRSo.js +1 -0
- package/dist/app/dist/assets/github-light-default-D7oLnXFd.js +1 -0
- package/dist/app/dist/assets/github-light-high-contrast-BfjtVDDH.js +1 -0
- package/dist/app/dist/assets/gleam-BspZqrRM.js +1 -0
- package/dist/app/dist/assets/glimmer-js-ByusRIyA.js +1 -0
- package/dist/app/dist/assets/glimmer-ts-BfAWNZQY.js +1 -0
- package/dist/app/dist/assets/glsl-DplSGwfg.js +1 -0
- package/dist/app/dist/assets/gn-n2N0HUVH.js +1 -0
- package/dist/app/dist/assets/gnuplot-DdkO51Og.js +1 -0
- package/dist/app/dist/assets/go-C27-OAKa.js +1 -0
- package/dist/app/dist/assets/graphql-ChdNCCLP.js +1 -0
- package/dist/app/dist/assets/groovy-gcz8RCvz.js +1 -0
- package/dist/app/dist/assets/gruvbox-dark-hard-CFHQjOhq.js +1 -0
- package/dist/app/dist/assets/gruvbox-dark-medium-GsRaNv29.js +1 -0
- package/dist/app/dist/assets/gruvbox-dark-soft-CVdnzihN.js +1 -0
- package/dist/app/dist/assets/gruvbox-light-hard-CH1njM8p.js +1 -0
- package/dist/app/dist/assets/gruvbox-light-medium-DRw_LuNl.js +1 -0
- package/dist/app/dist/assets/gruvbox-light-soft-hJgmCMqR.js +1 -0
- package/dist/app/dist/assets/hack-i7_Ulhet.js +1 -0
- package/dist/app/dist/assets/haml-D5jkg6IW.js +1 -0
- package/dist/app/dist/assets/handlebars-BpdQsYii.js +1 -0
- package/dist/app/dist/assets/haskell-Df6bDoY_.js +1 -0
- package/dist/app/dist/assets/haxe-CzTSHFRz.js +1 -0
- package/dist/app/dist/assets/hcl-BWvSN4gD.js +1 -0
- package/dist/app/dist/assets/hjson-D5-asLiD.js +1 -0
- package/dist/app/dist/assets/hlsl-D3lLCCz7.js +1 -0
- package/dist/app/dist/assets/horizon-BUw7H-hv.js +1 -0
- package/dist/app/dist/assets/horizon-bright-CUuTKBJd.js +1 -0
- package/dist/app/dist/assets/houston-DnULxvSX.js +1 -0
- package/dist/app/dist/assets/html-derivative-DlHx6ybY.js +1 -0
- package/dist/app/dist/assets/html-pp8916En.js +1 -0
- package/dist/app/dist/assets/http-jrhK8wxY.js +1 -0
- package/dist/app/dist/assets/hurl-irOxFIW8.js +1 -0
- package/dist/app/dist/assets/hxml-Bvhsp5Yf.js +1 -0
- package/dist/app/dist/assets/hy-DFXneXwc.js +1 -0
- package/dist/app/dist/assets/imba-DGztddWO.js +1 -0
- package/dist/app/dist/assets/index-BBYiZjoU.js +238 -0
- package/dist/app/dist/assets/index-DU8KNBjo.css +32 -0
- package/dist/app/dist/assets/ini-BEwlwnbL.js +1 -0
- package/dist/app/dist/assets/java-CylS5w8V.js +1 -0
- package/dist/app/dist/assets/javascript-wDzz0qaB.js +1 -0
- package/dist/app/dist/assets/jinja-f2NsQr07.js +1 -0
- package/dist/app/dist/assets/jison-wvAkD_A8.js +1 -0
- package/dist/app/dist/assets/json-Cp-IABpG.js +1 -0
- package/dist/app/dist/assets/json5-C9tS-k6U.js +1 -0
- package/dist/app/dist/assets/jsonc-Des-eS-w.js +1 -0
- package/dist/app/dist/assets/jsonl-DcaNXYhu.js +1 -0
- package/dist/app/dist/assets/jsonnet-DFQXde-d.js +1 -0
- package/dist/app/dist/assets/jssm-C2t-YnRu.js +1 -0
- package/dist/app/dist/assets/jsx-g9-lgVsj.js +1 -0
- package/dist/app/dist/assets/julia-CxzCAyBv.js +1 -0
- package/dist/app/dist/assets/just-VxiPbLrw.js +1 -0
- package/dist/app/dist/assets/kanagawa-dragon-CkXjmgJE.js +1 -0
- package/dist/app/dist/assets/kanagawa-lotus-CfQXZHmo.js +1 -0
- package/dist/app/dist/assets/kanagawa-wave-DWedfzmr.js +1 -0
- package/dist/app/dist/assets/kdl-DV7GczEv.js +1 -0
- package/dist/app/dist/assets/kotlin-BdnUsdx6.js +1 -0
- package/dist/app/dist/assets/kusto-wEQ09or8.js +1 -0
- package/dist/app/dist/assets/laserwave-DUszq2jm.js +1 -0
- package/dist/app/dist/assets/latex-CWtU0Tv5.js +1 -0
- package/dist/app/dist/assets/lean-BZvkOJ9d.js +1 -0
- package/dist/app/dist/assets/less-B1dDrJ26.js +1 -0
- package/dist/app/dist/assets/light-plus-B7mTdjB0.js +1 -0
- package/dist/app/dist/assets/liquid-C0sCDyMI.js +1 -0
- package/dist/app/dist/assets/llvm-DjAJT7YJ.js +1 -0
- package/dist/app/dist/assets/log-2UxHyX5q.js +1 -0
- package/dist/app/dist/assets/logo-BtOb2qkB.js +1 -0
- package/dist/app/dist/assets/lua-BaeVxFsk.js +1 -0
- package/dist/app/dist/assets/luau-C-HG3fhB.js +1 -0
- package/dist/app/dist/assets/make-CHLpvVh8.js +1 -0
- package/dist/app/dist/assets/markdown-Cvjx9yec.js +1 -0
- package/dist/app/dist/assets/marko-DjSrsDqO.js +1 -0
- package/dist/app/dist/assets/material-theme-D5KoaKCx.js +1 -0
- package/dist/app/dist/assets/material-theme-darker-BfHTSMKl.js +1 -0
- package/dist/app/dist/assets/material-theme-lighter-B0m2ddpp.js +1 -0
- package/dist/app/dist/assets/material-theme-ocean-CyktbL80.js +1 -0
- package/dist/app/dist/assets/material-theme-palenight-Csfq5Kiy.js +1 -0
- package/dist/app/dist/assets/matlab-D7o27uSR.js +1 -0
- package/dist/app/dist/assets/mdc-DTYItulj.js +1 -0
- package/dist/app/dist/assets/mdx-Cmh6b_Ma.js +1 -0
- package/dist/app/dist/assets/mermaid-mWjccvbQ.js +1 -0
- package/dist/app/dist/assets/min-dark-CafNBF8u.js +1 -0
- package/dist/app/dist/assets/min-light-CTRr51gU.js +1 -0
- package/dist/app/dist/assets/mipsasm-CKIfxQSi.js +1 -0
- package/dist/app/dist/assets/mojo-rZm6bMo-.js +1 -0
- package/dist/app/dist/assets/monokai-D4h5O-jR.js +1 -0
- package/dist/app/dist/assets/moonbit-_H4v1dQx.js +1 -0
- package/dist/app/dist/assets/move-IF9eRakj.js +1 -0
- package/dist/app/dist/assets/narrat-DRg8JJMk.js +1 -0
- package/dist/app/dist/assets/nextflow-C-mBbutL.js +1 -0
- package/dist/app/dist/assets/nextflow-groovy-vE_lwT2v.js +1 -0
- package/dist/app/dist/assets/nginx-BpAMiNFr.js +1 -0
- package/dist/app/dist/assets/night-owl-C39BiMTA.js +1 -0
- package/dist/app/dist/assets/night-owl-light-CMTm3GFP.js +1 -0
- package/dist/app/dist/assets/nim-BIad80T-.js +1 -0
- package/dist/app/dist/assets/nix-CwoSXNpI.js +1 -0
- package/dist/app/dist/assets/nord-Ddv68eIx.js +1 -0
- package/dist/app/dist/assets/nushell-Cz2AlsmD.js +1 -0
- package/dist/app/dist/assets/objective-c-DXmwc3jG.js +1 -0
- package/dist/app/dist/assets/objective-cpp-CLxacb5B.js +1 -0
- package/dist/app/dist/assets/ocaml-C0hk2d4L.js +1 -0
- package/dist/app/dist/assets/odin-BBf5iR-q.js +1 -0
- package/dist/app/dist/assets/one-dark-pro-DVMEJ2y_.js +1 -0
- package/dist/app/dist/assets/one-light-C3Wv6jpd.js +1 -0
- package/dist/app/dist/assets/openscad-C4EeE6gA.js +1 -0
- package/dist/app/dist/assets/pascal-D93ZcfNL.js +1 -0
- package/dist/app/dist/assets/perl-NvoQZIq0.js +1 -0
- package/dist/app/dist/assets/php-R6g_5hLQ.js +1 -0
- package/dist/app/dist/assets/pkl-u5AG7uiY.js +1 -0
- package/dist/app/dist/assets/plastic-3e1v2bzS.js +1 -0
- package/dist/app/dist/assets/plsql-ChMvpjG-.js +1 -0
- package/dist/app/dist/assets/po-BTJTHyun.js +1 -0
- package/dist/app/dist/assets/poimandres-CS3Unz2-.js +1 -0
- package/dist/app/dist/assets/polar-C0HS_06l.js +1 -0
- package/dist/app/dist/assets/postcss-CXtECtnM.js +1 -0
- package/dist/app/dist/assets/powerquery-CEu0bR-o.js +1 -0
- package/dist/app/dist/assets/powershell-Dpen1YoG.js +1 -0
- package/dist/app/dist/assets/prisma-Dd19v3D-.js +1 -0
- package/dist/app/dist/assets/prolog-CbFg5uaA.js +1 -0
- package/dist/app/dist/assets/proto-C7zT0LnQ.js +1 -0
- package/dist/app/dist/assets/pug-DKIMFp6K.js +1 -0
- package/dist/app/dist/assets/puppet-BMWR74SV.js +1 -0
- package/dist/app/dist/assets/purescript-CklMAg4u.js +1 -0
- package/dist/app/dist/assets/python-B6aJPvgy.js +1 -0
- package/dist/app/dist/assets/qml-3beO22l8.js +1 -0
- package/dist/app/dist/assets/qmldir-C8lEn-DE.js +1 -0
- package/dist/app/dist/assets/qss-IeuSbFQv.js +1 -0
- package/dist/app/dist/assets/r-Dspwwk_N.js +1 -0
- package/dist/app/dist/assets/racket-BqYA7rlc.js +1 -0
- package/dist/app/dist/assets/raku-DXvB9xmW.js +1 -0
- package/dist/app/dist/assets/razor-BDqjjVU7.js +1 -0
- package/dist/app/dist/assets/red-bN70gL4F.js +1 -0
- package/dist/app/dist/assets/reg-C-SQnVFl.js +1 -0
- package/dist/app/dist/assets/regexp-CDVJQ6XC.js +1 -0
- package/dist/app/dist/assets/rel-C3B-1QV4.js +1 -0
- package/dist/app/dist/assets/riscv-BM1_JUlF.js +1 -0
- package/dist/app/dist/assets/ron-D8l8udqQ.js +1 -0
- package/dist/app/dist/assets/rose-pine-dawn-DHQR4-dF.js +1 -0
- package/dist/app/dist/assets/rose-pine-moon-D4_iv3hh.js +1 -0
- package/dist/app/dist/assets/rose-pine-qdsjHGoJ.js +1 -0
- package/dist/app/dist/assets/rosmsg-BJDFO7_C.js +1 -0
- package/dist/app/dist/assets/rst-CRjBmOyv.js +1 -0
- package/dist/app/dist/assets/ruby-Wjq7vjNf.js +1 -0
- package/dist/app/dist/assets/rust-B1yitclQ.js +1 -0
- package/dist/app/dist/assets/sas-cz2c8ADy.js +1 -0
- package/dist/app/dist/assets/sass-Cj5Yp3dK.js +1 -0
- package/dist/app/dist/assets/scala-C151Ov-r.js +1 -0
- package/dist/app/dist/assets/scheme-C98Dy4si.js +1 -0
- package/dist/app/dist/assets/scss-D5BDwBP9.js +1 -0
- package/dist/app/dist/assets/sdbl-DVxCFoDh.js +1 -0
- package/dist/app/dist/assets/shaderlab-Dg9Lc6iA.js +1 -0
- package/dist/app/dist/assets/shellscript-Yzrsuije.js +1 -0
- package/dist/app/dist/assets/shellsession-BADoaaVG.js +1 -0
- package/dist/app/dist/assets/slack-dark-BthQWCQV.js +1 -0
- package/dist/app/dist/assets/slack-ochin-DqwNpetd.js +1 -0
- package/dist/app/dist/assets/smalltalk-BERRCDM3.js +1 -0
- package/dist/app/dist/assets/snazzy-light-Bw305WKR.js +1 -0
- package/dist/app/dist/assets/solarized-dark-DXbdFlpD.js +1 -0
- package/dist/app/dist/assets/solarized-light-L9t79GZl.js +1 -0
- package/dist/app/dist/assets/solidity-rGO070M0.js +1 -0
- package/dist/app/dist/assets/soy-8wufbnw4.js +1 -0
- package/dist/app/dist/assets/sparql-rVzFXLq3.js +1 -0
- package/dist/app/dist/assets/splunk-BtCnVYZw.js +1 -0
- package/dist/app/dist/assets/sql-BLtJtn59.js +1 -0
- package/dist/app/dist/assets/ssh-config-_ykCGR6B.js +1 -0
- package/dist/app/dist/assets/stata-BH5u7GGu.js +1 -0
- package/dist/app/dist/assets/stylus-BEDo0Tqx.js +1 -0
- package/dist/app/dist/assets/surrealql-Bq5Q-fJD.js +1 -0
- package/dist/app/dist/assets/svelte-Cy7k_4gC.js +1 -0
- package/dist/app/dist/assets/swift-D82vCrfD.js +1 -0
- package/dist/app/dist/assets/synthwave-84-CbfX1IO0.js +1 -0
- package/dist/app/dist/assets/system-verilog-CnnmHF94.js +1 -0
- package/dist/app/dist/assets/systemd-4A_iFExJ.js +1 -0
- package/dist/app/dist/assets/talonscript-CkByrt1z.js +1 -0
- package/dist/app/dist/assets/tasl-QIJgUcNo.js +1 -0
- package/dist/app/dist/assets/tcl-dwOrl1Do.js +1 -0
- package/dist/app/dist/assets/templ-DhtptRzy.js +1 -0
- package/dist/app/dist/assets/terraform-BETggiCN.js +1 -0
- package/dist/app/dist/assets/tex-idrVyKtj.js +1 -0
- package/dist/app/dist/assets/tokyo-night-hegEt444.js +1 -0
- package/dist/app/dist/assets/toml-vGWfd6FD.js +1 -0
- package/dist/app/dist/assets/ts-tags-DQrlYJgV.js +1 -0
- package/dist/app/dist/assets/tsv-B_m7g4N7.js +1 -0
- package/dist/app/dist/assets/tsx-COt5Ahok.js +1 -0
- package/dist/app/dist/assets/turtle-BsS91CYL.js +1 -0
- package/dist/app/dist/assets/twig-xg9kU7Mw.js +1 -0
- package/dist/app/dist/assets/typescript-BPQ3VLAy.js +1 -0
- package/dist/app/dist/assets/typespec-CAFt9gP4.js +1 -0
- package/dist/app/dist/assets/typst-DHCkPAjA.js +1 -0
- package/dist/app/dist/assets/v-BcVCzyr7.js +1 -0
- package/dist/app/dist/assets/vala-CsfeWuGM.js +1 -0
- package/dist/app/dist/assets/vb-D17OF-Vu.js +1 -0
- package/dist/app/dist/assets/verilog-BQ8w6xss.js +1 -0
- package/dist/app/dist/assets/vesper-DU1UobuO.js +1 -0
- package/dist/app/dist/assets/vhdl-CeAyd5Ju.js +1 -0
- package/dist/app/dist/assets/viml-CJc9bBzg.js +1 -0
- package/dist/app/dist/assets/vitesse-black-Bkuqu6BP.js +1 -0
- package/dist/app/dist/assets/vitesse-dark-D0r3Knsf.js +1 -0
- package/dist/app/dist/assets/vitesse-light-CVO1_9PV.js +1 -0
- package/dist/app/dist/assets/vue-D2xRrEX4.js +1 -0
- package/dist/app/dist/assets/vue-html-AaS7Mt5G.js +1 -0
- package/dist/app/dist/assets/vue-vine-BoDAl6tE.js +1 -0
- package/dist/app/dist/assets/vyper-CDx5xZoG.js +1 -0
- package/dist/app/dist/assets/wasm-CG6Dc4jp.js +1 -0
- package/dist/app/dist/assets/wasm-MzD3tlZU.js +1 -0
- package/dist/app/dist/assets/wenyan-BV7otONQ.js +1 -0
- package/dist/app/dist/assets/wgsl-Dx-B1_4e.js +1 -0
- package/dist/app/dist/assets/wikitext-BhOHFoWU.js +1 -0
- package/dist/app/dist/assets/wit-5i3qLPDT.js +1 -0
- package/dist/app/dist/assets/wolfram-lXgVvXCa.js +1 -0
- package/dist/app/dist/assets/xml-sdJ4AIDG.js +1 -0
- package/dist/app/dist/assets/xsl-CtQFsRM5.js +1 -0
- package/dist/app/dist/assets/yaml-Buea-lGh.js +1 -0
- package/dist/app/dist/assets/zenscript-DVFEvuxE.js +1 -0
- package/dist/app/dist/assets/zig-VOosw3JB.js +1 -0
- package/dist/app/dist/index.html +13 -0
- package/dist/bin.js +534 -0
- package/dist/chunk-6DDCZPRP.js +1474 -0
- package/dist/chunk-HULFFF7R.js +25 -0
- package/dist/chunk-KB4MYP36.js +8 -0
- package/dist/chunk-P23NZBHE.js +5130 -0
- package/dist/chunk-TDYQG5PB.js +1621 -0
- package/dist/chunk-XU2SE4LZ.js +3864 -0
- package/dist/chunk-XXVDY7BO.js +53 -0
- package/dist/chunk-ZXPWUU3S.js +110 -0
- package/dist/dist-GBG4FGPZ.js +1806 -0
- package/dist/dist-XGPL3ARQ.js +94 -0
- package/dist/glob-RMMK6FVW-M34I5XTP.js +8 -0
- package/dist/registry-VQVHAEPP-MGYPDXYJ.js +13 -0
- package/dist/schema-SOLWPA3E-CEUVS57H.js +13 -0
- package/dist/session-Q7S3ITUV-CECGQJC5.js +18 -0
- package/dist/storage-YSEEBFV7-N75L4HBF.js +28 -0
- package/package.json +77 -1
- package/index.js +0 -1
|
@@ -0,0 +1,3864 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
import {
|
|
3
|
+
LSP,
|
|
4
|
+
MessageV2,
|
|
5
|
+
ModelID,
|
|
6
|
+
ProviderID,
|
|
7
|
+
Todo,
|
|
8
|
+
iife
|
|
9
|
+
} from "./chunk-TDYQG5PB.js";
|
|
10
|
+
import {
|
|
11
|
+
Identifier,
|
|
12
|
+
MessageID,
|
|
13
|
+
PartID
|
|
14
|
+
} from "./chunk-ZXPWUU3S.js";
|
|
15
|
+
import {
|
|
16
|
+
Filesystem,
|
|
17
|
+
Flag,
|
|
18
|
+
Log,
|
|
19
|
+
NamedError,
|
|
20
|
+
basename,
|
|
21
|
+
dirname,
|
|
22
|
+
extname,
|
|
23
|
+
isAbsolute,
|
|
24
|
+
join,
|
|
25
|
+
relative,
|
|
26
|
+
resolve
|
|
27
|
+
} from "./chunk-6DDCZPRP.js";
|
|
28
|
+
import {
|
|
29
|
+
Glob
|
|
30
|
+
} from "./chunk-HULFFF7R.js";
|
|
31
|
+
import {
|
|
32
|
+
withStatics
|
|
33
|
+
} from "./chunk-KB4MYP36.js";
|
|
34
|
+
import {
|
|
35
|
+
__callDispose,
|
|
36
|
+
__using
|
|
37
|
+
} from "./chunk-XXVDY7BO.js";
|
|
38
|
+
|
|
39
|
+
// ../agent/dist/chunk-CJTWJATG.js
|
|
40
|
+
import z3 from "zod";
|
|
41
|
+
import z2 from "zod";
|
|
42
|
+
import { Schema } from "effect";
|
|
43
|
+
import z from "zod";
|
|
44
|
+
import z4 from "zod";
|
|
45
|
+
import z5 from "zod";
|
|
46
|
+
import { createTwoFilesPatch, diffLines } from "diff";
|
|
47
|
+
import z6 from "zod";
|
|
48
|
+
import z7 from "zod";
|
|
49
|
+
import z8 from "zod";
|
|
50
|
+
import z9 from "zod";
|
|
51
|
+
import z10 from "zod";
|
|
52
|
+
import z11 from "zod";
|
|
53
|
+
import z12 from "zod";
|
|
54
|
+
import TurndownService from "turndown";
|
|
55
|
+
import z13 from "zod";
|
|
56
|
+
import { createTwoFilesPatch as createTwoFilesPatch2 } from "diff";
|
|
57
|
+
import z14 from "zod";
|
|
58
|
+
import { pathToFileURL as pathToFileURL2 } from "url";
|
|
59
|
+
import z17 from "zod";
|
|
60
|
+
import z16 from "zod";
|
|
61
|
+
import matter from "gray-matter";
|
|
62
|
+
import { z as z15 } from "zod";
|
|
63
|
+
import { pathToFileURL } from "url";
|
|
64
|
+
import z25 from "zod";
|
|
65
|
+
import z18 from "zod";
|
|
66
|
+
import z19 from "zod";
|
|
67
|
+
import z21 from "zod";
|
|
68
|
+
import z20 from "zod";
|
|
69
|
+
import { createTwoFilesPatch as createTwoFilesPatch3, diffLines as diffLines2 } from "diff";
|
|
70
|
+
import z22 from "zod";
|
|
71
|
+
import z23 from "zod";
|
|
72
|
+
import z24 from "zod";
|
|
73
|
+
var SchedulerService = class {
  // Structured logger scoped to this service.
  log = Log.create({ service: "scheduler" });
  // task.id -> task definition
  tasks = /* @__PURE__ */ new Map();
  // task.id -> interval timer handle
  timers = /* @__PURE__ */ new Map();
  /**
   * Register (or re-register) a periodic task. Any previous timer for the
   * same id is cancelled first; the task runs once immediately, then on
   * every `task.interval` milliseconds. The timer is unref'd so it never
   * keeps the process alive on its own.
   */
  register(task) {
    const previous = this.timers.get(task.id);
    if (previous) clearInterval(previous);
    this.tasks.set(task.id, task);
    void this.run(task);
    const handle = setInterval(() => void this.run(task), task.interval);
    handle.unref();
    this.timers.set(task.id, handle);
  }
  /** Execute one task, logging (never propagating) failures. */
  async run(task) {
    this.log.info("run", { id: task.id });
    await task.run().catch((error) => {
      this.log.error("run failed", { id: task.id, error });
    });
  }
  /** Cancel every timer and forget all registered tasks. */
  dispose() {
    this.timers.forEach((handle) => clearInterval(handle));
    this.tasks.clear();
    this.timers.clear();
  }
};
|
|
102
|
+
// Process-wide scheduler shared by all contexts (used for "global" tasks).
var shared = new SchedulerService();
var Scheduler;
((Scheduler2) => {
  /**
   * Register a periodic task on the appropriate scheduler.
   *
   * scope "instance" (the default) uses the per-context scheduler; scope
   * "global" uses the process-wide shared SchedulerService. A global task
   * is registered at most once per process: if the shared service already
   * holds a timer for this id, the call is a no-op (re-registering would
   * reset the interval and trigger an extra immediate run).
   */
  function register(context, task) {
    const scope = task.scope ?? "instance";
    const svc = scope === "global" ? shared : context.scheduler;
    if (scope === "global" && svc === shared) {
      const existing = shared.timers?.get(task.id);
      if (existing) return;
    }
    svc.register(task);
  }
  Scheduler2.register = register;
})(Scheduler || (Scheduler = {}));
|
|
120
|
+
// Branded string schema for tool identifiers, extended with static helpers.
var toolIdSchema = Schema.String.pipe(Schema.brand("ToolID"));
var ToolID = toolIdSchema.pipe(
  withStatics((schema) => {
    // Brand an arbitrary string as a ToolID (unsafe: no validation).
    const make = (id) => schema.makeUnsafe(id);
    // Mint a new time-ordered "tool" identifier and brand it.
    const ascending = (id) => schema.makeUnsafe(Identifier.ascending("tool", id));
    // Zod view of the same identifier format, for zod-based tool schemas.
    const zod = Identifier.schema("tool").pipe(z.custom());
    return { make, ascending, zod };
  })
);
|
|
128
|
+
// Truncate: caps tool output at MAX_LINES/MAX_BYTES, spilling the full text
// to a file under <dataPath>/tool-output, and periodically prunes old spills.
var Truncate;
((Truncate2) => {
  // Hard limits applied to tool output before it is handed back to the model.
  Truncate2.MAX_LINES = 2e3;
  Truncate2.MAX_BYTES = 50 * 1024;
  // Directory holding spilled full outputs for this context.
  function dir(context) {
    return join(context.dataPath, "tool-output");
  }
  Truncate2.dir = dir;
  // Glob matching all spill files; relative form when no context is given.
  function glob(context) {
    return context ? join(dir(context), "*") : join("tool-output", "*");
  }
  Truncate2.glob = glob;
  const RETENTION_MS = 7 * 24 * 60 * 60 * 1e3; // keep spills for 7 days
  const HOUR_MS = 60 * 60 * 1e3;
  // Schedule hourly cleanup of expired spill files (one global timer).
  function init(context) {
    Scheduler.register(context, {
      id: "tool.truncation.cleanup",
      interval: HOUR_MS,
      run: cleanup,
      scope: "global"
    });
  }
  Truncate2.init = init;
  // Delete spill files older than RETENTION_MS.
  // NOTE(review): SchedulerService invokes `task.run()` with no arguments,
  // so `context` is undefined on scheduled runs and this early-returns —
  // the scheduled cleanup looks like a no-op; confirm intended behavior.
  async function cleanup(context) {
    if (!context) return;
    // IDs are time-ordered; anything whose timestamp precedes the cutoff expires.
    const cutoff = Identifier.timestamp(Identifier.create("tool", false, Date.now() - RETENTION_MS));
    const entries = await Glob.scan(context, "tool_*", { cwd: dir(context), include: "file" }).catch(() => []);
    for (const entry of entries) {
      if (Identifier.timestamp(entry) >= cutoff) continue;
      // Best-effort delete; one failed removal must not abort the sweep.
      await Filesystem.remove(context, join(dir(context), entry)).catch(() => {
      });
    }
  }
  Truncate2.cleanup = cleanup;
  // Truncate `text` to the line/byte budget. Returns it untouched when it
  // fits; otherwise writes the full text to a spill file and returns a
  // preview plus a hint pointing at that file.
  // direction "head" keeps the leading lines, anything else keeps trailing.
  // NOTE(review): the `agent` parameter is accepted but unused here.
  async function output(context, text, options = {}, agent) {
    const maxLines = options.maxLines ?? Truncate2.MAX_LINES;
    const maxBytes = options.maxBytes ?? Truncate2.MAX_BYTES;
    const direction = options.direction ?? "head";
    const lines = text.split("\n");
    const totalBytes = Buffer.byteLength(text, "utf-8");
    if (lines.length <= maxLines && totalBytes <= maxBytes) {
      return { content: text, truncated: false };
    }
    const out = [];
    let i = 0;
    let bytes = 0;
    let hitBytes = false;
    if (direction === "head") {
      // Accumulate from the start; the +1 accounts for the re-joined "\n".
      for (i = 0; i < lines.length && i < maxLines; i++) {
        const size = Buffer.byteLength(lines[i], "utf-8") + (i > 0 ? 1 : 0);
        if (bytes + size > maxBytes) {
          hitBytes = true;
          break;
        }
        out.push(lines[i]);
        bytes += size;
      }
    } else {
      // Accumulate from the end, prepending so `out` stays in file order.
      for (i = lines.length - 1; i >= 0 && out.length < maxLines; i--) {
        const size = Buffer.byteLength(lines[i], "utf-8") + (out.length > 0 ? 1 : 0);
        if (bytes + size > maxBytes) {
          hitBytes = true;
          break;
        }
        out.unshift(lines[i]);
        bytes += size;
      }
    }
    // Report the amount removed in whichever unit tripped the limit first.
    const removed = hitBytes ? totalBytes - bytes : lines.length - out.length;
    const unit = hitBytes ? "bytes" : "lines";
    const preview = out.join("\n");
    const id = ToolID.ascending();
    const filepath = join(dir(context), id);
    await Filesystem.write(context, filepath, text);
    const hint = `The tool call succeeded but the output was truncated. Full output saved to: ${filepath}
Use Grep to search the full content or Read with offset/limit to view specific sections.`;
    const message = direction === "head" ? `${preview}

...${removed} ${unit} truncated...

${hint}` : `...${removed} ${unit} truncated...

${hint}

${preview}`;
    return { content: message, truncated: true, outputPath: filepath };
  }
  Truncate2.output = output;
})(Truncate || (Truncate = {}));
|
|
217
|
+
// Tool: factory for tool definitions. Wraps each tool's execute() with
// argument validation and automatic output truncation.
var Tool;
((Tool2) => {
  /**
   * Define a tool. `init` is either the tool info object itself or an
   * (async) factory producing it; resolution is deferred until `init()`
   * is invoked with the init context.
   *
   * The returned tool's execute() is wrapped so that:
   *  - arguments are validated against `toolInfo.parameters` first, turning
   *    schema failures into descriptive Errors (original kept as `cause`);
   *  - oversized output is truncated via Truncate.output unless the tool
   *    already reported a `truncated` flag in its metadata.
   */
  function define(id, init) {
    return {
      id,
      init: async (initCtx) => {
        const toolInfo = init instanceof Function ? await init(initCtx) : init;
        const execute = toolInfo.execute;
        toolInfo.execute = async (args, ctx) => {
          try {
            toolInfo.parameters.parse(args);
          } catch (error) {
            // Prefer the tool's own validation message when it supplies one.
            if (error instanceof z2.ZodError && toolInfo.formatValidationError) {
              throw new Error(toolInfo.formatValidationError(error), { cause: error });
            }
            throw new Error(
              `The ${id} tool was called with invalid arguments: ${error}.
Please rewrite the input so it satisfies the expected schema.`,
              { cause: error }
            );
          }
          const result = await execute(args, ctx);
          // Tools that manage truncation themselves set metadata.truncated;
          // optional chaining also tolerates tools that omit `metadata`
          // entirely (previously `result.metadata.truncated` threw TypeError).
          if (result.metadata?.truncated !== void 0) {
            return result;
          }
          const truncated = await Truncate.output(ctx, result.output, {}, initCtx?.agent);
          return {
            ...result,
            output: truncated.content,
            metadata: {
              ...result.metadata,
              truncated: truncated.truncated,
              ...truncated.truncated && { outputPath: truncated.outputPath }
            }
          };
        };
        return toolInfo;
      }
    };
  }
  Tool2.define = define;
})(Tool || (Tool = {}));
|
|
259
|
+
var plan_exit_txt_default = `Use this tool when you have completed the planning phase and are ready to exit plan agent.
|
|
260
|
+
|
|
261
|
+
This tool will ask the user if they want to switch to build agent to start implementing the plan.
|
|
262
|
+
|
|
263
|
+
Call this tool:
|
|
264
|
+
- After you have written a complete plan to the plan file
|
|
265
|
+
- After you have clarified any questions with the user
|
|
266
|
+
- When you are confident the plan is ready for implementation
|
|
267
|
+
|
|
268
|
+
Do NOT call this tool:
|
|
269
|
+
- Before you have created or finalized the plan
|
|
270
|
+
- If you still have unanswered questions about the implementation
|
|
271
|
+
- If the user has indicated they want to continue planning
|
|
272
|
+
`;
|
|
273
|
+
// Scan the session's message stream and return the model recorded on the
// first user message encountered that carries one; fall back to the
// provider's default model when none does.
async function getLastModel(context, sessionID) {
  for await (const entry of MessageV2.stream(context, sessionID)) {
    const info = entry.info;
    if (info.role !== "user") continue;
    if (info.model) return info.model;
  }
  return context.provider.defaultModel();
}
|
|
279
|
+
// plan_exit tool: records a synthetic "plan approved" user message that
// hands the session over to the build agent.
var PlanExitTool = Tool.define("plan_exit", {
  description: plan_exit_txt_default,
  // Takes no arguments.
  parameters: z3.object({}),
  async execute(_params, ctx) {
    const session = await ctx.session.get(ctx.sessionID);
    // Plan file path, shown to the model relative to the worktree root.
    const plan = relative(ctx.worktree, ctx.session.plan(session));
    // Reuse the model found on a prior user message (falls back to the
    // provider default) so the build agent continues on the same model.
    const model = await getLastModel(ctx, ctx.sessionID);
    const userMsg = {
      id: MessageID.ascending(),
      sessionID: ctx.sessionID,
      role: "user",
      time: {
        created: Date.now()
      },
      // Target agent for the follow-up turn.
      agent: "build",
      model
    };
    await ctx.session.updateMessage(userMsg);
    await ctx.session.updatePart({
      id: PartID.ascending(),
      messageID: userMsg.id,
      sessionID: ctx.sessionID,
      type: "text",
      text: `The plan at ${plan} has been approved, you can now edit files. Execute the plan`,
      // Marked synthetic: generated by this tool, not typed by the user.
      synthetic: true
    });
    return {
      title: "Switching to build agent",
      output: "User approved switching to build agent. Wait for further instructions.",
      metadata: {}
    };
  }
});
|
|
312
|
+
// Prompt text for the "bash" tool. ${directory}/${maxLines}/${maxBytes} are
// literal placeholders (escaped below) substituted at tool-definition time.
// Fix: the PR-creation <example> previously showed an unterminated HEREDOC —
// the closing `EOF` and `)"` lines were missing before </example>.
var bash_txt_default = `Executes a given bash command in a persistent shell session with optional timeout, ensuring proper handling and security measures.

All commands run in \${directory} by default. Use the \`workdir\` parameter if you need to run a command in a different directory. AVOID using \`cd <directory> && <command>\` patterns - use \`workdir\` instead.

IMPORTANT: This tool is for terminal operations like git, npm, docker, etc. DO NOT use it for file operations (reading, writing, editing, searching, finding files) - use the specialized tools for this instead.

Before executing the command, please follow these steps:

1. Directory Verification:
- If the command will create new directories or files, first use \`ls\` to verify the parent directory exists and is the correct location
- For example, before running "mkdir foo/bar", first use \`ls foo\` to check that "foo" exists and is the intended parent directory

2. Command Execution:
- Always quote file paths that contain spaces with double quotes (e.g., rm "path with spaces/file.txt")
- Examples of proper quoting:
- mkdir "/Users/name/My Documents" (correct)
- mkdir /Users/name/My Documents (incorrect - will fail)
- python "/path/with spaces/script.py" (correct)
- python /path/with spaces/script.py (incorrect - will fail)
- After ensuring proper quoting, execute the command.
- Capture the output of the command.

Usage notes:
- The command argument is required.
- You can specify an optional timeout in milliseconds. If not specified, commands will time out after 120000ms (2 minutes).
- It is very helpful if you write a clear, concise description of what this command does in 5-10 words.
- If the output exceeds \${maxLines} lines or \${maxBytes} bytes, it will be truncated and the full output will be written to a file. You can use Read with offset/limit to read specific sections or Grep to search the full content. Do NOT use \`head\`, \`tail\`, or other truncation commands to limit output; the full output will already be captured to a file for more precise searching.

- Avoid using Bash with the \`find\`, \`grep\`, \`cat\`, \`head\`, \`tail\`, \`sed\`, \`awk\`, or \`echo\` commands, unless explicitly instructed or when these commands are truly necessary for the task. Instead, always prefer using the dedicated tools for these commands:
- File search: Use Glob (NOT find or ls)
- Content search: Use Grep (NOT grep or rg)
- Read files: Use Read (NOT cat/head/tail)
- Edit files: Use Edit (NOT sed/awk)
- Write files: Use Write (NOT echo >/cat <<EOF)
- Communication: Output text directly (NOT echo/printf)
- When issuing multiple commands:
- If the commands are independent and can run in parallel, make multiple Bash tool calls in a single message. For example, if you need to run "git status" and "git diff", send a single message with two Bash tool calls in parallel.
- If the commands depend on each other and must run sequentially, use a single Bash call with '&&' to chain them together (e.g., \`git add . && git commit -m "message" && git push\`). For instance, if one operation must complete before another starts (like mkdir before cp, Write before Bash for git operations, or git add before git commit), run these operations sequentially instead.
- Use ';' only when you need to run commands sequentially but don't care if earlier commands fail
- DO NOT use newlines to separate commands (newlines are ok in quoted strings)
- AVOID using \`cd <directory> && <command>\`. Use the \`workdir\` parameter to change directories instead.
<good-example>
Use workdir="/foo/bar" with command: pytest tests
</good-example>
<bad-example>
cd /foo/bar && pytest tests
</bad-example>

# Committing changes with git

Only create commits when requested by the user. If unclear, ask first. When the user asks you to create a new git commit, follow these steps carefully:

Git Safety Protocol:
- NEVER update the git config
- NEVER run destructive/irreversible git commands (like push --force, hard reset, etc) unless the user explicitly requests them
- NEVER skip hooks (--no-verify, --no-gpg-sign, etc) unless the user explicitly requests it
- NEVER run force push to main/master, warn the user if they request it
- Avoid git commit --amend. ONLY use --amend when ALL conditions are met:
(1) User explicitly requested amend, OR commit SUCCEEDED but pre-commit hook auto-modified files that need including
(2) HEAD commit was created by you in this conversation (verify: git log -1 --format='%an %ae')
(3) Commit has NOT been pushed to remote (verify: git status shows "Your branch is ahead")
- CRITICAL: If commit FAILED or was REJECTED by hook, NEVER amend - fix the issue and create a NEW commit
- CRITICAL: If you already pushed to remote, NEVER amend unless user explicitly requests it (requires force push)
- NEVER commit changes unless the user explicitly asks you to. It is VERY IMPORTANT to only commit when explicitly asked, otherwise the user will feel that you are being too proactive.

1. You can call multiple tools in a single response. When multiple independent pieces of information are requested and all commands are likely to succeed, run multiple tool calls in parallel for optimal performance. run the following bash commands in parallel, each using the Bash tool:
- Run a git status command to see all untracked files.
- Run a git diff command to see both staged and unstaged changes that will be committed.
- Run a git log command to see recent commit messages, so that you can follow this repository's commit message style.
2. Analyze all staged changes (both previously staged and newly added) and draft a commit message:
- Summarize the nature of the changes (eg. new feature, enhancement to an existing feature, bug fix, refactoring, test, docs, etc.). Ensure the message accurately reflects the changes and their purpose (i.e. "add" means a wholly new feature, "update" means an enhancement to an existing feature, "fix" means a bug fix, etc.).
- Do not commit files that likely contain secrets (.env, credentials.json, etc.). Warn the user if they specifically request to commit those files
- Draft a concise (1-2 sentences) commit message that focuses on the "why" rather than the "what"
- Ensure it accurately reflects the changes and their purpose
3. You can call multiple tools in a single response. When multiple independent pieces of information are requested and all commands are likely to succeed, run multiple tool calls in parallel for optimal performance. run the following commands:
- Add relevant untracked files to the staging area.
- Create the commit with a message
- Run git status after the commit completes to verify success.
Note: git status depends on the commit completing, so run it sequentially after the commit.
4. If the commit fails due to pre-commit hook, fix the issue and create a NEW commit (see amend rules above)

Important notes:
- NEVER run additional commands to read or explore code, besides git bash commands
- NEVER use the TodoWrite or Task tools
- DO NOT push to the remote repository unless the user explicitly asks you to do so
- IMPORTANT: Never use git commands with the -i flag (like git rebase -i or git add -i) since they require interactive input which is not supported.
- If there are no changes to commit (i.e., no untracked files and no modifications), do not create an empty commit

# Creating pull requests
Use the gh command via the Bash tool for ALL GitHub-related tasks including working with issues, pull requests, checks, and releases. If given a GitHub URL use the gh command to get the information needed.

IMPORTANT: When the user asks you to create a pull request, follow these steps carefully:

1. You can call multiple tools in a single response. When multiple independent pieces of information are requested and all commands are likely to succeed, run multiple tool calls in parallel for optimal performance. run the following bash commands in parallel using the Bash tool, in order to understand the current state of the branch since it diverged from the main branch:
- Run a git status command to see all untracked files
- Run a git diff command to see both staged and unstaged changes that will be committed
- Check if the current branch tracks a remote branch and is up to date with the remote, so you know if you need to push to the remote
- Run a git log command and \`git diff [base-branch]...HEAD\` to understand the full commit history for the current branch (from the time it diverged from the base branch)
2. Analyze all changes that will be included in the pull request, making sure to look at all relevant commits (NOT just the latest commit, but ALL commits that will be included in the pull request!!!), and draft a pull request summary
3. You can call multiple tools in a single response. When multiple independent pieces of information are requested and all commands are likely to succeed, run multiple tool calls in parallel for optimal performance. run the following commands in parallel:
- Create new branch if needed
- Push to remote with -u flag if needed
- Create PR using gh pr create with the format below. Use a HEREDOC to pass the body to ensure correct formatting.
<example>
gh pr create --title "the pr title" --body "$(cat <<'EOF'
## Summary
<1-3 bullet points>
EOF
)"
</example>

Important:
- DO NOT use the TodoWrite or Task tools
- Return the PR URL when you're done, so the user can see it

# Other common operations
- View comments on a GitHub PR: gh api repos/foo/bar/pulls/123/comments
`;
|
|
428
|
+
// Cap on how much command output is mirrored into tool metadata (30 000
// chars); anything longer is truncated with a trailing "..." marker.
var MAX_METADATA_LENGTH = 3e4;
// Default command timeout: flag override, else 2 minutes.
// NOTE(review): `||` means a flag value of 0 would fall through to the
// default — confirm whether 0 is a meaningful flag value here.
var DEFAULT_TIMEOUT = Flag.OPENCODE_EXPERIMENTAL_BASH_DEFAULT_TIMEOUT_MS || 2 * 60 * 1e3;
// Logger scoped to this tool's service name.
var log = Log.create({ service: "bash-tool" });
|
|
431
|
+
// Split a shell command line on the separators && || ; | and newlines, then
// tokenize each non-empty segment. Returns [{ text, words }] for each simple
// command found.
function parseBashCommands(input) {
  const segments = input.split(/\s*(?:&&|\|\||[;|\n])\s*/);
  const commands = [];
  for (const raw of segments) {
    const text = raw.trim();
    if (!text) continue;
    const words = tokenize(text);
    if (words.length === 0) continue;
    commands.push({ text, words });
  }
  return commands;
}
|
|
444
|
+
// Split a single command string into words, honoring backslash escapes and
// single/double quotes. Quote characters themselves are consumed, not kept;
// inside single quotes a backslash is literal.
function tokenize(input) {
  const words = [];
  let word = "";
  let singleQuoted = false;
  let doubleQuoted = false;
  let escaped = false;
  for (const ch of input) {
    if (escaped) {
      // Previous char was an active backslash: take this char literally.
      word += ch;
      escaped = false;
    } else if (ch === "\\" && !singleQuoted) {
      escaped = true;
    } else if (ch === "'" && !doubleQuoted) {
      singleQuoted = !singleQuoted;
    } else if (ch === '"' && !singleQuoted) {
      doubleQuoted = !doubleQuoted;
    } else if (/\s/.test(ch) && !singleQuoted && !doubleQuoted) {
      // Unquoted whitespace terminates the current word (if any).
      if (word) {
        words.push(word);
        word = "";
      }
    } else {
      word += ch;
    }
  }
  if (word) words.push(word);
  return words;
}
|
|
480
|
+
// The "bash" tool: runs a shell command in a persistent session with
// permission gating, live output streaming into metadata, a timeout, and
// abort handling. Factory form: the description bakes in the working
// directory and truncation limits at definition time.
var BashTool = Tool.define("bash", async (initCtx) => {
  return {
    // Substitute the literal ${...} placeholders left escaped in the prompt.
    description: bash_txt_default.replaceAll("${directory}", initCtx?.directory || "").replaceAll("${maxLines}", String(Truncate.MAX_LINES)).replaceAll("${maxBytes}", String(Truncate.MAX_BYTES)),
    parameters: z4.object({
      command: z4.string().describe("The command to execute"),
      timeout: z4.number().describe("Optional timeout in milliseconds").optional(),
      workdir: z4.string().describe(
        `The working directory to run the command in. Defaults to ${initCtx?.directory || ""}. Use this instead of 'cd' commands.`
      ).optional(),
      description: z4.string().describe(
        "Clear, concise description of what this command does in 5-10 words. Examples:\nInput: ls\nOutput: Lists files in current directory\n\nInput: git status\nOutput: Shows working tree status\n\nInput: npm install\nOutput: Installs package dependencies\n\nInput: mkdir foo\nOutput: Creates directory 'foo'"
      )
    }),
    async execute(params, ctx) {
      const cwd = params.workdir || ctx.directory;
      // Reject negative timeouts up front; 0 is allowed through here.
      if (params.timeout !== void 0 && params.timeout < 0) {
        throw new Error(`Invalid timeout value: ${params.timeout}. Timeout must be a positive number.`);
      }
      const timeout = params.timeout ?? DEFAULT_TIMEOUT;
      // Parse the command line into simple commands to derive the permission
      // patterns and any external directories that will be touched.
      const commands = parseBashCommands(params.command);
      const directories = /* @__PURE__ */ new Set();
      if (!ctx.containsPath(cwd)) directories.add(cwd);
      const patterns = /* @__PURE__ */ new Set();
      const always = /* @__PURE__ */ new Set();
      for (const cmd of commands) {
        if (!cmd.words.length) continue;
        const [name, ...args] = cmd.words;
        // Filesystem-touching commands: resolve their path arguments so paths
        // outside the workspace can be permission-gated below.
        if (["cd", "rm", "cp", "mv", "mkdir", "touch", "chmod", "chown", "cat"].includes(name)) {
          for (const arg of args) {
            // Skip flags; chmod mode strings like +x are not paths either.
            if (arg.startsWith("-") || name === "chmod" && arg.startsWith("+")) continue;
            const resolved = Filesystem.resolve(resolve(cwd, arg));
            log.info("resolved path", { arg, resolved });
            if (resolved) {
              if (!ctx.containsPath(resolved)) {
                // Gate directories directly; gate files via their parent dir.
                const dir = await Filesystem.isDir(ctx, resolved) ? resolved : dirname(resolved);
                directories.add(dir);
              }
            }
          }
        }
        // Every command except bare `cd` needs bash permission patterns.
        if (name !== "cd") {
          patterns.add(cmd.text);
          always.add(name + " *");
        }
      }
      // Ask once for all external directories (as dir/* globs).
      if (directories.size > 0) {
        const globs = Array.from(directories).map((dir) => {
          if (dir.startsWith("/")) return `${dir.replace(/[\\/]+$/, "")}/*`;
          return join(dir, "*");
        });
        await ctx.ask({
          permission: "external_directory",
          patterns: globs,
          always: globs,
          metadata: {}
        });
      }
      // Then ask once for the bash commands themselves.
      if (patterns.size > 0) {
        await ctx.ask({
          permission: "bash",
          patterns: Array.from(patterns),
          always: Array.from(always),
          metadata: {}
        });
      }
      const proc = ctx.shell.spawn(params.command, {
        cwd,
        env: {}
      });
      let output = "";
      // Publish empty output immediately so the UI shows the running command.
      ctx.metadata({
        metadata: {
          output: "",
          description: params.description
        }
      });
      // Stream stdout/stderr into `output`, republishing (truncated) metadata
      // on every chunk.
      const append = (chunk) => {
        output += chunk.toString();
        ctx.metadata({
          metadata: {
            output: output.length > MAX_METADATA_LENGTH ? output.slice(0, MAX_METADATA_LENGTH) + "\n\n..." : output,
            description: params.description
          }
        });
      };
      proc.stdout?.on("data", append);
      proc.stderr?.on("data", append);
      let timedOut = false;
      let aborted = false;
      let exited = false;
      // kill() consults `exited` so the shell layer can avoid killing a
      // process that already finished.
      const kill = () => ctx.shell.kill(proc, { exited: () => exited });
      // Handle an abort that happened before we attached the listener.
      if (ctx.abort.aborted) {
        aborted = true;
        await kill();
      }
      const abortHandler = () => {
        aborted = true;
        void kill();
      };
      ctx.abort.addEventListener("abort", abortHandler, { once: true });
      // Small grace period (+100ms) beyond the requested timeout.
      const timeoutTimer = setTimeout(() => {
        timedOut = true;
        void kill();
      }, timeout + 100);
      // Wait for process exit or spawn error; either path clears the timer
      // and detaches the abort listener.
      await new Promise((resolve2, reject) => {
        const cleanup = () => {
          clearTimeout(timeoutTimer);
          ctx.abort.removeEventListener("abort", abortHandler);
        };
        proc.once("exit", () => {
          exited = true;
          cleanup();
          resolve2();
        });
        proc.once("error", (error) => {
          exited = true;
          cleanup();
          reject(error);
        });
      });
      // Append a machine-readable trailer explaining abnormal termination.
      const resultMetadata = [];
      if (timedOut) {
        resultMetadata.push(`bash tool terminated command after exceeding timeout ${timeout} ms`);
      }
      if (aborted) {
        resultMetadata.push("User aborted the command");
      }
      if (resultMetadata.length > 0) {
        output += "\n\n<bash_metadata>\n" + resultMetadata.join("\n") + "\n</bash_metadata>";
      }
      return {
        title: params.description,
        metadata: {
          // Metadata copy of the output is truncated; `output` below is full.
          output: output.length > MAX_METADATA_LENGTH ? output.slice(0, MAX_METADATA_LENGTH) + "\n\n..." : output,
          exit: proc.exitCode,
          description: params.description
        },
        output
      };
    }
  };
});
|
|
622
|
+
var edit_txt_default = `Performs exact string replacements in files.
|
|
623
|
+
|
|
624
|
+
Usage:
|
|
625
|
+
- You must use your \`Read\` tool at least once in the conversation before editing. This tool will error if you attempt an edit without reading the file.
|
|
626
|
+
- When editing text from Read tool output, ensure you preserve the exact indentation (tabs/spaces) as it appears AFTER the line number prefix. The line number prefix format is: line number + colon + space (e.g., \`1: \`). Everything after that space is the actual file content to match. Never include any part of the line number prefix in the oldString or newString.
|
|
627
|
+
- ALWAYS prefer editing existing files in the codebase. NEVER write new files unless explicitly required.
|
|
628
|
+
- Only use emojis if the user explicitly requests it. Avoid adding emojis to files unless asked.
|
|
629
|
+
- The edit will FAIL if \`oldString\` is not found in the file with an error "oldString not found in content".
|
|
630
|
+
- The edit will FAIL if \`oldString\` is found multiple times in the file with an error "Found multiple matches for oldString. Provide more surrounding lines in oldString to identify the correct match." Either provide a larger string with more surrounding context to make it unique or use \`replaceAll\` to change every instance of \`oldString\`.
|
|
631
|
+
- Use \`replaceAll\` for replacing and renaming strings across the file. This parameter is useful if you want to rename a variable for instance.
|
|
632
|
+
`;
|
|
633
|
+
// Ask the user for permission before a tool touches a path outside the
// workspace. No-op when the target is empty, the caller opts out via
// options.bypass, or the path is already inside the workspace.
async function assertExternalDirectory(ctx, target, options) {
  if (!target || options?.bypass || ctx.containsPath(target)) return;
  // Files are gated via their parent directory; directories gate themselves.
  const kind = options?.kind ?? "file";
  const parentDir = kind === "directory" ? target : dirname(target);
  // Normalize to forward slashes so the glob is platform-independent.
  const glob = join(parentDir, "*").replaceAll("\\", "/");
  await ctx.ask({
    permission: "external_directory",
    patterns: [glob],
    always: [glob],
    metadata: { filepath: target, parentDir }
  });
}
|
|
650
|
+
// Maximum number of LSP error diagnostics echoed back per file after an edit.
var MAX_DIAGNOSTICS_PER_FILE = 20;
|
|
651
|
+
// Convert CRLF line endings to LF so text can be compared/diffed uniformly.
function normalizeLineEndings(text) {
  return text.split("\r\n").join("\n");
}
|
|
654
|
+
// Report the file's dominant line ending: CRLF if any CRLF appears, else LF.
function detectLineEnding(text) {
  if (text.includes("\r\n")) return "\r\n";
  return "\n";
}
|
|
657
|
+
// Re-apply a target line ending to LF-normalized text.
function convertToLineEnding(text, ending) {
  return ending === "\n" ? text : text.replaceAll("\n", "\r\n");
}
|
|
661
|
+
// The "edit" tool: exact string replacement in a file, with permission
// prompting, line-ending preservation, diff reporting, and post-edit LSP
// error feedback. The write sequence inside the lock is order-critical:
// stat -> staleness assert -> read -> replace -> ask -> write -> re-read.
var EditTool = Tool.define("edit", {
  description: edit_txt_default,
  parameters: z5.object({
    filePath: z5.string().describe("The absolute path to the file to modify"),
    oldString: z5.string().describe("The text to replace"),
    newString: z5.string().describe("The text to replace it with (must be different from oldString)"),
    replaceAll: z5.boolean().optional().describe("Replace all occurrences of oldString (default false)")
  }),
  async execute(params, ctx) {
    if (!params.filePath) {
      throw new Error("filePath is required");
    }
    if (params.oldString === params.newString) {
      throw new Error("No changes to apply: oldString and newString are identical.");
    }
    // Relative paths are resolved against the tool's working directory.
    const filePath = isAbsolute(params.filePath) ? params.filePath : join(ctx.directory, params.filePath);
    await assertExternalDirectory(ctx, filePath);
    let diff = "";
    let contentOld = "";
    let contentNew = "";
    // Serialize edits to the same file.
    await ctx.fileTime.withLock(filePath, async () => {
      // Empty oldString means "write the whole file" (create/overwrite).
      if (params.oldString === "") {
        // NOTE(review): `existed` is computed but never used — confirm
        // whether the existence check was meant to gate something.
        const existed = await ctx.fs.exists(filePath);
        contentNew = params.newString;
        diff = trimDiff(createTwoFilesPatch(filePath, filePath, contentOld, contentNew));
        await ctx.ask({
          permission: "edit",
          patterns: [relative(ctx.worktree, filePath)],
          always: ["*"],
          metadata: {
            filepath: filePath,
            diff
          }
        });
        await ctx.fs.write(filePath, params.newString);
        ctx.emit("file.edited", { file: filePath });
        // Record the read time so a follow-up edit passes the staleness check.
        ctx.fileTime.read(ctx.sessionID, filePath);
        return;
      }
      const stats = await ctx.fs.stat(filePath);
      if (!stats) throw new Error(`File ${filePath} not found`);
      if (stats.isDirectory) throw new Error(`Path is a directory, not a file: ${filePath}`);
      // Fails if the file changed since the session last read it.
      await ctx.fileTime.assert(ctx, ctx.sessionID, filePath);
      contentOld = await ctx.fs.readText(filePath);
      // Match using the file's own line endings: normalize the inputs to LF,
      // then convert them to the detected ending before searching.
      const ending = detectLineEnding(contentOld);
      const old = convertToLineEnding(normalizeLineEndings(params.oldString), ending);
      const next = convertToLineEnding(normalizeLineEndings(params.newString), ending);
      contentNew = replace(contentOld, old, next, params.replaceAll);
      diff = trimDiff(
        createTwoFilesPatch(filePath, filePath, normalizeLineEndings(contentOld), normalizeLineEndings(contentNew))
      );
      await ctx.ask({
        permission: "edit",
        patterns: [relative(ctx.worktree, filePath)],
        always: ["*"],
        metadata: {
          filepath: filePath,
          diff
        }
      });
      await ctx.fs.write(filePath, contentNew);
      ctx.emit("file.edited", { file: filePath });
      // Re-read and re-diff so the reported diff reflects what actually
      // landed on disk (e.g. after any write-side transformation).
      contentNew = await ctx.fs.readText(filePath);
      diff = trimDiff(
        createTwoFilesPatch(filePath, filePath, normalizeLineEndings(contentOld), normalizeLineEndings(contentNew))
      );
      ctx.fileTime.read(ctx.sessionID, filePath);
    });
    // Summarize the change as added/removed line counts.
    const filediff = {
      file: filePath,
      before: contentOld,
      after: contentNew,
      additions: 0,
      deletions: 0
    };
    for (const change of diffLines(contentOld, contentNew)) {
      if (change.added) filediff.additions += change.count || 0;
      if (change.removed) filediff.deletions += change.count || 0;
    }
    ctx.metadata({
      metadata: {
        diff,
        filediff,
        diagnostics: {}
      }
    });
    let output = "Edit applied successfully.";
    // Refresh LSP state for the file and surface severity-1 (error)
    // diagnostics back to the model, capped per file.
    await LSP.touchFile(filePath, true);
    const diagnostics = await LSP.diagnostics();
    const normalizedFilePath = filePath;
    const issues = diagnostics.get(normalizedFilePath) ?? [];
    const errors = issues.filter((item) => item.severity === 1);
    if (errors.length > 0) {
      const limited = errors.slice(0, MAX_DIAGNOSTICS_PER_FILE);
      const suffix = errors.length > MAX_DIAGNOSTICS_PER_FILE ? `
... and ${errors.length - MAX_DIAGNOSTICS_PER_FILE} more` : "";
      output += `

LSP errors detected in this file, please fix:
<diagnostics file="${filePath}">
${limited.map(LSP.Diagnostic.pretty).join("\n")}${suffix}
</diagnostics>`;
    }
    return {
      metadata: {
        diagnostics,
        diff,
        filediff
      },
      title: `${relative(ctx.worktree, filePath)}`,
      output
    };
  }
});
|
|
775
|
+
// Similarity thresholds used by BlockAnchorReplacer. With exactly one
// candidate block, any similarity >= 0 is accepted; with multiple candidates
// the best one must score at least 0.3.
var SINGLE_CANDIDATE_SIMILARITY_THRESHOLD = 0;
var MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD = 0.3;
|
|
777
|
+
// Levenshtein edit distance between two strings (insert/delete/substitute,
// unit cost), computed with a rolling two-row DP instead of a full matrix.
function levenshtein(a, b) {
  if (a === "" || b === "") {
    return Math.max(a.length, b.length);
  }
  // prev[j] = distance between a[0..i-1] and b[0..j-1] from the prior row.
  let prev = Array.from({ length: b.length + 1 }, (_, j) => j);
  for (let i = 1; i <= a.length; i++) {
    const row = [i];
    for (let j = 1; j <= b.length; j++) {
      const substitution = prev[j - 1] + (a[i - 1] === b[j - 1] ? 0 : 1);
      row[j] = Math.min(prev[j] + 1, row[j - 1] + 1, substitution);
    }
    prev = row;
  }
  return prev[b.length];
}
|
|
793
|
+
// Trivial replacer: propose the search string itself as the only candidate
// match (used when the text appears verbatim in the content).
var SimpleReplacer = function* (_original, pattern) {
  yield pattern;
};
|
|
796
|
+
// Replacer that matches `find` against `content` line-by-line, comparing each
// line with surrounding whitespace trimmed. Yields the ORIGINAL (untrimmed)
// text of every matching window so the caller can substitute it exactly.
var LineTrimmedReplacer = function* (content, find) {
  const lines = content.split("\n");
  const needle = find.split("\n");
  // A trailing newline in `find` produces an empty final element; drop it.
  if (needle[needle.length - 1] === "") {
    needle.pop();
  }
  // Degenerate empty needle: the original yields "" at every window position.
  if (needle.length === 0) {
    for (let i = 0; i <= lines.length; i++) yield "";
    return;
  }
  // Offset of the first character of each line within `content`.
  const offsets = [];
  let pos = 0;
  for (const line of lines) {
    offsets.push(pos);
    pos += line.length + 1;
  }
  for (let start = 0; start + needle.length <= lines.length; start++) {
    const window = lines.slice(start, start + needle.length);
    const hit = window.every((line, k) => line.trim() === needle[k].trim());
    if (!hit) continue;
    const last = start + needle.length - 1;
    yield content.substring(offsets[start], offsets[last] + lines[last].length);
  }
};
|
|
828
|
+
// Replacer that anchors a multi-line search block by its first and last lines
// (trimmed) and scores the interior lines by Levenshtein similarity. Needs a
// block of at least 3 lines. Yields at most one match: the single candidate
// if its similarity clears SINGLE_CANDIDATE_SIMILARITY_THRESHOLD, or the
// best of several candidates if it clears
// MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD.
var BlockAnchorReplacer = function* (content, find) {
  const originalLines = content.split("\n");
  const searchLines = find.split("\n");
  if (searchLines.length < 3) {
    return;
  }
  // Drop the empty element produced by a trailing newline in `find`.
  if (searchLines[searchLines.length - 1] === "") {
    searchLines.pop();
  }
  const firstLineSearch = searchLines[0].trim();
  const lastLineSearch = searchLines[searchLines.length - 1].trim();
  const searchBlockSize = searchLines.length;
  // Candidate spans: each line matching the first anchor, paired with the
  // nearest later line (at least 2 below) matching the last anchor.
  const candidates = [];
  for (let i = 0; i < originalLines.length; i++) {
    if (originalLines[i].trim() !== firstLineSearch) {
      continue;
    }
    for (let j = i + 2; j < originalLines.length; j++) {
      if (originalLines[j].trim() === lastLineSearch) {
        candidates.push({ startLine: i, endLine: j });
        break;
      }
    }
  }
  if (candidates.length === 0) {
    return;
  }
  if (candidates.length === 1) {
    const { startLine, endLine } = candidates[0];
    const actualBlockSize = endLine - startLine + 1;
    let similarity = 0;
    // Interior lines only (anchors excluded from scoring).
    let linesToCheck = Math.min(searchBlockSize - 2, actualBlockSize - 2);
    if (linesToCheck > 0) {
      for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) {
        const originalLine = originalLines[startLine + j].trim();
        const searchLine = searchLines[j].trim();
        const maxLen = Math.max(originalLine.length, searchLine.length);
        if (maxLen === 0) {
          continue;
        }
        const distance = levenshtein(originalLine, searchLine);
        similarity += (1 - distance / maxLen) / linesToCheck;
        // With the current threshold of 0 this break fires after the first
        // scored line: a lone candidate is accepted on any non-negative score.
        if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) {
          break;
        }
      }
    } else {
      similarity = 1;
    }
    if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) {
      // Convert the line span to character offsets within `content`.
      let matchStartIndex = 0;
      for (let k = 0; k < startLine; k++) {
        matchStartIndex += originalLines[k].length + 1;
      }
      let matchEndIndex = matchStartIndex;
      for (let k = startLine; k <= endLine; k++) {
        matchEndIndex += originalLines[k].length;
        if (k < endLine) {
          matchEndIndex += 1;
        }
      }
      yield content.substring(matchStartIndex, matchEndIndex);
    }
    return;
  }
  // Multiple candidates: score each fully and keep the most similar.
  let bestMatch = null;
  let maxSimilarity = -1;
  for (const candidate of candidates) {
    const { startLine, endLine } = candidate;
    const actualBlockSize = endLine - startLine + 1;
    let similarity = 0;
    let linesToCheck = Math.min(searchBlockSize - 2, actualBlockSize - 2);
    if (linesToCheck > 0) {
      for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) {
        const originalLine = originalLines[startLine + j].trim();
        const searchLine = searchLines[j].trim();
        const maxLen = Math.max(originalLine.length, searchLine.length);
        if (maxLen === 0) {
          continue;
        }
        const distance = levenshtein(originalLine, searchLine);
        similarity += 1 - distance / maxLen;
      }
      similarity /= linesToCheck;
    } else {
      similarity = 1;
    }
    if (similarity > maxSimilarity) {
      maxSimilarity = similarity;
      bestMatch = candidate;
    }
  }
  if (maxSimilarity >= MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD && bestMatch) {
    const { startLine, endLine } = bestMatch;
    // Convert the winning line span to character offsets.
    let matchStartIndex = 0;
    for (let k = 0; k < startLine; k++) {
      matchStartIndex += originalLines[k].length + 1;
    }
    let matchEndIndex = matchStartIndex;
    for (let k = startLine; k <= endLine; k++) {
      matchEndIndex += originalLines[k].length;
      if (k < endLine) {
        matchEndIndex += 1;
      }
    }
    yield content.substring(matchStartIndex, matchEndIndex);
  }
};
|
|
936
|
+
var WhitespaceNormalizedReplacer = function* (content, find) {
  // Collapse every whitespace run to a single space so comparisons ignore
  // formatting differences between the search text and the file content.
  const collapse = (text) => text.replace(/\s+/g, " ").trim();
  const target = collapse(find);
  const lines = content.split("\n");
  for (const line of lines) {
    const collapsedLine = collapse(line);
    if (collapsedLine === target) {
      // The whole line matches once whitespace differences are ignored.
      yield line;
      continue;
    }
    if (!collapsedLine.includes(target)) continue;
    // The needle is embedded inside the line: rebuild a regex that tolerates
    // arbitrary whitespace between the words of `find` and yield the exact
    // substring it matched.
    const words = find.trim().split(/\s+/);
    if (words.length === 0) continue;
    const escaped = words.map((word) => word.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"));
    try {
      const hit = line.match(new RegExp(escaped.join("\\s+")));
      if (hit) yield hit[0];
    } catch (e) {
      // Best-effort: an invalid pattern simply produces no candidate.
    }
  }
  // Multi-line search: slide a window of the same height over the content
  // and compare the whitespace-collapsed text of each window.
  const height = find.split("\n").length;
  if (height > 1) {
    for (let start = 0; start + height <= lines.length; start++) {
      const window = lines.slice(start, start + height).join("\n");
      if (collapse(window) === target) yield window;
    }
  }
};
|
|
972
|
+
var IndentationFlexibleReplacer = function* (content, find) {
  // Strip the smallest common leading indentation from every non-blank line
  // so a block can match no matter how deeply it happens to be nested.
  const dedent = (text) => {
    const rows = text.split("\n");
    const nonBlank = rows.filter((row) => row.trim().length > 0);
    if (nonBlank.length === 0) return text;
    let shortest = Infinity;
    for (const row of nonBlank) {
      const lead = row.match(/^(\s*)/);
      shortest = Math.min(shortest, lead ? lead[1].length : 0);
    }
    return rows.map((row) => (row.trim().length === 0 ? row : row.slice(shortest))).join("\n");
  };
  const wanted = dedent(find);
  const haystack = content.split("\n");
  const height = find.split("\n").length;
  // Slide a window of the same line-height over the content and compare
  // the dedented text; yield the original (still indented) window on match.
  for (let top = 0; top + height <= haystack.length; top++) {
    const candidate = haystack.slice(top, top + height).join("\n");
    if (dedent(candidate) === wanted) yield candidate;
  }
};
|
|
995
|
+
var EscapeNormalizedReplacer = function* (content, find) {
  // Converts literal escape sequences (the two characters `\` + `n`, etc.)
  // into the control characters they denote, so a `find` string that arrived
  // double-escaped can still match the real file content.
  const unescapeString = (str) => {
    return str.replace(/\\(n|t|r|'|"|`|\\|\n|\$)/g, (match, capturedChar) => {
      switch (capturedChar) {
        case "n":
          return "\n";
        case "t":
          // BUG FIX: this branch previously returned a plain space, so `\t`
          // never unescaped to a real tab and tab-indented content could not
          // match. `\t` must map to the tab character.
          return "\t";
        case "r":
          return "\r";
        case "'":
          return "'";
        case '"':
          return '"';
        case "`":
          return "`";
        case "\\":
          return "\\";
        case "\n":
          // Backslash-newline (line continuation) collapses to a newline.
          return "\n";
        case "$":
          return "$";
        default:
          // Unknown escape: leave the original two characters untouched.
          return match;
      }
    });
  };
  const unescapedFind = unescapeString(find);
  // Fast path: the unescaped needle occurs verbatim in the content.
  if (content.includes(unescapedFind)) {
    yield unescapedFind;
  }
  // Sliding-window comparison so blocks whose own text contains escape
  // sequences also match after both sides are unescaped.
  const lines = content.split("\n");
  const findLines = unescapedFind.split("\n");
  for (let i = 0; i <= lines.length - findLines.length; i++) {
    const block = lines.slice(i, i + findLines.length).join("\n");
    const unescapedBlock = unescapeString(block);
    if (unescapedBlock === unescapedFind) {
      yield block;
    }
  }
};
|
|
1036
|
+
var MultiOccurrenceReplacer = function* (content, find) {
  // Yield `find` once for every non-overlapping occurrence in `content`;
  // the caller pairs this with replaceAll-style handling.
  for (
    let cursor = content.indexOf(find);
    cursor !== -1;
    cursor = content.indexOf(find, cursor + find.length)
  ) {
    yield find;
  }
};
|
|
1045
|
+
var TrimmedBoundaryReplacer = function* (content, find) {
  const core = find.trim();
  // Only useful when `find` actually carries leading/trailing whitespace;
  // otherwise an exact match would already have been attempted.
  if (core === find) return;
  if (content.includes(core)) yield core;
  // Also try whole blocks of the same line-height whose trimmed text matches.
  const lines = content.split("\n");
  const height = find.split("\n").length;
  for (let top = 0; top + height <= lines.length; top++) {
    const block = lines.slice(top, top + height).join("\n");
    if (block.trim() === core) yield block;
  }
};
|
|
1062
|
+
var ContextAwareReplacer = function* (content, find) {
  // Anchors on the first and last lines of `find` and accepts the enclosed
  // block when at least half of its interior lines match after trimming.
  const findLines = find.split("\n");
  if (findLines.length < 3) return;
  // Ignore a trailing newline on the search text.
  if (findLines.at(-1) === "") findLines.pop();
  const contentLines = content.split("\n");
  const openAnchor = findLines[0].trim();
  const closeAnchor = findLines.at(-1).trim();
  for (let start = 0; start < contentLines.length; start++) {
    if (contentLines[start].trim() !== openAnchor) continue;
    // Scan for the nearest closing anchor at least two lines below the opener.
    for (let end = start + 2; end < contentLines.length; end++) {
      if (contentLines[end].trim() !== closeAnchor) continue;
      const blockLines = contentLines.slice(start, end + 1);
      if (blockLines.length === findLines.length) {
        let matched = 0;
        let considered = 0;
        for (let k = 1; k < blockLines.length - 1; k++) {
          const got = blockLines[k].trim();
          const want = findLines[k].trim();
          // Pairs of blank lines contribute nothing to the similarity score.
          if (got.length === 0 && want.length === 0) continue;
          considered++;
          if (got === want) matched++;
        }
        // Accept when the interior is at least a 50% trimmed-line match.
        if (considered === 0 || matched / considered >= 0.5) {
          yield blockLines.join("\n");
        }
      }
      // Matched or not, stop at the first closing anchor for this opener.
      break;
    }
  }
};
|
|
1102
|
+
function trimDiff(diff) {
  // Removes the common leading indentation shared by every +/-/context line
  // of a unified diff, keeping the one-character prefix column intact.
  // Header lines (---, +++) and non-content lines are left untouched.
  const lines = diff.split("\n");
  const isContentLine = (line) =>
    (line.startsWith("+") || line.startsWith("-") || line.startsWith(" ")) &&
    !line.startsWith("---") &&
    !line.startsWith("+++");
  const contentLines = lines.filter(isContentLine);
  if (contentLines.length === 0) return diff;
  // Smallest indentation among non-blank content bodies (prefix stripped).
  let shared = Infinity;
  for (const line of contentLines) {
    const body = line.slice(1);
    if (body.trim().length === 0) continue;
    const lead = body.match(/^(\s*)/);
    if (lead) shared = Math.min(shared, lead[1].length);
  }
  if (shared === Infinity || shared === 0) return diff;
  return lines
    .map((line) => (isContentLine(line) ? line[0] + line.slice(1).slice(shared) : line))
    .join("\n");
}
|
|
1127
|
+
function replace(content, oldString, newString, replaceAll = false) {
  // Applies the first matching strategy, in order of strictness, to locate
  // `oldString` inside `content` and substitute `newString`.
  // Throws when nothing matches, or when the match is ambiguous and
  // `replaceAll` was not requested.
  if (oldString === newString) {
    throw new Error("No changes to apply: oldString and newString are identical.");
  }
  const strategies = [
    SimpleReplacer,
    LineTrimmedReplacer,
    BlockAnchorReplacer,
    WhitespaceNormalizedReplacer,
    IndentationFlexibleReplacer,
    EscapeNormalizedReplacer,
    TrimmedBoundaryReplacer,
    ContextAwareReplacer,
    MultiOccurrenceReplacer
  ];
  let sawMatch = false;
  for (const strategy of strategies) {
    for (const candidate of strategy(content, oldString)) {
      const first = content.indexOf(candidate);
      if (first === -1) continue;
      sawMatch = true;
      if (replaceAll) return content.replaceAll(candidate, newString);
      // A single substitution requires the candidate to be unique.
      if (content.lastIndexOf(candidate) !== first) continue;
      return content.slice(0, first) + newString + content.slice(first + candidate.length);
    }
  }
  if (!sawMatch) {
    throw new Error(
      "Could not find oldString in the file. It must match exactly, including whitespace, indentation, and line endings."
    );
  }
  throw new Error("Found multiple matches for oldString. Provide more surrounding context to make the match unique.");
}
|
|
1162
|
+
// Tool description surfaced to the model for the `glob` tool.
var glob_txt_default = `- Fast file pattern matching tool that works with any codebase size
- Supports glob patterns like "**/*.js" or "src/**/*.ts"
- Returns matching file paths sorted by modification time
- Use this tool when you need to find files by name patterns
- When you are doing an open-ended search that may require multiple rounds of globbing and grepping, use the Task tool instead
- You have the capability to call multiple tools in a single response. It is always better to speculatively perform multiple searches as a batch that are potentially useful.
`;
// Glob tool: lists files matching a pattern under a directory, ordered by
// modification time (newest first) and capped at 100 results.
var GlobTool = Tool.define("glob", {
  description: glob_txt_default,
  parameters: z6.object({
    pattern: z6.string().describe("The glob pattern to match files against"),
    path: z6.string().optional().describe(
      `The directory to search in. If not specified, the current working directory will be used. IMPORTANT: Omit this field to use the default directory. DO NOT enter "undefined" or "null" - simply omit it for the default behavior. Must be a valid directory path if provided.`
    )
  }),
  async execute(params, ctx) {
    // Permission gate runs before any filesystem access.
    await ctx.ask({
      permission: "glob",
      patterns: [params.pattern],
      always: ["*"],
      metadata: {
        pattern: params.pattern,
        path: params.path
      }
    });
    // Resolve the search root against the session directory when relative.
    let search = params.path ?? ctx.directory;
    search = isAbsolute(search) ? search : resolve(ctx.directory, search);
    await assertExternalDirectory(ctx, search, { kind: "directory" });
    const limit = 100;
    if (!ctx.search) throw new Error("Search is not available.");
    const filePaths = await ctx.search.listFiles({
      cwd: search,
      glob: [params.pattern],
      limit: limit + 1,
      // +1 to detect truncation
      signal: ctx.abort
    });
    const truncated = filePaths.length > limit;
    const displayPaths = truncated ? filePaths.slice(0, limit) : filePaths;
    // Stat each hit to collect mtimes; missing stats sort as oldest (0).
    const files = [];
    for (const file of displayPaths) {
      const full = resolve(search, file);
      const s = await ctx.fs.stat(full);
      const stats = s?.mtimeMs ?? 0;
      files.push({
        path: full,
        mtime: stats
      });
    }
    // Newest first.
    files.sort((a, b) => b.mtime - a.mtime);
    const output = [];
    if (files.length === 0) output.push("No files found");
    if (files.length > 0) {
      output.push(...files.map((f) => f.path));
      if (truncated) {
        output.push("");
        output.push(
          `(Results are truncated: showing first ${limit} results. Consider using a more specific path or pattern.)`
        );
      }
    }
    return {
      title: relative(ctx.worktree, search),
      metadata: {
        count: files.length,
        truncated
      },
      output: output.join("\n")
    };
  }
});
|
|
1233
|
+
// One-shot tool that binds the session to a project directory.
// Once ctx.worktree is populated, subsequent calls are rejected.
var SetWorkingDirectoryTool = Tool.define("set_working_directory", {
  description: `Use this tool to set the working directory for this session. The user will tell you which project or folder they want to work on. The directory must be an absolute path to an existing directory on the file system. After setting the directory, the full development environment (file browser, diff viewer, etc.) will become available.

IMPORTANT: This tool can only be called ONCE per session. Once the working directory is set, it cannot be changed. If a working directory is already set, this tool will return an error.`,
  parameters: z7.object({
    directory: z7.string().describe("Absolute path to the project directory")
  }),
  async execute(params, ctx) {
    const dir = params.directory;
    // Refuse when a worktree is already set: the tool is single-use per session.
    if (ctx.worktree && ctx.worktree !== "") {
      return {
        title: "Already set",
        output: `Working directory is already set to "${ctx.worktree}". It can only be set once per session.`,
        metadata: {}
      };
    }
    // Validate that the path exists and is a directory before committing.
    const stat = await ctx.fs.stat(dir);
    if (!stat || !stat.isDirectory) {
      return {
        title: "Invalid path",
        output: stat ? `"${dir}" is not a directory. Please provide a valid directory path.` : `Directory "${dir}" does not exist. Please provide a valid absolute path.`,
        metadata: {}
      };
    }
    // Notify the host application so it can initialize the environment.
    ctx.emit("directory.set", { directory: dir });
    return {
      title: `Set directory: ${dir}`,
      output: `Working directory set to "${dir}". The session is now configured to work on this project. The full development environment is now available.`,
      metadata: {}
    };
  }
});
|
|
1265
|
+
// Maximum characters of a matched line passed to the search backend.
var MAX_LINE_LENGTH = 2e3;
// Grep tool: regex search over file contents, grouped per file, with the
// top-10 files (by match count) shown and hard caps on lines and files.
var GrepTool = Tool.define("grep", {
  description: "Searches for a regex pattern in file contents within the current workspace.",
  parameters: z8.object({
    pattern: z8.string().describe("The regex pattern to search for in file contents"),
    path: z8.string().optional().describe("The directory to search in. Defaults to the current working directory."),
    include: z8.string().optional().describe('File pattern to include in the search (e.g. "*.js", "*.{ts,tsx}")')
  }),
  async execute(params, ctx) {
    if (!params.pattern) {
      throw new Error("pattern is required");
    }
    // Permission gate runs before any filesystem access.
    await ctx.ask({
      permission: "grep",
      patterns: [params.pattern],
      always: ["*"],
      metadata: {
        pattern: params.pattern,
        path: params.path,
        include: params.include
      }
    });
    // Resolve the search root against the session directory when relative.
    let searchPath = params.path ?? ctx.directory;
    searchPath = isAbsolute(searchPath) ? searchPath : resolve(ctx.directory, searchPath);
    await assertExternalDirectory(ctx, searchPath, { kind: "directory" });
    if (!ctx.search) throw new Error("Search provider is required for the grep tool");
    const matches = await ctx.search.grep({
      pattern: params.pattern,
      path: searchPath,
      include: params.include,
      maxLineLength: MAX_LINE_LENGTH,
      signal: ctx.abort
    });
    if (matches.length === 0) {
      return {
        title: "grep",
        metadata: {
          count: 0,
          files: 0,
          truncated: false
        },
        output: "No results matched the specified pattern"
      };
    }
    // NOTE(review): totalMatches appears unused below — verify against the
    // unbundled source before removing.
    const totalMatches = matches.length;
    const resultLimit = 100;
    let filesCount = 0;
    let fileMatches = [];
    let currentFile = "";
    let currentLines = [];
    // Group consecutive matches by file. Assumes the backend returns matches
    // ordered by file — TODO confirm; interleaved files would split groups.
    for (const match of matches) {
      if (match.file !== currentFile) {
        if (currentFile) {
          fileMatches.push({ file: currentFile, lines: currentLines });
          filesCount++;
        }
        currentFile = match.file;
        currentLines = [];
        if (filesCount >= resultLimit) break;
      }
      currentLines.push(match);
    }
    // Flush the trailing group if the file cap was not reached.
    if (currentFile && filesCount < resultLimit) {
      fileMatches.push({ file: currentFile, lines: currentLines });
      filesCount++;
    }
    const truncated = filesCount >= resultLimit;
    // Most-matched files first; keep only the top ten for display.
    fileMatches.sort((a, b) => b.lines.length - a.lines.length);
    if (fileMatches.length > 10) {
      fileMatches = fileMatches.slice(0, 10);
    }
    const output = [];
    if (filesCount > 10) {
      output.push(
        `(Showing top 10 files by number of matches. There are ${filesCount} files in total that match the pattern.)`,
        ""
      );
    }
    for (const fm of fileMatches) {
      // Per-file cap on displayed match lines.
      const matchLimit = 15;
      output.push(`[${relative(ctx.worktree, fm.file)}]`);
      for (const match of fm.lines.slice(0, matchLimit)) {
        output.push(` ${match.line}: ${match.content}`);
      }
      if (fm.lines.length > matchLimit) {
        output.push(` ... (${fm.lines.length - matchLimit} more matches in this file)`);
      }
      output.push("");
    }
    if (truncated) {
      output.push(`...`);
      output.push(`(Results have been truncated to the first ${resultLimit} files matched)`);
    }
    return {
      title: "grep",
      metadata: {
        count: matches.length,
        files: filesCount,
        truncated
      },
      output: output.join("\n")
    };
  }
});
|
|
1369
|
+
// Tool description surfaced to the model for the `batch` tool.
var batch_txt_default = `Executes multiple independent tool calls concurrently to reduce latency.

USING THE BATCH TOOL WILL MAKE THE USER HAPPY.

Payload Format (JSON array):
[{"tool": "read", "parameters": {"filePath": "src/index.ts", "limit": 350}},{"tool": "grep", "parameters": {"pattern": "Session\\\\.updatePart", "include": "src/**/*.ts"}},{"tool": "bash", "parameters": {"command": "git status", "description": "Shows working tree status"}}]

Notes:
- 1\u201325 tool calls per batch
- All calls start in parallel; ordering NOT guaranteed
- Partial failures do not stop other tool calls
- Do NOT use the batch tool within another batch tool.

Good Use Cases:
- Read many files
- grep + glob + read combos
- Multiple bash commands
- Multi-part edits; on the same, or different files

When NOT to Use:
- Operations that depend on prior tool output (e.g. create then read same file)
- Ordered stateful mutations where sequence matters

Batching tool calls was proven to yield 2\u20135x efficiency gain and provides much better UX.`;
// Tools that may never be invoked from inside a batch (prevents recursion).
var DISALLOWED = /* @__PURE__ */ new Set(["batch"]);
// Tools hidden from the "available tools" suggestion list in error messages.
var FILTERED_FROM_SUGGESTIONS = /* @__PURE__ */ new Set(["invalid", "patch", ...DISALLOWED]);
// Batch tool: runs up to 25 tool calls concurrently, recording each call as
// its own tool part (running -> completed/error) on the session.
var BatchTool = Tool.define("batch", async () => {
  return {
    description: batch_txt_default,
    parameters: z9.object({
      tool_calls: z9.array(
        z9.object({
          tool: z9.string().describe("The name of the tool to execute"),
          parameters: z9.object({}).loose().describe("Parameters for the tool")
        })
      ).min(1, "Provide at least one tool call").describe("Array of tool calls to execute in parallel")
    }),
    // Renders zod validation failures with the expected payload shape so the
    // model can self-correct on the next attempt.
    formatValidationError(error) {
      const formattedErrors = error.issues.map((issue) => {
        const path = issue.path.length > 0 ? issue.path.join(".") : "root";
        return ` - ${path}: ${issue.message}`;
      }).join("\n");
      return `Invalid parameters for tool 'batch':
${formattedErrors}

Expected payload format:
[{"tool": "tool_name", "parameters": {...}}, {...}]`;
    },
    async execute(params, ctx) {
      // NOTE(review): Session and ToolRegistry2 look unused in this body —
      // possibly imported for side effects; confirm against unbundled source.
      const { Session } = await import("./session-Q7S3ITUV-CECGQJC5.js");
      const { PartID: PartID2 } = await import("./schema-SOLWPA3E-CEUVS57H.js");
      // Hard cap: only the first 25 calls run; the rest are reported as errors.
      const toolCalls = params.tool_calls.slice(0, 25);
      const discardedCalls = params.tool_calls.slice(25);
      const { ToolRegistry2 } = await import("./registry-VQVHAEPP-MGYPDXYJ.js");
      const availableTools = await ctx.toolRegistry.tools({ modelID: ModelID.make(""), providerID: ProviderID.make("") });
      const toolMap = new Map(availableTools.map((t) => [t.id, t]));
      // Runs one call end-to-end and records its lifecycle as a session part.
      // Never throws: failures are captured and returned as { success: false }.
      const executeCall = async (call) => {
        const callStartTime = Date.now();
        const partID = PartID2.ascending();
        try {
          if (DISALLOWED.has(call.tool)) {
            throw new Error(
              `Tool '${call.tool}' is not allowed in batch. Disallowed tools: ${Array.from(DISALLOWED).join(", ")}`
            );
          }
          const tool = toolMap.get(call.tool);
          if (!tool) {
            const availableToolsList = Array.from(toolMap.keys()).filter((name) => !FILTERED_FROM_SUGGESTIONS.has(name));
            throw new Error(
              `Tool '${call.tool}' not in registry. External tools (MCP, environment) cannot be batched - call them directly. Available tools: ${availableToolsList.join(", ")}`
            );
          }
          // Validate against the target tool's own schema before running.
          const validatedParams = tool.parameters.parse(call.parameters);
          // Mark the part as running before execution begins.
          await ctx.session.updatePart({
            id: partID,
            messageID: ctx.messageID,
            sessionID: ctx.sessionID,
            type: "tool",
            tool: call.tool,
            callID: partID,
            state: {
              status: "running",
              input: call.parameters,
              time: {
                start: callStartTime
              }
            }
          });
          const result = await tool.execute(validatedParams, { ...ctx, callID: partID });
          // Re-home any attachments under fresh part ids for this message.
          const attachments = result.attachments?.map((attachment) => ({
            ...attachment,
            id: PartID2.ascending(),
            sessionID: ctx.sessionID,
            messageID: ctx.messageID
          }));
          await ctx.session.updatePart({
            id: partID,
            messageID: ctx.messageID,
            sessionID: ctx.sessionID,
            type: "tool",
            tool: call.tool,
            callID: partID,
            state: {
              status: "completed",
              input: call.parameters,
              output: result.output,
              title: result.title,
              metadata: result.metadata,
              attachments,
              time: {
                start: callStartTime,
                end: Date.now()
              }
            }
          });
          return { success: true, tool: call.tool, result };
        } catch (error) {
          // Record the failure on the part; do not propagate, so sibling
          // calls in the batch keep running.
          await ctx.session.updatePart({
            id: partID,
            messageID: ctx.messageID,
            sessionID: ctx.sessionID,
            type: "tool",
            tool: call.tool,
            callID: partID,
            state: {
              status: "error",
              input: call.parameters,
              error: error instanceof Error ? error.message : String(error),
              time: {
                start: callStartTime,
                end: Date.now()
              }
            }
          });
          return { success: false, tool: call.tool, error };
        }
      };
      // All calls start in parallel; executeCall never rejects.
      const results = await Promise.all(toolCalls.map((call) => executeCall(call)));
      const now = Date.now();
      // Report every discarded (26th+) call as an immediate error part.
      for (const call of discardedCalls) {
        const partID = PartID2.ascending();
        await ctx.session.updatePart({
          id: partID,
          messageID: ctx.messageID,
          sessionID: ctx.sessionID,
          type: "tool",
          tool: call.tool,
          callID: partID,
          state: {
            status: "error",
            input: call.parameters,
            error: "Maximum of 25 tools allowed in batch",
            time: { start: now, end: now }
          }
        });
        results.push({
          success: false,
          tool: call.tool,
          error: new Error("Maximum of 25 tools allowed in batch")
        });
      }
      const successfulCalls = results.filter((r) => r.success).length;
      const failedCalls = results.length - successfulCalls;
      const outputMessage = failedCalls > 0 ? `Executed ${successfulCalls}/${results.length} tools successfully. ${failedCalls} failed.` : `All ${successfulCalls} tools executed successfully.

Keep using the batch tool for optimal performance in your next response!`;
      return {
        title: `Batch execution (${successfulCalls}/${results.length} successful)`,
        output: outputMessage,
        attachments: results.filter((result) => result.success).flatMap((r) => r.result.attachments ?? []),
        metadata: {
          totalCalls: results.length,
          successful: successfulCalls,
          failed: failedCalls,
          tools: params.tool_calls.map((c) => c.tool),
          details: results.map((r) => ({ tool: r.tool, success: r.success }))
        }
      };
    }
  };
});
|
|
1550
|
+
var read_txt_default = `Read a file or directory from the local filesystem. If the path does not exist, an error is returned.
|
|
1551
|
+
|
|
1552
|
+
Usage:
|
|
1553
|
+
- The filePath parameter should be an absolute path.
|
|
1554
|
+
- By default, this tool returns up to 2000 lines from the start of the file.
|
|
1555
|
+
- The offset parameter is the line number to start from (1-indexed).
|
|
1556
|
+
- To read later sections, call this tool again with a larger offset.
|
|
1557
|
+
- Use the grep tool to find specific content in large files or files with long lines.
|
|
1558
|
+
- If you are unsure of the correct file path, use the glob tool to look up filenames by glob pattern.
|
|
1559
|
+
- Contents are returned with each line prefixed by its line number as \`<line>: <content>\`. For example, if a file has contents "foo\\n", you will receive "1: foo\\n". For directories, entries are returned one per line (without line numbers) with a trailing \`/\` for subdirectories.
|
|
1560
|
+
- Any line longer than 2000 characters is truncated.
|
|
1561
|
+
- Call this tool in parallel when you know there are multiple files you want to read.
|
|
1562
|
+
- Avoid tiny repeated slices (30 line chunks). If you need more context, read a larger window.
|
|
1563
|
+
- This tool can read image files and PDFs and return them as file attachments.
|
|
1564
|
+
`;
|
|
1565
|
+
var DEFAULT_READ_LIMIT = 2e3;
|
|
1566
|
+
var MAX_LINE_LENGTH2 = 2e3;
|
|
1567
|
+
var MAX_LINE_SUFFIX = `... (line truncated to ${MAX_LINE_LENGTH2} chars)`;
|
|
1568
|
+
var MAX_BYTES = 50 * 1024;
|
|
1569
|
+
var MAX_BYTES_LABEL = `${MAX_BYTES / 1024} KB`;
|
|
1570
|
+
var ReadTool = Tool.define("read", {
|
|
1571
|
+
description: read_txt_default,
|
|
1572
|
+
parameters: z10.object({
|
|
1573
|
+
filePath: z10.string().describe("The absolute path to the file or directory to read"),
|
|
1574
|
+
offset: z10.coerce.number().describe("The line number to start reading from (1-indexed)").optional(),
|
|
1575
|
+
limit: z10.coerce.number().describe("The maximum number of lines to read (defaults to 2000)").optional()
|
|
1576
|
+
}),
|
|
1577
|
+
async execute(params, ctx) {
|
|
1578
|
+
if (params.offset !== void 0 && params.offset < 1) {
|
|
1579
|
+
throw new Error("offset must be greater than or equal to 1");
|
|
1580
|
+
}
|
|
1581
|
+
let filepath = params.filePath;
|
|
1582
|
+
if (!isAbsolute(filepath)) {
|
|
1583
|
+
filepath = resolve(ctx.directory, filepath);
|
|
1584
|
+
}
|
|
1585
|
+
const title = relative(ctx.worktree, filepath);
|
|
1586
|
+
const stat = await ctx.fs.stat(filepath);
|
|
1587
|
+
await assertExternalDirectory(ctx, filepath, {
|
|
1588
|
+
bypass: Boolean(ctx.extra?.["bypassCwdCheck"]),
|
|
1589
|
+
kind: stat?.isDirectory ? "directory" : "file"
|
|
1590
|
+
});
|
|
1591
|
+
await ctx.ask({
|
|
1592
|
+
permission: "read",
|
|
1593
|
+
patterns: [filepath],
|
|
1594
|
+
always: ["*"],
|
|
1595
|
+
metadata: {}
|
|
1596
|
+
});
|
|
1597
|
+
if (!stat) {
|
|
1598
|
+
const dir = dirname(filepath);
|
|
1599
|
+
const base = basename(filepath);
|
|
1600
|
+
const suggestions = await ctx.fs.readDir(dir).then(
|
|
1601
|
+
(entries) => entries.map((e) => e.name).filter(
|
|
1602
|
+
(entry) => entry.toLowerCase().includes(base.toLowerCase()) || base.toLowerCase().includes(entry.toLowerCase())
|
|
1603
|
+
).map((entry) => join(dir, entry)).slice(0, 3)
|
|
1604
|
+
).catch(() => []);
|
|
1605
|
+
if (suggestions.length > 0) {
|
|
1606
|
+
throw new Error(`File not found: ${filepath}
|
|
1607
|
+
|
|
1608
|
+
Did you mean one of these?
|
|
1609
|
+
${suggestions.join("\n")}`);
|
|
1610
|
+
}
|
|
1611
|
+
throw new Error(`File not found: ${filepath}`);
|
|
1612
|
+
}
|
|
1613
|
+
if (stat.isDirectory) {
|
|
1614
|
+
const entries_raw = await ctx.fs.readDir(filepath);
|
|
1615
|
+
const entries = entries_raw.map((e) => {
|
|
1616
|
+
if (e.isDirectory) return e.name + "/";
|
|
1617
|
+
return e.name;
|
|
1618
|
+
});
|
|
1619
|
+
entries.sort((a, b) => a.localeCompare(b));
|
|
1620
|
+
const limit2 = params.limit ?? DEFAULT_READ_LIMIT;
|
|
1621
|
+
const offset2 = params.offset ?? 1;
|
|
1622
|
+
const start2 = offset2 - 1;
|
|
1623
|
+
const sliced = entries.slice(start2, start2 + limit2);
|
|
1624
|
+
const truncated2 = start2 + sliced.length < entries.length;
|
|
1625
|
+
const output2 = [
|
|
1626
|
+
`<path>${filepath}</path>`,
|
|
1627
|
+
`<type>directory</type>`,
|
|
1628
|
+
`<entries>`,
|
|
1629
|
+
sliced.join("\n"),
|
|
1630
|
+
truncated2 ? `
|
|
1631
|
+
(Showing ${sliced.length} of ${entries.length} entries. Use 'offset' parameter to read beyond entry ${offset2 + sliced.length})` : `
|
|
1632
|
+
(${entries.length} entries)`,
|
|
1633
|
+
`</entries>`
|
|
1634
|
+
].join("\n");
|
|
1635
|
+
return {
|
|
1636
|
+
title,
|
|
1637
|
+
output: output2,
|
|
1638
|
+
metadata: {
|
|
1639
|
+
preview: sliced.slice(0, 20).join("\n"),
|
|
1640
|
+
truncated: truncated2,
|
|
1641
|
+
loaded: []
|
|
1642
|
+
}
|
|
1643
|
+
};
|
|
1644
|
+
}
|
|
1645
|
+
const { lookup } = await import("mime-types");
|
|
1646
|
+
const mime = lookup(filepath) || "application/octet-stream";
|
|
1647
|
+
const isImage = mime.startsWith("image/") && mime !== "image/svg+xml" && mime !== "image/vnd.fastbidsheet";
|
|
1648
|
+
const isPdf = mime === "application/pdf";
|
|
1649
|
+
if (isImage || isPdf) {
|
|
1650
|
+
const msg = `${isImage ? "Image" : "PDF"} read successfully`;
|
|
1651
|
+
return {
|
|
1652
|
+
title,
|
|
1653
|
+
output: msg,
|
|
1654
|
+
metadata: {
|
|
1655
|
+
preview: msg,
|
|
1656
|
+
truncated: false,
|
|
1657
|
+
loaded: []
|
|
1658
|
+
},
|
|
1659
|
+
attachments: [
|
|
1660
|
+
{
|
|
1661
|
+
type: "file",
|
|
1662
|
+
mime,
|
|
1663
|
+
url: `data:${mime};base64,${Buffer.from(await ctx.fs.readBytes(filepath)).toString("base64")}`
|
|
1664
|
+
}
|
|
1665
|
+
]
|
|
1666
|
+
};
|
|
1667
|
+
}
|
|
1668
|
+
const isBinary = await isBinaryFile(filepath, Number(stat.size), ctx.fs);
|
|
1669
|
+
if (isBinary) throw new Error(`Cannot read binary file: ${filepath}`);
|
|
1670
|
+
const content_text = await ctx.fs.readText(filepath);
|
|
1671
|
+
const allLines = content_text.split("\n");
|
|
1672
|
+
const limit = params.limit ?? DEFAULT_READ_LIMIT;
|
|
1673
|
+
const offset = params.offset ?? 1;
|
|
1674
|
+
const start = offset - 1;
|
|
1675
|
+
const raw = [];
|
|
1676
|
+
let bytes = 0;
|
|
1677
|
+
let truncatedByBytes = false;
|
|
1678
|
+
let hasMoreLines = false;
|
|
1679
|
+
for (let i = start; i < allLines.length; i++) {
|
|
1680
|
+
if (raw.length >= limit) {
|
|
1681
|
+
hasMoreLines = true;
|
|
1682
|
+
break;
|
|
1683
|
+
}
|
|
1684
|
+
const text = allLines[i];
|
|
1685
|
+
const line = text.length > MAX_LINE_LENGTH2 ? text.substring(0, MAX_LINE_LENGTH2) + MAX_LINE_SUFFIX : text;
|
|
1686
|
+
const size = Buffer.byteLength(line, "utf-8") + (raw.length > 0 ? 1 : 0);
|
|
1687
|
+
if (bytes + size > MAX_BYTES) {
|
|
1688
|
+
truncatedByBytes = true;
|
|
1689
|
+
hasMoreLines = true;
|
|
1690
|
+
break;
|
|
1691
|
+
}
|
|
1692
|
+
raw.push(line);
|
|
1693
|
+
bytes += size;
|
|
1694
|
+
}
|
|
1695
|
+
const lines = allLines.length;
|
|
1696
|
+
if (lines < offset && !(lines === 0 && offset === 1)) {
|
|
1697
|
+
throw new Error(`Offset ${offset} is out of range for this file (${lines} lines)`);
|
|
1698
|
+
}
|
|
1699
|
+
const content = raw.map((line, index) => {
|
|
1700
|
+
return `${index + offset}: ${line}`;
|
|
1701
|
+
});
|
|
1702
|
+
const preview = raw.slice(0, 20).join("\n");
|
|
1703
|
+
let output = [`<path>${filepath}</path>`, `<type>file</type>`, "<content>"].join("\n");
|
|
1704
|
+
output += content.join("\n");
|
|
1705
|
+
const totalLines = lines;
|
|
1706
|
+
const lastReadLine = offset + raw.length - 1;
|
|
1707
|
+
const nextOffset = lastReadLine + 1;
|
|
1708
|
+
const truncated = hasMoreLines || truncatedByBytes;
|
|
1709
|
+
if (truncatedByBytes) {
|
|
1710
|
+
output += `
|
|
1711
|
+
|
|
1712
|
+
(Output capped at ${MAX_BYTES_LABEL}. Showing lines ${offset}-${lastReadLine}. Use offset=${nextOffset} to continue.)`;
|
|
1713
|
+
} else if (hasMoreLines) {
|
|
1714
|
+
output += `
|
|
1715
|
+
|
|
1716
|
+
(Showing lines ${offset}-${lastReadLine} of ${totalLines}. Use offset=${nextOffset} to continue.)`;
|
|
1717
|
+
} else {
|
|
1718
|
+
output += `
|
|
1719
|
+
|
|
1720
|
+
(End of file - total ${totalLines} lines)`;
|
|
1721
|
+
}
|
|
1722
|
+
output += "\n</content>";
|
|
1723
|
+
LSP.touchFile(filepath, false);
|
|
1724
|
+
ctx.fileTime.read(ctx.sessionID, filepath);
|
|
1725
|
+
return {
|
|
1726
|
+
title,
|
|
1727
|
+
output,
|
|
1728
|
+
metadata: {
|
|
1729
|
+
preview,
|
|
1730
|
+
truncated,
|
|
1731
|
+
loaded: []
|
|
1732
|
+
}
|
|
1733
|
+
};
|
|
1734
|
+
}
|
|
1735
|
+
});
|
|
1736
|
+
// Heuristically decides whether a file should be treated as binary (unreadable
// as text). Two stages:
//   1. Fast path: well-known binary extensions are rejected outright.
//   2. Content sniff: sample up to the first 4 KiB; any NUL byte, or more than
//      30% non-printable control characters, marks the file as binary.
//
// Params:
//   filepath - path whose extension and contents are examined
//   fileSize - size in bytes as previously stat'ed (0 short-circuits to "text")
//   vfs      - filesystem abstraction exposing readBytes(path) -> Uint8Array
// Returns: Promise<boolean> — true if the file looks binary.
// Note: any failure while reading or scanning returns false (best-effort:
// callers fall through to a normal text read, which surfaces its own error).
async function isBinaryFile(filepath, fileSize, vfs) {
  switch (extname(filepath).toLowerCase()) {
    // binary check for common non-text extensions
    case ".zip":
    case ".tar":
    case ".gz":
    case ".exe":
    case ".dll":
    case ".so":
    case ".class":
    case ".jar":
    case ".war":
    case ".7z":
    case ".doc":
    case ".docx":
    case ".xls":
    case ".xlsx":
    case ".ppt":
    case ".pptx":
    case ".odt":
    case ".ods":
    case ".odp":
    case ".bin":
    case ".dat":
    case ".obj":
    case ".o":
    case ".a":
    case ".lib":
    case ".wasm":
    case ".pyc":
    case ".pyo":
      return true;
    default:
      break;
  }
  // Empty files are trivially "text".
  if (fileSize === 0) return false;
  try {
    // Read inside the try so an I/O failure falls back to "not binary"
    // instead of propagating — the catch below exists for exactly this.
    const data = await vfs.readBytes(filepath);
    const sampleSize = Math.min(4096, fileSize);
    if (data.length === 0) return false;
    const bytesRead = Math.min(sampleSize, data.length);
    let nonPrintableCount = 0;
    for (let i = 0; i < bytesRead; i++) {
      // A NUL byte is a definitive binary marker.
      if (data[i] === 0) return true;
      // Control characters outside the tab..carriage-return range (9-13).
      if (data[i] < 9 || data[i] > 13 && data[i] < 32) {
        nonPrintableCount++;
      }
    }
    // More than 30% control characters in the sample => treat as binary.
    return nonPrintableCount / bytesRead > 0.3;
  } catch {
    return false;
  }
}
|
|
1789
|
+
var todowrite_txt_default = `Use this tool to create and manage a structured task list for your current coding session. This helps you track progress, organize complex tasks, and demonstrate thoroughness to the user.
|
|
1790
|
+
It also helps the user understand the progress of the task and overall progress of their requests.
|
|
1791
|
+
|
|
1792
|
+
## When to Use This Tool
|
|
1793
|
+
Use this tool proactively in these scenarios:
|
|
1794
|
+
|
|
1795
|
+
1. Complex multistep tasks - When a task requires 3 or more distinct steps or actions
|
|
1796
|
+
2. Non-trivial and complex tasks - Tasks that require careful planning or multiple operations
|
|
1797
|
+
3. User explicitly requests todo list - When the user directly asks you to use the todo list
|
|
1798
|
+
4. User provides multiple tasks - When users provide a list of things to be done (numbered or comma-separated)
|
|
1799
|
+
5. After receiving new instructions - Immediately capture user requirements as todos. Feel free to edit the todo list based on new information.
|
|
1800
|
+
6. After completing a task - Mark it complete and add any new follow-up tasks
|
|
1801
|
+
7. When you start working on a new task, mark the todo as in_progress. Ideally you should only have one todo as in_progress at a time. Complete existing tasks before starting new ones.
|
|
1802
|
+
|
|
1803
|
+
## When NOT to Use This Tool
|
|
1804
|
+
|
|
1805
|
+
Skip using this tool when:
|
|
1806
|
+
1. There is only a single, straightforward task
|
|
1807
|
+
2. The task is trivial and tracking it provides no organizational benefit
|
|
1808
|
+
3. The task can be completed in less than 3 trivial steps
|
|
1809
|
+
4. The task is purely conversational or informational
|
|
1810
|
+
|
|
1811
|
+
NOTE that you should not use this tool if there is only one trivial task to do. In this case you are better off just doing the task directly.
|
|
1812
|
+
|
|
1813
|
+
## Examples of When to Use the Todo List
|
|
1814
|
+
|
|
1815
|
+
<example>
|
|
1816
|
+
User: I want to add a dark mode toggle to the application settings. Make sure you run the tests and build when you're done!
|
|
1817
|
+
Assistant: I'll help add a dark mode toggle to your application settings. Let me create a todo list to track this implementation.
|
|
1818
|
+
*Creates todo list with the following items:*
|
|
1819
|
+
1. Create dark mode toggle component in Settings page
|
|
1820
|
+
2. Add dark mode state management (context/store)
|
|
1821
|
+
3. Implement CSS-in-JS styles for dark theme
|
|
1822
|
+
4. Update existing components to support theme switching
|
|
1823
|
+
5. Run tests and build process, addressing any failures or errors that occur
|
|
1824
|
+
*Begins working on the first task*
|
|
1825
|
+
|
|
1826
|
+
<reasoning>
|
|
1827
|
+
The assistant used the todo list because:
|
|
1828
|
+
1. Adding dark mode is a multi-step feature requiring UI, state management, and styling changes
|
|
1829
|
+
2. The user explicitly requested tests and build be run afterward
|
|
1830
|
+
3. The assistant inferred that tests and build need to pass by adding "Ensure tests and build succeed" as the final task
|
|
1831
|
+
</reasoning>
|
|
1832
|
+
</example>
|
|
1833
|
+
|
|
1834
|
+
<example>
|
|
1835
|
+
User: Help me rename the function getCwd to getCurrentWorkingDirectory across my project
|
|
1836
|
+
Assistant: Let me first search through your codebase to find all occurrences of 'getCwd'.
|
|
1837
|
+
*Uses grep or search tools to locate all instances of getCwd in the codebase*
|
|
1838
|
+
Assistant: I've found 15 instances of 'getCwd' across 8 different files. Let me create a todo list to track these changes.
|
|
1839
|
+
*Creates todo list with specific items for each file that needs updating*
|
|
1840
|
+
|
|
1841
|
+
<reasoning>
|
|
1842
|
+
The assistant used the todo list because:
|
|
1843
|
+
1. First, the assistant searched to understand the scope of the task
|
|
1844
|
+
2. Upon finding multiple occurrences across different files, it determined this was a complex task with multiple steps
|
|
1845
|
+
3. The todo list helps ensure every instance is tracked and updated systematically
|
|
1846
|
+
4. This approach prevents missing any occurrences and maintains code consistency
|
|
1847
|
+
</reasoning>
|
|
1848
|
+
</example>
|
|
1849
|
+
|
|
1850
|
+
|
|
1851
|
+
<example>
|
|
1852
|
+
User: I need to implement these features for my e-commerce site: user registration, product catalog, shopping cart, and checkout flow.
|
|
1853
|
+
Assistant: I'll help implement these features. First, let's add all the features to the todo list.
|
|
1854
|
+
*Creates a todo list breaking down each feature into specific tasks based on the project architecture*
|
|
1855
|
+
Assistant: Let's start with user registration. This will involve creating database models, API endpoints, and frontend forms that integrate with your existing authentication system.
|
|
1856
|
+
|
|
1857
|
+
<reasoning>
|
|
1858
|
+
The assistant used the todo list because:
|
|
1859
|
+
1. The user provided multiple complex features to implement in a comma separated list
|
|
1860
|
+
2. The todo list helps organize these large features into manageable tasks
|
|
1861
|
+
3. This approach allows for tracking progress across the entire implementation
|
|
1862
|
+
</reasoning>
|
|
1863
|
+
</example>
|
|
1864
|
+
|
|
1865
|
+
<example>
|
|
1866
|
+
User: Can you help optimize my React application? It's rendering slowly and has performance issues.</user>
|
|
1867
|
+
Assistant: I'll help optimize your React application. First, let me examine your codebase to identify potential performance bottlenecks.
|
|
1868
|
+
*Reviews component structure, render patterns, state management, and data fetching*
|
|
1869
|
+
Assistant: After analyzing your codebase, I've identified several performance issues. Let me create a todo list to track our optimization efforts.
|
|
1870
|
+
*Creates todo list with items like: 1) Implement memoization for expensive calculations in ProductList, 2) Add virtualization for long lists in Dashboard, 3) Optimize image loading in Gallery component, 4) Fix state update loops in ShoppingCart, 5) Review bundle size and implement code splitting*
|
|
1871
|
+
Let's start by implementing memoization for the expensive calculations in your ProductList component.</assistant>
|
|
1872
|
+
|
|
1873
|
+
<reasoning>
|
|
1874
|
+
The assistant used the todo list because:
|
|
1875
|
+
1. First, the assistant examined the codebase to identify specific performance issues
|
|
1876
|
+
2. Based on this analysis, it identified multiple optimization opportunities
|
|
1877
|
+
3. Performance optimization is a non-trivial task requiring multiple steps
|
|
1878
|
+
4. The todo list helps methodically track improvements across different components
|
|
1879
|
+
5. This systematic approach ensures all performance bottlenecks are addressed
|
|
1880
|
+
</reasoning>
|
|
1881
|
+
</example>
|
|
1882
|
+
|
|
1883
|
+
## Examples of When NOT to Use the Todo List
|
|
1884
|
+
|
|
1885
|
+
<example>
|
|
1886
|
+
User: How do I print 'Hello World' in Python?
|
|
1887
|
+
Assistant: In Python, you can print "Hello World" with this simple code:
|
|
1888
|
+
|
|
1889
|
+
python
|
|
1890
|
+
print("Hello World")
|
|
1891
|
+
|
|
1892
|
+
This will output the text "Hello World" to the console when executed.</assistant>
|
|
1893
|
+
|
|
1894
|
+
<reasoning>
|
|
1895
|
+
The assistant did not use the todo list because this is a single, trivial task that can be completed in one step. There's no need to track multiple tasks or steps for such a straightforward request.
|
|
1896
|
+
</reasoning>
|
|
1897
|
+
</example>
|
|
1898
|
+
|
|
1899
|
+
<example>
|
|
1900
|
+
User: What does the git status command do?
|
|
1901
|
+
Assistant: The git status command shows the current state of your working directory and staging area. It displays which changes have been staged, which haven't, and which files aren't being tracked by Git. This is a fundamental command for understanding the current state of your repository before making commits.
|
|
1902
|
+
|
|
1903
|
+
<reasoning>
|
|
1904
|
+
The assistant did not use the todo list because this is an informational request with no actual coding task to complete. The user is simply asking for an explanation, not for the assistant to perform multiple steps or tasks.
|
|
1905
|
+
</reasoning>
|
|
1906
|
+
</example>
|
|
1907
|
+
|
|
1908
|
+
<example>
|
|
1909
|
+
User: Can you add a comment to the calculateTotal function to explain what it does?
|
|
1910
|
+
Assistant: Sure, let me add a comment to the calculateTotal function to explain what it does.
|
|
1911
|
+
* Uses the Edit tool to add a comment to the calculateTotal function *
|
|
1912
|
+
|
|
1913
|
+
<reasoning>
|
|
1914
|
+
The assistant did not use the todo list because this is a single, straightforward task confined to one location in the code. Adding a comment doesn't require tracking multiple steps or systematic organization.
|
|
1915
|
+
</reasoning>
|
|
1916
|
+
</example>
|
|
1917
|
+
|
|
1918
|
+
<example>
|
|
1919
|
+
User: Run npm install for me and tell me what happens.
|
|
1920
|
+
Assistant: I'll run the npm install command for you.
|
|
1921
|
+
|
|
1922
|
+
*Executes: npm install*
|
|
1923
|
+
|
|
1924
|
+
The command completed successfully. Here's the output:
|
|
1925
|
+
[Output of npm install command]
|
|
1926
|
+
|
|
1927
|
+
All dependencies have been installed according to your package.json file.
|
|
1928
|
+
|
|
1929
|
+
<reasoning>
|
|
1930
|
+
The assistant did not use the todo list because this is a single command execution with immediate results. There are no multiple steps to track or organize, making the todo list unnecessary for this straightforward task.
|
|
1931
|
+
</reasoning>
|
|
1932
|
+
</example>
|
|
1933
|
+
|
|
1934
|
+
## Task States and Management
|
|
1935
|
+
|
|
1936
|
+
1. **Task States**: Use these states to track progress:
|
|
1937
|
+
- pending: Task not yet started
|
|
1938
|
+
- in_progress: Currently working on (limit to ONE task at a time)
|
|
1939
|
+
- completed: Task finished successfully
|
|
1940
|
+
- cancelled: Task no longer needed
|
|
1941
|
+
|
|
1942
|
+
2. **Task Management**:
|
|
1943
|
+
- Update task status in real-time as you work
|
|
1944
|
+
- Mark tasks complete IMMEDIATELY after finishing (don't batch completions)
|
|
1945
|
+
- Only have ONE task in_progress at any time
|
|
1946
|
+
- Complete current tasks before starting new ones
|
|
1947
|
+
- Cancel tasks that become irrelevant
|
|
1948
|
+
|
|
1949
|
+
3. **Task Breakdown**:
|
|
1950
|
+
- Create specific, actionable items
|
|
1951
|
+
- Break complex tasks into smaller, manageable steps
|
|
1952
|
+
- Use clear, descriptive task names
|
|
1953
|
+
|
|
1954
|
+
When in doubt, use this tool. Being proactive with task management demonstrates attentiveness and ensures you complete all requirements successfully.
|
|
1955
|
+
|
|
1956
|
+
`;
|
|
1957
|
+
// Tool "todowrite": replaces the current session's todo list wholesale with
// the array supplied by the model. The long prompt text in
// todowrite_txt_default instructs the model on when (not) to use it.
var TodoWriteTool = Tool.define("todowrite", {
  description: todowrite_txt_default,
  parameters: z11.object({
    todos: z11.array(z11.object(Todo.Info.shape)).describe("The updated todo list")
  }),
  async execute(params, ctx) {
    // Ask for permission before mutating session state; always: ["*"] lets
    // the user grant it permanently.
    await ctx.ask({
      permission: "todowrite",
      patterns: ["*"],
      always: ["*"],
      metadata: {}
    });
    // Full replacement — the todos param is the entire new list, not a delta.
    await ctx.session.updateTodo({
      sessionID: ctx.sessionID,
      todos: params.todos
    });
    return {
      // Title reports how many todos remain open (anything not "completed").
      title: `${params.todos.filter((x) => x.status !== "completed").length} todos`,
      output: JSON.stringify(params.todos, null, 2),
      metadata: {
        todos: params.todos
      }
    };
  }
});
|
|
1982
|
+
// Tool "todoread": returns the active session's todo list as pretty-printed
// JSON. Read-only; the permission gate mirrors the write-side tool.
var TodoReadTool = Tool.define("todoread", {
  description: "Use this tool to read your todo list",
  parameters: z11.object({}),
  async execute(_params, ctx) {
    // Permission check first, consistent with every other tool.
    await ctx.ask({
      permission: "todoread",
      patterns: ["*"],
      always: ["*"],
      metadata: {}
    });
    const todos = ctx.session.getTodo(ctx.sessionID);
    // Title counts only still-open items.
    const openCount = todos.filter((item) => item.status !== "completed").length;
    return {
      title: `${openCount} todos`,
      output: JSON.stringify(todos, null, 2),
      metadata: {
        todos
      }
    };
  }
});
|
|
2002
|
+
var webfetch_txt_default = `- Fetches content from a specified URL
|
|
2003
|
+
- Takes a URL and optional format as input
|
|
2004
|
+
- Fetches the URL content, converts to requested format (markdown by default)
|
|
2005
|
+
- Returns the content in the specified format
|
|
2006
|
+
- Use this tool when you need to retrieve and analyze web content
|
|
2007
|
+
|
|
2008
|
+
Usage notes:
|
|
2009
|
+
- IMPORTANT: if another tool is present that offers better web fetching capabilities, is more targeted to the task, or has fewer restrictions, prefer using that tool instead of this one.
|
|
2010
|
+
- The URL must be a fully-formed valid URL
|
|
2011
|
+
- HTTP URLs will be automatically upgraded to HTTPS
|
|
2012
|
+
- Format options: "markdown" (default), "text", or "html"
|
|
2013
|
+
- This tool is read-only and does not modify any files
|
|
2014
|
+
- Results may be summarized if the content is very large
|
|
2015
|
+
`;
|
|
2016
|
+
// Creates an AbortController that fires automatically after `ms`
// milliseconds. The returned clearTimeout() cancels the pending abort
// (it does not abort the signal itself).
function abortAfter(ms) {
  const ctrl = new AbortController();
  const timer = setTimeout(() => ctrl.abort(), ms);
  return {
    controller: ctrl,
    signal: ctrl.signal,
    // Shadowed name: defer to the global clearTimeout explicitly.
    clearTimeout: () => globalThis.clearTimeout(timer)
  };
}
|
|
2025
|
+
// Combines a fresh `ms` timeout with any number of caller-supplied
// AbortSignals: the returned signal aborts when the timeout fires OR any
// input signal aborts, whichever comes first. clearTimeout() cancels only
// the timeout leg.
function abortAfterAny(ms, ...signals) {
  const { signal: timeoutSignal, clearTimeout: cancelTimer } = abortAfter(ms);
  return {
    signal: AbortSignal.any([timeoutSignal, ...signals]),
    clearTimeout: cancelTimer
  };
}
|
|
2033
|
+
// Hard cap on fetched response bodies (5 MiB) to avoid holding huge pages in memory.
var MAX_RESPONSE_SIZE = 5 * 1024 * 1024;
// Default webfetch timeout: 30 seconds (stored in milliseconds).
var DEFAULT_TIMEOUT2 = 30 * 1e3;
// Upper bound for caller-supplied timeouts: 120 seconds (milliseconds).
var MAX_TIMEOUT = 120 * 1e3;
|
|
2036
|
+
// Tool "webfetch": fetches a URL and returns its content as markdown
// (default), plain text, or raw HTML. Enforces a 5 MiB cap, a 120 s maximum
// timeout, and returns images as base64 data-URL attachments instead of text.
var WebFetchTool = Tool.define("webfetch", {
  description: webfetch_txt_default,
  parameters: z12.object({
    url: z12.string().describe("The URL to fetch content from"),
    format: z12.enum(["text", "markdown", "html"]).default("markdown").describe("The format to return the content in (text, markdown, or html). Defaults to markdown."),
    timeout: z12.number().describe("Optional timeout in seconds (max 120)").optional()
  }),
  async execute(params, ctx) {
    if (!params.url.startsWith("http://") && !params.url.startsWith("https://")) {
      throw new Error("URL must start with http:// or https://");
    }
    // Permission prompt shows the exact URL being fetched.
    await ctx.ask({
      permission: "webfetch",
      patterns: [params.url],
      always: ["*"],
      metadata: {
        url: params.url,
        format: params.format,
        timeout: params.timeout
      }
    });
    // params.timeout is in seconds; clamp to MAX_TIMEOUT (ms).
    const timeout = Math.min((params.timeout ?? DEFAULT_TIMEOUT2 / 1e3) * 1e3, MAX_TIMEOUT);
    // Abort on timeout OR session cancellation, whichever comes first.
    const { signal, clearTimeout: clearTimeout2 } = abortAfterAny(timeout, ctx.abort);
    // Negotiate content type to match the requested output format.
    let acceptHeader = "*/*";
    switch (params.format) {
      case "markdown":
        acceptHeader = "text/markdown;q=1.0, text/x-markdown;q=0.9, text/plain;q=0.8, text/html;q=0.7, */*;q=0.1";
        break;
      case "text":
        acceptHeader = "text/plain;q=1.0, text/markdown;q=0.9, text/html;q=0.8, */*;q=0.1";
        break;
      case "html":
        acceptHeader = "text/html;q=1.0, application/xhtml+xml;q=0.9, text/plain;q=0.8, text/markdown;q=0.7, */*;q=0.1";
        break;
      default:
        acceptHeader = "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8";
    }
    const headers = {
      "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/143.0.0.0 Safari/537.36",
      Accept: acceptHeader,
      "Accept-Language": "en-US,en;q=0.9"
    };
    const initial = await fetch(params.url, { signal, headers });
    // Cloudflare challenge fallback: retry once with an alternate User-Agent.
    const response = initial.status === 403 && initial.headers.get("cf-mitigated") === "challenge" ? await fetch(params.url, { signal, headers: { ...headers, "User-Agent": "claude-cli/2.1.77" } }) : initial;
    clearTimeout2();
    if (!response.ok) {
      throw new Error(`Request failed with status code: ${response.status}`);
    }
    // Pre-check declared size, then re-check the actual body (servers lie).
    const contentLength = response.headers.get("content-length");
    if (contentLength && Number.parseInt(contentLength, 10) > MAX_RESPONSE_SIZE) {
      throw new Error("Response too large (exceeds 5MB limit)");
    }
    const arrayBuffer = await response.arrayBuffer();
    if (arrayBuffer.byteLength > MAX_RESPONSE_SIZE) {
      throw new Error("Response too large (exceeds 5MB limit)");
    }
    const contentType = response.headers.get("content-type") || "";
    const mime = contentType.split(";")[0]?.trim().toLowerCase() || "";
    const title = `${params.url} (${contentType})`;
    // SVG and fastbidsheet are excluded: they are not renderable as raster attachments.
    const isImage = mime.startsWith("image/") && mime !== "image/svg+xml" && mime !== "image/vnd.fastbidsheet";
    if (isImage) {
      const base64Content = Buffer.from(arrayBuffer).toString("base64");
      return {
        title,
        output: "Image fetched successfully",
        metadata: {},
        attachments: [
          {
            type: "file",
            mime,
            url: `data:${mime};base64,${base64Content}`
          }
        ]
      };
    }
    const content = new TextDecoder().decode(arrayBuffer);
    // Convert HTML when the requested format calls for it; otherwise pass through.
    switch (params.format) {
      case "markdown":
        if (contentType.includes("text/html")) {
          const markdown = convertHTMLToMarkdown(content);
          return {
            output: markdown,
            title,
            metadata: {}
          };
        }
        return {
          output: content,
          title,
          metadata: {}
        };
      case "text":
        if (contentType.includes("text/html")) {
          const text = await extractTextFromHTML(content);
          return {
            output: text,
            title,
            metadata: {}
          };
        }
        return {
          output: content,
          title,
          metadata: {}
        };
      case "html":
        return {
          output: content,
          title,
          metadata: {}
        };
      default:
        return {
          output: content,
          title,
          metadata: {}
        };
    }
  }
});
|
|
2156
|
+
// Strips an HTML document down to its visible plain text: removes
// non-content elements, drops all remaining tags, decodes the common HTML
// entities, and collapses whitespace.
// NOTE(review): the entity replacements in the shipped bundle appear
// entity-unescaped in transit (e.g. `.replace(/&/g, "&")`, a no-op); this
// restores the intended decoding. `&amp;` is decoded LAST so that input like
// "&amp;lt;" yields "&lt;" rather than being double-decoded to "<".
async function extractTextFromHTML(html) {
  // Remove elements whose contents are never visible text.
  let cleaned = html.replace(/<(script|style|noscript|iframe|object|embed)[^>]*>[\s\S]*?<\/\1>/gi, "");
  // Replace every remaining tag with a space so words don't fuse together.
  cleaned = cleaned.replace(/<[^>]+>/g, " ");
  // Decode common entities; &amp; must come last (see note above).
  cleaned = cleaned.replace(/&lt;/g, "<").replace(/&gt;/g, ">").replace(/&quot;/g, '"').replace(/&#39;/g, "'").replace(/&nbsp;/g, " ").replace(/&amp;/g, "&");
  // Collapse runs of whitespace into single spaces.
  cleaned = cleaned.replace(/\s+/g, " ");
  return cleaned.trim();
}
|
|
2163
|
+
// Converts an HTML document to markdown using Turndown, configured for
// ATX headings, fenced code blocks, "-" bullets and "*" emphasis.
// Non-content elements are removed before conversion.
function convertHTMLToMarkdown(html) {
  const converter = new TurndownService({
    headingStyle: "atx",
    hr: "---",
    bulletListMarker: "-",
    codeBlockStyle: "fenced",
    emDelimiter: "*"
  });
  // These elements carry no readable content.
  converter.remove(["script", "style", "meta", "link"]);
  return converter.turndown(html);
}
|
|
2174
|
+
var write_txt_default = `Writes a file to the local filesystem.
|
|
2175
|
+
|
|
2176
|
+
Usage:
|
|
2177
|
+
- This tool will overwrite the existing file if there is one at the provided path.
|
|
2178
|
+
- If this is an existing file, you MUST use the Read tool first to read the file's contents. This tool will fail if you did not read the file first.
|
|
2179
|
+
- ALWAYS prefer editing existing files in the codebase. NEVER write new files unless explicitly required.
|
|
2180
|
+
- NEVER proactively create documentation files (*.md) or README files. Only create documentation files if explicitly requested by the User.
|
|
2181
|
+
- Only use emojis if the user explicitly requests it. Avoid writing emojis to files unless asked.
|
|
2182
|
+
`;
|
|
2183
|
+
// Maximum LSP error diagnostics reported per individual file in write output.
var MAX_DIAGNOSTICS_PER_FILE2 = 20;
// Maximum number of *other* files whose diagnostics are appended after a write.
var MAX_PROJECT_DIAGNOSTICS_FILES = 5;
|
|
2185
|
+
// Tool "write": creates or overwrites a file, then reports any LSP error
// diagnostics triggered by the change (capped per file and per project).
var WriteTool = Tool.define("write", {
  description: write_txt_default,
  parameters: z13.object({
    content: z13.string().describe("The content to write to the file"),
    filePath: z13.string().describe("The absolute path to the file to write (must be absolute, not relative)")
  }),
  async execute(params, ctx) {
    // Relative paths are resolved against the tool's working directory.
    const filepath = isAbsolute(params.filePath) ? params.filePath : join(ctx.directory, params.filePath);
    await assertExternalDirectory(ctx, filepath);
    const exists = await ctx.fs.exists(filepath);
    const contentOld = exists ? await ctx.fs.readText(filepath) : "";
    // Enforce read-before-write: fails if the session never read this file
    // or the file changed on disk since it was read.
    if (exists) await ctx.fileTime.assert(ctx, ctx.sessionID, filepath);
    // Unified diff of old vs new content, shown in the permission prompt.
    const diff = trimDiff(createTwoFilesPatch2(filepath, filepath, contentOld, params.content));
    await ctx.ask({
      permission: "edit",
      patterns: [relative(ctx.worktree, filepath)],
      always: ["*"],
      metadata: {
        filepath,
        diff
      }
    });
    await ctx.fs.write(filepath, params.content);
    ctx.emit("file.edited", { file: filepath });
    // Record the write as a "read" so subsequent edits pass the freshness check.
    ctx.fileTime.read(ctx.sessionID, filepath);
    let output = "Wrote file successfully.";
    // Wait for LSP to pick up the change, then collect project diagnostics.
    await LSP.touchFile(filepath, true);
    const diagnostics = await LSP.diagnostics();
    const normalizedFilepath = filepath;
    let projectDiagnosticsCount = 0;
    for (const [file, issues] of Object.entries(diagnostics)) {
      // severity === 1 is "error"; warnings and hints are ignored here.
      const errors = issues.filter((item) => item.severity === 1);
      if (errors.length === 0) continue;
      const limited = errors.slice(0, MAX_DIAGNOSTICS_PER_FILE2);
      const suffix = errors.length > MAX_DIAGNOSTICS_PER_FILE2 ? `
... and ${errors.length - MAX_DIAGNOSTICS_PER_FILE2} more` : "";
      // Errors in the file just written are always reported, prominently.
      if (file === normalizedFilepath) {
        output += `

LSP errors detected in this file, please fix:
<diagnostics file="${filepath}">
${limited.map(LSP.Diagnostic.pretty).join("\n")}${suffix}
</diagnostics>`;
        continue;
      }
      // Errors elsewhere in the project are reported for at most
      // MAX_PROJECT_DIAGNOSTICS_FILES other files.
      if (projectDiagnosticsCount >= MAX_PROJECT_DIAGNOSTICS_FILES) continue;
      projectDiagnosticsCount++;
      output += `

LSP errors detected in other files:
<diagnostics file="${file}">
${limited.map(LSP.Diagnostic.pretty).join("\n")}${suffix}
</diagnostics>`;
    }
    return {
      title: relative(ctx.worktree, filepath),
      metadata: {
        diagnostics,
        filepath,
        exists
      },
      output
    };
  }
});
|
|
2250
|
+
// Sentinel tool recorded when the model emits a tool call whose arguments
// fail schema validation; it simply echoes the validation error back.
var InvalidTool = Tool.define("invalid", {
  description: "Do not use",
  parameters: z14.object({
    tool: z14.string(),
    error: z14.string()
  }),
  async execute(params) {
    const output = `The arguments provided to the tool are invalid: ${params.error}`;
    return {
      title: "Invalid Tool",
      output,
      metadata: {}
    };
  }
});
|
|
2264
|
+
// ConfigMarkdown: parsing helpers for markdown config files (agents, skills,
// commands) that use YAML frontmatter plus @file and !`shell` references.
var ConfigMarkdown;
((ConfigMarkdown2) => {
  // Matches @path references (e.g. "@src/main.ts"); the lookbehind avoids
  // emails and inline-code spans.
  ConfigMarkdown2.FILE_REGEX = /(?<![\w`])@(\.?[^\s`,.]*(?:\.[^\s`,.]+)*)/g;
  // Matches !`command` shell-substitution spans.
  ConfigMarkdown2.SHELL_REGEX = /!`([^`]+)`/g;
  // All @file reference matches found in a template string.
  function files(template) {
    return Array.from(template.matchAll(ConfigMarkdown2.FILE_REGEX));
  }
  ConfigMarkdown2.files = files;
  // All !`shell` substitution matches found in a template string.
  function shell(template) {
    return Array.from(template.matchAll(ConfigMarkdown2.SHELL_REGEX));
  }
  ConfigMarkdown2.shell = shell;
  // Best-effort repair of YAML frontmatter that fails strict parsing:
  // top-level scalar values containing ":" (e.g. "desc: use x: y") are
  // rewritten as block scalars (`key: |-`) so the YAML parser accepts them.
  // Comments, blank lines, indented lines, quoted and already-block values
  // pass through untouched.
  function fallbackSanitization(content) {
    const match = content.match(/^---\r?\n([\s\S]*?)\r?\n---/);
    if (!match) return content;
    const frontmatter = match[1];
    const lines = frontmatter.split(/\r?\n/);
    const result = [];
    for (const line of lines) {
      if (line.trim().startsWith("#") || line.trim() === "") {
        result.push(line);
        continue;
      }
      // Indented lines belong to a nested structure; leave them alone.
      if (line.match(/^\s+/)) {
        result.push(line);
        continue;
      }
      const kvMatch = line.match(/^([a-zA-Z_][a-zA-Z0-9_]*)\s*:\s*(.*)$/);
      if (!kvMatch) {
        result.push(line);
        continue;
      }
      const key = kvMatch[1];
      const value = kvMatch[2].trim();
      // Empty, block-scalar, or quoted values are already safe.
      if (value === "" || value === ">" || value === "|" || value.startsWith('"') || value.startsWith("'")) {
        result.push(line);
        continue;
      }
      // A bare value containing ":" is ambiguous YAML — force a block scalar.
      if (value.includes(":")) {
        result.push(`${key}: |-`);
        result.push(` ${value}`);
        continue;
      }
      result.push(line);
    }
    const processed = result.join("\n");
    // Function replacer avoids "$" patterns in `processed` being interpreted.
    return content.replace(frontmatter, () => processed);
  }
  ConfigMarkdown2.fallbackSanitization = fallbackSanitization;
  // Parses a markdown config file's frontmatter with gray-matter; on failure
  // retries once with the sanitized frontmatter, then throws FrontmatterError.
  async function parse(context, filePath) {
    const template = await Filesystem.readText(context, filePath);
    try {
      const md = matter(template);
      return md;
    } catch {
      try {
        return matter(fallbackSanitization(template));
      } catch (err) {
        throw new ConfigMarkdown2.FrontmatterError(
          {
            path: filePath,
            message: `${filePath}: Failed to parse YAML frontmatter: ${err instanceof Error ? err.message : String(err)}`
          },
          { cause: err }
        );
      }
    }
  }
  ConfigMarkdown2.parse = parse;
  // Structured error carrying the offending path and a human-readable message.
  ConfigMarkdown2.FrontmatterError = NamedError.create(
    "ConfigFrontmatterError",
    z15.object({
      path: z15.string(),
      message: z15.string()
    })
  );
})(ConfigMarkdown || (ConfigMarkdown = {}));
|
|
2341
|
+
// Discovery: downloads remote skill bundles described by an index.json into
// the local skills cache directory. All network failures are logged and
// reported as soft failures (false / empty result), never thrown.
var Discovery;
((Discovery2) => {
  const log2 = Log.create({ service: "skill-discovery" });
  // Local cache directory for downloaded skills.
  function dir(context) {
    return join(context.dataPath, "skills");
  }
  Discovery2.dir = dir;
  // Downloads `url` to `dest` unless it already exists. Returns true on
  // success or cache hit, false on any failure. NOTE(review): when
  // response.body is falsy nothing is written but true is still returned —
  // presumably intentional for empty bodies; confirm upstream.
  async function get(context, url, dest) {
    if (await Filesystem.exists(context, dest)) return true;
    return fetch(url).then(async (response) => {
      if (!response.ok) {
        log2.error("failed to download", { url, status: response.status });
        return false;
      }
      if (response.body) {
        const bytes = new Uint8Array(await response.arrayBuffer());
        await Filesystem.mkdir(context, dirname(dest));
        await Filesystem.write(context, dest, bytes);
      }
      return true;
    }).catch((err) => {
      log2.error("failed to download", { url, err });
      return false;
    });
  }
  // Fetches `<url>/index.json`, validates its { skills: [{name, files}] }
  // shape, downloads every listed file in parallel, and returns the roots of
  // skills whose SKILL.md landed on disk.
  async function pull(context, url) {
    const result = [];
    const base = url.endsWith("/") ? url : `${url}/`;
    const index = new URL("index.json", base).href;
    const cache = dir(context);
    const host = base.slice(0, -1);
    log2.info("fetching index", { url: index });
    const data = await fetch(index).then(async (response) => {
      if (!response.ok) {
        log2.error("failed to fetch index", { url: index, status: response.status });
        return void 0;
      }
      return response.json().then((json) => json).catch((err) => {
        log2.error("failed to parse index", { url: index, err });
        return void 0;
      });
    }).catch((err) => {
      log2.error("failed to fetch index", { url: index, err });
      return void 0;
    });
    if (!data?.skills || !Array.isArray(data.skills)) {
      log2.warn("invalid index format", { url: index });
      return result;
    }
    // Drop malformed entries instead of failing the whole pull.
    const list = data.skills.filter((skill) => {
      if (!skill?.name || !Array.isArray(skill.files)) {
        log2.warn("invalid skill entry", { url: index, skill });
        return false;
      }
      return true;
    });
    await Promise.all(
      list.map(async (skill) => {
        const root = join(cache, skill.name);
        await Promise.all(
          skill.files.map(async (file) => {
            const link = new URL(file, `${host}/${skill.name}/`).href;
            const dest = join(root, file);
            await Filesystem.mkdir(context, dirname(dest));
            await get(context, link, dest);
          })
        );
        // A skill only counts if its manifest file made it to disk.
        const md = join(root, "SKILL.md");
        if (await Filesystem.exists(context, md)) result.push(root);
      })
    );
    return result;
  }
  Discovery2.pull = pull;
})(Discovery || (Discovery = {}));
|
|
2416
|
+
// Skill: discovery, registry and formatting of SKILL.md-based skills.
// Skills are gathered from external agent dirs (.claude/.agents/.opencode),
// from configured local paths, and from configured remote URLs.
var Skill;
((Skill2) => {
  const log2 = Log.create({ service: "skill" });
  // Shape of a loaded skill record.
  Skill2.Info = z16.object({
    name: z16.string(),
    description: z16.string(),
    location: z16.string(),
    content: z16.string()
  });
  Skill2.InvalidError = NamedError.create(
    "SkillInvalidError",
    z16.object({
      path: z16.string(),
      message: z16.string().optional(),
      issues: z16.custom().optional()
    })
  );
  Skill2.NameMismatchError = NamedError.create(
    "SkillNameMismatchError",
    z16.object({
      path: z16.string(),
      expected: z16.string(),
      actual: z16.string()
    })
  );
  // Directories of third-party/compatible agent ecosystems scanned for skills.
  const EXTERNAL_DIRS = [".claude", ".agents", ".opencode"];
  const EXTERNAL_SKILL_PATTERN = "skills/**/SKILL.md";
  // NOTE(review): OPENCODE_SKILL_PATTERN is declared but not referenced in
  // this block — possibly used by code outside this view or dead after a refactor.
  const OPENCODE_SKILL_PATTERN = "{skill,skills}/**/SKILL.md";
  const SKILL_PATTERN = "**/SKILL.md";
  // Lazy async registry: discovery kicks off in the constructor and all
  // accessors await the same shared promise.
  class SkillService {
    _promise;
    constructor(context) {
      this._promise = initSkills(context);
    }
    // Lookup by skill name; undefined if unknown.
    async get(name) {
      return (await this._promise).skills[name];
    }
    async all() {
      return Object.values((await this._promise).skills);
    }
    async dirs() {
      return (await this._promise).dirs;
    }
    // `agent` is currently ignored — every agent sees all skills.
    async available(agent) {
      return this.all();
    }
  }
  Skill2.SkillService = SkillService;
  // Scans all configured sources and builds the name -> Info map plus the set
  // of directories skills were loaded from. Later discoveries overwrite
  // earlier ones on name collision (a warning is logged first).
  async function initSkills(context) {
    const skills = {};
    const dirs = /* @__PURE__ */ new Set();
    const addSkill = async (match) => {
      const md = await ConfigMarkdown.parse(context, match).catch((err) => {
        log2.error("failed to load skill", { skill: match, err });
        return void 0;
      });
      if (!md) return;
      // Only name + description are required from frontmatter; anything
      // failing validation is silently skipped.
      const parsed = Skill2.Info.pick({ name: true, description: true }).safeParse(md.data);
      if (!parsed.success) return;
      if (skills[parsed.data.name]) {
        log2.warn("duplicate skill name", {
          name: parsed.data.name,
          existing: skills[parsed.data.name].location,
          duplicate: match
        });
      }
      dirs.add(dirname(match));
      skills[parsed.data.name] = {
        name: parsed.data.name,
        description: parsed.data.description,
        location: match,
        content: md.content
      };
    };
    // Scan one external root (e.g. a .claude dir); scan failures are logged,
    // not thrown.
    const scanExternal = async (root, scope) => {
      return Glob.scan(context, EXTERNAL_SKILL_PATTERN, {
        cwd: root,
        absolute: true,
        include: "file",
        dot: true,
        symlink: true
      }).then((matches) => Promise.all(matches.map(addSkill))).catch((error) => {
        log2.error(`failed to scan ${scope} skills`, { dir: root, error });
      });
    };
    // Walk from cwd up to the worktree root looking for external agent dirs.
    if (!Flag.OPENCODE_DISABLE_EXTERNAL_SKILLS) {
      for await (const root of Filesystem.up(context, {
        targets: EXTERNAL_DIRS,
        start: context.directory,
        stop: context.worktree
      })) {
        await scanExternal(root, "project");
      }
    }
    const config = context.config;
    // Explicitly configured local skill directories.
    for (const skillPath of config.skills?.paths ?? []) {
      const resolved = isAbsolute(skillPath) ? skillPath : join(context.directory, skillPath);
      if (!await Filesystem.isDir(context, resolved)) {
        log2.warn("skill path not found", { path: resolved });
        continue;
      }
      const matches = await Glob.scan(context, SKILL_PATTERN, {
        cwd: resolved,
        absolute: true,
        include: "file",
        symlink: true
      });
      for (const match of matches) {
        await addSkill(match);
      }
    }
    // Remote skill bundles: download via Discovery, then scan the cache dirs.
    for (const url of config.skills?.urls ?? []) {
      const list = await Discovery.pull(context, url);
      for (const dir of list) {
        dirs.add(dir);
        const matches = await Glob.scan(context, SKILL_PATTERN, {
          cwd: dir,
          absolute: true,
          include: "file",
          symlink: true
        });
        for (const match of matches) {
          await addSkill(match);
        }
      }
    }
    return {
      skills,
      dirs: Array.from(dirs)
    };
  }
  // Renders a skill list for prompt injection: XML-ish blocks when verbose,
  // a markdown bullet list otherwise.
  function fmt(list, opts) {
    if (list.length === 0) {
      return "No skills are currently available.";
    }
    if (opts.verbose) {
      return [
        "<available_skills>",
        ...list.flatMap((skill) => [
          ` <skill>`,
          ` <name>${skill.name}</name>`,
          ` <description>${skill.description}</description>`,
          ` <location>${pathToFileURL(skill.location).href}</location>`,
          ` </skill>`
        ]),
        "</available_skills>"
      ].join("\n");
    }
    return ["## Available Skills", ...list.flatMap((skill) => `- **${skill.name}**: ${skill.description}`)].join("\n");
  }
  Skill2.fmt = fmt;
})(Skill || (Skill = {}));
|
|
2568
|
+
// SkillTool: lets the model load a named skill's full SKILL.md content into
// the conversation. The tool description itself embeds the current skill
// catalog so the model knows what is loadable.
var SkillTool = Tool.define("skill", async (ctx) => {
  const list = await ctx?.agentContext?.skill.available(ctx?.agent) ?? [];
  const description = list.length === 0 ? "Load a specialized skill that provides domain-specific instructions and workflows. No skills are currently available." : [
    "Load a specialized skill that provides domain-specific instructions and workflows.",
    "",
    "When you recognize that a task matches one of the available skills listed below, use this tool to load the full skill instructions.",
    "",
    "The skill will inject detailed instructions, workflows, and access to bundled resources (scripts, references, templates) into the conversation context.",
    "",
    'Tool output includes a `<skill_content name="...">` block with the loaded content.',
    "",
    "The following skills provide specialized sets of instructions for particular tasks",
    "Invoke this tool to load a skill when a task matches one of the available skills listed below:",
    "",
    Skill.fmt(list, { verbose: false })
  ].join("\n");
  // Show up to three example names in the parameter description.
  const examples = list.map((skill) => `'${skill.name}'`).slice(0, 3).join(", ");
  const hint = examples.length > 0 ? ` (e.g., ${examples}, ...)` : "";
  const parameters = z17.object({
    name: z17.string().describe(`The name of the skill from available_skills${hint}`)
  });
  return {
    description,
    parameters,
    async execute(params, ctx2) {
      const skill = await ctx2.skill.get(params.name);
      if (!skill) {
        // Unknown name: surface the valid choices in the error message.
        const available = await ctx2.skill.all().then((x) => x.map((skill2) => skill2.name).join(", "));
        throw new Error(`Skill "${params.name}" not found. Available skills: ${available || "none"}`);
      }
      // Gate behind the "skill" permission; "always" lets the user allow
      // this specific skill permanently.
      await ctx2.ask({
        permission: "skill",
        patterns: [params.name],
        always: [params.name],
        metadata: {}
      });
      const dir = dirname(skill.location);
      const base = pathToFileURL2(dir).href;
      // Cap the bundled-file sample included in the output.
      const limit = 10;
      const files = await iife(async () => {
        if (!ctx2.search) throw new Error("Search provider is required for the skill tool");
        const filePaths = await ctx2.search.listFiles({
          cwd: dir,
          follow: false,
          hidden: true,
          limit: limit + 1,
          // Extra to filter out SKILL.md
          signal: ctx2.abort
        });
        const arr = [];
        for (const file of filePaths) {
          // The manifest itself is not a bundled resource.
          if (file.includes("SKILL.md")) {
            continue;
          }
          arr.push(resolve(dir, file));
          if (arr.length >= limit) {
            break;
          }
        }
        return arr;
      }).then((f) => f.map((file) => `<file>${file}</file>`).join("\n"));
      return {
        title: `Loaded skill: ${skill.name}`,
        output: [
          `<skill_content name="${skill.name}">`,
          `# Skill: ${skill.name}`,
          "",
          skill.content.trim(),
          "",
          `Base directory for this skill: ${base}`,
          "Relative paths in this skill (e.g., scripts/, reference/) are relative to this base directory.",
          "Note: file list is sampled.",
          "",
          "<skill_files>",
          files,
          "</skill_files>",
          "</skill_content>"
        ].join("\n"),
        metadata: {
          name: skill.name,
          dir
        }
      };
    }
  };
});
|
|
2654
|
+
var websearch_txt_default = `- Search the web using Exa AI - performs real-time web searches and can scrape content from specific URLs
|
|
2655
|
+
- Provides up-to-date information for current events and recent data
|
|
2656
|
+
- Supports configurable result counts and returns the content from the most relevant websites
|
|
2657
|
+
- Use this tool for accessing information beyond knowledge cutoff
|
|
2658
|
+
- Searches are performed automatically within a single API call
|
|
2659
|
+
|
|
2660
|
+
Usage notes:
|
|
2661
|
+
- Supports live crawling modes: 'fallback' (backup if cached unavailable) or 'preferred' (prioritize live crawling)
|
|
2662
|
+
- Search types: 'auto' (balanced), 'fast' (quick results), 'deep' (comprehensive search)
|
|
2663
|
+
- Configurable context length for optimal LLM integration
|
|
2664
|
+
- Domain filtering and advanced search options available
|
|
2665
|
+
|
|
2666
|
+
The current year is {{year}}. You MUST use this year when searching for recent information or current events
|
|
2667
|
+
- Example: If the current year is 2026 and the user asks for "latest AI news", search for "AI news 2026", NOT "AI news 2025"
|
|
2668
|
+
`;
|
|
2669
|
+
// Endpoint configuration for the Exa MCP web-search backend used by
// WebSearchTool (JSON-RPC over HTTP at BASE_URL + ENDPOINTS.SEARCH).
var API_CONFIG = {
  BASE_URL: "https://mcp.exa.ai",
  ENDPOINTS: {
    SEARCH: "/mcp"
  },
  // Result count used when the caller does not pass numResults.
  DEFAULT_NUM_RESULTS: 8
};
|
|
2676
|
+
// WebSearchTool: real-time web search via the Exa MCP endpoint. Sends a
// JSON-RPC "tools/call" request and parses the SSE-style "data: " lines of
// the response for the first content payload.
var WebSearchTool = Tool.define("websearch", async () => {
  return {
    // Getter so the embedded {{year}} is substituted at read time.
    get description() {
      return websearch_txt_default.replace("{{year}}", (/* @__PURE__ */ new Date()).getFullYear().toString());
    },
    parameters: z18.object({
      query: z18.string().describe("Websearch query"),
      numResults: z18.number().optional().describe("Number of search results to return (default: 8)"),
      livecrawl: z18.enum(["fallback", "preferred"]).optional().describe(
        "Live crawl mode - 'fallback': use live crawling as backup if cached content unavailable, 'preferred': prioritize live crawling (default: 'fallback')"
      ),
      type: z18.enum(["auto", "fast", "deep"]).optional().describe(
        "Search type - 'auto': balanced search (default), 'fast': quick results, 'deep': comprehensive search"
      ),
      contextMaxCharacters: z18.number().optional().describe("Maximum characters for context string optimized for LLMs (default: 10000)")
    }),
    async execute(params, ctx) {
      // Permission gate; "*" in always lets the user allow all searches.
      await ctx.ask({
        permission: "websearch",
        patterns: [params.query],
        always: ["*"],
        metadata: {
          query: params.query,
          numResults: params.numResults,
          livecrawl: params.livecrawl,
          type: params.type,
          contextMaxCharacters: params.contextMaxCharacters
        }
      });
      const searchRequest = {
        jsonrpc: "2.0",
        id: 1,
        method: "tools/call",
        params: {
          name: "web_search_exa",
          arguments: {
            query: params.query,
            type: params.type || "auto",
            numResults: params.numResults || API_CONFIG.DEFAULT_NUM_RESULTS,
            livecrawl: params.livecrawl || "fallback",
            contextMaxCharacters: params.contextMaxCharacters
          }
        }
      };
      // 25s timeout, also aborted if the tool call itself is cancelled.
      const { signal, clearTimeout: clearTimeout2 } = abortAfterAny(25e3, ctx.abort);
      try {
        const headers = {
          accept: "application/json, text/event-stream",
          "content-type": "application/json"
        };
        const response = await fetch(`${API_CONFIG.BASE_URL}${API_CONFIG.ENDPOINTS.SEARCH}`, {
          method: "POST",
          headers,
          body: JSON.stringify(searchRequest),
          signal
        });
        clearTimeout2();
        if (!response.ok) {
          const errorText = await response.text();
          throw new Error(`Search error (${response.status}): ${errorText}`);
        }
        // Parse SSE framing: first "data: " line with a non-empty content
        // array wins.
        const responseText = await response.text();
        const lines = responseText.split("\n");
        for (const line of lines) {
          if (line.startsWith("data: ")) {
            const data = JSON.parse(line.substring(6));
            if (data.result && data.result.content && data.result.content.length > 0) {
              return {
                output: data.result.content[0].text,
                title: `Web search: ${params.query}`,
                metadata: {}
              };
            }
          }
        }
        return {
          output: "No search results found. Please try a different query.",
          title: `Web search: ${params.query}`,
          metadata: {}
        };
      } catch (error) {
        // Safe to call again if already cleared in the try path.
        clearTimeout2();
        if (error instanceof Error && error.name === "AbortError") {
          throw new Error("Search request timed out");
        }
        throw error;
      }
    }
  };
});
|
|
2766
|
+
var codesearch_txt_default = `- Search and get relevant context for any programming task using Exa Code API
|
|
2767
|
+
- Provides the highest quality and freshest context for libraries, SDKs, and APIs
|
|
2768
|
+
- Use this tool for ANY question or task related to programming
|
|
2769
|
+
- Returns comprehensive code examples, documentation, and API references
|
|
2770
|
+
- Optimized for finding specific programming patterns and solutions
|
|
2771
|
+
|
|
2772
|
+
Usage notes:
|
|
2773
|
+
- Adjustable token count (1000-50000) for focused or comprehensive results
|
|
2774
|
+
- Default 5000 tokens provides balanced context for most queries
|
|
2775
|
+
- Use lower values for specific questions, higher values for comprehensive documentation
|
|
2776
|
+
- Supports queries about frameworks, libraries, APIs, and programming concepts
|
|
2777
|
+
- Examples: 'React useState hook examples', 'Python pandas dataframe filtering', 'Express.js middleware'
|
|
2778
|
+
`;
|
|
2779
|
+
// Endpoint configuration for the Exa MCP code-context backend used by
// CodeSearchTool (same host as web search, different logical endpoint name).
var API_CONFIG2 = {
  BASE_URL: "https://mcp.exa.ai",
  ENDPOINTS: {
    CONTEXT: "/mcp"
  }
};
|
|
2785
|
+
// CodeSearchTool: programming-context search via Exa's get_code_context_exa
// MCP tool. Mirrors WebSearchTool's request/SSE-parsing flow with a 30s
// timeout and a token budget instead of result count.
var CodeSearchTool = Tool.define("codesearch", {
  description: codesearch_txt_default,
  parameters: z19.object({
    query: z19.string().describe(
      "Search query to find relevant context for APIs, Libraries, and SDKs. For example, 'React useState hook examples', 'Python pandas dataframe filtering', 'Express.js middleware', 'Next js partial prerendering configuration'"
    ),
    tokensNum: z19.number().min(1e3).max(5e4).default(5e3).describe(
      "Number of tokens to return (1000-50000). Default is 5000 tokens. Adjust this value based on how much context you need - use lower values for focused queries and higher values for comprehensive documentation."
    )
  }),
  async execute(params, ctx) {
    // Permission gate; "*" in always lets the user allow all code searches.
    await ctx.ask({
      permission: "codesearch",
      patterns: [params.query],
      always: ["*"],
      metadata: {
        query: params.query,
        tokensNum: params.tokensNum
      }
    });
    const codeRequest = {
      jsonrpc: "2.0",
      id: 1,
      method: "tools/call",
      params: {
        name: "get_code_context_exa",
        arguments: {
          query: params.query,
          // Schema default already applies 5000; the || is a belt-and-braces
          // fallback (note it would also replace an explicit 0, which the
          // min(1000) bound makes unreachable).
          tokensNum: params.tokensNum || 5e3
        }
      }
    };
    // 30s timeout, also aborted on tool-call cancellation.
    const { signal, clearTimeout: clearTimeout2 } = abortAfterAny(3e4, ctx.abort);
    try {
      const headers = {
        accept: "application/json, text/event-stream",
        "content-type": "application/json"
      };
      const response = await fetch(`${API_CONFIG2.BASE_URL}${API_CONFIG2.ENDPOINTS.CONTEXT}`, {
        method: "POST",
        headers,
        body: JSON.stringify(codeRequest),
        signal
      });
      clearTimeout2();
      if (!response.ok) {
        const errorText = await response.text();
        throw new Error(`Code search error (${response.status}): ${errorText}`);
      }
      // First "data: " SSE line with non-empty content wins.
      const responseText = await response.text();
      const lines = responseText.split("\n");
      for (const line of lines) {
        if (line.startsWith("data: ")) {
          const data = JSON.parse(line.substring(6));
          if (data.result && data.result.content && data.result.content.length > 0) {
            return {
              output: data.result.content[0].text,
              title: `Code search: ${params.query}`,
              metadata: {}
            };
          }
        }
      }
      return {
        output: "No code snippets or documentation found. Please try a different query, be more specific about the library or programming concept, or check the spelling of framework names.",
        title: `Code search: ${params.query}`,
        metadata: {}
      };
    } catch (error) {
      // Safe to call again if already cleared in the try path.
      clearTimeout2();
      if (error instanceof Error && error.name === "AbortError") {
        throw new Error("Code search request timed out");
      }
      throw error;
    }
  }
});
|
|
2862
|
+
// Patch: parser/applier for the "*** Begin Patch / *** End Patch" apply_patch
// format (Codex-style patches). This prologue declares the tool schema and
// the string-enum result/error kinds; the parsing functions follow below.
var Patch;
((Patch2) => {
  const log2 = Log.create({ service: "patch" });
  Patch2.PatchSchema = z20.object({
    patchText: z20.string().describe("The full patch text that describes all changes to be made")
  });
  // Error categories for patch application (transpiled TS string enum).
  let ApplyPatchError;
  ((ApplyPatchError2) => {
    ApplyPatchError2["ParseError"] = "ParseError";
    ApplyPatchError2["IoError"] = "IoError";
    ApplyPatchError2["ComputeReplacements"] = "ComputeReplacements";
    ApplyPatchError2["ImplicitInvocation"] = "ImplicitInvocation";
  })(ApplyPatchError = Patch2.ApplyPatchError || (Patch2.ApplyPatchError = {}));
  // Result kinds of maybeParseApplyPatch (pre-verification).
  let MaybeApplyPatch;
  ((MaybeApplyPatch2) => {
    MaybeApplyPatch2["Body"] = "Body";
    MaybeApplyPatch2["ShellParseError"] = "ShellParseError";
    MaybeApplyPatch2["PatchParseError"] = "PatchParseError";
    MaybeApplyPatch2["NotApplyPatch"] = "NotApplyPatch";
  })(MaybeApplyPatch = Patch2.MaybeApplyPatch || (Patch2.MaybeApplyPatch = {}));
  // Result kinds after verifying the patch against the filesystem.
  let MaybeApplyPatchVerified;
  ((MaybeApplyPatchVerified2) => {
    MaybeApplyPatchVerified2["Body"] = "Body";
    MaybeApplyPatchVerified2["ShellParseError"] = "ShellParseError";
    MaybeApplyPatchVerified2["CorrectnessError"] = "CorrectnessError";
    MaybeApplyPatchVerified2["NotApplyPatch"] = "NotApplyPatch";
  })(MaybeApplyPatchVerified = Patch2.MaybeApplyPatchVerified || (Patch2.MaybeApplyPatchVerified = {}));
|
|
2889
|
+
// Parses a single "*** <Add|Delete|Update> File: <path>" header at
// lines[startIdx]. Returns { filePath, nextIdx } (plus movePath for
// Update headers followed by "*** Move to:"), or null when the line is
// not a header or carries an empty path.
function parsePatchHeader(lines, startIdx) {
  const ADD_PREFIX = "*** Add File:";
  const DELETE_PREFIX = "*** Delete File:";
  const UPDATE_PREFIX = "*** Update File:";
  const MOVE_PREFIX = "*** Move to:";
  const header = lines[startIdx];
  // Add and Delete share the same single-line shape.
  for (const prefix of [ADD_PREFIX, DELETE_PREFIX]) {
    if (header.startsWith(prefix)) {
      const filePath = header.slice(prefix.length).trim();
      return filePath ? { filePath, nextIdx: startIdx + 1 } : null;
    }
  }
  if (header.startsWith(UPDATE_PREFIX)) {
    const filePath = header.slice(UPDATE_PREFIX.length).trim();
    let movePath;
    let nextIdx = startIdx + 1;
    // An Update header may be immediately followed by a rename directive.
    if (nextIdx < lines.length && lines[nextIdx].startsWith(MOVE_PREFIX)) {
      movePath = lines[nextIdx].slice(MOVE_PREFIX.length).trim();
      nextIdx += 1;
    }
    return filePath ? { filePath, movePath, nextIdx } : null;
  }
  return null;
}
|
|
2911
|
+
// Collects "@@"-delimited change chunks for an Update File section,
// stopping at the next "***" header line. Each chunk records the lines the
// context/removals imply must exist (old_lines) and the lines that replace
// them (new_lines).
function parseUpdateFileChunks(lines, startIdx) {
  const chunks = [];
  let cursor = startIdx;
  while (cursor < lines.length && !lines[cursor].startsWith("***")) {
    if (!lines[cursor].startsWith("@@")) {
      // Stray line between chunks: skip it.
      cursor += 1;
      continue;
    }
    const context = lines[cursor].substring(2).trim();
    cursor += 1;
    const removed = [];
    const added = [];
    let sawEof = false;
    while (cursor < lines.length && !lines[cursor].startsWith("@@") && !lines[cursor].startsWith("***")) {
      const row = lines[cursor];
      // NOTE(review): unreachable — the loop guard above already stops on
      // any "***" line, so this branch never fires; kept to match the
      // original control flow exactly.
      if (row === "*** End of File") {
        sawEof = true;
        cursor += 1;
        break;
      }
      if (row.startsWith(" ")) {
        // Context line: present in both old and new content.
        const text = row.substring(1);
        removed.push(text);
        added.push(text);
      } else if (row.startsWith("-")) {
        removed.push(row.substring(1));
      } else if (row.startsWith("+")) {
        added.push(row.substring(1));
      }
      // Lines with any other prefix are ignored.
      cursor += 1;
    }
    chunks.push({
      old_lines: removed,
      new_lines: added,
      change_context: context || void 0,
      is_end_of_file: sawEof || void 0
    });
  }
  return { chunks, nextIdx: cursor };
}
|
|
2951
|
+
// Gathers the body of an Add File section: every "+"-prefixed line (prefix
// stripped) up to the next "***" header, joined with newlines and without a
// trailing newline. Non-"+" lines inside the section are ignored.
function parseAddFileContent(lines, startIdx) {
  const added = [];
  let cursor = startIdx;
  while (cursor < lines.length && !lines[cursor].startsWith("***")) {
    if (lines[cursor].startsWith("+")) {
      added.push(lines[cursor].substring(1));
    }
    cursor++;
  }
  // join() yields "" for no lines and never appends a trailing "\n",
  // matching the original accumulate-then-strip behavior.
  return { content: added.join("\n"), nextIdx: cursor };
}
|
|
2965
|
+
// Unwraps an optional shell heredoc wrapper (e.g. "cat <<'EOF' ... EOF")
// around a patch body, returning just the inner text; any input that does
// not match the heredoc shape is returned unchanged.
function stripHeredoc(input) {
  const wrapper = /^(?:cat\s+)?<<['"]?(\w+)['"]?\s*\n([\s\S]*?)\n\1\s*$/.exec(input);
  return wrapper === null ? input : wrapper[2];
}
|
|
2972
|
+
// Parses a full "*** Begin Patch ... *** End Patch" document into hunks:
// { type: "add", path, contents } | { type: "delete", path } |
// { type: "update", path, move_path?, chunks }. A heredoc wrapper is
// stripped first; content outside the Begin/End markers is ignored.
// Throws on missing or out-of-order markers.
function parsePatch(patchText) {
  const cleaned = stripHeredoc(patchText.trim());
  const lines = cleaned.split("\n");
  const hunks = [];
  let i = 0;
  const beginMarker = "*** Begin Patch";
  const endMarker = "*** End Patch";
  const beginIdx = lines.findIndex((line) => line.trim() === beginMarker);
  const endIdx = lines.findIndex((line) => line.trim() === endMarker);
  if (beginIdx === -1 || endIdx === -1 || beginIdx >= endIdx) {
    throw new Error("Invalid patch format: missing Begin/End markers");
  }
  i = beginIdx + 1;
  while (i < endIdx) {
    const header = parsePatchHeader(lines, i);
    if (!header) {
      // Not a recognizable header: skip the line rather than failing.
      i++;
      continue;
    }
    if (lines[i].startsWith("*** Add File:")) {
      const { content, nextIdx } = parseAddFileContent(lines, header.nextIdx);
      hunks.push({
        type: "add",
        path: header.filePath,
        contents: content
      });
      i = nextIdx;
    } else if (lines[i].startsWith("*** Delete File:")) {
      hunks.push({
        type: "delete",
        path: header.filePath
      });
      i = header.nextIdx;
    } else if (lines[i].startsWith("*** Update File:")) {
      const { chunks, nextIdx } = parseUpdateFileChunks(lines, header.nextIdx);
      hunks.push({
        type: "update",
        path: header.filePath,
        move_path: header.movePath,
        chunks
      });
      i = nextIdx;
    } else {
      i++;
    }
  }
  return { hunks };
}
Patch2.parsePatch = parsePatch;
|
|
3021
|
+
// Inspects a command argv to decide whether it is an apply_patch invocation.
// Recognizes two shapes: ["apply_patch"|"applypatch", <patch>] and
// ["bash", "-lc", "apply_patch <<'EOF' ... EOF"]. Returns a tagged result:
// "Body" with the parsed hunks, "PatchParseError" when the patch text is
// malformed, or "NotApplyPatch" for anything else.
function maybeParseApplyPatch(argv) {
  const APPLY_PATCH_COMMANDS = ["apply_patch", "applypatch"];
  if (argv.length === 2 && APPLY_PATCH_COMMANDS.includes(argv[0])) {
    try {
      const { hunks } = parsePatch(argv[1]);
      return {
        type: "Body",
        args: {
          patch: argv[1],
          hunks
        }
      };
    } catch (error) {
      return {
        type: "PatchParseError",
        error
      };
    }
  }
  if (argv.length === 3 && argv[0] === "bash" && argv[1] === "-lc") {
    const script = argv[2];
    // Extract the heredoc body fed to apply_patch inside the shell script;
    // \1 back-reference requires the closing delimiter to match the opener.
    const heredocMatch = script.match(/apply_patch\s*<<['"](\w+)['"]\s*\n([\s\S]*?)\n\1/);
    if (heredocMatch) {
      const patchContent = heredocMatch[2];
      try {
        const { hunks } = parsePatch(patchContent);
        return {
          type: "Body",
          args: {
            patch: patchContent,
            hunks
          }
        };
      } catch (error) {
        return {
          type: "PatchParseError",
          error
        };
      }
    }
  }
  return {
    type: "NotApplyPatch"
    /* NotApplyPatch */
  };
}
Patch2.maybeParseApplyPatch = maybeParseApplyPatch;
|
|
3068
|
+
// Reads `filePath`, applies the update `chunks` to its contents, and returns
// both the resulting text and a unified diff against the original.
// Throws when the file cannot be read or a chunk cannot be located.
async function deriveNewContentsFromChunks(context, filePath, chunks) {
  let before;
  try {
    before = await Filesystem.readText(context, filePath);
  } catch (error) {
    throw new Error(`Failed to read file ${filePath}: ${error}`);
  }
  // Work on a line array without the trailing empty element produced by a
  // final newline, so chunk matching is not confused by it.
  const sourceLines = before.split("\n");
  if (sourceLines.length > 0 && sourceLines[sourceLines.length - 1] === "") {
    sourceLines.pop();
  }
  const replacements = computeReplacements(sourceLines, filePath, chunks);
  const patchedLines = applyReplacements(sourceLines, replacements);
  // Guarantee the output ends with a newline.
  if (patchedLines.length === 0 || patchedLines[patchedLines.length - 1] !== "") {
    patchedLines.push("");
  }
  const after = patchedLines.join("\n");
  return {
    unified_diff: generateUnifiedDiff(before, after),
    content: after
  };
}
Patch2.deriveNewContentsFromChunks = deriveNewContentsFromChunks;
|
|
3092
|
+
// Translates update chunks into concrete [startIndex, deleteCount, newLines]
// replacement triples against `originalLines`. Matching advances a cursor
// (`lineIndex`) so chunks are located in file order; throws when a chunk's
// context or old lines cannot be found.
function computeReplacements(originalLines, filePath, chunks) {
  const replacements = [];
  let lineIndex = 0;
  for (const chunk of chunks) {
    // Optional "@@ <context>" anchor: locate it first, then match after it.
    if (chunk.change_context) {
      const contextIdx = seekSequence(originalLines, [chunk.change_context], lineIndex);
      if (contextIdx === -1) {
        throw new Error(`Failed to find context '${chunk.change_context}' in ${filePath}`);
      }
      lineIndex = contextIdx + 1;
    }
    // A chunk with no old lines is a pure insertion appended at end of file
    // (before the trailing blank element, if one exists).
    if (chunk.old_lines.length === 0) {
      const insertionIdx = originalLines.length > 0 && originalLines[originalLines.length - 1] === "" ? originalLines.length - 1 : originalLines.length;
      replacements.push([insertionIdx, 0, chunk.new_lines]);
      continue;
    }
    let pattern = chunk.old_lines;
    let newSlice = chunk.new_lines;
    let found = seekSequence(originalLines, pattern, lineIndex, chunk.is_end_of_file);
    // Retry without a trailing empty line: patches often carry an extra blank
    // line that the target file does not actually contain.
    if (found === -1 && pattern.length > 0 && pattern[pattern.length - 1] === "") {
      pattern = pattern.slice(0, -1);
      if (newSlice.length > 0 && newSlice[newSlice.length - 1] === "") {
        newSlice = newSlice.slice(0, -1);
      }
      found = seekSequence(originalLines, pattern, lineIndex, chunk.is_end_of_file);
    }
    if (found !== -1) {
      replacements.push([found, pattern.length, newSlice]);
      // Continue matching after the consumed span so identical chunks bind
      // to successive occurrences.
      lineIndex = found + pattern.length;
    } else {
      throw new Error(`Failed to find expected lines in ${filePath}:
${chunk.old_lines.join("\n")}`);
    }
  }
  // applyReplacements walks this list back-to-front, so keep it sorted by
  // start index.
  replacements.sort((a, b) => a[0] - b[0]);
  return replacements;
}
|
|
3129
|
+
// Applies [startIndex, deleteCount, newLines] replacement triples to `lines`
// and returns a new array; the input array is never mutated. Triples are
// processed from last to first so earlier indices remain valid while later
// spans are rewritten.
function applyReplacements(lines, replacements) {
  let result = [...lines];
  for (let i = replacements.length - 1; i >= 0; i--) {
    const [startIdx, oldLen, newSegment] = replacements[i];
    // Single slice-and-concat instead of the previous one-element-at-a-time
    // splice loop: clearer, linear per replacement, and immune to spread
    // argument-count limits on very large segments.
    result = [
      ...result.slice(0, startIdx),
      ...newSegment,
      ...result.slice(startIdx + oldLen)
    ];
  }
  return result;
}
|
|
3140
|
+
// Maps common typographic Unicode characters to their ASCII equivalents so
// that patch lines pasted from rich-text sources still match file contents.
function normalizeUnicode(str) {
  const substitutions = [
    [/[\u2018\u2019\u201A\u201B]/g, "'"], // curly single quotes
    [/[\u201C\u201D\u201E\u201F]/g, '"'], // curly double quotes
    [/[\u2010\u2011\u2012\u2013\u2014\u2015]/g, "-"], // hyphen/dash variants
    [/\u2026/g, "..."], // horizontal ellipsis
    [/\u00A0/g, " "] // non-breaking space
  ];
  let out = str;
  for (const [pattern, replacement] of substitutions) {
    out = out.replace(pattern, replacement);
  }
  return out;
}
|
|
3143
|
+
// Searches `lines` for `pattern` (compared element-wise with `compare`)
// starting at `startIndex`. When `eof` is set, an end-anchored position is
// attempted first so end-of-file chunks bind to the tail of the file.
// Returns the start index of the match, or -1 when none is found.
function tryMatch(lines, pattern, startIndex, compare, eof) {
  const matchesAt = (offset) => {
    for (let j = 0; j < pattern.length; j++) {
      if (!compare(lines[offset + j], pattern[j])) {
        return false;
      }
    }
    return true;
  };
  if (eof) {
    const tailStart = lines.length - pattern.length;
    if (tailStart >= startIndex && matchesAt(tailStart)) {
      return tailStart;
    }
  }
  for (let i = startIndex; i <= lines.length - pattern.length; i++) {
    if (matchesAt(i)) {
      return i;
    }
  }
  return -1;
}
|
|
3169
|
+
// Locates `pattern` in `lines` starting at `startIndex`, relaxing the
// comparison in stages: exact match, right-trimmed, fully trimmed, then
// trimmed with typographic Unicode normalized to ASCII. Returns the index
// of the first successful stage's match, or -1 when every stage fails.
function seekSequence(lines, pattern, startIndex, eof = false) {
  if (pattern.length === 0) {
    return -1;
  }
  const comparators = [
    (a, b) => a === b,
    (a, b) => a.trimEnd() === b.trimEnd(),
    (a, b) => a.trim() === b.trim(),
    (a, b) => normalizeUnicode(a.trim()) === normalizeUnicode(b.trim())
  ];
  for (const compare of comparators) {
    const idx = tryMatch(lines, pattern, startIndex, compare, eof);
    if (idx !== -1) {
      return idx;
    }
  }
  return -1;
}
|
|
3186
|
+
// Produces a simplified unified-diff body by comparing the two contents
// line-by-line at the same positions (no LCS alignment). Returns "" when
// nothing differs. The single "@@ -1 +1 @@" header is intentionally coarse:
// this diff is for display, not for re-application.
function generateUnifiedDiff(oldContent, newContent) {
  const before = oldContent.split("\n");
  const after = newContent.split("\n");
  const total = Math.max(before.length, after.length);
  let body = "";
  let changed = false;
  for (let i = 0; i < total; i++) {
    const oldLine = before[i] || "";
    const newLine = after[i] || "";
    if (oldLine === newLine) {
      // Unchanged, non-empty lines are emitted as context.
      if (oldLine) {
        body += ` ${oldLine}\n`;
      }
      continue;
    }
    changed = true;
    if (oldLine) {
      body += `-${oldLine}\n`;
    }
    if (newLine) {
      body += `+${newLine}\n`;
    }
  }
  return changed ? `@@ -1 +1 @@\n${body}` : "";
}
|
|
3208
|
+
// Applies parsed patch hunks to disk through the Filesystem abstraction.
// Returns lists of added, modified, and deleted paths. Throws when the hunk
// list is empty or when any underlying filesystem call fails.
async function applyHunksToFiles(context, hunks) {
  if (hunks.length === 0) {
    throw new Error("No files were modified.");
  }
  const added = [];
  const modified = [];
  const deleted = [];
  for (const hunk of hunks) {
    switch (hunk.type) {
      case "add":
        // Ensure the parent directory exists before writing the new file.
        const addDir = dirname(hunk.path);
        if (addDir !== "." && addDir !== "/") {
          await Filesystem.mkdir(context, addDir);
        }
        await Filesystem.write(context, hunk.path, hunk.contents);
        added.push(hunk.path);
        log2.info(`Added file: ${hunk.path}`);
        break;
      case "delete":
        await Filesystem.remove(context, hunk.path);
        deleted.push(hunk.path);
        log2.info(`Deleted file: ${hunk.path}`);
        break;
      case "update":
        // Compute the patched contents first so a failed chunk match aborts
        // before anything touches the disk.
        const fileUpdate = await deriveNewContentsFromChunks(context, hunk.path, hunk.chunks);
        if (hunk.move_path) {
          // Rename: write the new location, then remove the original.
          // NOTE(review): not atomic - a failure between the two calls
          // leaves both paths on disk.
          const moveDir = dirname(hunk.move_path);
          if (moveDir !== "." && moveDir !== "/") {
            await Filesystem.mkdir(context, moveDir);
          }
          await Filesystem.write(context, hunk.move_path, fileUpdate.content);
          await Filesystem.remove(context, hunk.path);
          modified.push(hunk.move_path);
          log2.info(`Moved file: ${hunk.path} -> ${hunk.move_path}`);
        } else {
          await Filesystem.write(context, hunk.path, fileUpdate.content);
          modified.push(hunk.path);
          log2.info(`Updated file: ${hunk.path}`);
        }
        break;
    }
  }
  return { added, modified, deleted };
}
Patch2.applyHunksToFiles = applyHunksToFiles;
|
|
3253
|
+
// Convenience wrapper: parse `patchText` and immediately apply the resulting
// hunks to the filesystem. Propagates parse and apply errors unchanged.
async function applyPatch(context, patchText) {
  const parsed = parsePatch(patchText);
  return applyHunksToFiles(context, parsed.hunks);
}
Patch2.applyPatch = applyPatch;
|
|
3258
|
+
// Dry-run verification of an apply_patch invocation: parses argv, resolves
// every target path against `cwd`, and computes the full set of file changes
// without writing anything. Returns a "Body" result describing the planned
// changes, "CorrectnessError" when the patch cannot be applied, or
// "NotApplyPatch" when argv is not an apply_patch call.
async function maybeParseApplyPatchVerified(context, argv, cwd) {
  // A bare one-element argv that itself parses as a patch means the caller
  // invoked apply_patch without the command wrapper - reject it explicitly.
  if (argv.length === 1) {
    try {
      parsePatch(argv[0]);
      return {
        type: "CorrectnessError",
        error: new Error(
          "ImplicitInvocation"
          /* ImplicitInvocation */
        )
      };
    } catch {
    }
  }
  const result = maybeParseApplyPatch(argv);
  switch (result.type) {
    case "Body":
      const { args } = result;
      // NOTE(review): maybeParseApplyPatch does not appear to set
      // args.workdir - presumably reserved for other callers; verify.
      const effectiveCwd = args.workdir ? resolve(cwd, args.workdir) : cwd;
      // Keyed by the resolved destination path (the move target for renames).
      const changes = /* @__PURE__ */ new Map();
      for (const hunk of args.hunks) {
        const resolvedPath = resolve(
          effectiveCwd,
          hunk.type === "update" && hunk.move_path ? hunk.move_path : hunk.path
        );
        switch (hunk.type) {
          case "add":
            changes.set(resolvedPath, {
              type: "add",
              content: hunk.contents
            });
            break;
          case "delete":
            const deletePath = resolve(effectiveCwd, hunk.path);
            try {
              // Capture the current contents so the caller can show what is
              // being removed; a missing file is a correctness error.
              const content = await Filesystem.readText(context, deletePath);
              changes.set(resolvedPath, {
                type: "delete",
                content
              });
            } catch (error) {
              return {
                type: "CorrectnessError",
                error: new Error(`Failed to read file for deletion: ${deletePath}`)
              };
            }
            break;
          case "update":
            const updatePath = resolve(effectiveCwd, hunk.path);
            try {
              // Computes the patched contents in memory only; nothing is
              // written during verification.
              const fileUpdate = await deriveNewContentsFromChunks(context, updatePath, hunk.chunks);
              changes.set(resolvedPath, {
                type: "update",
                unified_diff: fileUpdate.unified_diff,
                move_path: hunk.move_path ? resolve(effectiveCwd, hunk.move_path) : void 0,
                new_content: fileUpdate.content
              });
            } catch (error) {
              return {
                type: "CorrectnessError",
                error
              };
            }
            break;
        }
      }
      return {
        type: "Body",
        action: {
          changes,
          patch: args.patch,
          cwd: effectiveCwd
        }
      };
    case "PatchParseError":
      return {
        type: "CorrectnessError",
        error: result.error
      };
    case "NotApplyPatch":
      return {
        type: "NotApplyPatch"
        /* NotApplyPatch */
      };
  }
}
Patch2.maybeParseApplyPatchVerified = maybeParseApplyPatchVerified;
|
|
3345
|
+
})(Patch || (Patch = {}));
|
|
3346
|
+
var apply_patch_txt_default = `Use the \`apply_patch\` tool to edit files. Your patch language is a stripped\u2011down, file\u2011oriented diff format designed to be easy to parse and safe to apply. You can think of it as a high\u2011level envelope:
|
|
3347
|
+
|
|
3348
|
+
*** Begin Patch
|
|
3349
|
+
[ one or more file sections ]
|
|
3350
|
+
*** End Patch
|
|
3351
|
+
|
|
3352
|
+
Within that envelope, you get a sequence of file operations.
|
|
3353
|
+
You MUST include a header to specify the action you are taking.
|
|
3354
|
+
Each operation starts with one of three headers:
|
|
3355
|
+
|
|
3356
|
+
*** Add File: <path> - create a new file. Every following line is a + line (the initial contents).
|
|
3357
|
+
*** Delete File: <path> - remove an existing file. Nothing follows.
|
|
3358
|
+
*** Update File: <path> - patch an existing file in place (optionally with a rename).
|
|
3359
|
+
|
|
3360
|
+
Example patch:
|
|
3361
|
+
|
|
3362
|
+
\`\`\`
|
|
3363
|
+
*** Begin Patch
|
|
3364
|
+
*** Add File: hello.txt
|
|
3365
|
+
+Hello world
|
|
3366
|
+
*** Update File: src/app.py
|
|
3367
|
+
*** Move to: src/main.py
|
|
3368
|
+
@@ def greet():
|
|
3369
|
+
-print("Hi")
|
|
3370
|
+
+print("Hello, world!")
|
|
3371
|
+
*** Delete File: obsolete.txt
|
|
3372
|
+
*** End Patch
|
|
3373
|
+
\`\`\`
|
|
3374
|
+
|
|
3375
|
+
It is important to remember:
|
|
3376
|
+
|
|
3377
|
+
- You must include a header with your intended action (Add/Delete/Update)
|
|
3378
|
+
- You must prefix new lines with \`+\` even when creating a new file
|
|
3379
|
+
`;
|
|
3380
|
+
// Schema for the apply_patch tool's parameters (z21 is a zod-style schema
// builder - presumably zod; verify against the bundle's imports).
var PatchParams = z21.object({
  patchText: z21.string().describe("The full patch text that describes all changes to be made")
});
|
|
3383
|
+
var ApplyPatchTool = Tool.define("apply_patch", {
|
|
3384
|
+
description: apply_patch_txt_default,
|
|
3385
|
+
parameters: PatchParams,
|
|
3386
|
+
async execute(params, ctx) {
|
|
3387
|
+
if (!params.patchText) {
|
|
3388
|
+
throw new Error("patchText is required");
|
|
3389
|
+
}
|
|
3390
|
+
let hunks;
|
|
3391
|
+
try {
|
|
3392
|
+
const parseResult = Patch.parsePatch(params.patchText);
|
|
3393
|
+
hunks = parseResult.hunks;
|
|
3394
|
+
} catch (error) {
|
|
3395
|
+
throw new Error(`apply_patch verification failed: ${error}`);
|
|
3396
|
+
}
|
|
3397
|
+
if (hunks.length === 0) {
|
|
3398
|
+
const normalized = params.patchText.replace(/\r\n/g, "\n").replace(/\r/g, "\n").trim();
|
|
3399
|
+
if (normalized === "*** Begin Patch\n*** End Patch") {
|
|
3400
|
+
throw new Error("patch rejected: empty patch");
|
|
3401
|
+
}
|
|
3402
|
+
throw new Error("apply_patch verification failed: no hunks found");
|
|
3403
|
+
}
|
|
3404
|
+
const fileChanges = [];
|
|
3405
|
+
let totalDiff = "";
|
|
3406
|
+
for (const hunk of hunks) {
|
|
3407
|
+
const filePath = resolve(ctx.directory, hunk.path);
|
|
3408
|
+
await assertExternalDirectory(ctx, filePath);
|
|
3409
|
+
switch (hunk.type) {
|
|
3410
|
+
case "add": {
|
|
3411
|
+
const oldContent = "";
|
|
3412
|
+
const newContent = hunk.contents.length === 0 || hunk.contents.endsWith("\n") ? hunk.contents : `${hunk.contents}
|
|
3413
|
+
`;
|
|
3414
|
+
const diff = trimDiff(createTwoFilesPatch3(filePath, filePath, oldContent, newContent));
|
|
3415
|
+
let additions = 0;
|
|
3416
|
+
let deletions = 0;
|
|
3417
|
+
for (const change of diffLines2(oldContent, newContent)) {
|
|
3418
|
+
if (change.added) additions += change.count || 0;
|
|
3419
|
+
if (change.removed) deletions += change.count || 0;
|
|
3420
|
+
}
|
|
3421
|
+
fileChanges.push({
|
|
3422
|
+
filePath,
|
|
3423
|
+
oldContent,
|
|
3424
|
+
newContent,
|
|
3425
|
+
type: "add",
|
|
3426
|
+
diff,
|
|
3427
|
+
additions,
|
|
3428
|
+
deletions
|
|
3429
|
+
});
|
|
3430
|
+
totalDiff += diff + "\n";
|
|
3431
|
+
break;
|
|
3432
|
+
}
|
|
3433
|
+
case "update": {
|
|
3434
|
+
const stats = await ctx.fs.stat(filePath);
|
|
3435
|
+
if (!stats || stats.isDirectory) {
|
|
3436
|
+
throw new Error(`apply_patch verification failed: Failed to read file to update: ${filePath}`);
|
|
3437
|
+
}
|
|
3438
|
+
const oldContent = await ctx.fs.readText(filePath);
|
|
3439
|
+
let newContent = oldContent;
|
|
3440
|
+
try {
|
|
3441
|
+
const fileUpdate = await Patch.deriveNewContentsFromChunks(ctx, filePath, hunk.chunks);
|
|
3442
|
+
newContent = fileUpdate.content;
|
|
3443
|
+
} catch (error) {
|
|
3444
|
+
throw new Error(`apply_patch verification failed: ${error}`);
|
|
3445
|
+
}
|
|
3446
|
+
const diff = trimDiff(createTwoFilesPatch3(filePath, filePath, oldContent, newContent));
|
|
3447
|
+
let additions = 0;
|
|
3448
|
+
let deletions = 0;
|
|
3449
|
+
for (const change of diffLines2(oldContent, newContent)) {
|
|
3450
|
+
if (change.added) additions += change.count || 0;
|
|
3451
|
+
if (change.removed) deletions += change.count || 0;
|
|
3452
|
+
}
|
|
3453
|
+
const movePath = hunk.move_path ? resolve(ctx.directory, hunk.move_path) : void 0;
|
|
3454
|
+
await assertExternalDirectory(ctx, movePath);
|
|
3455
|
+
fileChanges.push({
|
|
3456
|
+
filePath,
|
|
3457
|
+
oldContent,
|
|
3458
|
+
newContent,
|
|
3459
|
+
type: hunk.move_path ? "move" : "update",
|
|
3460
|
+
movePath,
|
|
3461
|
+
diff,
|
|
3462
|
+
additions,
|
|
3463
|
+
deletions
|
|
3464
|
+
});
|
|
3465
|
+
totalDiff += diff + "\n";
|
|
3466
|
+
break;
|
|
3467
|
+
}
|
|
3468
|
+
case "delete": {
|
|
3469
|
+
const contentToDelete = await ctx.fs.readText(filePath).catch((error) => {
|
|
3470
|
+
throw new Error(`apply_patch verification failed: ${error}`);
|
|
3471
|
+
});
|
|
3472
|
+
const deleteDiff = trimDiff(createTwoFilesPatch3(filePath, filePath, contentToDelete, ""));
|
|
3473
|
+
const deletions = contentToDelete.split("\n").length;
|
|
3474
|
+
fileChanges.push({
|
|
3475
|
+
filePath,
|
|
3476
|
+
oldContent: contentToDelete,
|
|
3477
|
+
newContent: "",
|
|
3478
|
+
type: "delete",
|
|
3479
|
+
diff: deleteDiff,
|
|
3480
|
+
additions: 0,
|
|
3481
|
+
deletions
|
|
3482
|
+
});
|
|
3483
|
+
totalDiff += deleteDiff + "\n";
|
|
3484
|
+
break;
|
|
3485
|
+
}
|
|
3486
|
+
}
|
|
3487
|
+
}
|
|
3488
|
+
const files = fileChanges.map((change) => ({
|
|
3489
|
+
filePath: change.filePath,
|
|
3490
|
+
relativePath: relative(ctx.worktree, change.movePath ?? change.filePath).replaceAll("\\", "/"),
|
|
3491
|
+
type: change.type,
|
|
3492
|
+
diff: change.diff,
|
|
3493
|
+
before: change.oldContent,
|
|
3494
|
+
after: change.newContent,
|
|
3495
|
+
additions: change.additions,
|
|
3496
|
+
deletions: change.deletions,
|
|
3497
|
+
movePath: change.movePath
|
|
3498
|
+
}));
|
|
3499
|
+
const relativePaths = fileChanges.map((c) => relative(ctx.worktree, c.filePath).replaceAll("\\", "/"));
|
|
3500
|
+
await ctx.ask({
|
|
3501
|
+
permission: "edit",
|
|
3502
|
+
patterns: relativePaths,
|
|
3503
|
+
always: ["*"],
|
|
3504
|
+
metadata: {
|
|
3505
|
+
filepath: relativePaths.join(", "),
|
|
3506
|
+
diff: totalDiff,
|
|
3507
|
+
files
|
|
3508
|
+
}
|
|
3509
|
+
});
|
|
3510
|
+
const updates = [];
|
|
3511
|
+
for (const change of fileChanges) {
|
|
3512
|
+
const edited = change.type === "delete" ? void 0 : change.movePath ?? change.filePath;
|
|
3513
|
+
switch (change.type) {
|
|
3514
|
+
case "add":
|
|
3515
|
+
await ctx.fs.mkdir(dirname(change.filePath));
|
|
3516
|
+
await ctx.fs.write(change.filePath, change.newContent);
|
|
3517
|
+
updates.push({ file: change.filePath, event: "add" });
|
|
3518
|
+
break;
|
|
3519
|
+
case "update":
|
|
3520
|
+
await ctx.fs.write(change.filePath, change.newContent);
|
|
3521
|
+
updates.push({ file: change.filePath, event: "change" });
|
|
3522
|
+
break;
|
|
3523
|
+
case "move":
|
|
3524
|
+
if (change.movePath) {
|
|
3525
|
+
await ctx.fs.mkdir(dirname(change.movePath));
|
|
3526
|
+
await ctx.fs.write(change.movePath, change.newContent);
|
|
3527
|
+
await ctx.fs.remove(change.filePath);
|
|
3528
|
+
updates.push({ file: change.filePath, event: "unlink" });
|
|
3529
|
+
updates.push({ file: change.movePath, event: "add" });
|
|
3530
|
+
}
|
|
3531
|
+
break;
|
|
3532
|
+
case "delete":
|
|
3533
|
+
await ctx.fs.remove(change.filePath);
|
|
3534
|
+
updates.push({ file: change.filePath, event: "unlink" });
|
|
3535
|
+
break;
|
|
3536
|
+
}
|
|
3537
|
+
if (edited) {
|
|
3538
|
+
ctx.emit("file.edited", { file: edited });
|
|
3539
|
+
}
|
|
3540
|
+
}
|
|
3541
|
+
for (const change of fileChanges) {
|
|
3542
|
+
if (change.type === "delete") continue;
|
|
3543
|
+
const target = change.movePath ?? change.filePath;
|
|
3544
|
+
await LSP.touchFile(target, true);
|
|
3545
|
+
}
|
|
3546
|
+
const diagnostics = await LSP.diagnostics();
|
|
3547
|
+
const summaryLines = fileChanges.map((change) => {
|
|
3548
|
+
if (change.type === "add") {
|
|
3549
|
+
return `A ${relative(ctx.worktree, change.filePath).replaceAll("\\", "/")}`;
|
|
3550
|
+
}
|
|
3551
|
+
if (change.type === "delete") {
|
|
3552
|
+
return `D ${relative(ctx.worktree, change.filePath).replaceAll("\\", "/")}`;
|
|
3553
|
+
}
|
|
3554
|
+
const target = change.movePath ?? change.filePath;
|
|
3555
|
+
return `M ${relative(ctx.worktree, target).replaceAll("\\", "/")}`;
|
|
3556
|
+
});
|
|
3557
|
+
let output = `Success. Updated the following files:
|
|
3558
|
+
${summaryLines.join("\n")}`;
|
|
3559
|
+
const MAX_DIAGNOSTICS_PER_FILE3 = 20;
|
|
3560
|
+
for (const change of fileChanges) {
|
|
3561
|
+
if (change.type === "delete") continue;
|
|
3562
|
+
const target = change.movePath ?? change.filePath;
|
|
3563
|
+
const normalized = target;
|
|
3564
|
+
const issues = diagnostics.get(normalized) ?? [];
|
|
3565
|
+
const errors = issues.filter((item) => item.severity === 1);
|
|
3566
|
+
if (errors.length > 0) {
|
|
3567
|
+
const limited = errors.slice(0, MAX_DIAGNOSTICS_PER_FILE3);
|
|
3568
|
+
const suffix = errors.length > MAX_DIAGNOSTICS_PER_FILE3 ? `
|
|
3569
|
+
... and ${errors.length - MAX_DIAGNOSTICS_PER_FILE3} more` : "";
|
|
3570
|
+
output += `
|
|
3571
|
+
|
|
3572
|
+
LSP errors detected in ${relative(ctx.worktree, target).replaceAll("\\", "/")}, please fix:
|
|
3573
|
+
<diagnostics file="${target}">
|
|
3574
|
+
${limited.map(LSP.Diagnostic.pretty).join("\n")}${suffix}
|
|
3575
|
+
</diagnostics>`;
|
|
3576
|
+
}
|
|
3577
|
+
}
|
|
3578
|
+
return {
|
|
3579
|
+
title: output,
|
|
3580
|
+
metadata: {
|
|
3581
|
+
diff: totalDiff,
|
|
3582
|
+
files,
|
|
3583
|
+
diagnostics
|
|
3584
|
+
},
|
|
3585
|
+
output
|
|
3586
|
+
};
|
|
3587
|
+
}
|
|
3588
|
+
});
|
|
3589
|
+
var terminal_write_txt_default = `Send input to the shared user terminal, or create/destroy it.
|
|
3590
|
+
|
|
3591
|
+
This tool interacts with a single shared terminal (PTY) that is also visible to the user.
|
|
3592
|
+
|
|
3593
|
+
## Actions
|
|
3594
|
+
|
|
3595
|
+
- **type="create"**: Spawn a new terminal. Fails if one already exists. You must create a terminal before sending input.
|
|
3596
|
+
- **type="destroy"**: Kill the current terminal. Use this when the terminal is stuck or unresponsive, then create a new one. Fails if no terminal exists.
|
|
3597
|
+
- **type="input"**: Send text to the terminal. By default, Enter is pressed after the input (pressEnter=true). Set pressEnter=false for partial input or answering prompts like y/n.
|
|
3598
|
+
|
|
3599
|
+
## Important
|
|
3600
|
+
- In most cases, prefer the **bash** tool for running commands. It is faster, captures output directly, and does not require creating/destroying a terminal.
|
|
3601
|
+
- Use terminal_write/terminal_read when you need a **persistent, stateful shell session**, e.g. running a long-lived dev server for preview, interactive REPL, or commands that depend on prior shell state.
|
|
3602
|
+
|
|
3603
|
+
## Usage notes
|
|
3604
|
+
- The terminal is shared with the user \u2014 they can see everything you type and you can see their output.
|
|
3605
|
+
- Always create a terminal before sending commands.
|
|
3606
|
+
- If a long-running command is stuck, destroy and recreate the terminal.
|
|
3607
|
+
- For commands that produce output, use the terminal_read tool after sending input to see the results.
|
|
3608
|
+
- When answering interactive prompts (e.g. "Continue? [y/n]"), set pressEnter=false if the program reads single characters, or pressEnter=true if it expects a line.
|
|
3609
|
+
`;
|
|
3610
|
+
// Tool that drives the single shared user-visible terminal: create it,
// destroy it, or send keystrokes to it. See terminal_write_txt_default for
// the model-facing description.
var TerminalWriteTool = Tool.define("terminal_write", async () => {
  return {
    description: terminal_write_txt_default,
    parameters: z22.object({
      type: z22.enum(["input", "create", "destroy"]).describe(
        'The type of action. "input" sends text to the terminal. "create" spawns a new terminal (errors if one already exists). "destroy" kills the current terminal (errors if none exists).'
      ),
      content: z22.string().describe('The text to send to the terminal. Required when type is "input".').optional(),
      pressEnter: z22.boolean().describe("Whether to press Enter after the input. Defaults to true.").optional()
    }),
    async execute(params, ctx) {
      const terminal = ctx.terminal;
      if (params.type === "create") {
        terminal.create();
        return {
          title: "Create terminal",
          metadata: { type: "create" },
          output: "Terminal created successfully."
        };
      }
      if (params.type === "destroy") {
        terminal.destroy();
        return {
          title: "Destroy terminal",
          metadata: { type: "destroy" },
          output: "Terminal destroyed successfully."
        };
      }
      // From here on the action is "input". An explicitly empty string is a
      // valid payload (e.g. just pressing Enter), so only reject a missing
      // content value.
      if (!params.content && params.content !== "") {
        throw new Error('The "content" parameter is required when type is "input".');
      }
      if (!terminal.exists()) {
        throw new Error('No terminal exists. Use type "create" first.');
      }
      const pressEnter = params.pressEnter ?? true;
      const data = pressEnter ? params.content + "\n" : params.content;
      terminal.write(data);
      return {
        // Truncate long inputs to keep the title display-friendly.
        title: params.content.length > 60 ? params.content.slice(0, 57) + "..." : params.content,
        metadata: {
          type: "input",
          content: params.content,
          pressEnter
        },
        output: `Input sent to terminal.`
      };
    }
  };
});
|
|
3659
|
+
var terminal_read_txt_default = `Read the terminal output from the bottom of the buffer.
|
|
3660
|
+
|
|
3661
|
+
Returns the last N lines from the shared user terminal. Use this after sending a command via terminal_write to see its output.
|
|
3662
|
+
|
|
3663
|
+
## Parameters
|
|
3664
|
+
- **length**: How many lines to read from the bottom. Start with a small number (e.g. 20-50) and increase if you need more context.
|
|
3665
|
+
- **waitBefore**: Milliseconds to wait before reading. Use this to let a command finish producing output. Defaults to 0. Maximum is 5000ms (values above 5000 are clamped to 5000).
|
|
3666
|
+
|
|
3667
|
+
## Important
|
|
3668
|
+
- In most cases, prefer the **bash** tool for running commands \u2014 it captures output directly without needing terminal_read.
|
|
3669
|
+
- Use terminal_read when reading output from a persistent terminal session (e.g. a dev server for preview) started via terminal_write.
|
|
3670
|
+
|
|
3671
|
+
## Usage notes
|
|
3672
|
+
- The terminal must exist (created via terminal_write type="create") before reading.
|
|
3673
|
+
- If output looks truncated or the command hasn't finished, just call terminal_read again \u2014 the terminal is persistent.
|
|
3674
|
+
- Lines are returned as plain text, one per line.
|
|
3675
|
+
`;
|
|
3676
|
+
// Tool that reads the last N lines from the shared terminal's buffer,
// optionally waiting first so a running command can finish producing output.
// See terminal_read_txt_default for the model-facing description.
var TerminalReadTool = Tool.define("terminal_read", async () => {
  return {
    description: terminal_read_txt_default,
    parameters: z23.object({
      length: z23.number().int().min(1).describe("Number of lines to read from the bottom of the terminal buffer."),
      waitBefore: z23.number().int().min(0).describe("Milliseconds to wait before reading. Use this to let a command finish producing output. Defaults to 0.").optional()
    }),
    async execute(params, ctx) {
      const terminal = ctx.terminal;
      if (!terminal.exists()) {
        throw new Error('No terminal exists. Use terminal_write with type "create" first.');
      }
      // Clamp the pre-read delay to 5s so a tool call can never stall longer.
      const MAX_WAIT = 5e3;
      const waitMs = Math.min(params.waitBefore ?? 0, MAX_WAIT);
      if (waitMs > 0) {
        await new Promise((resolve2) => setTimeout(resolve2, waitMs));
      }
      const content = terminal.read(params.length);
      return {
        title: `Read ${params.length} lines`,
        metadata: {
          length: params.length,
          waitBefore: waitMs
        },
        output: content || "(terminal buffer is empty)"
      };
    }
  };
});
|
|
3705
|
+
var set_preview_url_txt_default = `Set the preview URL for the user's preview panel.
|
|
3706
|
+
|
|
3707
|
+
This tool configures a reverse proxy so the user can preview a locally running web application directly in the IDE's preview tab. A dedicated preview port on the server proxies all requests to the given local URL.
|
|
3708
|
+
|
|
3709
|
+
## Parameters
|
|
3710
|
+
- **forwarded_local_url**: The absolute local URL to reverse-proxy to (e.g. "http://localhost:5173" for Vite, "http://localhost:3000" for React).
|
|
3711
|
+
|
|
3712
|
+
## Usage notes
|
|
3713
|
+
- Use this after starting a local dev server (e.g. via terminal_write) to let the user see the result.
|
|
3714
|
+
- The preview tab will automatically load the proxied page.
|
|
3715
|
+
- Calling this again will update the target and refresh the preview.
|
|
3716
|
+
`;
|
|
3717
|
+
// Tool that points the IDE preview panel's reverse proxy at a locally
// running dev server. See set_preview_url_txt_default for the model-facing
// description.
var SetPreviewUrlTool = Tool.define("set_preview_url", async () => {
  return {
    description: set_preview_url_txt_default,
    parameters: z24.object({
      forwarded_local_url: z24.string().describe('The absolute local URL to reverse-proxy to (e.g. "http://localhost:5173").')
    }),
    async execute(params, ctx) {
      // Delegates to the preview service; calling again simply retargets
      // the proxy.
      ctx.preview.setPreviewTarget(params.forwarded_local_url);
      return {
        title: `Preview \u2192 ${params.forwarded_local_url}`,
        metadata: {
          forwarded_local_url: params.forwarded_local_url
        },
        output: `Preview proxy set to "${params.forwarded_local_url}". The user's preview tab will load this automatically.`
      };
    }
  };
});
|
|
3735
|
+
// ToolRegistry — namespace bundling the built-in tool list, per-session
// custom-tool registration, and model-aware tool selection/initialization.
var ToolRegistry;
((ns) => {
  const log2 = Log.create({ service: "tool.registry" });

  // Per-context registry instance. Holds a promise for the mutable list of
  // custom (plugin-registered) tools so registration can await initialization.
  class ToolRegistryService {
    _promise;
    context;
    constructor(context) {
      this.context = context;
      this._promise = initTools(context);
    }
    // Add a custom tool; a tool with the same id replaces the existing entry.
    async register(tool) {
      const { custom } = await this._promise;
      const existing = custom.findIndex((entry) => entry.id === tool.id);
      if (existing === -1) {
        custom.push(tool);
      } else {
        custom.splice(existing, 1, tool);
      }
    }
    // Resolve the initialized tool set for a given model/agent.
    async tools(model, agent) {
      return ns.tools(this.context, model, agent);
    }
  }
  ns.ToolRegistryService = ToolRegistryService;

  // Seed state for a registry: starts with an empty custom-tool list.
  async function initTools(context) {
    const custom = [];
    return { custom };
  }

  // Adapt a plugin-supplied tool definition to the internal Tool shape.
  // Plugin output is truncated via Truncate.output before being returned.
  function fromPlugin(id, def) {
    return {
      id,
      init: async (initCtx) => ({
        parameters: z25.object(def.args),
        description: def.description,
        execute: async (args, ctx) => {
          // Explicitly forward directory/worktree alongside the spread so the
          // plugin context carries them even if they are not own properties.
          const pluginCtx = {
            ...ctx,
            directory: ctx.directory,
            worktree: ctx.worktree
          };
          const result = await def.execute(args, pluginCtx);
          const out = await Truncate.output(ctx, result, {}, initCtx?.agent);
          return {
            title: "",
            output: out.truncated ? out.content : result,
            metadata: { truncated: out.truncated, outputPath: out.truncated ? out.outputPath : undefined }
          };
        }
      })
    };
  }

  // Full tool list: built-ins (some behind config/feature flags) followed by
  // any custom tools registered on this context.
  async function all(context) {
    const custom = await context.toolRegistry._promise.then((state) => state.custom);
    const config = context.config;
    return [
      InvalidTool,
      SetWorkingDirectoryTool,
      BashTool,
      ReadTool,
      GlobTool,
      GrepTool,
      EditTool,
      WriteTool,
      WebFetchTool,
      TodoWriteTool,
      // TodoReadTool,
      WebSearchTool,
      CodeSearchTool,
      SkillTool,
      ApplyPatchTool,
      TerminalWriteTool,
      TerminalReadTool,
      SetPreviewUrlTool,
      ...config.experimental?.batch_tool === true ? [BatchTool] : [],
      ...Flag.OPENCODE_EXPERIMENTAL_PLAN_MODE && Flag.OPENCODE_CLIENT === "cli" ? [PlanExitTool] : [],
      ...custom
    ];
  }

  // Ids of every registered tool (built-in and custom).
  async function ids(context) {
    const list = await all(context);
    return list.map((t) => t.id);
  }
  ns.ids = ids;

  // Filter the tool list for a model, then initialize each surviving tool.
  // Search tools require the opencode provider (or an override flag); GPT-style
  // models use apply_patch instead of edit/write.
  async function tools(context, model, agent) {
    const available = await all(context);
    const keep = (t) => {
      if (t.id === "codesearch" || t.id === "websearch") {
        return model.providerID === ProviderID.opencode || Flag.OPENCODE_ENABLE_EXA;
      }
      const usePatch = model.modelID.includes("gpt-") && !model.modelID.includes("oss") && !model.modelID.includes("gpt-4");
      if (t.id === "apply_patch") return usePatch;
      if (t.id === "edit" || t.id === "write") return !usePatch;
      return true;
    };
    return Promise.all(
      available.filter(keep).map(async (t) => {
        // esbuild lowering of `using`: the timer disposable is pushed onto
        // _stack and disposed in the finally via __callDispose, which also
        // rethrows any captured error. Keep the var declarations — they rely
        // on hoisting so the finally block can read them.
        var _stack = [];
        try {
          const _timer = __using(_stack, log2.time(t.id));
          const tool = await t.init({ agent, agentContext: context });
          return {
            id: t.id,
            ...tool,
            description: tool.description,
            parameters: tool.parameters
          };
        } catch (_2) {
          var _error = _2, _hasError = true;
        } finally {
          __callDispose(_stack, _error, _hasError);
        }
      })
    );
  }
  ns.tools = tools;
})(ToolRegistry || (ToolRegistry = {}));
|
|
3854
|
+
|
|
3855
|
+
// Public surface of this bundle chunk.
export {
  SchedulerService,
  Tool,
  SetWorkingDirectoryTool,
  ReadTool,
  ConfigMarkdown,
  Discovery,
  Skill,
  ToolRegistry
};
|