@oh-my-pi/pi-coding-agent 13.3.7 → 13.3.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/CHANGELOG.md +82 -0
  2. package/package.json +9 -18
  3. package/scripts/format-prompts.ts +7 -172
  4. package/src/config/prompt-templates.ts +2 -54
  5. package/src/config/settings-schema.ts +24 -0
  6. package/src/discovery/codex.ts +1 -2
  7. package/src/discovery/helpers.ts +0 -5
  8. package/src/lsp/client.ts +8 -0
  9. package/src/lsp/config.ts +2 -3
  10. package/src/lsp/index.ts +379 -99
  11. package/src/lsp/render.ts +21 -31
  12. package/src/lsp/types.ts +21 -8
  13. package/src/lsp/utils.ts +193 -1
  14. package/src/mcp/config-writer.ts +3 -0
  15. package/src/modes/components/settings-defs.ts +9 -0
  16. package/src/modes/interactive-mode.ts +8 -1
  17. package/src/modes/theme/mermaid-cache.ts +4 -4
  18. package/src/modes/theme/theme.ts +33 -0
  19. package/src/prompts/system/subagent-user-prompt.md +2 -0
  20. package/src/prompts/system/system-prompt.md +12 -1
  21. package/src/prompts/tools/ast-find.md +20 -0
  22. package/src/prompts/tools/ast-replace.md +21 -0
  23. package/src/prompts/tools/bash.md +2 -0
  24. package/src/prompts/tools/hashline.md +26 -8
  25. package/src/prompts/tools/lsp.md +22 -5
  26. package/src/sdk.ts +11 -1
  27. package/src/session/agent-session.ts +261 -82
  28. package/src/task/executor.ts +8 -5
  29. package/src/tools/ast-find.ts +316 -0
  30. package/src/tools/ast-replace.ts +294 -0
  31. package/src/tools/bash.ts +2 -1
  32. package/src/tools/browser.ts +2 -8
  33. package/src/tools/fetch.ts +55 -18
  34. package/src/tools/index.ts +8 -0
  35. package/src/tools/path-utils.ts +34 -0
  36. package/src/tools/python.ts +2 -1
  37. package/src/tools/renderers.ts +4 -0
  38. package/src/tools/ssh.ts +2 -1
  39. package/src/tools/todo-write.ts +34 -0
  40. package/src/tools/tool-timeouts.ts +29 -0
  41. package/src/utils/mime.ts +37 -14
  42. package/src/utils/prompt-format.ts +172 -0
  43. package/src/web/scrapers/arxiv.ts +12 -12
  44. package/src/web/scrapers/go-pkg.ts +2 -2
  45. package/src/web/scrapers/iacr.ts +17 -9
  46. package/src/web/scrapers/readthedocs.ts +3 -3
  47. package/src/web/scrapers/twitter.ts +11 -11
  48. package/src/web/scrapers/wikipedia.ts +4 -5
  49. package/src/utils/ignore-files.ts +0 -119
@@ -1,4 +1,4 @@
1
- import { parse as parseHtml } from "node-html-parser";
1
+ import { parseHTML } from "linkedom";
2
2
  import type { RenderResult, SpecialHandler } from "./types";
3
3
  import { buildResult, loadPage } from "./types";
4
4
  import { convertWithMarkitdown, fetchBinary } from "./utils";
@@ -30,22 +30,30 @@ export const handleIacr: SpecialHandler = async (
30
30
 
31
31
  if (!result.ok) return null;
32
32
 
33
- const doc = parseHtml(result.content);
33
+ const doc = parseHTML(result.content).document;
34
34
 
35
35
  // Extract metadata from the page
36
36
  const title =
37
- doc.querySelector("h3.mb-3")?.text?.trim() ||
37
+ doc.querySelector("h3.mb-3")?.textContent?.trim() ||
38
38
  doc.querySelector('meta[name="citation_title"]')?.getAttribute("content");
39
- const authors = doc
40
- .querySelectorAll('meta[name="citation_author"]')
39
+ const authors = Array.from(
40
+ doc.querySelectorAll('meta[name="citation_author"]') as Iterable<{
41
+ getAttribute: (name: string) => string | null;
42
+ }>,
43
+ )
41
44
  .map(m => m.getAttribute("content"))
42
- .filter(Boolean);
45
+ .filter((author): author is string => Boolean(author));
43
46
  // Abstract is in <p> after <h5>Abstract</h5>
44
- const abstractHeading = doc.querySelectorAll("h5").find(h => h.text?.includes("Abstract"));
47
+ const abstractHeading = Array.from(
48
+ doc.querySelectorAll("h5") as Iterable<{
49
+ textContent: string | null;
50
+ parentElement?: { querySelector: (selector: string) => { textContent: string | null } | null } | null;
51
+ }>,
52
+ ).find(h => h.textContent?.includes("Abstract"));
45
53
  const abstract =
46
- abstractHeading?.parentNode?.querySelector("p")?.text?.trim() ||
54
+ abstractHeading?.parentElement?.querySelector("p")?.textContent?.trim() ||
47
55
  doc.querySelector('meta[name="description"]')?.getAttribute("content");
48
- const keywords = doc.querySelector(".keywords")?.text?.replace("Keywords:", "").trim();
56
+ const keywords = doc.querySelector(".keywords")?.textContent?.replace("Keywords:", "").trim();
49
57
  const pubDate = doc.querySelector('meta[name="citation_publication_date"]')?.getAttribute("content");
50
58
 
51
59
  let md = `# ${title || "IACR ePrint Paper"}\n\n`;
@@ -1,7 +1,7 @@
1
1
  /**
2
2
  * Read the Docs handler for web-fetch
3
3
  */
4
- import { parse as parseHtml } from "node-html-parser";
4
+ import { parseHTML } from "linkedom";
5
5
  import { buildResult, htmlToBasicMarkdown, loadPage, type RenderResult, type SpecialHandler } from "./types";
6
6
 
7
7
  export const handleReadTheDocs: SpecialHandler = async (
@@ -39,7 +39,7 @@ export const handleReadTheDocs: SpecialHandler = async (
39
39
  }
40
40
 
41
41
  // Parse HTML
42
- const root = parseHtml(result.content);
42
+ const root = parseHTML(result.content).document;
43
43
 
44
44
  // Extract main content from common Read the Docs selectors
45
45
  let mainContent =
@@ -60,7 +60,7 @@ export const handleReadTheDocs: SpecialHandler = async (
60
60
  ?.querySelectorAll(
61
61
  ".headerlink, .viewcode-link, nav, .sidebar, footer, .related, .sphinxsidebar, .toctree-wrapper",
62
62
  )
63
- .forEach(el => {
63
+ .forEach((el: Element) => {
64
64
  el.remove();
65
65
  });
66
66
 
@@ -1,4 +1,4 @@
1
- import { parse as parseHtml } from "node-html-parser";
1
+ import { type HTMLElement, parseHTML } from "linkedom";
2
2
  import { ToolAbortError } from "../../tools/tool-errors";
3
3
  import type { RenderResult, SpecialHandler } from "./types";
4
4
  import { buildResult, loadPage } from "./types";
@@ -33,14 +33,14 @@ export const handleTwitter: SpecialHandler = async (
33
33
 
34
34
  if (result.ok && result.content.length > 500) {
35
35
  // Parse the Nitter HTML
36
- const doc = parseHtml(result.content);
36
+ const doc = parseHTML(result.content).document;
37
37
 
38
38
  // Extract tweet content
39
- const tweetContent = doc.querySelector(".tweet-content")?.text?.trim();
40
- const fullname = doc.querySelector(".fullname")?.text?.trim();
41
- const username = doc.querySelector(".username")?.text?.trim();
42
- const date = doc.querySelector(".tweet-date a")?.text?.trim();
43
- const stats = doc.querySelector(".tweet-stats")?.text?.trim();
39
+ const tweetContent = doc.querySelector(".tweet-content")?.textContent?.trim();
40
+ const fullname = doc.querySelector(".fullname")?.textContent?.trim();
41
+ const username = doc.querySelector(".username")?.textContent?.trim();
42
+ const date = doc.querySelector(".tweet-date a")?.textContent?.trim();
43
+ const stats = doc.querySelector(".tweet-stats")?.textContent?.trim();
44
44
 
45
45
  if (tweetContent) {
46
46
  let md = `# Tweet by ${fullname || "Unknown"} (${username || "@?"})\n\n`;
@@ -49,12 +49,12 @@ export const handleTwitter: SpecialHandler = async (
49
49
  if (stats) md += `---\n${stats.replace(/\s+/g, " ")}\n`;
50
50
 
51
51
  // Check for replies/thread
52
- const replies = doc.querySelectorAll(".timeline-item .tweet-content");
52
+ const replies = Array.from(doc.querySelectorAll(".timeline-item .tweet-content")) as HTMLElement[];
53
53
  if (replies.length > 1) {
54
54
  md += `\n---\n\n## Thread/Replies\n\n`;
55
- for (const reply of Array.from(replies).slice(1, 10)) {
56
- const replyUser = reply.parentNode?.querySelector(".username")?.text?.trim();
57
- md += `**${replyUser || "@?"}**: ${reply.text?.trim()}\n\n`;
55
+ for (const reply of replies.slice(1, 10)) {
56
+ const replyUser = reply.parentElement?.querySelector(".username")?.textContent?.trim();
57
+ md += `**${replyUser || "@?"}**: ${reply.textContent?.trim()}\n\n`;
58
58
  }
59
59
  }
60
60
 
@@ -1,4 +1,4 @@
1
- import { parse as parseHtml } from "node-html-parser";
1
+ import { parseHTML } from "linkedom";
2
2
  import type { RenderResult, SpecialHandler } from "./types";
3
3
  import { buildResult, loadPage } from "./types";
4
4
 
@@ -45,14 +45,13 @@ export const handleWikipedia: SpecialHandler = async (
45
45
  const contentResult = await loadPage(contentUrl, { timeout, signal });
46
46
 
47
47
  if (contentResult.ok) {
48
- const doc = parseHtml(contentResult.content);
48
+ const doc = parseHTML(contentResult.content).document;
49
49
 
50
50
  // Extract main content sections
51
51
  const sections = doc.querySelectorAll("section");
52
52
  for (const section of sections) {
53
53
  const heading = section.querySelector("h2, h3, h4");
54
- const headingText = heading?.text?.trim();
55
-
54
+ const headingText = heading?.textContent?.trim();
56
55
  // Skip certain sections
57
56
  if (
58
57
  headingText &&
@@ -68,7 +67,7 @@ export const handleWikipedia: SpecialHandler = async (
68
67
 
69
68
  const paragraphs = section.querySelectorAll("p");
70
69
  for (const p of paragraphs) {
71
- const text = p.text?.trim();
70
+ const text = p.textContent?.trim();
72
71
  if (text && text.length > 20) {
73
72
  md += `${text}\n\n`;
74
73
  }
@@ -1,119 +0,0 @@
1
- /**
2
- * Ignore file handling for .gitignore/.ignore/.fdignore support when scanning directories.
3
- */
4
- import * as path from "node:path";
5
- import ignore from "ignore";
6
-
7
- export const IGNORE_FILE_NAMES = [".gitignore", ".ignore", ".fdignore"] as const;
8
-
9
- export type IgnoreMatcher = ignore.Ignore;
10
-
11
- /**
12
- * Convert a path to POSIX format (forward slashes).
13
- */
14
- export function toPosixPath(p: string): string {
15
- return p.split(path.sep).join("/");
16
- }
17
-
18
- /**
19
- * Prefix an ignore pattern to make it relative to a subdirectory.
20
- * Returns null for comments and empty lines.
21
- */
22
- export function prefixIgnorePattern(line: string, prefix: string): string | null {
23
- const trimmed = line.trim();
24
- if (!trimmed) return null;
25
- if (trimmed.startsWith("#") && !trimmed.startsWith("\\#")) return null;
26
-
27
- let pattern = line;
28
- let negated = false;
29
-
30
- if (pattern.startsWith("!")) {
31
- negated = true;
32
- pattern = pattern.slice(1);
33
- } else if (pattern.startsWith("\\!")) {
34
- pattern = pattern.slice(1);
35
- }
36
-
37
- if (pattern.startsWith("/")) {
38
- pattern = pattern.slice(1);
39
- }
40
-
41
- const prefixed = prefix ? `${prefix}${pattern}` : pattern;
42
- return negated ? `!${prefixed}` : prefixed;
43
- }
44
-
45
- /**
46
- * Read and add ignore rules from a directory to the matcher.
47
- */
48
- export async function addIgnoreRules(
49
- ig: IgnoreMatcher,
50
- dir: string,
51
- rootDir: string,
52
- readFile: (path: string) => Promise<string | null>,
53
- ): Promise<void> {
54
- const relativeDir = path.relative(rootDir, dir);
55
- const prefix = relativeDir ? `${toPosixPath(relativeDir)}/` : "";
56
-
57
- for (const filename of IGNORE_FILE_NAMES) {
58
- const ignorePath = path.join(dir, filename);
59
- const content = await readFile(ignorePath);
60
- if (!content) continue;
61
-
62
- const patterns = content
63
- .split(/\r?\n/)
64
- .map(line => prefixIgnorePattern(line, prefix))
65
- .filter((line): line is string => Boolean(line));
66
-
67
- if (patterns.length > 0) {
68
- ig.add(patterns);
69
- }
70
- }
71
- }
72
-
73
- /**
74
- * Read and add ignore rules from a directory to the matcher (synchronous version).
75
- */
76
- export function addIgnoreRulesSync(
77
- ig: IgnoreMatcher,
78
- dir: string,
79
- rootDir: string,
80
- readFileSync: (path: string) => string | null,
81
- ): void {
82
- const relativeDir = path.relative(rootDir, dir);
83
- const prefix = relativeDir ? `${toPosixPath(relativeDir)}/` : "";
84
-
85
- for (const filename of IGNORE_FILE_NAMES) {
86
- const ignorePath = path.join(dir, filename);
87
- const content = readFileSync(ignorePath);
88
- if (!content) continue;
89
-
90
- const patterns = content
91
- .split(/\r?\n/)
92
- .map(line => prefixIgnorePattern(line, prefix))
93
- .filter((line): line is string => Boolean(line));
94
-
95
- if (patterns.length > 0) {
96
- ig.add(patterns);
97
- }
98
- }
99
- }
100
-
101
- /**
102
- * Create a fresh ignore matcher.
103
- */
104
- export function createIgnoreMatcher(): IgnoreMatcher {
105
- return ignore();
106
- }
107
-
108
- /**
109
- * Check if a path should be ignored.
110
- * @param ig - The ignore matcher
111
- * @param root - The root directory for relative path calculation
112
- * @param fullPath - The full path to check
113
- * @param isDir - Whether the path is a directory
114
- */
115
- export function shouldIgnore(ig: IgnoreMatcher, root: string, fullPath: string, isDir: boolean): boolean {
116
- const relPath = toPosixPath(path.relative(root, fullPath));
117
- const ignorePath = isDir ? `${relPath}/` : relPath;
118
- return ig.ignores(ignorePath);
119
- }