@zhijiewang/openharness 2.30.1 → 2.31.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commands/index.d.ts +1 -1
- package/dist/commands/index.js +1 -1
- package/dist/commands/info.js +2 -5
- package/dist/commands/settings.d.ts +1 -1
- package/dist/commands/settings.js +1 -5
- package/dist/harness/config.d.ts +0 -8
- package/dist/harness/project-purge.d.ts +56 -0
- package/dist/harness/project-purge.js +198 -0
- package/dist/main.js +56 -0
- package/dist/tools/FileReadTool/index.js +7 -4
- package/dist/tools/ImageReadTool/index.js +6 -1
- package/dist/utils/image-downscale.d.ts +34 -0
- package/dist/utils/image-downscale.js +89 -0
- package/package.json +3 -3
- package/dist/harness/sandbox.d.ts +0 -34
- package/dist/harness/sandbox.js +0 -104
package/dist/commands/index.d.ts
CHANGED
|
@@ -8,7 +8,7 @@
|
|
|
8
8
|
* session.ts — /clear, /compact, /export, /history, /browse, /resume, /fork, /pin, /unpin
|
|
9
9
|
* git.ts — /diff, /undo, /rewind, /commit, /log
|
|
10
10
|
* info.ts — /help, /cost, /status, /config, /files, /model, /memory, /doctor, /context, /mcp, /init
|
|
11
|
-
* settings.ts — /theme, /companion, /fast, /keys, /effort, /
|
|
11
|
+
* settings.ts — /theme, /companion, /fast, /keys, /effort, /permissions, /allowed-tools
|
|
12
12
|
* ai.ts — /plan, /review, /roles, /agents, /plugins, /btw, /loop, /cybergotchi
|
|
13
13
|
* skills.ts — /skill-create, /skill-delete, /skill-edit, /skill-search, /skill-install
|
|
14
14
|
*/
|
package/dist/commands/index.js
CHANGED
|
@@ -8,7 +8,7 @@
|
|
|
8
8
|
* session.ts — /clear, /compact, /export, /history, /browse, /resume, /fork, /pin, /unpin
|
|
9
9
|
* git.ts — /diff, /undo, /rewind, /commit, /log
|
|
10
10
|
* info.ts — /help, /cost, /status, /config, /files, /model, /memory, /doctor, /context, /mcp, /init
|
|
11
|
-
* settings.ts — /theme, /companion, /fast, /keys, /effort, /
|
|
11
|
+
* settings.ts — /theme, /companion, /fast, /keys, /effort, /permissions, /allowed-tools
|
|
12
12
|
* ai.ts — /plan, /review, /roles, /agents, /plugins, /btw, /loop, /cybergotchi
|
|
13
13
|
* skills.ts — /skill-create, /skill-delete, /skill-edit, /skill-search, /skill-install
|
|
14
14
|
*/
|
package/dist/commands/info.js
CHANGED
|
@@ -10,7 +10,6 @@ import { estimateMessageTokens } from "../harness/context-warning.js";
|
|
|
10
10
|
import { getContextWindow } from "../harness/cost.js";
|
|
11
11
|
import { getHooks, invalidateHookCache } from "../harness/hooks.js";
|
|
12
12
|
import { discoverPlugins, discoverSkills } from "../harness/plugins.js";
|
|
13
|
-
import { invalidateSandboxCache } from "../harness/sandbox.js";
|
|
14
13
|
import { formatTrace, listTracedSessions, loadTrace } from "../harness/traces.js";
|
|
15
14
|
import { getVerificationConfig, invalidateVerificationCache } from "../harness/verification.js";
|
|
16
15
|
import { normalizeMcpConfig } from "../mcp/config-normalize.js";
|
|
@@ -76,7 +75,6 @@ export function registerInfoCommands(register, getCommandMap) {
|
|
|
76
75
|
"fast",
|
|
77
76
|
"keys",
|
|
78
77
|
"effort",
|
|
79
|
-
"sandbox",
|
|
80
78
|
"permissions",
|
|
81
79
|
"allowed-tools",
|
|
82
80
|
"login",
|
|
@@ -752,13 +750,12 @@ export function registerInfoCommands(register, getCommandMap) {
|
|
|
752
750
|
return { output: lines.join("\n"), handled: true };
|
|
753
751
|
});
|
|
754
752
|
register("reload-plugins", "Hot-reload plugins, skills, hooks, MCP servers and config without restarting the session.", async () => {
|
|
755
|
-
// Invalidate every cached source — config, hooks,
|
|
753
|
+
// Invalidate every cached source — config, hooks, verification.
|
|
756
754
|
// Skills + plugins aren't cached (each discoverSkills/discoverPlugins call
|
|
757
755
|
// reads fresh) but we still re-run them for the report so the user sees
|
|
758
756
|
// a count consistent with the new on-disk state.
|
|
759
757
|
invalidateConfigCache();
|
|
760
758
|
invalidateHookCache();
|
|
761
|
-
invalidateSandboxCache();
|
|
762
759
|
invalidateVerificationCache();
|
|
763
760
|
// Tear down + reconnect MCP servers (the live connections aren't
|
|
764
761
|
// cache-driven; they're long-lived sockets that need an explicit
|
|
@@ -780,7 +777,7 @@ export function registerInfoCommands(register, getCommandMap) {
|
|
|
780
777
|
const mcpServers = connectedMcpServers().length;
|
|
781
778
|
const lines = [
|
|
782
779
|
"Hot reload complete:",
|
|
783
|
-
" - config + hooks +
|
|
780
|
+
" - config + hooks + verification: caches invalidated",
|
|
784
781
|
` - hook events configured: ${hookEvents}`,
|
|
785
782
|
` - MCP servers connected: ${mcpServers}${mcpError ? ` (error: ${mcpError})` : ""}`,
|
|
786
783
|
` - MCP tools loaded: ${mcpTools}`,
|
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
/**
|
|
2
|
-
* Settings commands — /theme, /companion, /fast, /keys, /keybindings, /effort, /
|
|
2
|
+
* Settings commands — /theme, /companion, /fast, /keys, /keybindings, /effort, /permissions, /allowed-tools, /trust
|
|
3
3
|
*/
|
|
4
4
|
import type { CommandHandler } from "./types.js";
|
|
5
5
|
export declare function registerSettingsCommands(register: (name: string, description: string, handler: CommandHandler) => void): void;
|
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
/**
|
|
2
|
-
* Settings commands — /theme, /companion, /fast, /keys, /keybindings, /effort, /
|
|
2
|
+
* Settings commands — /theme, /companion, /fast, /keys, /keybindings, /effort, /permissions, /allowed-tools, /trust
|
|
3
3
|
*/
|
|
4
4
|
import { spawn } from "node:child_process";
|
|
5
5
|
import { existsSync, mkdirSync, writeFileSync } from "node:fs";
|
|
@@ -8,7 +8,6 @@ import { dirname, join } from "node:path";
|
|
|
8
8
|
import { readApprovalLog } from "../harness/approvals.js";
|
|
9
9
|
import { readOhConfig } from "../harness/config.js";
|
|
10
10
|
import { loadKeybindings } from "../harness/keybindings.js";
|
|
11
|
-
import { sandboxStatus } from "../harness/sandbox.js";
|
|
12
11
|
import { isTrusted, listTrusted, trust } from "../harness/trust.js";
|
|
13
12
|
const KEYBINDINGS_TEMPLATE = `[
|
|
14
13
|
{ "key": "ctrl+d", "action": "/diff" },
|
|
@@ -136,9 +135,6 @@ export function registerSettingsCommands(register) {
|
|
|
136
135
|
}
|
|
137
136
|
return { output: `Effort level set to: ${level}`, handled: true };
|
|
138
137
|
});
|
|
139
|
-
register("sandbox", "Show sandbox status and restrictions", () => {
|
|
140
|
-
return { output: `${sandboxStatus()}\n\nConfigure in .oh/config.yaml under sandbox:`, handled: true };
|
|
141
|
-
});
|
|
142
138
|
register("permissions", "View or change permission mode (or 'log' for approval history)", (args, ctx) => {
|
|
143
139
|
const trimmed = args.trim();
|
|
144
140
|
if (!trimmed) {
|
package/dist/harness/config.d.ts
CHANGED
|
@@ -208,14 +208,6 @@ export type OhConfig = {
|
|
|
208
208
|
enabled?: boolean;
|
|
209
209
|
endpoint?: string;
|
|
210
210
|
};
|
|
211
|
-
/** Sandbox — filesystem and network restrictions */
|
|
212
|
-
sandbox?: {
|
|
213
|
-
enabled?: boolean;
|
|
214
|
-
allowedPaths?: string[];
|
|
215
|
-
allowedDomains?: string[];
|
|
216
|
-
blockNetwork?: boolean;
|
|
217
|
-
blockedCommands?: string[];
|
|
218
|
-
};
|
|
219
211
|
/** Remote server security settings */
|
|
220
212
|
remote?: {
|
|
221
213
|
tokens?: string[];
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* `oh project purge` core logic — extracted from the CLI command for testability.
|
|
3
|
+
*
|
|
4
|
+
* Deletes per-project openHarness state at a target directory:
|
|
5
|
+
* 1. The entire `.oh/` directory at that path (config, RULES.md, memory/,
|
|
6
|
+
* skills/, agents/, output-styles/, plans/, checkpoints/, exports).
|
|
7
|
+
* 2. The workspace-trust entry for that path in `~/.oh/trusted-dirs.json`,
|
|
8
|
+
* if present.
|
|
9
|
+
*
|
|
10
|
+
* What it does NOT touch (these are global-and-cross-project):
|
|
11
|
+
* - `~/.oh/sessions/` session transcripts (may span projects)
|
|
12
|
+
* - `~/.oh/credentials.enc` global API keys
|
|
13
|
+
* - `~/.oh/memory/` (etc.) global counterparts of project state
|
|
14
|
+
* - `~/.oh/plugins/`, marketplaces installed plugins
|
|
15
|
+
* - `~/.oh/telemetry/`, traces/ global observability data
|
|
16
|
+
* - `~/.oh/approvals.log` append-only audit log
|
|
17
|
+
* - `~/.oh/keybindings.json`,
|
|
18
|
+
* `~/.oh/config.yaml` global config
|
|
19
|
+
*
|
|
20
|
+
* Mirrors Claude Code's `claude project purge` UX surface (--dry-run, --yes,
|
|
21
|
+
* default plan + confirm). `--all` and `--interactive` are deferred — openHarness
|
|
22
|
+
* has no project registry, so `--all` would need a session-cwd scan, and
|
|
23
|
+
* `--dry-run` already covers the spec for `--interactive`.
|
|
24
|
+
*/
|
|
25
|
+
export type PurgeEntry = {
|
|
26
|
+
/** Filesystem path that will be removed. */
|
|
27
|
+
path: string;
|
|
28
|
+
/** Human-readable label shown in the plan. */
|
|
29
|
+
label: string;
|
|
30
|
+
/** Cumulative size in bytes. 0 when the entry is metadata-only (e.g. a trust-store entry). */
|
|
31
|
+
bytes: number;
|
|
32
|
+
/** When false, this entry is reported but doesn't currently exist on disk. */
|
|
33
|
+
exists: boolean;
|
|
34
|
+
/** When true, removal is via JSON edit instead of `rmSync`. Used for the trust-store entry. */
|
|
35
|
+
jsonEdit?: boolean;
|
|
36
|
+
};
|
|
37
|
+
export type PurgePlan = {
|
|
38
|
+
projectPath: string;
|
|
39
|
+
entries: PurgeEntry[];
|
|
40
|
+
totalBytes: number;
|
|
41
|
+
};
|
|
42
|
+
/** Format bytes as a short human string (e.g. `1.2 MB`, `342 B`). */
|
|
43
|
+
export declare function formatBytes(bytes: number): string;
|
|
44
|
+
/**
|
|
45
|
+
* Build the list of things `purge` would delete, without touching the filesystem.
|
|
46
|
+
* Inspects the `.oh/` directory at `projectPath` and looks for a trust-store entry.
|
|
47
|
+
*/
|
|
48
|
+
export declare function planPurge(projectPath: string): PurgePlan;
|
|
49
|
+
/** Render a plan as a multi-line string for display. */
|
|
50
|
+
export declare function formatPurgePlan(plan: PurgePlan): string;
|
|
51
|
+
/** Execute the plan. Returns the count of successfully removed entries and any errors. */
|
|
52
|
+
export declare function executePurge(plan: PurgePlan): {
|
|
53
|
+
deleted: number;
|
|
54
|
+
errors: string[];
|
|
55
|
+
};
|
|
56
|
+
//# sourceMappingURL=project-purge.d.ts.map
|
|
@@ -0,0 +1,198 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* `oh project purge` core logic — extracted from the CLI command for testability.
|
|
3
|
+
*
|
|
4
|
+
* Deletes per-project openHarness state at a target directory:
|
|
5
|
+
* 1. The entire `.oh/` directory at that path (config, RULES.md, memory/,
|
|
6
|
+
* skills/, agents/, output-styles/, plans/, checkpoints/, exports).
|
|
7
|
+
* 2. The workspace-trust entry for that path in `~/.oh/trusted-dirs.json`,
|
|
8
|
+
* if present.
|
|
9
|
+
*
|
|
10
|
+
* What it does NOT touch (these are global-and-cross-project):
|
|
11
|
+
* - `~/.oh/sessions/` session transcripts (may span projects)
|
|
12
|
+
* - `~/.oh/credentials.enc` global API keys
|
|
13
|
+
* - `~/.oh/memory/` (etc.) global counterparts of project state
|
|
14
|
+
* - `~/.oh/plugins/`, marketplaces installed plugins
|
|
15
|
+
* - `~/.oh/telemetry/`, traces/ global observability data
|
|
16
|
+
* - `~/.oh/approvals.log` append-only audit log
|
|
17
|
+
* - `~/.oh/keybindings.json`,
|
|
18
|
+
* `~/.oh/config.yaml` global config
|
|
19
|
+
*
|
|
20
|
+
* Mirrors Claude Code's `claude project purge` UX surface (--dry-run, --yes,
|
|
21
|
+
* default plan + confirm). `--all` and `--interactive` are deferred — openHarness
|
|
22
|
+
* has no project registry, so `--all` would need a session-cwd scan, and
|
|
23
|
+
* `--dry-run` already covers the spec for `--interactive`.
|
|
24
|
+
*/
|
|
25
|
+
import { existsSync, readdirSync, readFileSync, rmSync, statSync, writeFileSync } from "node:fs";
|
|
26
|
+
import { homedir } from "node:os";
|
|
27
|
+
import { join, resolve } from "node:path";
|
|
28
|
+
/**
|
|
29
|
+
* Path to the workspace-trust file. Resolved per-call so `OH_TRUST_FILE`
|
|
30
|
+
* env-var overrides (used by tests) take effect without re-importing.
|
|
31
|
+
*/
|
|
32
|
+
function trustFilePath() {
|
|
33
|
+
return process.env.OH_TRUST_FILE ?? join(homedir(), ".oh", "trusted-dirs.json");
|
|
34
|
+
}
|
|
35
|
+
/** Walk a directory and return the cumulative size in bytes. Errors swallowed. */
|
|
36
|
+
function dirSize(path) {
|
|
37
|
+
let total = 0;
|
|
38
|
+
try {
|
|
39
|
+
if (!existsSync(path))
|
|
40
|
+
return 0;
|
|
41
|
+
const stats = statSync(path);
|
|
42
|
+
if (stats.isFile())
|
|
43
|
+
return stats.size;
|
|
44
|
+
if (!stats.isDirectory())
|
|
45
|
+
return 0;
|
|
46
|
+
for (const entry of readdirSync(path)) {
|
|
47
|
+
total += dirSize(join(path, entry));
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
catch {
|
|
51
|
+
/* permission errors etc. — best-effort sizing */
|
|
52
|
+
}
|
|
53
|
+
return total;
|
|
54
|
+
}
|
|
55
|
+
/** Format bytes as a short human string (e.g. `1.2 MB`, `342 B`). */
|
|
56
|
+
export function formatBytes(bytes) {
|
|
57
|
+
if (bytes < 1024)
|
|
58
|
+
return `${bytes} B`;
|
|
59
|
+
if (bytes < 1024 * 1024)
|
|
60
|
+
return `${(bytes / 1024).toFixed(1)} KB`;
|
|
61
|
+
if (bytes < 1024 * 1024 * 1024)
|
|
62
|
+
return `${(bytes / (1024 * 1024)).toFixed(1)} MB`;
|
|
63
|
+
return `${(bytes / (1024 * 1024 * 1024)).toFixed(2)} GB`;
|
|
64
|
+
}
|
|
65
|
+
/** Normalize a directory the same way `harness/trust.ts` does. Lowercase on Windows. */
|
|
66
|
+
function normalizeForTrust(dir) {
|
|
67
|
+
const abs = resolve(dir);
|
|
68
|
+
return process.platform === "win32" ? abs.toLowerCase() : abs;
|
|
69
|
+
}
|
|
70
|
+
/**
|
|
71
|
+
* Build the list of things `purge` would delete, without touching the filesystem.
|
|
72
|
+
* Inspects the `.oh/` directory at `projectPath` and looks for a trust-store entry.
|
|
73
|
+
*/
|
|
74
|
+
export function planPurge(projectPath) {
|
|
75
|
+
const project = resolve(projectPath);
|
|
76
|
+
const ohDir = join(project, ".oh");
|
|
77
|
+
const entries = [];
|
|
78
|
+
if (existsSync(ohDir)) {
|
|
79
|
+
// Group sub-paths so the plan is informative without listing every file.
|
|
80
|
+
const knownChildren = [
|
|
81
|
+
{ rel: "config.yaml", label: "Project config (config.yaml)" },
|
|
82
|
+
{ rel: "RULES.md", label: "Project rules (RULES.md)" },
|
|
83
|
+
{ rel: "memory", label: "Memories (.oh/memory/)" },
|
|
84
|
+
{ rel: "skills", label: "Skills (.oh/skills/)" },
|
|
85
|
+
{ rel: "agents", label: "Agent roles (.oh/agents/)" },
|
|
86
|
+
{ rel: "output-styles", label: "Output styles (.oh/output-styles/)" },
|
|
87
|
+
{ rel: "plans", label: "Plans (.oh/plans/)" },
|
|
88
|
+
{ rel: "checkpoints", label: "Checkpoints (.oh/checkpoints/)" },
|
|
89
|
+
];
|
|
90
|
+
for (const child of knownChildren) {
|
|
91
|
+
const path = join(ohDir, child.rel);
|
|
92
|
+
if (existsSync(path)) {
|
|
93
|
+
entries.push({ path, label: child.label, bytes: dirSize(path), exists: true });
|
|
94
|
+
}
|
|
95
|
+
}
|
|
96
|
+
// Anything else under .oh/ that we didn't enumerate (export-*, etc.).
|
|
97
|
+
try {
|
|
98
|
+
const explicit = new Set(knownChildren.map((c) => c.rel));
|
|
99
|
+
for (const name of readdirSync(ohDir)) {
|
|
100
|
+
if (explicit.has(name))
|
|
101
|
+
continue;
|
|
102
|
+
const path = join(ohDir, name);
|
|
103
|
+
entries.push({
|
|
104
|
+
path,
|
|
105
|
+
label: `Other .oh/ entry (${name})`,
|
|
106
|
+
bytes: dirSize(path),
|
|
107
|
+
exists: true,
|
|
108
|
+
});
|
|
109
|
+
}
|
|
110
|
+
}
|
|
111
|
+
catch {
|
|
112
|
+
/* directory unreadable — caught later when we try to remove */
|
|
113
|
+
}
|
|
114
|
+
// Finally, add the .oh dir itself so it's removed after children are reported.
|
|
115
|
+
entries.push({ path: ohDir, label: ".oh/ directory", bytes: 0, exists: true });
|
|
116
|
+
}
|
|
117
|
+
// Workspace-trust entry, if any.
|
|
118
|
+
const trustFile = trustFilePath();
|
|
119
|
+
if (existsSync(trustFile)) {
|
|
120
|
+
try {
|
|
121
|
+
const raw = readFileSync(trustFile, "utf8");
|
|
122
|
+
const parsed = JSON.parse(raw);
|
|
123
|
+
if (Array.isArray(parsed.trusted)) {
|
|
124
|
+
const target = normalizeForTrust(project);
|
|
125
|
+
const isTrusted = parsed.trusted.some((p) => typeof p === "string" && normalizeForTrust(p) === target);
|
|
126
|
+
if (isTrusted) {
|
|
127
|
+
entries.push({
|
|
128
|
+
path: trustFile,
|
|
129
|
+
label: "Workspace-trust entry (~/.oh/trusted-dirs.json)",
|
|
130
|
+
bytes: 0,
|
|
131
|
+
exists: true,
|
|
132
|
+
jsonEdit: true,
|
|
133
|
+
});
|
|
134
|
+
}
|
|
135
|
+
}
|
|
136
|
+
}
|
|
137
|
+
catch {
|
|
138
|
+
/* malformed trust file — nothing to remove */
|
|
139
|
+
}
|
|
140
|
+
}
|
|
141
|
+
const totalBytes = entries.reduce((sum, e) => sum + e.bytes, 0);
|
|
142
|
+
return { projectPath: project, entries, totalBytes };
|
|
143
|
+
}
|
|
144
|
+
/** Render a plan as a multi-line string for display. */
|
|
145
|
+
export function formatPurgePlan(plan) {
|
|
146
|
+
const lines = [];
|
|
147
|
+
lines.push(`Purge plan for ${plan.projectPath}`);
|
|
148
|
+
lines.push("");
|
|
149
|
+
if (plan.entries.length === 0) {
|
|
150
|
+
lines.push(" (nothing to delete — no .oh/ directory and no trust entry)");
|
|
151
|
+
return lines.join("\n");
|
|
152
|
+
}
|
|
153
|
+
for (const entry of plan.entries) {
|
|
154
|
+
const size = entry.bytes > 0 ? ` [${formatBytes(entry.bytes)}]` : "";
|
|
155
|
+
lines.push(` - ${entry.label}${size}`);
|
|
156
|
+
}
|
|
157
|
+
lines.push("");
|
|
158
|
+
lines.push(`Total: ${plan.entries.length} target(s), ${formatBytes(plan.totalBytes)}`);
|
|
159
|
+
lines.push("");
|
|
160
|
+
lines.push("Not touched (global state): ~/.oh/sessions/, credentials, plugins,");
|
|
161
|
+
lines.push(" telemetry, traces, approvals.log, keybindings, global config.");
|
|
162
|
+
return lines.join("\n");
|
|
163
|
+
}
|
|
164
|
+
/** Execute the plan. Returns the count of successfully removed entries and any errors. */
|
|
165
|
+
export function executePurge(plan) {
|
|
166
|
+
let deleted = 0;
|
|
167
|
+
const errors = [];
|
|
168
|
+
for (const entry of plan.entries) {
|
|
169
|
+
if (entry.jsonEdit) {
|
|
170
|
+
// Trust entry — JSON edit, not file delete.
|
|
171
|
+
try {
|
|
172
|
+
const raw = readFileSync(entry.path, "utf8");
|
|
173
|
+
const parsed = JSON.parse(raw);
|
|
174
|
+
if (Array.isArray(parsed.trusted)) {
|
|
175
|
+
const target = normalizeForTrust(plan.projectPath);
|
|
176
|
+
const filtered = parsed.trusted.filter((p) => typeof p === "string" && normalizeForTrust(p) !== target);
|
|
177
|
+
writeFileSync(entry.path, JSON.stringify({ trusted: filtered }, null, 2));
|
|
178
|
+
deleted++;
|
|
179
|
+
}
|
|
180
|
+
}
|
|
181
|
+
catch (err) {
|
|
182
|
+
errors.push(`${entry.label}: ${err instanceof Error ? err.message : String(err)}`);
|
|
183
|
+
}
|
|
184
|
+
continue;
|
|
185
|
+
}
|
|
186
|
+
try {
|
|
187
|
+
if (existsSync(entry.path)) {
|
|
188
|
+
rmSync(entry.path, { recursive: true, force: true });
|
|
189
|
+
deleted++;
|
|
190
|
+
}
|
|
191
|
+
}
|
|
192
|
+
catch (err) {
|
|
193
|
+
errors.push(`${entry.label}: ${err instanceof Error ? err.message : String(err)}`);
|
|
194
|
+
}
|
|
195
|
+
}
|
|
196
|
+
return { deleted, errors };
|
|
197
|
+
}
|
|
198
|
+
//# sourceMappingURL=project-purge.js.map
|
package/dist/main.js
CHANGED
|
@@ -1111,6 +1111,62 @@ program
|
|
|
1111
1111
|
.action(async () => {
|
|
1112
1112
|
await runInitWizard({ exitOnDone: true });
|
|
1113
1113
|
});
|
|
1114
|
+
// ── project — per-project state management ──
|
|
1115
|
+
//
|
|
1116
|
+
// `oh project purge [path]` — delete all openHarness state for a project
|
|
1117
|
+
//
|
|
1118
|
+
// Mirrors Claude Code's `claude project purge`. Removes the entire `.oh/`
|
|
1119
|
+
// directory at the target path plus the workspace-trust entry (if any).
|
|
1120
|
+
// Sessions, credentials, plugins, telemetry, traces, and global config are
|
|
1121
|
+
// NOT touched — they're global-and-cross-project. Default UX prints the
|
|
1122
|
+
// deletion plan and asks for confirmation; --dry-run previews; --yes skips
|
|
1123
|
+
// the prompt. `--all` is deferred (openHarness has no project registry, so
|
|
1124
|
+
// "all projects" isn't well-defined without a session-cwd scan).
|
|
1125
|
+
const projectCmd = program.command("project").description("Manage per-project openHarness state");
|
|
1126
|
+
projectCmd
|
|
1127
|
+
.command("purge [path]")
|
|
1128
|
+
.description("Delete all openHarness state for a project (config, rules, memory, skills, agents, plans, checkpoints, trust entry). Sessions, credentials, plugins, telemetry, and global config are NOT touched. Defaults to the current directory.")
|
|
1129
|
+
.option("--dry-run", "Preview what would be deleted without touching the filesystem")
|
|
1130
|
+
.option("-y, --yes", "Skip the confirmation prompt")
|
|
1131
|
+
.action(async (pathArg, opts) => {
|
|
1132
|
+
const { planPurge, formatPurgePlan, executePurge } = await import("./harness/project-purge.js");
|
|
1133
|
+
const target = pathArg ?? process.cwd();
|
|
1134
|
+
if (!existsSync(target)) {
|
|
1135
|
+
process.stderr.write(`Error: path does not exist: ${target}\n`);
|
|
1136
|
+
process.exit(1);
|
|
1137
|
+
}
|
|
1138
|
+
const plan = planPurge(target);
|
|
1139
|
+
console.log(formatPurgePlan(plan));
|
|
1140
|
+
if (plan.entries.length === 0) {
|
|
1141
|
+
return;
|
|
1142
|
+
}
|
|
1143
|
+
if (opts.dryRun) {
|
|
1144
|
+
console.log("\n(dry-run — no files were deleted)");
|
|
1145
|
+
return;
|
|
1146
|
+
}
|
|
1147
|
+
if (!opts.yes) {
|
|
1148
|
+
const readline = await import("node:readline/promises");
|
|
1149
|
+
const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
|
|
1150
|
+
try {
|
|
1151
|
+
const answer = (await rl.question("\nProceed with deletion? [y/N] ")).trim();
|
|
1152
|
+
if (!/^y(es)?$/i.test(answer)) {
|
|
1153
|
+
console.log("Aborted.");
|
|
1154
|
+
return;
|
|
1155
|
+
}
|
|
1156
|
+
}
|
|
1157
|
+
finally {
|
|
1158
|
+
rl.close();
|
|
1159
|
+
}
|
|
1160
|
+
}
|
|
1161
|
+
const result = executePurge(plan);
|
|
1162
|
+
console.log(`\nDeleted ${result.deleted} of ${plan.entries.length} target(s).`);
|
|
1163
|
+
if (result.errors.length > 0) {
|
|
1164
|
+
console.log(`${result.errors.length} error(s):`);
|
|
1165
|
+
for (const err of result.errors)
|
|
1166
|
+
console.log(` ⚠ ${err}`);
|
|
1167
|
+
process.exit(1);
|
|
1168
|
+
}
|
|
1169
|
+
});
|
|
1114
1170
|
// ── auth (audit B6) — provider-agnostic credential management ──
|
|
1115
1171
|
//
|
|
1116
1172
|
// `oh auth login [provider] --key <value>` — set API key for a provider
|
|
@@ -59,10 +59,9 @@ export const FileReadTool = {
|
|
|
59
59
|
return { output: `Error: ${filePath} is a directory, not a file.`, isError: true };
|
|
60
60
|
}
|
|
61
61
|
const ext = path.extname(filePath).toLowerCase();
|
|
62
|
-
// Image files: return as base64
|
|
62
|
+
// Image files: return as base64 (auto-downscaled if oversized)
|
|
63
63
|
if (IMAGE_EXTENSIONS.has(ext)) {
|
|
64
|
-
const
|
|
65
|
-
const base64 = buffer.toString("base64");
|
|
64
|
+
const raw = await fs.readFile(filePath);
|
|
66
65
|
const mimeTypes = {
|
|
67
66
|
".png": "image/png",
|
|
68
67
|
".jpg": "image/jpeg",
|
|
@@ -72,7 +71,11 @@ export const FileReadTool = {
|
|
|
72
71
|
".bmp": "image/bmp",
|
|
73
72
|
".svg": "image/svg+xml",
|
|
74
73
|
};
|
|
75
|
-
|
|
74
|
+
const mediaType = mimeTypes[ext] ?? "image/png";
|
|
75
|
+
const { downscaleIfLarge } = await import("../../utils/image-downscale.js");
|
|
76
|
+
const { buffer } = await downscaleIfLarge(raw, mediaType);
|
|
77
|
+
const base64 = buffer.toString("base64");
|
|
78
|
+
return { output: `__IMAGE__:${mediaType}:${base64}`, isError: false };
|
|
76
79
|
}
|
|
77
80
|
// PDF files: extract text per page (basic extraction)
|
|
78
81
|
if (ext === ".pdf") {
|
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
import * as fs from "node:fs/promises";
|
|
2
2
|
import * as path from "node:path";
|
|
3
3
|
import { z } from "zod";
|
|
4
|
+
import { downscaleIfLarge } from "../../utils/image-downscale.js";
|
|
4
5
|
const SUPPORTED_TYPES = {
|
|
5
6
|
".png": "image/png",
|
|
6
7
|
".jpg": "image/jpeg",
|
|
@@ -37,7 +38,11 @@ export const ImageReadTool = {
|
|
|
37
38
|
};
|
|
38
39
|
}
|
|
39
40
|
try {
|
|
40
|
-
const
|
|
41
|
+
const raw = await fs.readFile(filePath);
|
|
42
|
+
// Auto-downscale to ≤2000px on the longest dimension. PDFs and
|
|
43
|
+
// missing-sharp installs pass through unchanged. Aspect + format
|
|
44
|
+
// preserved by sharp.
|
|
45
|
+
const { buffer } = await downscaleIfLarge(raw, mediaType);
|
|
41
46
|
const base64 = buffer.toString("base64");
|
|
42
47
|
return {
|
|
43
48
|
output: `${IMAGE_PREFIX}:${mediaType}:${base64}`,
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Image auto-downscale — bound the longest dimension to a fixed maximum
|
|
3
|
+
* before encoding the image as base64 for the model.
|
|
4
|
+
*
|
|
5
|
+
* Why: most providers reject or downsample images above ~1568-2048px on
|
|
6
|
+
* the longest side. Shipping a 4000px screenshot wastes input tokens, can
|
|
7
|
+
* exceed the request size limit, and historically broke the session
|
|
8
|
+
* outright when an oversized image landed in the conversation history.
|
|
9
|
+
*
|
|
10
|
+
* The function is a no-op for images already within bounds, for formats
|
|
11
|
+
* sharp doesn't process (PDF, SVG), and when sharp itself isn't installed
|
|
12
|
+
* (it's an `optionalDependency` so unsupported platforms still install).
|
|
13
|
+
* Any sharp error returns the original buffer unchanged — we never break a
|
|
14
|
+
* tool call over a downscale failure.
|
|
15
|
+
*/
|
|
16
|
+
/** @internal Test-only reset of the lazy sharp cache. */
|
|
17
|
+
export declare function _resetSharpCacheForTest(): void;
|
|
18
|
+
export type DownscaleResult = {
|
|
19
|
+
/** The (possibly resized) buffer to encode. */
|
|
20
|
+
buffer: Buffer;
|
|
21
|
+
/** True if a resize actually happened; false for passthrough. */
|
|
22
|
+
downscaled: boolean;
|
|
23
|
+
/** Set when sharp wasn't available — caller may want to surface a one-time hint. */
|
|
24
|
+
reason?: "sharp-unavailable" | "unsupported-format" | "within-bounds" | "sharp-error";
|
|
25
|
+
};
|
|
26
|
+
/**
|
|
27
|
+
* Downscale `buffer` so its longest dimension is ≤ `maxDimension` (default 2000).
|
|
28
|
+
* Aspect ratio preserved. Format preserved (PNG stays PNG, JPEG stays JPEG, etc.).
|
|
29
|
+
*
|
|
30
|
+
* Pure pass-through for: PDF, SVG, BMP (sharp doesn't handle reliably),
|
|
31
|
+
* already-small images, missing sharp, and any sharp error.
|
|
32
|
+
*/
|
|
33
|
+
export declare function downscaleIfLarge(buffer: Buffer, mediaType: string, maxDimension?: number): Promise<DownscaleResult>;
|
|
34
|
+
//# sourceMappingURL=image-downscale.d.ts.map
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Image auto-downscale — bound the longest dimension to a fixed maximum
|
|
3
|
+
* before encoding the image as base64 for the model.
|
|
4
|
+
*
|
|
5
|
+
* Why: most providers reject or downsample images above ~1568-2048px on
|
|
6
|
+
* the longest side. Shipping a 4000px screenshot wastes input tokens, can
|
|
7
|
+
* exceed the request size limit, and historically broke the session
|
|
8
|
+
* outright when an oversized image landed in the conversation history.
|
|
9
|
+
*
|
|
10
|
+
* The function is a no-op for images already within bounds, for formats
|
|
11
|
+
* sharp doesn't process (PDF, SVG), and when sharp itself isn't installed
|
|
12
|
+
* (it's an `optionalDependency` so unsupported platforms still install).
|
|
13
|
+
* Any sharp error returns the original buffer unchanged — we never break a
|
|
14
|
+
* tool call over a downscale failure.
|
|
15
|
+
*/
|
|
16
|
+
const DEFAULT_MAX_DIMENSION = 2000;
|
|
17
|
+
const SHARP_SUPPORTED_TYPES = new Set([
|
|
18
|
+
"image/png",
|
|
19
|
+
"image/jpeg",
|
|
20
|
+
"image/jpg",
|
|
21
|
+
"image/gif",
|
|
22
|
+
"image/webp",
|
|
23
|
+
"image/avif",
|
|
24
|
+
"image/tiff",
|
|
25
|
+
]);
|
|
26
|
+
let _sharpModule;
|
|
27
|
+
/** Lazy-load sharp; cache the result so we don't pay the import cost per image. */
|
|
28
|
+
async function getSharp() {
|
|
29
|
+
if (_sharpModule !== undefined)
|
|
30
|
+
return _sharpModule;
|
|
31
|
+
try {
|
|
32
|
+
const mod = (await import("sharp"));
|
|
33
|
+
_sharpModule = (mod.default ?? mod);
|
|
34
|
+
return _sharpModule;
|
|
35
|
+
}
|
|
36
|
+
catch {
|
|
37
|
+
_sharpModule = null;
|
|
38
|
+
return null;
|
|
39
|
+
}
|
|
40
|
+
}
|
|
41
|
+
/** @internal Test-only reset of the lazy sharp cache. */
|
|
42
|
+
export function _resetSharpCacheForTest() {
|
|
43
|
+
_sharpModule = undefined;
|
|
44
|
+
}
|
|
45
|
+
/**
|
|
46
|
+
* Downscale `buffer` so its longest dimension is ≤ `maxDimension` (default 2000).
|
|
47
|
+
* Aspect ratio preserved. Format preserved (PNG stays PNG, JPEG stays JPEG, etc.).
|
|
48
|
+
*
|
|
49
|
+
* Pure pass-through for: PDF, SVG, BMP (sharp doesn't handle reliably),
|
|
50
|
+
* already-small images, missing sharp, and any sharp error.
|
|
51
|
+
*/
|
|
52
|
+
export async function downscaleIfLarge(buffer, mediaType, maxDimension = DEFAULT_MAX_DIMENSION) {
|
|
53
|
+
if (!SHARP_SUPPORTED_TYPES.has(mediaType)) {
|
|
54
|
+
return { buffer, downscaled: false, reason: "unsupported-format" };
|
|
55
|
+
}
|
|
56
|
+
const sharp = await getSharp();
|
|
57
|
+
if (!sharp) {
|
|
58
|
+
return { buffer, downscaled: false, reason: "sharp-unavailable" };
|
|
59
|
+
}
|
|
60
|
+
try {
|
|
61
|
+
const pipeline = sharp(buffer);
|
|
62
|
+
const meta = await pipeline.metadata();
|
|
63
|
+
const w = meta.width ?? 0;
|
|
64
|
+
const h = meta.height ?? 0;
|
|
65
|
+
if (w === 0 || h === 0) {
|
|
66
|
+
// Animated GIFs can report 0 here; pass through rather than mangle.
|
|
67
|
+
return { buffer, downscaled: false, reason: "unsupported-format" };
|
|
68
|
+
}
|
|
69
|
+
if (Math.max(w, h) <= maxDimension) {
|
|
70
|
+
return { buffer, downscaled: false, reason: "within-bounds" };
|
|
71
|
+
}
|
|
72
|
+
// `fit: "inside"` + `withoutEnlargement: true` resizes proportionally
|
|
73
|
+
// so the longest side equals maxDimension, with no upscaling.
|
|
74
|
+
const out = await pipeline
|
|
75
|
+
.resize({
|
|
76
|
+
width: maxDimension,
|
|
77
|
+
height: maxDimension,
|
|
78
|
+
fit: "inside",
|
|
79
|
+
withoutEnlargement: true,
|
|
80
|
+
})
|
|
81
|
+
.toBuffer();
|
|
82
|
+
return { buffer: out, downscaled: true };
|
|
83
|
+
}
|
|
84
|
+
catch {
|
|
85
|
+
// Corrupt image, unsupported subformat, etc. — never fail the tool over this.
|
|
86
|
+
return { buffer, downscaled: false, reason: "sharp-error" };
|
|
87
|
+
}
|
|
88
|
+
}
|
|
89
|
+
//# sourceMappingURL=image-downscale.js.map
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@zhijiewang/openharness",
|
|
3
|
-
"version": "2.
|
|
3
|
+
"version": "2.31.0",
|
|
4
4
|
"description": "Open-source terminal coding agent. Works with any LLM.",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"bin": {
|
|
@@ -63,7 +63,6 @@
|
|
|
63
63
|
"@types/react": "^18.3.0",
|
|
64
64
|
"c8": "^11.0.0",
|
|
65
65
|
"husky": "^9.1.7",
|
|
66
|
-
"sharp": "^0.34.5",
|
|
67
66
|
"tsx": "^4.19.0",
|
|
68
67
|
"typescript": "^5.8.0"
|
|
69
68
|
},
|
|
@@ -92,6 +91,7 @@
|
|
|
92
91
|
},
|
|
93
92
|
"homepage": "https://github.com/zhijiewong/openharness#readme",
|
|
94
93
|
"optionalDependencies": {
|
|
95
|
-
"@napi-rs/keyring": "^1.2.0"
|
|
94
|
+
"@napi-rs/keyring": "^1.2.0",
|
|
95
|
+
"sharp": "^0.34.5"
|
|
96
96
|
}
|
|
97
97
|
}
|
|
package/dist/harness/sandbox.d.ts
DELETED
|
@@ -1,34 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Sandbox — filesystem and network restrictions for tool execution.
|
|
3
|
-
*
|
|
4
|
-
* Limits what tools can access:
|
|
5
|
-
* - File tools: only write to allowed paths
|
|
6
|
-
* - Web tools: only access allowed domains
|
|
7
|
-
* - Bash: restricted commands (no curl/wget by default)
|
|
8
|
-
*
|
|
9
|
-
* Reduces permission prompts while maintaining security.
|
|
10
|
-
*/
|
|
11
|
-
export type SandboxConfig = {
|
|
12
|
-
enabled: boolean;
|
|
13
|
-
/** Paths tools can write to (glob-style, relative to cwd) */
|
|
14
|
-
allowedPaths: string[];
|
|
15
|
-
/** Domains WebFetch/WebSearch can access */
|
|
16
|
-
allowedDomains: string[];
|
|
17
|
-
/** Block all network access */
|
|
18
|
-
blockNetwork: boolean;
|
|
19
|
-
/** Commands blocked in Bash (default: curl, wget) */
|
|
20
|
-
blockedCommands: string[];
|
|
21
|
-
};
|
|
22
|
-
/** Get the current sandbox config */
|
|
23
|
-
export declare function getSandboxConfig(): SandboxConfig;
|
|
24
|
-
/** Reset cached config */
|
|
25
|
-
export declare function invalidateSandboxCache(): void;
|
|
26
|
-
/** Check if a file path is allowed for writing */
|
|
27
|
-
export declare function isPathAllowed(filePath: string): boolean;
|
|
28
|
-
/** Check if a domain is allowed for network access */
|
|
29
|
-
export declare function isDomainAllowed(url: string): boolean;
|
|
30
|
-
/** Check if a bash command is allowed */
|
|
31
|
-
export declare function isCommandAllowed(command: string): boolean;
|
|
32
|
-
/** Get a human-readable sandbox status */
|
|
33
|
-
export declare function sandboxStatus(): string;
|
|
34
|
-
//# sourceMappingURL=sandbox.d.ts.map
|
package/dist/harness/sandbox.js
DELETED
|
@@ -1,104 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Sandbox — filesystem and network restrictions for tool execution.
|
|
3
|
-
*
|
|
4
|
-
* Limits what tools can access:
|
|
5
|
-
* - File tools: only write to allowed paths
|
|
6
|
-
* - Web tools: only access allowed domains
|
|
7
|
-
* - Bash: restricted commands (no curl/wget by default)
|
|
8
|
-
*
|
|
9
|
-
* Reduces permission prompts while maintaining security.
|
|
10
|
-
*/
|
|
11
|
-
import { relative, resolve } from "node:path";
|
|
12
|
-
import { readOhConfig } from "./config.js";
|
|
13
|
-
const DEFAULT_SANDBOX = {
|
|
14
|
-
enabled: false,
|
|
15
|
-
allowedPaths: ["."], // current directory
|
|
16
|
-
allowedDomains: [], // empty = all allowed
|
|
17
|
-
blockNetwork: false,
|
|
18
|
-
blockedCommands: ["curl", "wget"],
|
|
19
|
-
};
|
|
20
|
-
// ── Sandbox Manager ──
|
|
21
|
-
let _config = null;
|
|
22
|
-
/** Get the current sandbox config */
|
|
23
|
-
export function getSandboxConfig() {
|
|
24
|
-
if (_config)
|
|
25
|
-
return _config;
|
|
26
|
-
const ohConfig = readOhConfig();
|
|
27
|
-
if (ohConfig?.sandbox) {
|
|
28
|
-
_config = {
|
|
29
|
-
...DEFAULT_SANDBOX,
|
|
30
|
-
...ohConfig.sandbox,
|
|
31
|
-
};
|
|
32
|
-
}
|
|
33
|
-
else {
|
|
34
|
-
_config = DEFAULT_SANDBOX;
|
|
35
|
-
}
|
|
36
|
-
return _config;
|
|
37
|
-
}
|
|
38
|
-
/** Reset cached config */
|
|
39
|
-
export function invalidateSandboxCache() {
|
|
40
|
-
_config = null;
|
|
41
|
-
}
|
|
42
|
-
/** Check if a file path is allowed for writing */
|
|
43
|
-
export function isPathAllowed(filePath) {
|
|
44
|
-
const config = getSandboxConfig();
|
|
45
|
-
if (!config.enabled)
|
|
46
|
-
return true;
|
|
47
|
-
const resolved = resolve(filePath);
|
|
48
|
-
const cwd = process.cwd();
|
|
49
|
-
for (const allowed of config.allowedPaths) {
|
|
50
|
-
const allowedResolved = resolve(cwd, allowed);
|
|
51
|
-
// Check if the file is within the allowed directory
|
|
52
|
-
const rel = relative(allowedResolved, resolved);
|
|
53
|
-
if (!rel.startsWith("..") && !rel.startsWith("/"))
|
|
54
|
-
return true;
|
|
55
|
-
}
|
|
56
|
-
return false;
|
|
57
|
-
}
|
|
58
|
-
/** Check if a domain is allowed for network access */
|
|
59
|
-
export function isDomainAllowed(url) {
|
|
60
|
-
const config = getSandboxConfig();
|
|
61
|
-
if (!config.enabled)
|
|
62
|
-
return true;
|
|
63
|
-
if (config.blockNetwork)
|
|
64
|
-
return false;
|
|
65
|
-
if (config.allowedDomains.length === 0)
|
|
66
|
-
return true;
|
|
67
|
-
try {
|
|
68
|
-
const hostname = new URL(url).hostname.toLowerCase();
|
|
69
|
-
return config.allowedDomains.some((d) => hostname === d.toLowerCase() || hostname.endsWith(`.${d.toLowerCase()}`));
|
|
70
|
-
}
|
|
71
|
-
catch {
|
|
72
|
-
return false;
|
|
73
|
-
}
|
|
74
|
-
}
|
|
75
|
-
/** Check if a bash command is allowed */
|
|
76
|
-
export function isCommandAllowed(command) {
|
|
77
|
-
const config = getSandboxConfig();
|
|
78
|
-
if (!config.enabled)
|
|
79
|
-
return true;
|
|
80
|
-
const firstWord = command.trim().split(/\s+/)[0]?.toLowerCase() ?? "";
|
|
81
|
-
return !config.blockedCommands.includes(firstWord);
|
|
82
|
-
}
|
|
83
|
-
/** Get a human-readable sandbox status */
|
|
84
|
-
export function sandboxStatus() {
|
|
85
|
-
const config = getSandboxConfig();
|
|
86
|
-
if (!config.enabled)
|
|
87
|
-
return "Sandbox: disabled";
|
|
88
|
-
const lines = ["Sandbox: enabled"];
|
|
89
|
-
lines.push(` Allowed paths: ${config.allowedPaths.join(", ") || "none"}`);
|
|
90
|
-
if (config.blockNetwork) {
|
|
91
|
-
lines.push(" Network: blocked");
|
|
92
|
-
}
|
|
93
|
-
else if (config.allowedDomains.length > 0) {
|
|
94
|
-
lines.push(` Allowed domains: ${config.allowedDomains.join(", ")}`);
|
|
95
|
-
}
|
|
96
|
-
else {
|
|
97
|
-
lines.push(" Network: unrestricted");
|
|
98
|
-
}
|
|
99
|
-
if (config.blockedCommands.length > 0) {
|
|
100
|
-
lines.push(` Blocked commands: ${config.blockedCommands.join(", ")}`);
|
|
101
|
-
}
|
|
102
|
-
return lines.join("\n");
|
|
103
|
-
}
|
|
104
|
-
//# sourceMappingURL=sandbox.js.map
|