@zhijiewang/openharness 2.14.0 → 2.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3) hide show
  1. package/README.md +2 -0
  2. package/dist/main.js +146 -0
  3. package/package.json +1 -1
package/README.md CHANGED
@@ -57,6 +57,8 @@ oh
57
57
 
58
58
  That's it. OpenHarness auto-detects Ollama and starts chatting. No API key needed.
59
59
 
60
+ **Python SDK:** there's also an official Python SDK for driving `oh` from Python programs (notebooks, batch scripts, ML pipelines). Install with `pip install openharness` after the npm install, then `from openharness import query`. See [`python/README.md`](python/README.md).
61
+
60
62
  ```bash
61
63
  oh init # interactive setup wizard (provider + cybergotchi)
62
64
  oh # auto-detect local model
package/dist/main.js CHANGED
@@ -230,7 +230,21 @@ program
230
230
  console.log(JSON.stringify({ type: "error", message: event.message }));
231
231
  }
232
232
  }
233
+ else if (event.type === "cost_update") {
234
+ if (outputFormat === "stream-json") {
235
+ console.log(JSON.stringify({
236
+ type: "cost_update",
237
+ inputTokens: event.inputTokens,
238
+ outputTokens: event.outputTokens,
239
+ cost: event.cost,
240
+ model: event.model,
241
+ }));
242
+ }
243
+ }
233
244
  else if (event.type === "turn_complete") {
245
+ if (outputFormat === "stream-json") {
246
+ console.log(JSON.stringify({ type: "turn_complete", reason: event.reason }));
247
+ }
234
248
  if (event.reason !== "completed") {
235
249
  process.exitCode = 1;
236
250
  }
@@ -243,6 +257,138 @@ program
243
257
  process.stdout.write("\n");
244
258
  }
245
259
  });
260
// ── `oh session`: long-lived stateful session for the Python SDK ──
// Protocol: emit `{type:"ready"}`, then read one JSON request per stdin line
// ({id, prompt} or {command:"exit"}) and stream NDJSON events for that id on
// stdout. Conversation history persists across prompts for the process lifetime.
program
    .command("session")
    .description("Long-lived session: read JSON prompts from stdin, stream NDJSON events on stdout (for the Python SDK)")
    .option("-m, --model <model>", "Model to use")
    // No .default() here: a commander default always populates
    // opts.permissionMode, which would make the savedConfig fallback in the
    // action handler unreachable. Precedence is flag > saved config > "trust".
    .addOption(new Option("--permission-mode <mode>", "Permission mode (default: trust)")
    .choices(["ask", "trust", "deny", "acceptEdits", "plan", "auto", "bypassPermissions"]))
    .option("--allowed-tools <tools>", "Comma-separated allowed tool names")
    .option("--disallowed-tools <tools>", "Comma-separated disallowed tool names")
    .option("--max-turns <n>", "Maximum turns per prompt", "20")
    .option("--system-prompt <prompt>", "Override the system prompt")
    .action(async (opts) => {
    const savedConfig = readOhConfig();
    // CLI flag wins, then the saved config, then the hard-coded default.
    const permissionMode = (opts.permissionMode ??
        savedConfig?.permissionMode ??
        "trust");
    const { createProvider } = await import("./providers/index.js");
    const effectiveModel = opts.model ?? savedConfig?.model;
    // Only pass an overrides object when the saved config actually has
    // credentials/endpoint overrides; otherwise let createProvider use its own.
    const overrides = {};
    if (savedConfig?.apiKey)
        overrides.apiKey = savedConfig.apiKey;
    if (savedConfig?.baseUrl)
        overrides.baseUrl = savedConfig.baseUrl;
    const { provider, model } = await createProvider(effectiveModel, Object.keys(overrides).length ? overrides : undefined);
    const { query } = await import("./query.js");
    const { createAssistantMessage, createToolResultMessage, createUserMessage } = await import("./types/message.js");
    // Tool filtering: --allowed-tools is an allow-list, --disallowed-tools a
    // deny-list; when both are given the deny-list is applied second.
    let tools = getAllTools();
    if (opts.allowedTools) {
        const allowed = new Set(opts.allowedTools.split(",").map((s) => s.trim()));
        tools = tools.filter((t) => allowed.has(t.name));
    }
    if (opts.disallowedTools) {
        const disallowed = new Set(opts.disallowedTools.split(",").map((s) => s.trim()));
        tools = tools.filter((t) => !disallowed.has(t.name));
    }
    const systemPrompt = opts.systemPrompt ?? buildSystemPrompt(model);
    const config = {
        provider,
        tools,
        systemPrompt,
        permissionMode,
        maxTurns: parseInt(opts.maxTurns, 10),
        model,
    };
    // Conversation history, shared across all prompts for this process.
    const conversation = [];
    // Announce readiness so the client can send the first prompt.
    console.log(JSON.stringify({ type: "ready" }));
    const readline = await import("node:readline");
    const rl = readline.createInterface({ input: process.stdin, crlfDelay: Infinity });
    for await (const rawLine of rl) {
        const line = rawLine.trim();
        if (!line)
            continue;
        let request;
        try {
            request = JSON.parse(line);
        }
        catch {
            console.log(JSON.stringify({ id: "", type: "error", message: "invalid JSON on stdin" }));
            continue;
        }
        if (request.command === "exit")
            break;
        const id = request.id ?? "";
        const prompt = request.prompt;
        if (!id || !prompt) {
            console.log(JSON.stringify({ id, type: "error", message: "missing 'id' or 'prompt' field" }));
            continue;
        }
        // Accumulate this turn's assistant output so we can push a full message at the end.
        let assistantText = "";
        const turnToolCalls = [];
        // Map keyed by provider-supplied call IDs. A Map (not a plain object)
        // avoids prototype-pollution/shadowing if an ID is e.g. "__proto__".
        const callIdToName = new Map();
        const toolResults = [];
        for await (const event of query(prompt, config, conversation)) {
            if (event.type === "text_delta") {
                assistantText += event.content;
                console.log(JSON.stringify({ id, type: "text", content: event.content }));
            }
            else if (event.type === "tool_call_start") {
                callIdToName.set(event.callId, event.toolName);
                console.log(JSON.stringify({ id, type: "tool_start", tool: event.toolName }));
            }
            else if (event.type === "tool_call_complete") {
                turnToolCalls.push({
                    id: event.callId,
                    // Fall back to the raw call ID if we never saw a start event.
                    toolName: callIdToName.get(event.callId) ?? event.callId,
                    arguments: event.arguments,
                });
            }
            else if (event.type === "tool_call_end") {
                toolResults.push({ callId: event.callId, output: event.output, isError: event.isError });
                console.log(JSON.stringify({
                    id,
                    type: "tool_end",
                    tool: callIdToName.get(event.callId),
                    output: event.output,
                    error: event.isError,
                }));
            }
            else if (event.type === "error") {
                console.log(JSON.stringify({ id, type: "error", message: event.message }));
            }
            else if (event.type === "cost_update") {
                console.log(JSON.stringify({
                    id,
                    type: "cost_update",
                    inputTokens: event.inputTokens,
                    outputTokens: event.outputTokens,
                    cost: event.cost,
                    model: event.model,
                }));
            }
            else if (event.type === "turn_complete") {
                console.log(JSON.stringify({ id, type: "turn_complete", reason: event.reason }));
            }
        }
        // Rebuild this turn's contribution to the conversation.
        // The pattern mirrors query()'s internal accumulation at
        // src/query/index.ts:119 (user msg pushed before turn) and 344 (assistant
        // msg with tool calls pushed after each turn) — see the spec for detail.
        conversation.push(createUserMessage(prompt));
        if (assistantText || turnToolCalls.length > 0) {
            conversation.push(createAssistantMessage(assistantText, turnToolCalls.length > 0 ? turnToolCalls : undefined));
        }
        for (const tr of toolResults) {
            conversation.push(createToolResultMessage({ callId: tr.callId, output: tr.output, isError: tr.isError }));
        }
    }
});
246
392
  // ── Default command: just run `openharness` to start chatting ──
247
393
  program
248
394
  .command("chat", { isDefault: true })
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@zhijiewang/openharness",
3
- "version": "2.14.0",
3
+ "version": "2.15.0",
4
4
  "description": "Open-source terminal coding agent. Works with any LLM.",
5
5
  "type": "module",
6
6
  "bin": {