@researchcomputer/pista 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,110 @@
1
+ # Pista
2
+
3
+ A full-featured terminal coding agent powered by [`@researchcomputer/agents-sdk`](https://www.npmjs.com/package/@researchcomputer/agents-sdk). Pista provides a rich Ink-based TUI with:
4
+
5
+ - Live assistant streaming
6
+ - Slash commands
7
+ - Interactive permission prompts and agent questions
8
+ - Custom OpenAI-compatible endpoints
9
+ - Persistent local sessions and memory
10
+ - Transcript scrollback with `PgUp` / `PgDn`
11
+
12
+ Sessions and memory are stored under `~/.pista/`.
13
+ Preferences are layered: global defaults live under `~/.pista/preferences.json`, and per-project overrides live under `<project>/.pista/preferences.json`.
14
+ The same preferences files can also carry resolved `skills` entries, which are forwarded to the SDK on agent creation.
15
+
16
+ ## Install
17
+
18
+ ```bash
19
+ npm install -g @researchcomputer/pista
20
+ ```
21
+
22
+ ## Usage
23
+
24
+ ```bash
25
+ pista
26
+ ```
27
+
28
+ Or pass options:
29
+
30
+ ```bash
31
+ pista --model gpt-4o-mini --cwd .
32
+ ```
33
+
34
+ Rename the agent at startup:
35
+
36
+ ```bash
37
+ pista --name Mochi
38
+ ```
39
+
40
+ Use a custom OpenAI-compatible endpoint:
41
+
42
+ ```bash
43
+ export MY_LLM_KEY=...
44
+ pista \
45
+ --endpoint https://my-endpoint.example.com/v1 \
46
+ --model my-model \
47
+ --api-style chat
48
+ ```
49
+
50
+ ## Environment Variables
51
+
52
+ Set your provider credentials as usual:
53
+
54
+ ```bash
55
+ export OPENAI_API_KEY=...
56
+ ```
57
+
58
+ Optional:
59
+
60
+ - `PISTA_MODEL`
61
+ - `PISTA_NAME`
62
+ - `PISTA_ENDPOINT`
63
+ - `PISTA_API_STYLE`
64
+ - `PISTA_CWD`
65
+ - `PISTA_SESSION_ID`
66
+ - `PISTA_PERMISSION_MODE`
67
+ - `PISTA_THINKING`
68
+
69
+ ## In-App Commands
70
+
71
+ - `/help` shows commands
72
+ - `/name` changes the displayed agent name
73
+ - `/model` picks a model or edits the custom model id
74
+ - `/endpoint` edits the OpenAI-compatible base URL
75
+ - `/api` switches between chat and responses API styles
76
+ - `/jump latest|error|tool` moves the transcript to useful checkpoints
77
+ - `/permissions` changes how tool calls are approved
78
+ - `/thinking` changes reasoning intensity
79
+ - `/session new` starts a fresh session
80
+ - `/abort` aborts the current run
81
+
82
+ Model, endpoint, and API style selections persist across restarts, with project-level preferences overriding global defaults.
83
+
84
+ ## Skills
85
+
86
+ Resolved skills can be stored in either preferences file:
87
+
88
+ ```json
89
+ {
90
+ "skills": [
91
+ {
92
+ "id": "typescript",
93
+ "description": "TypeScript house style",
94
+ "promptSections": [
95
+ "Prefer explicit return types on exported functions.",
96
+ "Avoid `any` unless there is no safer boundary type."
97
+ ]
98
+ }
99
+ ]
100
+ }
101
+ ```
102
+
103
+ Skills are layered the same way as other preferences and passed through to the SDK on agent creation.
104
+
105
+ ## Keyboard Shortcuts
106
+
107
+ - `Enter` sends the current draft
108
+ - `Shift+Enter` or `Ctrl+J` inserts a newline in the composer
109
+ - `Up` / `Down` cycle through recent drafts
110
+ - `PgUp` / `PgDn` scroll the transcript
@@ -0,0 +1,119 @@
1
+ import { stringifyPreview } from './utils.js';
2
/**
 * Dispatches a single streaming agent event to the TUI control surface.
 *
 * @param {object} event - Event emitted by the agents SDK during a run.
 * @param {object} controls - Callbacks mutating TUI state: status line,
 *   running flag, pending-tool list, transcript log, and the live
 *   assistant streaming buffer.
 */
export function handleAgentEvent(event, controls) {
  if (event.type === 'agent_start') {
    controls.setRunning(true);
    controls.setStatus('Starting run');
    return;
  }
  if (event.type === 'agent_end') {
    controls.setRunning(false);
    controls.setPendingTools([]);
    controls.setStatus('Idle');
    return;
  }
  if (event.type === 'turn_start') {
    controls.setStatus('Thinking');
    return;
  }
  if (event.type === 'turn_end') {
    controls.setStatus('Synthesizing response');
    return;
  }
  if (event.type === 'message_update') {
    const streamed = event.assistantMessageEvent;
    if (streamed.type === 'text_delta') {
      controls.appendLiveAssistantDelta(streamed.delta);
    } else if (streamed.type === 'thinking_delta') {
      controls.setStatus('Thinking');
    }
    return;
  }
  if (event.type === 'message_end') {
    if (event.message.role !== 'assistant') {
      return;
    }
    // Prefer the summarized final message; fall back to whatever streamed in.
    const summary = summarizeAssistantMessage(event.message);
    const body = (summary || controls.getLiveAssistantText()).trim();
    if (body) {
      controls.appendLog('assistant', controls.getAgentName(), body);
    }
    controls.clearLiveAssistant();
    return;
  }
  if (event.type === 'tool_execution_start') {
    controls.setPendingTools((current) => [...current, event.toolName]);
    controls.setStatus(`Running ${event.toolName}`);
    controls.appendLog('tool', event.toolName, condensedArgPreview(event.toolName, event.args));
    return;
  }
  if (event.type === 'tool_execution_update') {
    controls.setStatus(`Running ${event.toolName}`);
    return;
  }
  if (event.type === 'tool_execution_end') {
    // Remove only the first pending entry for this tool so concurrent
    // invocations of the same tool keep their remaining entries.
    controls.setPendingTools((current) => {
      const index = current.indexOf(event.toolName);
      if (index === -1) {
        return [...current];
      }
      const next = [...current];
      next.splice(index, 1);
      return next;
    });
    controls.setStatus('Thinking');
    controls.appendLog(
      event.isError ? 'error' : 'tool',
      `${event.toolName} ${event.isError ? 'failed' : 'completed'}`,
      summarizeToolResult(event.result),
    );
  }
  // Unknown event types are ignored on purpose.
}
63
/**
 * Produces a transcript-ready one-line summary of an assistant message.
 * Priority: trimmed text content, then a stop-reason/error banner, then a
 * list of requested tool calls; empty string when none apply.
 *
 * @param {object} message - SDK message with `role` and `content` parts.
 * @returns {string} Summary text, or '' for non-assistant/empty messages.
 */
function summarizeAssistantMessage(message) {
  if (message.role !== 'assistant') {
    return '';
  }
  let combined = '';
  for (const part of message.content) {
    if (part.type === 'text') {
      combined += part.text;
    }
  }
  const trimmed = combined.trim();
  if (trimmed) {
    return trimmed;
  }
  if (message.errorMessage) {
    return `[${message.stopReason}] ${message.errorMessage}`;
  }
  const requested = [];
  for (const part of message.content) {
    if (part.type === 'toolCall' && part.name) {
      requested.push(part.name);
    }
  }
  return requested.length > 0 ? `[requested tools: ${requested.join(', ')}]` : '';
}
78
/**
 * Condenses a tool result into a short transcript preview.
 * Prefers joined text content (truncated to 240 chars), then a preview of
 * `details`, and finally the '[no tool output]' placeholder.
 *
 * @param {unknown} result - Raw tool result; may be anything.
 * @returns {string} Preview text for the transcript.
 */
function summarizeToolResult(result) {
  // Anything other than a real object has no renderable output.
  if (typeof result !== 'object' || result === null) {
    return '[no tool output]';
  }
  const { content } = result;
  if (Array.isArray(content)) {
    const pieces = [];
    for (const item of content) {
      if (item && typeof item === 'object' && item.type === 'text') {
        pieces.push(item.text);
      }
    }
    const joined = pieces.join('\n').trim();
    if (joined) {
      return truncate(joined, 240);
    }
  }
  return 'details' in result ? stringifyPreview(result.details, 240) : '[no tool output]';
}
98
/**
 * Shortens a string to at most `maxLength` characters, appending '...'
 * when content is dropped. For budgets of 3 or fewer there is no room for
 * the ellipsis, so the string is hard-cut instead.
 *
 * @param {string} value - Text to shorten.
 * @param {number} maxLength - Maximum length of the returned string.
 * @returns {string} The original or shortened text.
 */
function truncate(value, maxLength) {
  if (value.length <= maxLength) {
    return value;
  }
  return maxLength <= 3
    ? value.slice(0, maxLength)
    : `${value.slice(0, maxLength - 3)}...`;
}
105
/**
 * Picks the single most meaningful argument of a tool call for display:
 * file path, shell command (truncated), search pattern, or URL — falling
 * back to a generic stringified preview.
 *
 * @param {string} _toolName - Tool name (currently unused).
 * @param {unknown} args - Tool-call arguments.
 * @returns {string} Short preview text.
 */
function condensedArgPreview(_toolName, args) {
  if (!args || typeof args !== 'object') {
    return stringifyPreview(args, 120);
  }
  const { file_path: filePath, command, pattern, url } = args;
  if (typeof filePath === 'string' && filePath) {
    return filePath;
  }
  if (typeof command === 'string' && command) {
    return truncate(String(command), 120);
  }
  if (typeof pattern === 'string' && pattern) {
    return `pattern: ${pattern}`;
  }
  if (typeof url === 'string' && url) {
    return url;
  }
  return stringifyPreview(args, 120);
}
@@ -0,0 +1,67 @@
1
+ import test from 'node:test';
2
+ import assert from 'node:assert/strict';
3
+ import { handleAgentEvent } from './agent-events.js';
4
/**
 * Builds a stub controls object for handleAgentEvent tests.
 * Every callback is a no-op (or a constant getter) unless replaced via
 * `overrides`, which win key-by-key over the defaults.
 *
 * @param {object} [overrides] - Partial controls to merge over defaults.
 * @returns {object} A complete controls stub.
 */
function createControls(overrides = {}) {
  const defaults = {
    getAgentName: () => 'Pista',
    getLiveAssistantText: () => '',
    appendLog: () => undefined,
    appendLiveAssistantDelta: () => undefined,
    clearLiveAssistant: () => undefined,
    setPendingTools: () => undefined,
    setRunning: () => undefined,
    setStatus: () => undefined,
  };
  return Object.assign(defaults, overrides);
}
17
test('text deltas are forwarded through the buffered live assistant path', () => {
  // Capture everything the handler streams into the live buffer.
  const received = [];
  const controls = createControls({
    appendLiveAssistantDelta: (delta) => {
      received.push(delta);
    },
  });
  const event = {
    type: 'message_update',
    assistantMessageEvent: { type: 'text_delta', delta: 'hello' },
  };
  handleAgentEvent(event, controls);
  assert.deepEqual(received, ['hello']);
});
32
test('assistant message end logs the final text and clears the live buffer', () => {
  const entries = [];
  let clearCount = 0;
  const controls = createControls({
    appendLog: (kind, title, body) => {
      entries.push({ kind, title, body });
    },
    clearLiveAssistant: () => {
      clearCount += 1;
    },
  });
  const event = {
    type: 'message_end',
    message: {
      role: 'assistant',
      content: [{ type: 'text', text: 'final answer' }],
    },
  };
  handleAgentEvent(event, controls);
  assert.deepEqual(entries, [{ kind: 'assistant', title: 'Pista', body: 'final answer' }]);
  assert.equal(clearCount, 1);
});
52
test('assistant message end falls back to buffered live text when needed', () => {
  // With empty message content the handler should log the live buffer instead.
  const entries = [];
  const controls = createControls({
    getLiveAssistantText: () => 'buffered answer',
    appendLog: (kind, title, body) => {
      entries.push({ kind, title, body });
    },
  });
  const event = {
    type: 'message_end',
    message: { role: 'assistant', content: [] },
  };
  handleAgentEvent(event, controls);
  assert.deepEqual(entries, [{ kind: 'assistant', title: 'Pista', body: 'buffered answer' }]);
});