bloby-bot 0.47.7 → 0.47.8
package/package.json CHANGED
@@ -9,6 +9,12 @@
 export interface AsyncQueue<T> extends AsyncIterable<T> {
   push(item: T): void;
   end(): void;
+  /**
+   * Non-blocking drain: return every item currently buffered without waiting
+   * for a new one. Used by the session to fold mid-turn user messages into
+   * the model's history without breaking the outer `for await` consumer.
+   */
+  drainPending(): T[];
 }
 
 export function createAsyncQueue<T>(): AsyncQueue<T> {
@@ -30,6 +36,10 @@ export function createAsyncQueue<T>(): AsyncQueue<T> {
       done = true;
       if (resolve) resolve({ value: undefined as any, done: true });
     },
+    drainPending() {
+      if (pending.length === 0) return [];
+      return pending.splice(0, pending.length);
+    },
     [Symbol.asyncIterator]() {
       return {
         next(): Promise<IteratorResult<T>> {
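The diff shows createAsyncQueue only in fragments. For context, a minimal implementation consistent with those fragments could look like the sketch below: the `pending` buffer, the single parked `resolve`, and the members shown above are taken from the diff, while everything else is an assumption rather than the package's verbatim source.

// Sketch only: a single-consumer async queue with the new drainPending().
export interface AsyncQueue<T> extends AsyncIterable<T> {
  push(item: T): void;
  end(): void;
  drainPending(): T[];
}

export function createAsyncQueue<T>(): AsyncQueue<T> {
  const pending: T[] = [];
  let resolve: ((r: IteratorResult<T>) => void) | undefined;
  let done = false;

  return {
    push(item) {
      if (resolve) {
        // A consumer is parked in next(): hand the item straight to it.
        const r = resolve;
        resolve = undefined;
        r({ value: item, done: false });
      } else {
        pending.push(item);
      }
    },
    end() {
      done = true;
      if (resolve) resolve({ value: undefined as any, done: true });
    },
    drainPending() {
      if (pending.length === 0) return [];
      // splice(0, length) empties the buffer and returns items in arrival
      // order, so a drain never steals an item next() has already resolved.
      return pending.splice(0, pending.length);
    },
    [Symbol.asyncIterator]() {
      return {
        next(): Promise<IteratorResult<T>> {
          if (pending.length > 0) {
            return Promise.resolve({ value: pending.shift()!, done: false });
          }
          if (done) {
            return Promise.resolve({ value: undefined as any, done: true });
          }
          // Park until the next push() or end().
          return new Promise((r) => { resolve = r; });
        },
      };
    },
  };
}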
@@ -82,6 +82,26 @@ function formatConversationHistory(messages: RecentMessage[]): string {
   return messages.map((m) => `${m.role}: ${m.content}`).join('\n\n');
 }
 
+/**
+ * Live-conversation pacing hint. The Claude Agent SDK trains its model to do
+ * this natively; non-Anthropic models (Gemini especially) tend to go silent
+ * during tool loops and never report progress. This nudge makes the
+ * conversation feel alive — quick acknowledgement before long tasks, short
+ * status notes between tool calls, and inline answers if the user types
+ * something while the agent is mid-task.
+ */
+const LIVE_CONVERSATION_HINT = `
+
+---
+# Live-conversation pacing
+
+You are running in a streaming chat where the user can keep typing while you work. Make the conversation feel alive:
+
+- Before kicking off a multi-step task, say one short line acknowledging it ("On it, looking at the widget now.").
+- Between tool calls on long tasks, drop a brief progress note ("Found the file, checking the layout next.") so the user knows you're still working.
+- If a new user message arrives while you're mid-task, you'll see it as a fresh user-role message in the conversation history. Answer it briefly inline, mention you're still working on the main task, then continue.
+- Final answers should be concise and concrete.`;
+
 async function buildSystemPrompt(
   names?: { botName: string; humanName: string },
   recentMessages?: RecentMessage[],
@@ -89,6 +109,7 @@ async function buildSystemPrompt(
   const memoryFiles = readMemoryFiles();
   const basePrompt = await assembleSystemPrompt(names?.botName, names?.humanName);
   let systemPrompt = basePrompt;
+  systemPrompt += LIVE_CONVERSATION_HINT;
   systemPrompt += `\n\n---\n# Your Memory Files\n\n## MYSELF.md\n${memoryFiles.myself}\n\n## MYHUMAN.md\n${memoryFiles.myhuman}\n\n## MEMORY.md\n${memoryFiles.memory}\n\n---\n# Your Config Files\n\n## PULSE.json\n${memoryFiles.pulse}\n\n## CRONS.json\n${memoryFiles.crons}`;
 
   try {
@@ -141,14 +141,17 @@ export function createPiSession(init: PiSessionInit): PiSession {
     }
   }
 
-  async function runOneTurn(userMsg: PiMessage): Promise<void> {
+  async function runOneTurn(input: AsyncQueue<PiMessage>, firstUserMsg: PiMessage): Promise<void> {
     if (init.abortController.signal.aborted) return;
-    messages.push(userMsg);
+    // Stack any messages that arrived in the same millisecond into one turn.
+    messages.push(firstUserMsg);
+    for (const extra of input.drainPending()) messages.push(extra);
     init.onEvent({ type: 'turn_started' });
 
     let accumulatedText = '';
     const usedTools = new Set<string>();
     let turnErrored = false;
+    let pendingInterleave = false;
 
     for (let round = 0; round < MAX_TOOL_ROUNDS; round++) {
       if (init.abortController.signal.aborted) break;
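A hypothetical illustration of that turn-start stacking (the `{ role, content }` message shape is inferred from the tool-result push later in this diff):

// Hypothetical illustration of turn-start stacking.
const q = createAsyncQueue<PiMessage>();
q.push({ role: 'user', content: 'fix the widget layout' });
q.push({ role: 'user', content: 'and check the footer too' });
// The session's `for await` wakes with the first message; runOneTurn(q, first)
// then drains the second out of the buffer, so both land in history before
// round 0 streams: one model call instead of two back-to-back turns.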
@@ -177,7 +180,6 @@ export function createPiSession(init: PiSessionInit): PiSession {
       }
 
       if (errored) { turnErrored = true; break; }
-      if (toolUses.length === 0) break; // model finished — exit loop
 
       // Run every tool the model asked for this round, then feed the results
       // back as a single user message Gemini accepts as a batch.
@@ -198,7 +200,24 @@
       if (toolResultBlocks.length > 0) {
         messages.push({ role: 'user', content: toolResultBlocks });
       }
-
+
+      // Fold any user messages that arrived during this round into history so
+      // the next stream pass sees them. This is what makes the conversation
+      // feel alive: while the agent is grinding on a long task, a question
+      // typed mid-stream lands in the very next request as a user-role part,
+      // and the model can answer it inline before continuing.
+      const interleaved = input.drainPending();
+      if (interleaved.length > 0) {
+        log.info(`[pi/session] interleaved ${interleaved.length} mid-turn user message(s) into history`);
+        for (const m of interleaved) messages.push(m);
+        pendingInterleave = true;
+      } else {
+        pendingInterleave = false;
+      }
+
+      // Exit when the model has nothing more to do AND no new user messages
+      // arrived mid-round. Either condition by itself keeps the loop alive.
+      if (toolUses.length === 0 && !pendingInterleave) break;
     }
 
     if (!turnErrored) {
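Taken together with the early `break` removed above, the tool loop's control flow now reads roughly as follows. This is a condensed sketch, not the package's verbatim source; the two `declare`d helpers are hypothetical stand-ins for streaming and tool-execution code outside these hunks.

// Condensed control-flow sketch of the tool loop after this change.
declare function streamOneModelPass(msgs: PiMessage[]): Promise<{ toolUses: unknown[]; errored: boolean }>;
declare function runToolRound(toolUses: unknown[], msgs: PiMessage[]): Promise<void>;

for (let round = 0; round < MAX_TOOL_ROUNDS; round++) {
  const { toolUses, errored } = await streamOneModelPass(messages);
  if (errored) { turnErrored = true; break; }

  await runToolRound(toolUses, messages); // batch execution, as shown above

  // New: fold mid-turn user input into history before deciding to exit
  // (interleaved.length > 0 plays the role of the pendingInterleave flag).
  const interleaved = input.drainPending();
  for (const m of interleaved) messages.push(m);

  // Old exit: broke as soon as a pass produced no tool calls. New exit: an
  // idle model no longer ends the turn when a user message arrived
  // mid-round; the next pass answers it inline before the loop can stop.
  if (toolUses.length === 0 && interleaved.length === 0) break;
}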
@@ -215,7 +234,7 @@ export function createPiSession(init: PiSessionInit): PiSession {
     for await (const userMsg of input) {
       if (init.abortController.signal.aborted) break;
       try {
-        await runOneTurn(userMsg);
+        await runOneTurn(input, userMsg);
       } catch (err: any) {
         log.warn(`[pi/session] Turn failed: ${err?.message || err}`);
         init.onEvent({ type: 'error', error: err?.message || String(err) });
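Finally, a hypothetical end-to-end sketch of what the change buys. How createPiSession receives the queue is not visible in this diff, so the `input` field on init is an assumption; only `abortController` and `onEvent` appear in the hunks above.

// Hypothetical wiring sketch; the `input` init field is assumed.
const input = createAsyncQueue<PiMessage>();
createPiSession({
  input, // assumed: the source of the session's `for await (const userMsg of input)`
  abortController: new AbortController(),
  onEvent: (e) => console.log('[pi]', e.type),
});

// Turn 1 starts as soon as the consumer loop sees this message.
input.push({ role: 'user', content: 'refactor the widget module' });

// Sent while turn 1 is still streaming: it waits in the pending buffer, is
// drained between tool rounds, and gets answered inline mid-task instead of
// queueing up as a whole separate turn.
input.push({ role: 'user', content: 'quick one: which port is the dev server on?' });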