agentxjs 0.0.0-dev-20260312143810

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,486 @@
1
+ /**
2
+ * Presentation Reducer
3
+ *
4
+ * Aggregates events into PresentationState.
5
+ * Pure function: (state, event) => newState
6
+ *
7
+ * Event consumption strategy:
8
+ * - Stream layer: message_start, text_delta, tool_use_start, tool_use_stop, message_stop
9
+ * (for real-time streaming display)
10
+ * - Message layer: tool_result_message
11
+ * (for tool execution results — arrives after message_stop)
12
+ *
13
+ * Tool calls are stream-level blocks within the assistant turn,
14
+ * matching the mainstream API pattern (Anthropic, OpenAI).
15
+ */
16
+
17
+ import type {
18
+ AssistantMessage,
19
+ ErrorMessage,
20
+ Message,
21
+ ToolCallPart,
22
+ ToolResultMessage,
23
+ ToolResultOutput,
24
+ UserMessage,
25
+ } from "@agentxjs/core/agent";
26
+ import type { BusEvent } from "@agentxjs/core/event";
27
+ import type {
28
+ AssistantConversation,
29
+ Block,
30
+ Conversation,
31
+ PresentationState,
32
+ TextBlock,
33
+ TokenUsage,
34
+ ToolBlock,
35
+ } from "./types";
36
+ import { initialPresentationState } from "./types";
37
+
38
+ // ============================================================================
39
+ // Event Data Types
40
+ // ============================================================================
41
+
42
/** Payload of a "message_start" stream event (metadata currently unused by the reducer). */
interface MessageStartData {
  messageId?: string;
  model?: string;
}

/** Payload of a "text_delta" stream event: one streamed text fragment. */
interface TextDeltaData {
  text: string;
}

/** Payload of a "tool_use_start" stream event: a tool call has been announced. */
interface ToolUseStartData {
  toolCallId: string;
  toolName: string;
}

/** Payload of a "tool_use_stop" stream event: carries the fully assembled tool input. */
interface ToolUseStopData {
  toolCallId: string;
  toolName: string;
  input: Record<string, unknown>;
}

/** Payload of a "message_delta" stream event: per-step token usage, when reported. */
interface MessageDeltaData {
  usage?: {
    inputTokens: number;
    outputTokens: number;
  };
}

/** Payload of a "message_stop" stream event; stopReason is e.g. "tool_use" or "end_turn". */
interface MessageStopData {
  stopReason?: string;
}

/** Payload of an "error" event. */
interface ErrorData {
  message: string;
  code?: string;
}
77
+
78
+ // ============================================================================
79
+ // Reducer
80
+ // ============================================================================
81
+
82
+ /**
83
+ * Reduce an event into presentation state.
84
+ *
85
+ * Consumes:
86
+ * - Stream events: message_start, text_delta, tool_use_start, tool_use_stop, message_stop
87
+ * - Message events: tool_result_message
88
+ * - Error events: error
89
+ */
90
+ export function presentationReducer(state: PresentationState, event: BusEvent): PresentationState {
91
+ switch (event.type) {
92
+ // Stream layer — real-time display
93
+ case "message_start":
94
+ return handleMessageStart(state, event.data as MessageStartData);
95
+
96
+ case "text_delta":
97
+ return handleTextDelta(state, event.data as TextDeltaData);
98
+
99
+ case "tool_use_start":
100
+ return handleToolUseStart(state, event.data as ToolUseStartData);
101
+
102
+ case "tool_use_stop":
103
+ return handleToolUseStop(state, event.data as ToolUseStopData);
104
+
105
+ case "message_delta":
106
+ return handleMessageDelta(state, event.data as MessageDeltaData);
107
+
108
+ case "message_stop":
109
+ return handleMessageStop(state, event.data as MessageStopData);
110
+
111
+ // Message layer — tool results from Engine
112
+ case "tool_result_message":
113
+ return handleToolResultMessage(state, event.data as ToolResultMessage);
114
+
115
+ case "error":
116
+ return handleError(state, event.data as ErrorData);
117
+
118
+ default:
119
+ return state;
120
+ }
121
+ }
122
+
123
+ // ============================================================================
124
+ // Handlers
125
+ // ============================================================================
126
+
127
+ function handleMessageStart(state: PresentationState, _data: MessageStartData): PresentationState {
128
+ // If streaming already exists (e.g. tool_use turn not yet flushed), flush it first
129
+ let conversations = state.conversations;
130
+ if (state.streaming && state.streaming.blocks.length > 0) {
131
+ conversations = [...conversations, { ...state.streaming, isStreaming: false }];
132
+ }
133
+
134
+ const streaming: AssistantConversation = {
135
+ role: "assistant",
136
+ blocks: [],
137
+ isStreaming: true,
138
+ };
139
+
140
+ return {
141
+ ...state,
142
+ conversations,
143
+ streaming,
144
+ status: "thinking",
145
+ };
146
+ }
147
+
148
+ function handleTextDelta(state: PresentationState, data: TextDeltaData): PresentationState {
149
+ if (!state.streaming) {
150
+ return state;
151
+ }
152
+
153
+ const blocks = [...state.streaming.blocks];
154
+ const lastBlock = blocks[blocks.length - 1];
155
+
156
+ if (lastBlock && lastBlock.type === "text") {
157
+ blocks[blocks.length - 1] = {
158
+ ...lastBlock,
159
+ content: lastBlock.content + data.text,
160
+ };
161
+ } else {
162
+ blocks.push({
163
+ type: "text",
164
+ content: data.text,
165
+ } as TextBlock);
166
+ }
167
+
168
+ return {
169
+ ...state,
170
+ streaming: {
171
+ ...state.streaming,
172
+ blocks,
173
+ },
174
+ status: "responding",
175
+ };
176
+ }
177
+
178
+ function handleToolUseStart(state: PresentationState, data: ToolUseStartData): PresentationState {
179
+ if (!state.streaming) {
180
+ return state;
181
+ }
182
+
183
+ // Create a pending tool block — toolInput will be filled by tool_use_stop
184
+ const toolBlock: ToolBlock = {
185
+ type: "tool",
186
+ toolUseId: data.toolCallId,
187
+ toolName: data.toolName,
188
+ toolInput: {},
189
+ status: "pending",
190
+ };
191
+
192
+ return {
193
+ ...state,
194
+ streaming: {
195
+ ...state.streaming,
196
+ blocks: [...state.streaming.blocks, toolBlock],
197
+ },
198
+ status: "executing",
199
+ };
200
+ }
201
+
202
+ /**
203
+ * Handle tool_use_stop from stream layer.
204
+ * Fills in the complete toolInput for the matching pending tool block.
205
+ * The stream event carries the fully assembled input.
206
+ */
207
+ function handleToolUseStop(state: PresentationState, data: ToolUseStopData): PresentationState {
208
+ if (!state.streaming) {
209
+ return state;
210
+ }
211
+
212
+ const blocks = state.streaming.blocks.map((block): Block => {
213
+ if (block.type === "tool" && block.toolUseId === data.toolCallId) {
214
+ return {
215
+ ...block,
216
+ toolInput: data.input,
217
+ status: "running",
218
+ };
219
+ }
220
+ return block;
221
+ });
222
+
223
+ return {
224
+ ...state,
225
+ streaming: {
226
+ ...state.streaming,
227
+ blocks,
228
+ },
229
+ };
230
+ }
231
+
232
+ function handleMessageDelta(state: PresentationState, data: MessageDeltaData): PresentationState {
233
+ if (!state.streaming || !data.usage) {
234
+ return state;
235
+ }
236
+
237
+ const prev = state.streaming.usage;
238
+ const usage: TokenUsage = {
239
+ inputTokens: (prev?.inputTokens ?? 0) + data.usage.inputTokens,
240
+ outputTokens: (prev?.outputTokens ?? 0) + data.usage.outputTokens,
241
+ };
242
+
243
+ return {
244
+ ...state,
245
+ streaming: {
246
+ ...state.streaming,
247
+ usage,
248
+ },
249
+ };
250
+ }
251
+
252
+ function handleMessageStop(state: PresentationState, data: MessageStopData): PresentationState {
253
+ if (!state.streaming) {
254
+ return state;
255
+ }
256
+
257
+ // tool_use stop → don't flush, tool results are still incoming
258
+ if (data.stopReason === "tool_use") {
259
+ return {
260
+ ...state,
261
+ status: "executing",
262
+ };
263
+ }
264
+
265
+ // end_turn / max_tokens / etc → flush streaming to conversations
266
+ const completedConversation: AssistantConversation = {
267
+ ...state.streaming,
268
+ isStreaming: false,
269
+ };
270
+
271
+ return {
272
+ ...state,
273
+ conversations: [...state.conversations, completedConversation],
274
+ streaming: null,
275
+ status: "idle",
276
+ };
277
+ }
278
+
279
+ /**
280
+ * Handle tool_result_message from Engine layer.
281
+ * Fills in the toolResult for the matching tool block.
282
+ *
283
+ * Note: tool_result_message arrives after message_stop(tool_use),
284
+ * but streaming is kept alive (not flushed) during tool_use turns.
285
+ */
286
+ function handleToolResultMessage(
287
+ state: PresentationState,
288
+ data: ToolResultMessage
289
+ ): PresentationState {
290
+ if (!state.streaming) {
291
+ return state;
292
+ }
293
+
294
+ const toolCallId = data.toolCallId;
295
+ const blocks = state.streaming.blocks.map((block): Block => {
296
+ if (block.type === "tool" && block.toolUseId === toolCallId) {
297
+ return {
298
+ ...block,
299
+ toolResult: formatToolResultOutput(data.toolResult.output),
300
+ status:
301
+ data.toolResult.output.type === "error-text" ||
302
+ data.toolResult.output.type === "error-json" ||
303
+ data.toolResult.output.type === "execution-denied"
304
+ ? "error"
305
+ : "completed",
306
+ };
307
+ }
308
+ return block;
309
+ });
310
+
311
+ return {
312
+ ...state,
313
+ streaming: {
314
+ ...state.streaming,
315
+ blocks,
316
+ },
317
+ status: "responding",
318
+ };
319
+ }
320
+
321
+ function handleError(state: PresentationState, data: ErrorData): PresentationState {
322
+ return {
323
+ ...state,
324
+ conversations: [
325
+ ...state.conversations,
326
+ {
327
+ role: "error",
328
+ message: data.message,
329
+ },
330
+ ],
331
+ streaming: null,
332
+ status: "idle",
333
+ };
334
+ }
335
+
336
+ // ============================================================================
337
+ // Helper: Add user conversation
338
+ // ============================================================================
339
+
340
+ export function addUserConversation(state: PresentationState, content: string): PresentationState {
341
+ return {
342
+ ...state,
343
+ conversations: [
344
+ ...state.conversations,
345
+ {
346
+ role: "user",
347
+ blocks: [{ type: "text", content }],
348
+ },
349
+ ],
350
+ };
351
+ }
352
+
353
+ export function createInitialState(): PresentationState {
354
+ return { ...initialPresentationState };
355
+ }
356
+
357
+ // ============================================================================
358
+ // Helper: Format tool result output
359
+ // ============================================================================
360
+
361
+ function formatToolResultOutput(output: ToolResultOutput): string {
362
+ switch (output.type) {
363
+ case "text":
364
+ case "error-text":
365
+ return output.value;
366
+ case "json":
367
+ case "error-json":
368
+ return JSON.stringify(output.value);
369
+ case "execution-denied":
370
+ return output.reason ?? "Execution denied";
371
+ case "content":
372
+ return output.value
373
+ .filter((p): p is { type: "text"; text: string } => p.type === "text")
374
+ .map((p) => p.text)
375
+ .join("");
376
+ }
377
+ }
378
+
379
+ // ============================================================================
380
+ // Message → Conversation Converter
381
+ // ============================================================================
382
+
383
/**
 * Convert persisted Messages to Presentation Conversations.
 *
 * Groups consecutive assistant + tool-result messages
 * into a single AssistantConversation.
 *
 * Tool calls are now part of AssistantMessage.content (as ToolCallPart),
 * so we extract them directly from the assistant message.
 *
 * Messages with an unrecognized subtype are silently skipped.
 */
export function messagesToConversations(messages: Message[]): Conversation[] {
  const conversations: Conversation[] = [];
  // The assistant turn currently being accumulated; flushed when a user or
  // error message arrives, or at end of input. Null when no turn is open.
  let currentAssistant: AssistantConversation | null = null;

  // Commit the open assistant turn to the output. Turns with no blocks are
  // dropped rather than pushed (no empty assistant bubbles).
  function flushAssistant() {
    if (currentAssistant && currentAssistant.blocks.length > 0) {
      conversations.push(currentAssistant);
    }
    currentAssistant = null;
  }

  for (const msg of messages) {
    switch (msg.subtype) {
      case "user": {
        // A user message always ends any open assistant turn.
        flushAssistant();
        const m = msg as UserMessage;
        // Content may be a plain string or a part array; only text parts
        // contribute to the displayed text.
        const text =
          typeof m.content === "string"
            ? m.content
            : m.content
                .filter((p): p is { type: "text"; text: string } => p.type === "text")
                .map((p) => p.text)
                .join("");
        conversations.push({
          role: "user",
          blocks: [{ type: "text", content: text }],
        });
        break;
      }

      case "assistant": {
        // Consecutive assistant messages accumulate into one turn.
        if (!currentAssistant) {
          currentAssistant = { role: "assistant", blocks: [], isStreaming: false };
        }
        const m = msg as AssistantMessage;
        if (typeof m.content === "string") {
          // Empty strings produce no block.
          if (m.content) {
            currentAssistant.blocks.push({ type: "text", content: m.content } as TextBlock);
          }
        } else {
          // Extract text and tool call parts from content
          for (const part of m.content) {
            if (part.type === "text") {
              if (part.text) {
                currentAssistant.blocks.push({ type: "text", content: part.text } as TextBlock);
              }
            } else if (part.type === "tool-call") {
              const tc = part as ToolCallPart;
              // Persisted tool calls are rendered as already completed; the
              // matching "tool-result" message below fills in the output
              // and may downgrade the status to "error".
              currentAssistant.blocks.push({
                type: "tool",
                toolUseId: tc.id,
                toolName: tc.name,
                toolInput: tc.input,
                status: "completed",
              } as ToolBlock);
            }
          }
        }
        break;
      }

      case "tool-result": {
        const m = msg as ToolResultMessage;
        // Attach the result to the first matching tool block of the open
        // turn. Results arriving with no open assistant turn are dropped.
        if (currentAssistant) {
          for (const block of currentAssistant.blocks) {
            if (block.type === "tool" && block.toolUseId === m.toolResult.id) {
              block.toolResult = formatToolResultOutput(m.toolResult.output);
              // Error-like outputs mark the block as failed.
              block.status =
                m.toolResult.output.type === "error-text" ||
                m.toolResult.output.type === "error-json" ||
                m.toolResult.output.type === "execution-denied"
                  ? "error"
                  : "completed";
              break;
            }
          }
        }
        break;
      }

      case "error": {
        // An error message also ends any open assistant turn.
        flushAssistant();
        const m = msg as ErrorMessage;
        conversations.push({
          role: "error",
          message: m.content,
        });
        break;
      }
    }
  }

  // Flush a trailing assistant turn left open at end of input.
  flushAssistant();
  return conversations;
}
@@ -0,0 +1,121 @@
1
+ /**
2
+ * Presentation Types
3
+ *
4
+ * UI-friendly data model aggregated from stream events.
5
+ * This implements the Presentation Model pattern.
6
+ */
7
+
8
+ // ============================================================================
9
+ // Block Types - Basic content units
10
+ // ============================================================================
11
+
12
/**
 * Text block — a run of displayable text.
 */
export interface TextBlock {
  type: "text";
  content: string;
}

/**
 * Tool block - represents a tool call and its result.
 *
 * Status lifecycle (driven by the reducer): "pending" (call announced) →
 * "running" (input assembled) → "completed" or "error" (result attached).
 */
export interface ToolBlock {
  type: "tool";
  /** Id correlating this call with its eventual tool result. */
  toolUseId: string;
  toolName: string;
  toolInput: Record<string, unknown>;
  /** Formatted result text; undefined until the result arrives. */
  toolResult?: string;
  status: "pending" | "running" | "completed" | "error";
}

/**
 * Image block
 */
export interface ImageBlock {
  type: "image";
  url: string;
  /** Alternative text for accessibility. */
  alt?: string;
}

/**
 * All block types (discriminated on `type`)
 */
export type Block = TextBlock | ToolBlock | ImageBlock;

// ============================================================================
// Conversation Types - A single turn in the conversation
// ============================================================================

/**
 * User conversation — one user turn.
 */
export interface UserConversation {
  role: "user";
  blocks: Block[];
}

/**
 * Token usage for a message (one LLM call / step)
 */
export interface TokenUsage {
  inputTokens: number;
  outputTokens: number;
}

/**
 * Assistant conversation — one assistant turn, possibly still streaming.
 */
export interface AssistantConversation {
  role: "assistant";
  blocks: Block[];
  /** True while the turn is still receiving stream events. */
  isStreaming: boolean;
  /** Accumulated token usage across all steps in this conversation */
  usage?: TokenUsage;
}

/**
 * Error conversation — a surfaced error shown in the transcript.
 */
export interface ErrorConversation {
  role: "error";
  message: string;
}

/**
 * All conversation types (discriminated on `role`)
 */
export type Conversation = UserConversation | AssistantConversation | ErrorConversation;

// ============================================================================
// Presentation State
// ============================================================================

/**
 * Presentation state - the complete UI state
 */
export interface PresentationState {
  /**
   * All completed conversations
   */
  conversations: Conversation[];

  /**
   * Current streaming conversation (null if not streaming)
   */
  streaming: AssistantConversation | null;

  /**
   * Current status
   */
  status: "idle" | "thinking" | "responding" | "executing";
}

/**
 * Initial presentation state.
 *
 * NOTE(review): this is a shared module-level singleton — callers must not
 * mutate it; obtain fresh state via createInitialState() instead.
 */
export const initialPresentationState: PresentationState = {
  conversations: [],
  streaming: null,
  status: "idle",
};