agentxjs 1.9.9-dev → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -7,12 +7,9 @@
7
7
 
8
8
  import type { AgentX } from "../types";
9
9
  import type { Unsubscribe, BusEvent } from "@agentxjs/core/event";
10
- import type { PresentationState } from "./types";
11
- import {
12
- presentationReducer,
13
- addUserConversation,
14
- createInitialState,
15
- } from "./reducer";
10
+ import type { PresentationState, Conversation } from "./types";
11
+ import { initialPresentationState } from "./types";
12
+ import { presentationReducer, addUserConversation, createInitialState } from "./reducer";
16
13
 
17
14
  /**
18
15
  * Presentation update handler
@@ -50,10 +47,17 @@ export class Presentation {
50
47
  private errorHandlers: Set<PresentationErrorHandler> = new Set();
51
48
  private eventUnsubscribe: Unsubscribe | null = null;
52
49
 
53
- constructor(agentx: AgentX, agentId: string, options?: PresentationOptions) {
50
+ constructor(
51
+ agentx: AgentX,
52
+ agentId: string,
53
+ options?: PresentationOptions,
54
+ initialConversations?: Conversation[]
55
+ ) {
54
56
  this.agentx = agentx;
55
57
  this.agentId = agentId;
56
- this.state = createInitialState();
58
+ this.state = initialConversations?.length
59
+ ? { ...initialPresentationState, conversations: initialConversations }
60
+ : createInitialState();
57
61
 
58
62
  // Register initial handlers
59
63
  if (options?.onUpdate) {
@@ -106,7 +110,7 @@ export class Presentation {
106
110
 
107
111
  try {
108
112
  // Send message via agentx
109
- await this.agentx.sendMessage(this.agentId, content);
113
+ await this.agentx.sessions.send(this.agentId, content);
110
114
  } catch (error) {
111
115
  this.notifyError(error instanceof Error ? error : new Error(String(error)));
112
116
  }
@@ -117,7 +121,7 @@ export class Presentation {
117
121
  */
118
122
  async interrupt(): Promise<void> {
119
123
  try {
120
- await this.agentx.interrupt(this.agentId);
124
+ await this.agentx.sessions.interrupt(this.agentId);
121
125
  } catch (error) {
122
126
  this.notifyError(error instanceof Error ? error : new Error(String(error)));
123
127
  }
@@ -9,6 +9,7 @@ export type {
9
9
  TextBlock,
10
10
  ToolBlock,
11
11
  ImageBlock,
12
+ TokenUsage,
12
13
  Conversation,
13
14
  UserConversation,
14
15
  AssistantConversation,
@@ -22,6 +23,7 @@ export {
22
23
  presentationReducer,
23
24
  addUserConversation,
24
25
  createInitialState,
26
+ messagesToConversations,
25
27
  } from "./reducer";
26
28
 
27
29
  export {
@@ -1,22 +1,42 @@
1
1
  /**
2
2
  * Presentation Reducer
3
3
  *
4
- * Aggregates stream events into PresentationState.
4
+ * Aggregates events into PresentationState.
5
5
  * Pure function: (state, event) => newState
6
+ *
7
+ * Event consumption strategy:
8
+ * - Stream layer: message_start, text_delta, tool_use_start, tool_use_stop, message_stop
9
+ * (for real-time streaming display)
10
+ * - Message layer: tool_result_message
11
+ * (for tool execution results — arrives after message_stop)
12
+ *
13
+ * Tool calls are stream-level blocks within the assistant turn,
14
+ * matching the mainstream API pattern (Anthropic, OpenAI).
6
15
  */
7
16
 
8
17
  import type { BusEvent } from "@agentxjs/core/event";
18
+ import type {
19
+ Message,
20
+ UserMessage,
21
+ AssistantMessage,
22
+ ToolResultMessage,
23
+ ErrorMessage,
24
+ ToolResultOutput,
25
+ ToolCallPart,
26
+ } from "@agentxjs/core/agent";
9
27
  import type {
10
28
  PresentationState,
29
+ Conversation,
11
30
  AssistantConversation,
12
31
  TextBlock,
13
32
  ToolBlock,
14
33
  Block,
34
+ TokenUsage,
15
35
  } from "./types";
16
36
  import { initialPresentationState } from "./types";
17
37
 
18
38
  // ============================================================================
19
- // Event Data Types (from stream events)
39
+ // Event Data Types
20
40
  // ============================================================================
21
41
 
22
42
  interface MessageStartData {
@@ -29,18 +49,21 @@ interface TextDeltaData {
29
49
  }
30
50
 
31
51
  interface ToolUseStartData {
32
- toolUseId: string;
52
+ toolCallId: string;
33
53
  toolName: string;
34
54
  }
35
55
 
36
- interface InputJsonDeltaData {
37
- delta: string;
56
+ interface ToolUseStopData {
57
+ toolCallId: string;
58
+ toolName: string;
59
+ input: Record<string, unknown>;
38
60
  }
39
61
 
40
- interface ToolResultData {
41
- toolUseId: string;
42
- result: string;
43
- isError?: boolean;
62
+ interface MessageDeltaData {
63
+ usage?: {
64
+ inputTokens: number;
65
+ outputTokens: number;
66
+ };
44
67
  }
45
68
 
46
69
  interface MessageStopData {
@@ -57,13 +80,16 @@ interface ErrorData {
57
80
  // ============================================================================
58
81
 
59
82
  /**
60
- * Reduce a stream event into presentation state
83
+ * Reduce an event into presentation state.
84
+ *
85
+ * Consumes:
86
+ * - Stream events: message_start, text_delta, tool_use_start, tool_use_stop, message_stop
87
+ * - Message events: tool_result_message
88
+ * - Error events: error
61
89
  */
62
- export function presentationReducer(
63
- state: PresentationState,
64
- event: BusEvent
65
- ): PresentationState {
90
+ export function presentationReducer(state: PresentationState, event: BusEvent): PresentationState {
66
91
  switch (event.type) {
92
+ // Stream layer — real-time display
67
93
  case "message_start":
68
94
  return handleMessageStart(state, event.data as MessageStartData);
69
95
 
@@ -73,15 +99,19 @@ export function presentationReducer(
73
99
  case "tool_use_start":
74
100
  return handleToolUseStart(state, event.data as ToolUseStartData);
75
101
 
76
- case "input_json_delta":
77
- return handleInputJsonDelta(state, event.data as InputJsonDeltaData);
102
+ case "tool_use_stop":
103
+ return handleToolUseStop(state, event.data as ToolUseStopData);
78
104
 
79
- case "tool_result":
80
- return handleToolResult(state, event.data as ToolResultData);
105
+ case "message_delta":
106
+ return handleMessageDelta(state, event.data as MessageDeltaData);
81
107
 
82
108
  case "message_stop":
83
109
  return handleMessageStop(state, event.data as MessageStopData);
84
110
 
111
+ // Message layer — tool results from Engine
112
+ case "tool_result_message":
113
+ return handleToolResultMessage(state, event.data as ToolResultMessage);
114
+
85
115
  case "error":
86
116
  return handleError(state, event.data as ErrorData);
87
117
 
@@ -94,11 +124,13 @@ export function presentationReducer(
94
124
  // Handlers
95
125
  // ============================================================================
96
126
 
97
- function handleMessageStart(
98
- state: PresentationState,
99
- _data: MessageStartData
100
- ): PresentationState {
101
- // Start a new streaming assistant conversation
127
+ function handleMessageStart(state: PresentationState, _data: MessageStartData): PresentationState {
128
+ // If streaming already exists (e.g. tool_use turn not yet flushed), flush it first
129
+ let conversations = state.conversations;
130
+ if (state.streaming && state.streaming.blocks.length > 0) {
131
+ conversations = [...conversations, { ...state.streaming, isStreaming: false }];
132
+ }
133
+
102
134
  const streaming: AssistantConversation = {
103
135
  role: "assistant",
104
136
  blocks: [],
@@ -107,15 +139,13 @@ function handleMessageStart(
107
139
 
108
140
  return {
109
141
  ...state,
142
+ conversations,
110
143
  streaming,
111
144
  status: "thinking",
112
145
  };
113
146
  }
114
147
 
115
- function handleTextDelta(
116
- state: PresentationState,
117
- data: TextDeltaData
118
- ): PresentationState {
148
+ function handleTextDelta(state: PresentationState, data: TextDeltaData): PresentationState {
119
149
  if (!state.streaming) {
120
150
  return state;
121
151
  }
@@ -123,7 +153,6 @@ function handleTextDelta(
123
153
  const blocks = [...state.streaming.blocks];
124
154
  const lastBlock = blocks[blocks.length - 1];
125
155
 
126
- // Append to existing TextBlock or create new one
127
156
  if (lastBlock && lastBlock.type === "text") {
128
157
  blocks[blocks.length - 1] = {
129
158
  ...lastBlock,
@@ -146,17 +175,15 @@ function handleTextDelta(
146
175
  };
147
176
  }
148
177
 
149
- function handleToolUseStart(
150
- state: PresentationState,
151
- data: ToolUseStartData
152
- ): PresentationState {
178
+ function handleToolUseStart(state: PresentationState, data: ToolUseStartData): PresentationState {
153
179
  if (!state.streaming) {
154
180
  return state;
155
181
  }
156
182
 
183
+ // Create a pending tool block — toolInput will be filled by tool_use_stop
157
184
  const toolBlock: ToolBlock = {
158
185
  type: "tool",
159
- toolUseId: data.toolUseId,
186
+ toolUseId: data.toolCallId,
160
187
  toolName: data.toolName,
161
188
  toolInput: {},
162
189
  status: "pending",
@@ -172,64 +199,22 @@ function handleToolUseStart(
172
199
  };
173
200
  }
174
201
 
175
- function handleInputJsonDelta(
176
- state: PresentationState,
177
- data: InputJsonDeltaData
178
- ): PresentationState {
179
- if (!state.streaming) {
180
- return state;
181
- }
182
-
183
- const blocks = [...state.streaming.blocks];
184
- const lastBlock = blocks[blocks.length - 1];
185
-
186
- // Find the last tool block and update its input
187
- if (lastBlock && lastBlock.type === "tool") {
188
- // Accumulate JSON delta (will be parsed when complete)
189
- const currentInput = (lastBlock as ToolBlock & { _rawInput?: string })._rawInput || "";
190
- const newInput = currentInput + data.delta;
191
-
192
- // Try to parse the accumulated JSON
193
- let toolInput = lastBlock.toolInput;
194
- try {
195
- toolInput = JSON.parse(newInput);
196
- } catch {
197
- // Not yet valid JSON, keep accumulating
198
- }
199
-
200
- blocks[blocks.length - 1] = {
201
- ...lastBlock,
202
- toolInput,
203
- _rawInput: newInput,
204
- status: "running",
205
- } as ToolBlock & { _rawInput?: string };
206
-
207
- return {
208
- ...state,
209
- streaming: {
210
- ...state.streaming,
211
- blocks,
212
- },
213
- };
214
- }
215
-
216
- return state;
217
- }
218
-
219
- function handleToolResult(
220
- state: PresentationState,
221
- data: ToolResultData
222
- ): PresentationState {
202
+ /**
203
+ * Handle tool_use_stop from stream layer.
204
+ * Fills in the complete toolInput for the matching pending tool block.
205
+ * The stream event carries the fully assembled input.
206
+ */
207
+ function handleToolUseStop(state: PresentationState, data: ToolUseStopData): PresentationState {
223
208
  if (!state.streaming) {
224
209
  return state;
225
210
  }
226
211
 
227
212
  const blocks = state.streaming.blocks.map((block): Block => {
228
- if (block.type === "tool" && block.toolUseId === data.toolUseId) {
213
+ if (block.type === "tool" && block.toolUseId === data.toolCallId) {
229
214
  return {
230
215
  ...block,
231
- toolResult: data.result,
232
- status: data.isError ? "error" : "completed",
216
+ toolInput: data.input,
217
+ status: "running",
233
218
  };
234
219
  }
235
220
  return block;
@@ -241,19 +226,43 @@ function handleToolResult(
241
226
  ...state.streaming,
242
227
  blocks,
243
228
  },
244
- status: "responding",
245
229
  };
246
230
  }
247
231
 
248
- function handleMessageStop(
249
- state: PresentationState,
250
- _data: MessageStopData
251
- ): PresentationState {
232
+ function handleMessageDelta(state: PresentationState, data: MessageDeltaData): PresentationState {
233
+ if (!state.streaming || !data.usage) {
234
+ return state;
235
+ }
236
+
237
+ const prev = state.streaming.usage;
238
+ const usage: TokenUsage = {
239
+ inputTokens: (prev?.inputTokens ?? 0) + data.usage.inputTokens,
240
+ outputTokens: (prev?.outputTokens ?? 0) + data.usage.outputTokens,
241
+ };
242
+
243
+ return {
244
+ ...state,
245
+ streaming: {
246
+ ...state.streaming,
247
+ usage,
248
+ },
249
+ };
250
+ }
251
+
252
+ function handleMessageStop(state: PresentationState, data: MessageStopData): PresentationState {
252
253
  if (!state.streaming) {
253
254
  return state;
254
255
  }
255
256
 
256
- // Move streaming to conversations
257
+ // On tool_use stop, don't flush — tool results are still incoming
258
+ if (data.stopReason === "tool_use") {
259
+ return {
260
+ ...state,
261
+ status: "executing",
262
+ };
263
+ }
264
+
265
+ // end_turn / max_tokens / etc → flush streaming to conversations
257
266
  const completedConversation: AssistantConversation = {
258
267
  ...state.streaming,
259
268
  isStreaming: false,
@@ -267,11 +276,49 @@ function handleMessageStop(
267
276
  };
268
277
  }
269
278
 
270
- function handleError(
279
+ /**
280
+ * Handle tool_result_message from Engine layer.
281
+ * Fills in the toolResult for the matching tool block.
282
+ *
283
+ * Note: tool_result_message arrives after message_stop(tool_use),
284
+ * but streaming is kept alive (not flushed) during tool_use turns.
285
+ */
286
+ function handleToolResultMessage(
271
287
  state: PresentationState,
272
- data: ErrorData
288
+ data: ToolResultMessage
273
289
  ): PresentationState {
274
- // Add error conversation
290
+ if (!state.streaming) {
291
+ return state;
292
+ }
293
+
294
+ const toolCallId = data.toolCallId;
295
+ const blocks = state.streaming.blocks.map((block): Block => {
296
+ if (block.type === "tool" && block.toolUseId === toolCallId) {
297
+ return {
298
+ ...block,
299
+ toolResult: formatToolResultOutput(data.toolResult.output),
300
+ status:
301
+ data.toolResult.output.type === "error-text" ||
302
+ data.toolResult.output.type === "error-json" ||
303
+ data.toolResult.output.type === "execution-denied"
304
+ ? "error"
305
+ : "completed",
306
+ };
307
+ }
308
+ return block;
309
+ });
310
+
311
+ return {
312
+ ...state,
313
+ streaming: {
314
+ ...state.streaming,
315
+ blocks,
316
+ },
317
+ status: "responding",
318
+ };
319
+ }
320
+
321
+ function handleError(state: PresentationState, data: ErrorData): PresentationState {
275
322
  return {
276
323
  ...state,
277
324
  conversations: [
@@ -290,13 +337,7 @@ function handleError(
290
337
  // Helper: Add user conversation
291
338
  // ============================================================================
292
339
 
293
- /**
294
- * Add a user conversation to state
295
- */
296
- export function addUserConversation(
297
- state: PresentationState,
298
- content: string
299
- ): PresentationState {
340
+ export function addUserConversation(state: PresentationState, content: string): PresentationState {
300
341
  return {
301
342
  ...state,
302
343
  conversations: [
@@ -309,9 +350,137 @@ export function addUserConversation(
309
350
  };
310
351
  }
311
352
 
312
- /**
313
- * Create initial state
314
- */
315
353
  export function createInitialState(): PresentationState {
316
354
  return { ...initialPresentationState };
317
355
  }
356
+
357
+ // ============================================================================
358
+ // Helper: Format tool result output
359
+ // ============================================================================
360
+
361
+ function formatToolResultOutput(output: ToolResultOutput): string {
362
+ switch (output.type) {
363
+ case "text":
364
+ case "error-text":
365
+ return output.value;
366
+ case "json":
367
+ case "error-json":
368
+ return JSON.stringify(output.value);
369
+ case "execution-denied":
370
+ return output.reason ?? "Execution denied";
371
+ case "content":
372
+ return output.value
373
+ .filter((p): p is { type: "text"; text: string } => p.type === "text")
374
+ .map((p) => p.text)
375
+ .join("");
376
+ }
377
+ }
378
+
379
+ // ============================================================================
380
+ // Message → Conversation Converter
381
+ // ============================================================================
382
+
383
+ /**
384
+ * Convert persisted Messages to Presentation Conversations.
385
+ *
386
+ * Groups consecutive assistant + tool-result messages
387
+ * into a single AssistantConversation.
388
+ *
389
+ * Tool calls are now part of AssistantMessage.content (as ToolCallPart),
390
+ * so we extract them directly from the assistant message.
391
+ */
392
+ export function messagesToConversations(messages: Message[]): Conversation[] {
393
+ const conversations: Conversation[] = [];
394
+ let currentAssistant: AssistantConversation | null = null;
395
+
396
+ function flushAssistant() {
397
+ if (currentAssistant && currentAssistant.blocks.length > 0) {
398
+ conversations.push(currentAssistant);
399
+ }
400
+ currentAssistant = null;
401
+ }
402
+
403
+ for (const msg of messages) {
404
+ switch (msg.subtype) {
405
+ case "user": {
406
+ flushAssistant();
407
+ const m = msg as UserMessage;
408
+ const text =
409
+ typeof m.content === "string"
410
+ ? m.content
411
+ : m.content
412
+ .filter((p): p is { type: "text"; text: string } => p.type === "text")
413
+ .map((p) => p.text)
414
+ .join("");
415
+ conversations.push({
416
+ role: "user",
417
+ blocks: [{ type: "text", content: text }],
418
+ });
419
+ break;
420
+ }
421
+
422
+ case "assistant": {
423
+ if (!currentAssistant) {
424
+ currentAssistant = { role: "assistant", blocks: [], isStreaming: false };
425
+ }
426
+ const m = msg as AssistantMessage;
427
+ if (typeof m.content === "string") {
428
+ if (m.content) {
429
+ currentAssistant.blocks.push({ type: "text", content: m.content } as TextBlock);
430
+ }
431
+ } else {
432
+ // Extract text and tool call parts from content
433
+ for (const part of m.content) {
434
+ if (part.type === "text") {
435
+ if (part.text) {
436
+ currentAssistant.blocks.push({ type: "text", content: part.text } as TextBlock);
437
+ }
438
+ } else if (part.type === "tool-call") {
439
+ const tc = part as ToolCallPart;
440
+ currentAssistant.blocks.push({
441
+ type: "tool",
442
+ toolUseId: tc.id,
443
+ toolName: tc.name,
444
+ toolInput: tc.input,
445
+ status: "completed",
446
+ } as ToolBlock);
447
+ }
448
+ }
449
+ }
450
+ break;
451
+ }
452
+
453
+ case "tool-result": {
454
+ const m = msg as ToolResultMessage;
455
+ if (currentAssistant) {
456
+ for (const block of currentAssistant.blocks) {
457
+ if (block.type === "tool" && block.toolUseId === m.toolResult.id) {
458
+ block.toolResult = formatToolResultOutput(m.toolResult.output);
459
+ block.status =
460
+ m.toolResult.output.type === "error-text" ||
461
+ m.toolResult.output.type === "error-json" ||
462
+ m.toolResult.output.type === "execution-denied"
463
+ ? "error"
464
+ : "completed";
465
+ break;
466
+ }
467
+ }
468
+ }
469
+ break;
470
+ }
471
+
472
+ case "error": {
473
+ flushAssistant();
474
+ const m = msg as ErrorMessage;
475
+ conversations.push({
476
+ role: "error",
477
+ message: m.content,
478
+ });
479
+ break;
480
+ }
481
+ }
482
+ }
483
+
484
+ flushAssistant();
485
+ return conversations;
486
+ }
@@ -55,6 +55,14 @@ export interface UserConversation {
55
55
  blocks: Block[];
56
56
  }
57
57
 
58
+ /**
59
+ * Token usage for a message (one LLM call / step)
60
+ */
61
+ export interface TokenUsage {
62
+ inputTokens: number;
63
+ outputTokens: number;
64
+ }
65
+
58
66
  /**
59
67
  * Assistant conversation
60
68
  */
@@ -62,6 +70,8 @@ export interface AssistantConversation {
62
70
  role: "assistant";
63
71
  blocks: Block[];
64
72
  isStreaming: boolean;
73
+ /** Accumulated token usage across all steps in this conversation */
74
+ usage?: TokenUsage;
65
75
  }
66
76
 
67
77
  /**