agents 0.0.0-197e86a → 0.0.0-1a3d226

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. package/README.md +255 -10
  2. package/dist/ai-chat-agent.d.ts +238 -31
  3. package/dist/ai-chat-agent.js +1118 -260
  4. package/dist/ai-chat-agent.js.map +1 -1
  5. package/dist/ai-chat-v5-migration.d.ts +155 -0
  6. package/dist/ai-chat-v5-migration.js +155 -0
  7. package/dist/ai-chat-v5-migration.js.map +1 -0
  8. package/dist/ai-react.d.ts +197 -86
  9. package/dist/ai-react.js +574 -199
  10. package/dist/ai-react.js.map +1 -1
  11. package/dist/ai-types-0OnT3FHg.d.ts +127 -0
  12. package/dist/ai-types-DEtF_8Km.js +28 -0
  13. package/dist/ai-types-DEtF_8Km.js.map +1 -0
  14. package/dist/ai-types.d.ts +6 -74
  15. package/dist/ai-types.js +3 -1
  16. package/dist/cli/index.d.ts +1 -0
  17. package/dist/cli/index.js +28 -0
  18. package/dist/cli/index.js.map +1 -0
  19. package/dist/client-BINtT7y-.d.ts +834 -0
  20. package/dist/client-CdM5I962.d.ts +104 -0
  21. package/dist/client-DjTPRM8-.js +117 -0
  22. package/dist/client-DjTPRM8-.js.map +1 -0
  23. package/dist/client-QZa2Rq0l.js +1105 -0
  24. package/dist/client-QZa2Rq0l.js.map +1 -0
  25. package/dist/client.d.ts +10 -92
  26. package/dist/client.js +3 -11
  27. package/dist/codemode/ai.d.ts +27 -0
  28. package/dist/codemode/ai.js +151 -0
  29. package/dist/codemode/ai.js.map +1 -0
  30. package/dist/context-BkKbAa1R.js +8 -0
  31. package/dist/context-BkKbAa1R.js.map +1 -0
  32. package/dist/context-DcbQ8o7k.d.ts +24 -0
  33. package/dist/context.d.ts +6 -0
  34. package/dist/context.js +3 -0
  35. package/dist/do-oauth-client-provider--To1Tsjj.d.ts +70 -0
  36. package/dist/do-oauth-client-provider-B1fVIshX.js +155 -0
  37. package/dist/do-oauth-client-provider-B1fVIshX.js.map +1 -0
  38. package/dist/{index-BCJclX6q.d.ts → index-CfZ2mfMI.d.ts} +131 -170
  39. package/dist/index-DLuxm_9W.d.ts +58 -0
  40. package/dist/index.d.ts +66 -40
  41. package/dist/index.js +7 -28
  42. package/dist/mcp/client.d.ts +2 -11
  43. package/dist/mcp/client.js +4 -9
  44. package/dist/mcp/do-oauth-client-provider.d.ts +2 -41
  45. package/dist/mcp/do-oauth-client-provider.js +3 -7
  46. package/dist/mcp/index.d.ts +197 -109
  47. package/dist/mcp/index.js +1429 -942
  48. package/dist/mcp/index.js.map +1 -1
  49. package/dist/mcp/x402.d.ts +34 -0
  50. package/dist/mcp/x402.js +198 -0
  51. package/dist/mcp/x402.js.map +1 -0
  52. package/dist/mcp-CPSfGUgd.d.ts +61 -0
  53. package/dist/observability/index.d.ts +2 -14
  54. package/dist/observability/index.js +7 -10
  55. package/dist/react.d.ts +48 -35
  56. package/dist/react.js +183 -110
  57. package/dist/react.js.map +1 -1
  58. package/dist/schedule.d.ts +61 -38
  59. package/dist/schedule.js +46 -21
  60. package/dist/schedule.js.map +1 -1
  61. package/dist/serializable-Crsj26mx.d.ts +39 -0
  62. package/dist/serializable.d.ts +7 -32
  63. package/dist/serializable.js +1 -1
  64. package/dist/src-BZDh910Z.js +1181 -0
  65. package/dist/src-BZDh910Z.js.map +1 -0
  66. package/package.json +93 -29
  67. package/dist/ai-types.js.map +0 -1
  68. package/dist/chunk-HY7ZLHJB.js +0 -598
  69. package/dist/chunk-HY7ZLHJB.js.map +0 -1
  70. package/dist/chunk-JXN5WZFQ.js +0 -1287
  71. package/dist/chunk-JXN5WZFQ.js.map +0 -1
  72. package/dist/chunk-KUH345EY.js +0 -116
  73. package/dist/chunk-KUH345EY.js.map +0 -1
  74. package/dist/chunk-PVQZBKN7.js +0 -106
  75. package/dist/chunk-PVQZBKN7.js.map +0 -1
  76. package/dist/client-DgyzBU_8.d.ts +0 -4601
  77. package/dist/client.js.map +0 -1
  78. package/dist/index.js.map +0 -1
  79. package/dist/mcp/client.js.map +0 -1
  80. package/dist/mcp/do-oauth-client-provider.js.map +0 -1
  81. package/dist/observability/index.js.map +0 -1
  82. package/dist/serializable.js.map +0 -1
  83. package/src/index.ts +0 -1917
package/README.md CHANGED
@@ -1,6 +1,6 @@
1
1
  ### 🧠 `agents` - A Framework for Digital Intelligence
2
2
 
3
- ![agents-header](https://github.com/user-attachments/assets/f6d99eeb-1803-4495-9c5e-3cf07a37b402)
3
+ ![npm install agents](../../assets/npm-install-agents.svg)
4
4
 
5
5
  Welcome to a new chapter in software development, where AI agents persist, think, and act with purpose. The `agents` framework creates an environment where artificial intelligence can flourish - maintaining state, engaging in meaningful interactions, and evolving over time.
6
6
 
@@ -166,7 +166,7 @@ export class DialogueAgent extends Agent {
166
166
  }
167
167
  ```
168
168
 
169
- #### Client Communion
169
+ #### Client Communication
170
170
 
171
171
  For direct connection to your agent:
172
172
 
@@ -317,24 +317,139 @@ Create meaningful conversations with intelligence:
317
317
  ```ts
318
318
  import { AIChatAgent } from "agents/ai-chat-agent";
319
319
  import { openai } from "@ai-sdk/openai";
320
+ import { streamText, generateText, createDataStreamResponse } from "ai";
320
321
 
321
322
  export class DialogueAgent extends AIChatAgent {
322
323
  async onChatMessage(onFinish) {
324
+ // Option 1: Streaming responses (recommended for real-time interaction)
323
325
  return createDataStreamResponse({
324
326
  execute: async (dataStream) => {
325
327
  const stream = streamText({
326
328
  model: openai("gpt-4o"),
327
329
  messages: this.messages,
328
- onFinish // call onFinish so that messages get saved
330
+ // Optional: onFinish is invoked by the AI SDK when generation completes.
331
+ // Persistence is handled automatically by AIChatAgent after streaming completes.
332
+ onFinish
329
333
  });
330
334
 
331
335
  stream.mergeIntoDataStream(dataStream);
332
336
  }
333
337
  });
338
+
339
+ // Option 2: Non-streaming responses (simpler, but no real-time updates)
340
+ // const result = await generateText({
341
+ // model: openai("gpt-4o"),
342
+ // messages: this.messages,
343
+ // });
344
+ //
345
+ // // For non-streaming with metadata, use toUIMessage:
346
+ // const message = result.toUIMessage({
347
+ // metadata: {
348
+ // model: 'gpt-4o',
349
+ // totalTokens: result.usage?.totalTokens,
350
+ // }
351
+ // });
352
+ //
353
+ // return new Response(JSON.stringify(message), {
354
+ // headers: { 'Content-Type': 'application/json' }
355
+ // });
356
+ }
357
+ }
358
+ ```
359
+
360
+ #### Metadata Support
361
+
362
+ The AI SDK provides native support for message metadata through the `messageMetadata` callback. This allows you to attach custom information to messages at the message level.
363
+
364
+ ##### AIChatAgent Integration
365
+
366
+ In the context of `AIChatAgent`, you can use metadata like this:
367
+
368
+ ```typescript
369
+ import { AIChatAgent } from "agents/ai-chat-agent";
370
+ import { streamText } from "ai";
371
+ import { openai } from "@ai-sdk/openai";
372
+
373
+ export class MyAgent extends AIChatAgent<Env> {
374
+ async onChatMessage(onFinish) {
375
+ const startTime = Date.now();
376
+
377
+ const result = streamText({
378
+ model: openai("gpt-4o"),
379
+ messages: this.messages,
380
+ onFinish
381
+ });
382
+
383
+ return result.toUIMessageStreamResponse({
384
+ messageMetadata: ({ part }) => {
385
+ if (part.type === "start") {
386
+ return {
387
+ model: "gpt-4o",
388
+ createdAt: Date.now(),
389
+ messageCount: this.messages.length
390
+ };
391
+ }
392
+ if (part.type === "finish") {
393
+ return {
394
+ responseTime: Date.now() - startTime,
395
+ totalTokens: part.totalUsage?.totalTokens
396
+ };
397
+ }
398
+ }
399
+ });
334
400
  }
335
401
  }
336
402
  ```
337
403
 
404
+ ##### Accessing Metadata on the Client
405
+
406
+ Access metadata through the `message.metadata` property:
407
+
408
+ ```typescript
409
+ 'use client';
410
+
411
+ import { useChat } from '@ai-sdk/react';
412
+ import { DefaultChatTransport } from 'ai';
413
+ import type { MyUIMessage } from '@/types';
414
+
415
+ export default function Chat() {
416
+ const { messages } = useChat<MyUIMessage>({
417
+ transport: new DefaultChatTransport({
418
+ api: '/api/chat',
419
+ }),
420
+ });
421
+
422
+ return (
423
+ <div>
424
+ {messages.map(message => (
425
+ <div key={message.id}>
426
+ <div>
427
+ {message.role === 'user' ? 'User: ' : 'AI: '}
428
+ {message.metadata?.createdAt && (
429
+ <span className="text-sm text-gray-500">
430
+ {new Date(message.metadata.createdAt).toLocaleTimeString()}
431
+ </span>
432
+ )}
433
+ </div>
434
+ {/* Render message content */}
435
+ {message.parts.map((part, index) =>
436
+ part.type === 'text' ? <div key={index}>{part.text}</div> : null,
437
+ )}
438
+ {/* Display additional metadata */}
439
+ {message.metadata?.totalTokens && (
440
+ <div className="text-xs text-gray-400">
441
+ {message.metadata.totalTokens} tokens
442
+ </div>
443
+ )}
444
+ </div>
445
+ ))}
446
+ </div>
447
+ );
448
+ }
449
+ ```
450
+
451
+ For more details, see the [AI SDK Message Metadata documentation](https://ai-sdk.dev/docs/ai-sdk-ui/message-metadata).
452
+
338
453
  #### Creating the Interface
339
454
 
340
455
  Connect with your agent through a React interface:
@@ -363,7 +478,14 @@ function ChatInterface() {
363
478
  {messages.map((message) => (
364
479
  <div key={message.id} className="message">
365
480
  <div className="role">{message.role}</div>
366
- <div className="content">{message.content}</div>
481
+ <div className="content">
482
+ {message.parts.map((part, i) => {
483
+ if (part.type === "text")
484
+ return <span key={i}>{part.text}</span>;
485
+ // Render other part types (e.g., files, tool calls) as desired
486
+ return null;
487
+ })}
488
+ </div>
367
489
  </div>
368
490
  ))}
369
491
  </div>
@@ -393,6 +515,127 @@ This creates:
393
515
  - Intuitive input handling
394
516
  - Easy conversation reset
395
517
 
518
+ #### Client-Defined Tools
519
+
520
+ For scenarios where each client needs to register its own tools dynamically (e.g., embeddable chat widgets), use the `tools` option with `execute` functions.
521
+
522
+ Tools with an `execute` function are automatically:
523
+
524
+ 1. Sent to the server as schemas with each request
525
+ 2. Executed on the client when the AI model calls them
526
+
527
+ ##### Client-Side Tool Definition
528
+
529
+ ```tsx
530
+ import { useAgent } from "agents/react";
531
+ import { useAgentChat, type AITool } from "agents/ai-react";
532
+
533
+ // Define tools outside component to avoid recreation on every render
534
+ const tools: Record<string, AITool> = {
535
+ showAlert: {
536
+ description: "Shows an alert dialog to the user",
537
+ parameters: {
538
+ type: "object",
539
+ properties: { message: { type: "string" } },
540
+ required: ["message"]
541
+ },
542
+ execute: async (input) => {
543
+ const { message } = input as { message: string };
544
+ alert(message);
545
+ return { success: true };
546
+ }
547
+ },
548
+ changeBackgroundColor: {
549
+ description: "Changes the page background color",
550
+ parameters: {
551
+ type: "object",
552
+ properties: { color: { type: "string" } }
553
+ },
554
+ execute: async (input) => {
555
+ const { color } = input as { color: string };
556
+ document.body.style.backgroundColor = color;
557
+ return { success: true, color };
558
+ }
559
+ }
560
+ };
561
+
562
+ function EmbeddableChat() {
563
+ const agent = useAgent({ agent: "chat-widget" });
564
+
565
+ const { messages, input, handleInputChange, handleSubmit } = useAgentChat({
566
+ agent,
567
+ tools // Schema + execute in one place
568
+ });
569
+
570
+ return (
571
+ <div className="chat-widget">
572
+ {messages.map((message) => (
573
+ <div key={message.id}>{/* Render message */}</div>
574
+ ))}
575
+ <form onSubmit={handleSubmit}>
576
+ <input value={input} onChange={handleInputChange} />
577
+ </form>
578
+ </div>
579
+ );
580
+ }
581
+ ```
582
+
583
+ ##### Server-Side Tool Handling
584
+
585
+ On the server, use `createToolsFromClientSchemas` to convert client tool schemas to AI SDK format:
586
+
587
+ ```typescript
588
+ import {
589
+ AIChatAgent,
590
+ createToolsFromClientSchemas
591
+ } from "agents/ai-chat-agent";
592
+ import { openai } from "@ai-sdk/openai";
593
+ import { streamText, convertToModelMessages } from "ai";
594
+
595
+ export class ChatWidget extends AIChatAgent {
596
+ async onChatMessage(onFinish, options) {
597
+ const result = streamText({
598
+ model: openai("gpt-4o"),
599
+ messages: convertToModelMessages(this.messages),
600
+ tools: {
601
+ // Server-side tools (execute on server)
602
+ getWeather: tool({
603
+ description: "Get weather for a city",
604
+ parameters: z.object({ city: z.string() }),
605
+ execute: async ({ city }) => fetchWeather(city)
606
+ }),
607
+ // Client-side tools (sent back to client for execution)
608
+ ...createToolsFromClientSchemas(options?.clientTools)
609
+ },
610
+ onFinish
611
+ });
612
+ return result.toUIMessageStreamResponse();
613
+ }
614
+ }
615
+ ```
616
+
617
+ ##### Advanced: Custom Request Data
618
+
619
+ For additional control (custom headers, dynamic context), use `prepareSendMessagesRequest`:
620
+
621
+ ```tsx
622
+ const { messages, handleSubmit } = useAgentChat({
623
+ agent,
624
+ tools, // Tool schemas auto-extracted and sent
625
+ prepareSendMessagesRequest: ({ id, messages }) => ({
626
+ body: {
627
+ // Add dynamic context alongside auto-extracted tool schemas
628
+ currentUrl: window.location.href,
629
+ userTimezone: Intl.DateTimeFormat().resolvedOptions().timeZone
630
+ },
631
+ headers: {
632
+ "X-Widget-Version": "1.0.0",
633
+ "X-Request-ID": crypto.randomUUID()
634
+ }
635
+ })
636
+ });
637
+ ```
638
+
396
639
  ### 🔗 MCP (Model Context Protocol) Integration
397
640
 
398
641
  Agents can seamlessly integrate with the Model Context Protocol, allowing them to act as both MCP servers (providing tools to AI assistants) and MCP clients (using tools from other services).
@@ -427,10 +670,12 @@ export class MyMCP extends McpAgent<Env, State, {}> {
427
670
  };
428
671
  });
429
672
 
430
- this.server.tool(
673
+ this.server.registerTool(
431
674
  "add",
432
- "Add to the counter, stored in the MCP",
433
- { a: z.number() },
675
+ {
676
+ description: "Add to the counter, stored in the MCP",
677
+ inputSchema: { a: z.number() }
678
+ },
434
679
  async ({ a }) => {
435
680
  this.setState({ ...this.state, counter: this.state.counter + a });
436
681
 
@@ -488,7 +733,7 @@ import { generateText } from "ai";
488
733
  // Convert MCP tools for AI use
489
734
  const result = await generateText({
490
735
  model: openai("gpt-4"),
491
- tools: client.unstable_getAITools(),
736
+ tools: client.getAITools(),
492
737
  prompt: "What's the weather in Tokyo?"
493
738
  });
494
739
  ```
@@ -524,8 +769,8 @@ Welcome to the future of intelligent agents. Create something meaningful. 🌟
524
769
  Contributions are welcome, but are especially welcome when:
525
770
 
526
771
  - You have opened an issue as a Request for Comment (RFC) to discuss your proposal, show your thinking, and iterate together.
527
- - Is not "AI slop": LLMs are powerful tools, but contributions entirely authored by vibe coding are unlikely to meet the quality bar, and will be rejected.
528
- - You're willing to accept feedback and make sure the changes fit the goals of the `agents` sdk. Not everything will, and that's OK.
772
+ - Not "AI slop": LLMs are powerful tools, but contributions entirely authored by vibe coding are unlikely to meet the quality bar, and will be rejected.
773
+ - You're willing to accept feedback and make sure the changes fit the goals of the `agents` SDK. Not everything will, and that's OK.
529
774
 
530
775
  Small fixes, type bugs, and documentation improvements can be raised directly as PRs.
531
776
 
@@ -1,61 +1,262 @@
1
- import { Message, StreamTextOnFinishCallback, ToolSet } from "ai";
2
- import { A as Agent, a as AgentContext } from "./index-BCJclX6q.js";
3
- import { Connection, WSMessage } from "partyserver";
4
- import "cloudflare:workers";
5
- import "@modelcontextprotocol/sdk/client/index.js";
6
- import "@modelcontextprotocol/sdk/types.js";
7
- import "./client-DgyzBU_8.js";
8
- import "zod";
9
- import "@modelcontextprotocol/sdk/shared/protocol.js";
10
- import "@modelcontextprotocol/sdk/client/sse.js";
11
- import "@modelcontextprotocol/sdk/client/streamableHttp.js";
12
- import "./mcp/do-oauth-client-provider.js";
13
- import "@modelcontextprotocol/sdk/client/auth.js";
14
- import "@modelcontextprotocol/sdk/shared/auth.js";
1
+ import "./context-DcbQ8o7k.js";
2
+ import "./client-BINtT7y-.js";
3
+ import "./ai-types-0OnT3FHg.js";
4
+ import { n as AgentContext, t as Agent } from "./index-CfZ2mfMI.js";
5
+ import {
6
+ JSONSchema7,
7
+ StreamTextOnFinishCallback,
8
+ Tool,
9
+ ToolSet,
10
+ UIMessage
11
+ } from "ai";
15
12
 
13
+ //#region src/ai-chat-agent.d.ts
14
+
15
+ /**
16
+ * Schema for a client-defined tool sent from the browser.
17
+ * These tools are executed on the client, not the server.
18
+ *
19
+ * Note: Uses `parameters` (JSONSchema7) rather than AI SDK's `inputSchema` (FlexibleSchema)
20
+ * because this is the wire format. Zod schemas cannot be serialized.
21
+ */
22
+ type ClientToolSchema = {
23
+ /** Unique name for the tool */
24
+ name: string;
25
+ /** Human-readable description of what the tool does */
26
+ description?: Tool["description"];
27
+ /** JSON Schema defining the tool's input parameters */
28
+ parameters?: JSONSchema7;
29
+ };
30
+ /**
31
+ * Options passed to the onChatMessage handler.
32
+ */
33
+ type OnChatMessageOptions = {
34
+ /** AbortSignal for cancelling the request */
35
+ abortSignal?: AbortSignal;
36
+ /**
37
+ * Tool schemas sent from the client for dynamic tool registration.
38
+ * These represent tools that will be executed on the client side.
39
+ * Use `createToolsFromClientSchemas()` to convert these to AI SDK tool format.
40
+ */
41
+ clientTools?: ClientToolSchema[];
42
+ };
43
+ /**
44
+ * Converts client tool schemas to AI SDK tool format.
45
+ *
46
+ * These tools have no `execute` function - when the AI model calls them,
47
+ * the tool call is sent back to the client for execution.
48
+ *
49
+ * @param clientTools - Array of tool schemas from the client
50
+ * @returns Record of AI SDK tools that can be spread into your tools object
51
+ */
52
+ declare function createToolsFromClientSchemas(
53
+ clientTools?: ClientToolSchema[]
54
+ ): ToolSet;
16
55
  /**
17
56
  * Extension of Agent with built-in chat capabilities
18
57
  * @template Env Environment type containing bindings
19
58
  */
20
- declare class AIChatAgent<Env = unknown, State = unknown> extends Agent<
21
- Env,
22
- State
23
- > {
59
+ declare class AIChatAgent<
60
+ Env extends Cloudflare.Env = Cloudflare.Env,
61
+ State = unknown
62
+ > extends Agent<Env, State> {
24
63
  /**
25
64
  * Map of message `id`s to `AbortController`s
26
65
  * useful to propagate request cancellation signals for any external calls made by the agent
27
66
  */
28
67
  private _chatMessageAbortControllers;
68
+ /**
69
+ * Currently active stream ID for resumable streaming.
70
+ * Stored in memory for quick access; persisted in stream_metadata table.
71
+ * @internal Protected for testing purposes.
72
+ */
73
+ protected _activeStreamId: string | null;
74
+ /**
75
+ * Request ID associated with the active stream.
76
+ * @internal Protected for testing purposes.
77
+ */
78
+ protected _activeRequestId: string | null;
79
+ /**
80
+ * The message currently being streamed. Used to apply tool results
81
+ * before the message is persisted.
82
+ * @internal
83
+ */
84
+ private _streamingMessage;
85
+ /**
86
+ * Promise that resolves when the current stream completes.
87
+ * Used to wait for message persistence before continuing after tool results.
88
+ * @internal
89
+ */
90
+ private _streamCompletionPromise;
91
+ private _streamCompletionResolve;
92
+ /**
93
+ * Current chunk index for the active stream
94
+ */
95
+ private _streamChunkIndex;
96
+ /**
97
+ * Buffer for stream chunks pending write to SQLite.
98
+ * Chunks are batched and flushed when buffer reaches CHUNK_BUFFER_SIZE.
99
+ */
100
+ private _chunkBuffer;
101
+ /**
102
+ * Lock to prevent concurrent flush operations
103
+ */
104
+ private _isFlushingChunks;
105
+ /**
106
+ * Timestamp of the last cleanup operation for old streams
107
+ */
108
+ private _lastCleanupTime;
29
109
  /** Array of chat messages for the current conversation */
30
- messages: Message[];
110
+ messages: UIMessage[];
31
111
  constructor(ctx: AgentContext, env: Env);
112
+ /**
113
+ * Restore active stream state if the agent was restarted during streaming.
114
+ * Called during construction to recover any interrupted streams.
115
+ * Validates stream freshness to avoid sending stale resume notifications.
116
+ * @internal Protected for testing purposes.
117
+ */
118
+ protected _restoreActiveStream(): void;
119
+ /**
120
+ * Notify a connection about an active stream that can be resumed.
121
+ * The client should respond with CF_AGENT_STREAM_RESUME_ACK to receive chunks.
122
+ * Uses in-memory state for request ID - no extra DB lookup needed.
123
+ * @param connection - The WebSocket connection to notify
124
+ */
125
+ private _notifyStreamResuming;
126
+ /**
127
+ * Send stream chunks to a connection after receiving ACK.
128
+ * @param connection - The WebSocket connection
129
+ * @param streamId - The stream to replay
130
+ * @param requestId - The original request ID
131
+ */
132
+ private _sendStreamChunks;
133
+ /**
134
+ * Buffer a stream chunk for batch write to SQLite.
135
+ * @param streamId - The stream this chunk belongs to
136
+ * @param body - The serialized chunk body
137
+ * @internal Protected for testing purposes.
138
+ */
139
+ protected _storeStreamChunk(streamId: string, body: string): void;
140
+ /**
141
+ * Flush buffered chunks to SQLite in a single batch.
142
+ * Uses a lock to prevent concurrent flush operations.
143
+ * @internal Protected for testing purposes.
144
+ */
145
+ protected _flushChunkBuffer(): void;
146
+ /**
147
+ * Start tracking a new stream for resumable streaming.
148
+ * Creates metadata entry in SQLite and sets up tracking state.
149
+ * @param requestId - The unique ID of the chat request
150
+ * @returns The generated stream ID
151
+ * @internal Protected for testing purposes.
152
+ */
153
+ protected _startStream(requestId: string): string;
154
+ /**
155
+ * Mark a stream as completed and flush any pending chunks.
156
+ * @param streamId - The stream to mark as completed
157
+ * @internal Protected for testing purposes.
158
+ */
159
+ protected _completeStream(streamId: string): void;
160
+ /**
161
+ * Clean up old completed streams if enough time has passed since last cleanup.
162
+ * This prevents database growth while avoiding cleanup overhead on every stream completion.
163
+ */
164
+ private _maybeCleanupOldStreams;
32
165
  private _broadcastChatMessage;
33
- onMessage(connection: Connection, message: WSMessage): Promise<void>;
166
+ private _loadMessagesFromDb;
34
167
  onRequest(request: Request): Promise<Response>;
35
168
  private _tryCatchChat;
36
169
  /**
37
170
  * Handle incoming chat messages and generate a response
38
171
  * @param onFinish Callback to be called when the response is finished
39
- * @param options.signal A signal to pass to any child requests which can be used to cancel them
172
+ * @param options Options including abort signal and client-defined tools
40
173
  * @returns Response to send to the client or undefined
41
174
  */
42
175
  onChatMessage(
43
176
  onFinish: StreamTextOnFinishCallback<ToolSet>,
44
- options?: {
45
- abortSignal: AbortSignal | undefined;
46
- }
177
+ options?: OnChatMessageOptions
47
178
  ): Promise<Response | undefined>;
48
179
  /**
49
- * Save messages on the server side and trigger AI response
180
+ * Save messages on the server side
50
181
  * @param messages Chat messages to save
51
182
  */
52
- saveMessages(messages: Message[]): Promise<void>;
183
+ saveMessages(messages: UIMessage[]): Promise<void>;
53
184
  persistMessages(
54
- messages: Message[],
185
+ messages: UIMessage[],
55
186
  excludeBroadcastIds?: string[]
56
187
  ): Promise<void>;
57
- private _messagesNotAlreadyInAgent;
188
+ /**
189
+ * Merges incoming messages with existing server state.
190
+ * This preserves tool outputs that the server has (via _applyToolResult)
191
+ * but the client doesn't have yet.
192
+ *
193
+ * @param incomingMessages - Messages from the client
194
+ * @returns Messages with server's tool outputs preserved
195
+ */
196
+ private _mergeIncomingWithServerState;
197
+ /**
198
+ * Resolves a message for persistence, handling tool result merging.
199
+ * If the message contains tool parts with output-available state, checks if there's
200
+ * an existing message with the same toolCallId that should be updated instead of
201
+ * creating a duplicate. This prevents the "Duplicate item found" error from OpenAI
202
+ * when client-side tool results arrive in a new request.
203
+ *
204
+ * @param message - The message to potentially merge
205
+ * @returns The message with the correct ID (either original or merged)
206
+ */
207
+ private _resolveMessageForToolMerge;
208
+ /**
209
+ * Finds an existing assistant message that contains a tool part with the given toolCallId.
210
+ * Used to detect when a tool result should update an existing message rather than
211
+ * creating a new one.
212
+ *
213
+ * @param toolCallId - The tool call ID to search for
214
+ * @returns The existing message if found, undefined otherwise
215
+ */
216
+ private _findMessageByToolCallId;
217
+ /**
218
+ * Sanitizes a message for persistence by removing ephemeral provider-specific
219
+ * data that should not be stored or sent back in subsequent requests.
220
+ *
221
+ * This handles two issues with the OpenAI Responses API:
222
+ *
223
+ * 1. **Duplicate item IDs**: The AI SDK's @ai-sdk/openai provider (v2.0.x+)
224
+ * defaults to using OpenAI's Responses API which assigns unique itemIds
225
+ * to each message part. When these IDs are persisted and sent back,
226
+ * OpenAI rejects them as duplicates.
227
+ *
228
+ * 2. **Empty reasoning parts**: OpenAI may return reasoning parts with empty
229
+ * text and encrypted content. These cause "Non-OpenAI reasoning parts are
230
+ * not supported" warnings when sent back via convertToModelMessages().
231
+ *
232
+ * @param message - The message to sanitize
233
+ * @returns A new message with ephemeral provider data removed
234
+ */
235
+ private _sanitizeMessageForPersistence;
236
+ /**
237
+ * Helper to strip OpenAI-specific ephemeral fields from a metadata object.
238
+ * Removes itemId and reasoningEncryptedContent while preserving other fields.
239
+ */
240
+ private _stripOpenAIMetadata;
241
+ /**
242
+ * Applies a tool result to an existing assistant message.
243
+ * This is used when the client sends CF_AGENT_TOOL_RESULT for client-side tools.
244
+ * The server is the source of truth, so we update the message here and broadcast
245
+ * the update to all clients.
246
+ *
247
+ * @param toolCallId - The tool call ID this result is for
248
+ * @param toolName - The name of the tool
249
+ * @param output - The output from the tool execution
250
+ * @returns true if the result was applied, false if the message was not found
251
+ */
252
+ private _applyToolResult;
58
253
  private _reply;
254
+ /**
255
+ * Mark a stream as errored and clean up state.
256
+ * @param streamId - The stream to mark as errored
257
+ * @internal Protected for testing purposes.
258
+ */
259
+ protected _markStreamError(streamId: string): void;
59
260
  /**
60
261
  * For the given message id, look up its associated AbortController
61
262
  * If the AbortController does not exist, create and store one in memory
@@ -76,9 +277,15 @@ declare class AIChatAgent<Env = unknown, State = unknown> extends Agent<
76
277
  */
77
278
  private _destroyAbortControllers;
78
279
  /**
79
- * When the DO is destroyed, cancel all pending requests
280
+ * When the DO is destroyed, cancel all pending requests and clean up resources
80
281
  */
81
282
  destroy(): Promise<void>;
82
283
  }
83
-
84
- export { AIChatAgent };
284
+ //#endregion
285
+ export {
286
+ AIChatAgent,
287
+ ClientToolSchema,
288
+ OnChatMessageOptions,
289
+ createToolsFromClientSchemas
290
+ };
291
+ //# sourceMappingURL=ai-chat-agent.d.ts.map