graphlit-client 1.0.20250610008 → 1.0.20250610010

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -2,7 +2,37 @@
2
2
 
3
3
  ## Overview
4
4
 
5
- The Graphlit Client for Node.js enables straightforward interactions with the Graphlit API, allowing developers to execute GraphQL queries and mutations against the Graphlit service. This document outlines the setup process and provides examples of using the client, including the new streaming capabilities.
5
+ The Graphlit Client for Node.js enables straightforward interactions with the Graphlit API, allowing developers to execute GraphQL queries and mutations against the Graphlit service. This document outlines the setup process and provides examples of using the client, including advanced streaming capabilities with real-time token delivery and tool calling support.
6
+
7
+ ## Quick Start
8
+
9
+ Get up and running in 2 minutes:
10
+
11
+ ```bash
12
+ # Install the client
13
+ npm install graphlit-client
14
+
15
+ # Set your credentials (get from https://portal.graphlit.dev)
16
+ export GRAPHLIT_ORGANIZATION_ID=your_org_id
17
+ export GRAPHLIT_ENVIRONMENT_ID=your_env_id
18
+ export GRAPHLIT_JWT_SECRET=your_secret
19
+ ```
20
+
21
+ ```typescript
22
+ import { Graphlit } from "graphlit-client";
23
+
24
+ const client = new Graphlit();
25
+
26
+ // Stream a conversation
27
+ await client.streamAgent(
28
+ "Hello! Tell me a joke",
29
+ (event) => {
30
+ if (event.type === "message_update") {
31
+ console.log(event.message.message);
32
+ }
33
+ }
34
+ );
35
+ ```
6
36
 
7
37
  ## Prerequisites
8
38
 
@@ -102,17 +132,17 @@ const contents = await client.queryContents({
102
132
 
103
133
  ### Streaming Conversations with streamAgent
104
134
 
105
- The new `streamAgent` method provides real-time streaming responses with automatic UI event handling:
135
+ The `streamAgent` method provides real-time streaming responses with automatic UI event handling. It supports both native SDK streaming (when LLM clients are configured) and fallback streaming through the Graphlit API:
106
136
 
107
137
  ```typescript
108
- import { Graphlit, UIStreamEvent } from "graphlit-client";
138
+ import { Graphlit, AgentStreamEvent } from "graphlit-client";
109
139
 
110
140
  const client = new Graphlit();
111
141
 
112
142
  // Basic streaming conversation
113
143
  await client.streamAgent(
114
144
  "Tell me about artificial intelligence",
115
- (event: UIStreamEvent) => {
145
+ (event: AgentStreamEvent) => {
116
146
  switch (event.type) {
117
147
  case "conversation_started":
118
148
  console.log(`Started conversation: ${event.conversationId}`);
@@ -135,9 +165,27 @@ await client.streamAgent(
135
165
  );
136
166
  ```
137
167
 
168
+ ### Non-Streaming Conversations with promptAgent
169
+
170
+ For simpler use cases without streaming, use `promptAgent`:
171
+
172
+ ```typescript
173
+ const result = await client.promptAgent(
174
+ "What's the weather like?",
175
+ undefined, // conversationId (creates new)
176
+ { id: "your-specification-id" }, // specification
177
+ tools, // optional tools array
178
+ toolHandlers, // optional tool handlers
179
+ { timeout: 30000 } // optional timeout
180
+ );
181
+
182
+ console.log(result.message); // Complete response
183
+ console.log(result.conversationId); // For continuing the conversation
184
+ ```
185
+
138
186
  ### Tool Calling with Streaming
139
187
 
140
- `streamAgent` supports tool calling with automatic execution:
188
+ `streamAgent` supports sophisticated tool calling with automatic execution and parallel processing:
141
189
 
142
190
  ```typescript
143
191
  // Define tools
@@ -164,12 +212,15 @@ const toolHandlers = {
164
212
  // Stream with tools
165
213
  await client.streamAgent(
166
214
  "What's the weather in San Francisco?",
167
- (event: UIStreamEvent) => {
215
+ (event: AgentStreamEvent) => {
168
216
  if (event.type === "tool_update") {
169
217
  console.log(`Tool ${event.toolCall.name}: ${event.status}`);
170
218
  if (event.result) {
171
219
  console.log(`Result: ${JSON.stringify(event.result)}`);
172
220
  }
221
+ if (event.error) {
222
+ console.error(`Tool error: ${event.error}`);
223
+ }
173
224
  } else if (event.type === "conversation_completed") {
174
225
  console.log(`Final: ${event.message.message}`);
175
226
  }
@@ -306,17 +357,98 @@ client.streamAgent(
306
357
  setTimeout(() => controller.abort(), 5000);
307
358
  ```
308
359
 
360
+ ## Advanced Tool Calling
361
+
362
+ ### Multiple Tool Calls
363
+
364
+ The agent can make multiple tool calls in a single response:
365
+
366
+ ```typescript
367
+ const tools = [
368
+ {
369
+ name: "search_web",
370
+ description: "Search the web for information",
371
+ schema: JSON.stringify({
372
+ type: "object",
373
+ properties: {
374
+ query: { type: "string" }
375
+ },
376
+ required: ["query"]
377
+ })
378
+ },
379
+ {
380
+ name: "calculate",
381
+ description: "Perform calculations",
382
+ schema: JSON.stringify({
383
+ type: "object",
384
+ properties: {
385
+ expression: { type: "string" }
386
+ },
387
+ required: ["expression"]
388
+ })
389
+ }
390
+ ];
391
+
392
+ const toolHandlers = {
393
+ search_web: async ({ query }) => {
394
+ // Implement web search
395
+ return { results: ["Result 1", "Result 2"] };
396
+ },
397
+ calculate: async ({ expression }) => {
398
+ // Implement calculation
399
+ return { result: eval(expression) }; // Use a proper math parser in production
400
+ }
401
+ };
402
+
403
+ // The agent can use multiple tools to answer complex queries
404
+ await client.streamAgent(
405
+ "Search for the current GDP of Japan and calculate 2.5% of it",
406
+ handleStreamEvent,
407
+ undefined,
408
+ { id: specId },
409
+ tools,
410
+ toolHandlers,
411
+ { maxToolRounds: 5 } // Allow multiple rounds of tool calls
412
+ );
413
+ ```
414
+
415
+ ### Tool Calling with Timeouts
416
+
417
+ Protect against hanging tools with timeouts:
418
+
419
+ ```typescript
420
+ const toolHandlers = {
421
+ slow_operation: async (args) => {
422
+ // This will timeout after 30 seconds (default)
423
+ await someSlowOperation(args);
424
+ }
425
+ };
426
+
427
+ // Or set custom timeout in AgentOptions
428
+ await client.promptAgent(
429
+ "Run the slow operation",
430
+ undefined,
431
+ { id: specId },
432
+ tools,
433
+ toolHandlers,
434
+ {
435
+ timeout: 60000, // 60 second timeout for entire operation
436
+ maxToolRounds: 3
437
+ }
438
+ );
439
+ ```
440
+
309
441
  ## Stream Event Reference
310
442
 
311
- ### UI Stream Events
443
+ ### Agent Stream Events
312
444
 
313
445
  | Event Type | Description | Properties |
314
446
  |------------|-------------|------------|
315
- | `conversation_started` | Conversation initialized | `conversationId`, `timestamp` |
447
+ | `conversation_started` | Conversation initialized | `conversationId`, `timestamp`, `model?` |
316
448
  | `message_update` | Message text updated | `message` (complete text), `isStreaming` |
317
449
  | `tool_update` | Tool execution status | `toolCall`, `status`, `result?`, `error?` |
318
450
  | `conversation_completed` | Streaming finished | `message` (final) |
319
- | `error` | Error occurred | `error` object with `message`, `code`, `recoverable` |
451
+ | `error` | Error occurred | `error` object with `message`, `code?`, `recoverable` |
320
452
 
321
453
  ### Tool Execution Statuses
322
454
 
@@ -405,12 +537,111 @@ await client.streamAgent(
405
537
  );
406
538
  ```
407
539
 
540
+ ## Best Practices
541
+
542
+ ### 1. Always Handle Errors
543
+
544
+ ```typescript
545
+ await client.streamAgent(
546
+ prompt,
547
+ (event) => {
548
+ if (event.type === "error") {
549
+ // Check if error is recoverable
550
+ if (event.error.recoverable) {
551
+ // Could retry or fallback
552
+ console.warn("Recoverable error:", event.error.message);
553
+ } else {
554
+ // Fatal error
555
+ console.error("Fatal error:", event.error.message);
556
+ }
557
+ }
558
+ }
559
+ );
560
+ ```
561
+
562
+ ### 2. Clean Up Resources
563
+
564
+ ```typescript
565
+ // Always clean up conversations when done
566
+ let conversationId: string | undefined;
567
+
568
+ try {
569
+ await client.streamAgent(
570
+ prompt,
571
+ (event) => {
572
+ if (event.type === "conversation_started") {
573
+ conversationId = event.conversationId;
574
+ }
575
+ }
576
+ );
577
+ } finally {
578
+ if (conversationId) {
579
+ await client.deleteConversation(conversationId);
580
+ }
581
+ }
582
+ ```
583
+
584
+ ### 3. Tool Handler Best Practices
585
+
586
+ ```typescript
587
+ const toolHandlers = {
588
+ // Always validate inputs
589
+ calculate: async (args) => {
590
+ if (!args.expression || typeof args.expression !== 'string') {
591
+ throw new Error("Invalid expression");
592
+ }
593
+
594
+ // Return structured data
595
+ return {
596
+ expression: args.expression,
597
+ result: evaluateExpression(args.expression),
598
+ timestamp: new Date().toISOString()
599
+ };
600
+ },
601
+
602
+ // Handle errors gracefully
603
+ fetch_data: async (args) => {
604
+ try {
605
+ const data = await fetchFromAPI(args.url);
606
+ return { success: true, data };
607
+ } catch (error) {
608
+ return {
609
+ success: false,
610
+ error: error instanceof Error ? error.message : String(error),
611
+ // Help the LLM understand what went wrong
612
+ suggestion: "The URL might be invalid or the service is down"
613
+ };
614
+ }
615
+ }
616
+ };
617
+ ```
618
+
619
+ ### 4. Optimize for Performance
620
+
621
+ ```typescript
622
+ // Use appropriate chunking strategy for your use case
623
+ const options = {
624
+ // For code generation or technical content
625
+ // chunkingStrategy: 'character' as const,
625
+
626
+ // For natural conversation (default)
627
+ chunkingStrategy: 'word' as const,
628
+
629
+ // For long-form content
630
+ // chunkingStrategy: 'sentence' as const,
631
+
632
+ // Adjust smoothing delay for your UI
633
+ smoothingDelay: 20, // Faster updates
634
+ // smoothingDelay: 50, // Smoother updates (default: 30)
636
+ };
637
+ ```
638
+
408
639
  ## Migration Guide
409
640
 
410
- If you're upgrading from `promptConversation` to `streamAgent`:
641
+ If you're upgrading from `promptConversation` to `streamAgent` or `promptAgent`:
411
642
 
412
643
  ```typescript
413
- // Before
644
+ // Before (v1.0)
414
645
  const response = await client.promptConversation(
415
646
  "Your prompt",
416
647
  undefined,
@@ -418,7 +649,15 @@ const response = await client.promptConversation(
418
649
  );
419
650
  console.log(response.promptConversation.message.message);
420
651
 
421
- // After
652
+ // After (v1.1) - Non-streaming
653
+ const result = await client.promptAgent(
654
+ "Your prompt",
655
+ undefined,
656
+ { id: specId }
657
+ );
658
+ console.log(result.message);
659
+
660
+ // After (v1.1) - Streaming
422
661
  await client.streamAgent(
423
662
  "Your prompt",
424
663
  (event) => {
@@ -431,6 +670,69 @@ await client.streamAgent(
431
670
  );
432
671
  ```
433
672
 
673
+ ## Troubleshooting
674
+
675
+ ### Common Issues
676
+
677
+ #### 1. Streaming Not Working
678
+
679
+ ```typescript
680
+ // Check if streaming is supported
681
+ if (!client.supportsStreaming()) {
682
+ console.log("Streaming not supported - using fallback mode");
683
+ }
684
+
685
+ // Ensure LLM clients are properly configured
686
+ const hasNativeStreaming =
687
+ client.hasOpenAIClient() ||
688
+ client.hasAnthropicClient() ||
689
+ client.hasGoogleClient();
690
+ ```
691
+
692
+ #### 2. Tool Calls Not Executing
693
+
694
+ ```typescript
695
+ // Ensure tool schemas are valid JSON Schema
696
+ const validSchema = {
697
+ type: "object",
698
+ properties: {
699
+ param: { type: "string", description: "Parameter description" }
700
+ },
701
+ required: ["param"] // Don't forget required fields
702
+ };
703
+
704
+ // Tool names must match exactly
705
+ const tools = [{ name: "my_tool", /* ... */ }];
706
+ const toolHandlers = {
707
+ "my_tool": async (args) => { /* ... */ } // Name must match
708
+ };
709
+ ```
710
+
711
+ #### 3. Incomplete Streaming Responses
712
+
713
+ Some LLM providers may truncate responses. The client handles this automatically, but you can enable debug logging:
714
+
715
+ ```bash
716
+ DEBUG_STREAMING=true npm start
717
+ ```
718
+
719
+ #### 4. Type Errors
720
+
721
+ Ensure you're importing the correct types:
722
+
723
+ ```typescript
724
+ import {
725
+ Graphlit,
726
+ AgentStreamEvent, // For streaming events
727
+ AgentResult, // For promptAgent results
728
+ ToolHandler, // For tool handler functions
729
+ StreamAgentOptions // For streaming options
730
+ } from "graphlit-client";
731
+
732
+ // Also available if needed
733
+ import * as Types from "graphlit-client/generated/graphql-types";
734
+ ```
735
+
434
736
  ## Support
435
737
 
436
738
  Please refer to the [Graphlit API Documentation](https://docs.graphlit.dev/).
package/dist/client.d.ts CHANGED
@@ -2,40 +2,6 @@ import { ApolloClient, NormalizedCacheObject } from "@apollo/client/core";
2
2
  import * as Types from "./generated/graphql-types.js";
3
3
  import { AgentOptions, AgentResult, StreamAgentOptions, ToolHandler } from "./types/agent.js";
4
4
  import { AgentStreamEvent } from "./types/ui-events.js";
5
- export type StreamEvent = {
6
- type: "start";
7
- conversationId: string;
8
- } | {
9
- type: "token";
10
- token: string;
11
- } | {
12
- type: "message";
13
- message: string;
14
- } | {
15
- type: "tool_call_start";
16
- toolCall: {
17
- id: string;
18
- name: string;
19
- };
20
- } | {
21
- type: "tool_call_delta";
22
- toolCallId: string;
23
- argumentDelta: string;
24
- } | {
25
- type: "tool_call_complete";
26
- toolCall: {
27
- id: string;
28
- name: string;
29
- arguments: string;
30
- };
31
- } | {
32
- type: "complete";
33
- messageId?: string;
34
- conversationId?: string;
35
- } | {
36
- type: "error";
37
- error: string;
38
- };
39
5
  export type { AgentOptions, AgentResult, StreamAgentOptions, ToolCallResult, UsageInfo, AgentError, } from "./types/agent.js";
40
6
  export type { AgentStreamEvent } from "./types/ui-events.js";
41
7
  declare class Graphlit {
@@ -376,7 +342,7 @@ declare class Graphlit {
376
342
  * @param options - Stream agent options
377
343
  * @throws Error if streaming is not supported
378
344
  */
379
- streamAgent(prompt: string, onEvent: (event: StreamEvent | AgentStreamEvent) => void, conversationId?: string, specification?: Types.EntityReferenceInput, tools?: Types.ToolDefinitionInput[], toolHandlers?: Record<string, ToolHandler>, options?: StreamAgentOptions, mimeType?: string, data?: string, // base64 encoded
345
+ streamAgent(prompt: string, onEvent: (event: AgentStreamEvent) => void, conversationId?: string, specification?: Types.EntityReferenceInput, tools?: Types.ToolDefinitionInput[], toolHandlers?: Record<string, ToolHandler>, options?: StreamAgentOptions, mimeType?: string, data?: string, // base64 encoded
380
346
  correlationId?: string): Promise<void>;
381
347
  /**
382
348
  * Execute the streaming agent workflow with tool calling loop
@@ -417,4 +383,3 @@ declare class Graphlit {
417
383
  }
418
384
  export { Graphlit };
419
385
  export * as Types from "./generated/graphql-types.js";
420
- export { StreamEventAggregator, AggregatedEvent, formatSSEEvent, createSSEStream, wrapToolHandlers, enhanceToolCalls, ConversationMetrics, ToolResultEmitter, ServerMapping, } from "./stream-helpers.js";
package/dist/client.js CHANGED
@@ -2113,5 +2113,3 @@ class Graphlit {
2113
2113
  }
2114
2114
  export { Graphlit };
2115
2115
  export * as Types from "./generated/graphql-types.js";
2116
- // Export streaming helpers
2117
- export { StreamEventAggregator, formatSSEEvent, createSSEStream, wrapToolHandlers, enhanceToolCalls, ConversationMetrics, } from "./stream-helpers.js";
@@ -1,6 +1,6 @@
1
1
  import { ConversationToolCall, Specification, ToolDefinitionInput } from "../generated/graphql-types.js";
2
- import { StreamEvent } from "../client.js";
3
2
  import { OpenAIMessage, AnthropicMessage, GoogleMessage } from "./llm-formatters.js";
3
+ import { StreamEvent } from "../types/internal.js";
4
4
  /**
5
5
  * Stream with OpenAI SDK
6
6
  */
@@ -1,5 +1,5 @@
1
1
  import { AgentStreamEvent } from "../types/ui-events.js";
2
- import { StreamEvent } from "../client.js";
2
+ import { StreamEvent } from "../types/internal.js";
3
3
  import { ChunkingStrategy } from "./chunk-buffer.js";
4
4
  /**
5
5
  * Adapter that transforms low-level streaming events into high-level UI events
@@ -0,0 +1,42 @@
1
+ /**
2
+ * Internal types used by the streaming implementation
3
+ * These are not exported to consumers of the library
4
+ */
5
+ /**
6
+ * Low-level streaming events used internally by providers
7
+ * These get transformed into AgentStreamEvent by UIEventAdapter
8
+ */
9
+ export type StreamEvent = {
10
+ type: "start";
11
+ conversationId: string;
12
+ } | {
13
+ type: "token";
14
+ token: string;
15
+ } | {
16
+ type: "message";
17
+ message: string;
18
+ } | {
19
+ type: "tool_call_start";
20
+ toolCall: {
21
+ id: string;
22
+ name: string;
23
+ };
24
+ } | {
25
+ type: "tool_call_delta";
26
+ toolCallId: string;
27
+ argumentDelta: string;
28
+ } | {
29
+ type: "tool_call_complete";
30
+ toolCall: {
31
+ id: string;
32
+ name: string;
33
+ arguments: string;
34
+ };
35
+ } | {
36
+ type: "complete";
37
+ messageId?: string;
38
+ conversationId?: string;
39
+ } | {
40
+ type: "error";
41
+ error: string;
42
+ };
@@ -0,0 +1,5 @@
1
+ /**
2
+ * Internal types used by the streaming implementation
3
+ * These are not exported to consumers of the library
4
+ */
5
+ export {};
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "graphlit-client",
3
- "version": "1.0.20250610008",
3
+ "version": "1.0.20250610010",
4
4
  "description": "Graphlit API Client for TypeScript",
5
5
  "main": "dist/client.js",
6
6
  "types": "dist/client.d.ts",
@@ -1,106 +0,0 @@
1
- import { StreamEvent } from "./client.js";
2
- import { ConversationRoleTypes } from "./generated/graphql-types.js";
3
- export declare class StreamEventAggregator {
4
- private conversationId;
5
- private messageBuffer;
6
- private toolCallsBuffer;
7
- private isFirstAssistantMessage;
8
- private hasReceivedToolCalls;
9
- private tokenBuffer;
10
- /**
11
- * Process a stream event and return any complete messages ready for the UI
12
- */
13
- processEvent(event: StreamEvent): AggregatedEvent | null;
14
- /**
15
- * Reset the aggregator for a new conversation
16
- */
17
- reset(): void;
18
- /**
19
- * Get current state (useful for debugging)
20
- */
21
- getState(): {
22
- conversationId: string;
23
- messageBuffer: string;
24
- toolCallsCount: number;
25
- hasReceivedToolCalls: boolean;
26
- isFirstAssistantMessage: boolean;
27
- tokenCount: number;
28
- };
29
- }
30
- /**
31
- * Aggregated event types that are ready for UI consumption
32
- */
33
- export type AggregatedEvent = {
34
- type: "conversationStarted";
35
- conversationId: string;
36
- } | {
37
- type: "token";
38
- token: string;
39
- accumulated: string;
40
- } | {
41
- type: "assistantMessage";
42
- message: {
43
- message?: string | null;
44
- role?: ConversationRoleTypes | null;
45
- toolCalls?: any[];
46
- };
47
- isFinal: boolean;
48
- conversationId?: string;
49
- } | {
50
- type: "streamComplete";
51
- conversationId?: string;
52
- } | {
53
- type: "error";
54
- error: string;
55
- };
56
- /**
57
- * Helper to create an SSE response with proper formatting
58
- */
59
- export declare function formatSSEEvent(data: any, eventName?: string): string;
60
- /**
61
- * Helper to create a TransformStream for SSE with built-in ping support
62
- */
63
- export declare function createSSEStream(options?: {
64
- pingInterval?: number;
65
- }): {
66
- readable: ReadableStream<any>;
67
- sendEvent: (data: any, eventName?: string) => Promise<void>;
68
- close: () => Promise<void>;
69
- writer: WritableStreamDefaultWriter<any>;
70
- };
71
- /**
72
- * Helper to wrap tool handlers with result emission
73
- */
74
- export interface ToolResultEmitter {
75
- (toolCallId: string, result: any, status: "complete" | "error" | "blocked", duration: number): void;
76
- }
77
- export declare function wrapToolHandlers(handlers: Record<string, (args: any) => Promise<any>>, emitResult: ToolResultEmitter): Record<string, (args: any) => Promise<any>>;
78
- /**
79
- * Helper to enhance tool calls with server information
80
- */
81
- export interface ServerMapping {
82
- toolName: string;
83
- serverName: string;
84
- serverId: string;
85
- }
86
- export declare function enhanceToolCalls(toolCalls: any[], serverMappings: ServerMapping[]): any[];
87
- /**
88
- * Helper to track conversation metrics
89
- */
90
- export declare class ConversationMetrics {
91
- private startTime;
92
- private tokenCount;
93
- private toolCallCount;
94
- private errorCount;
95
- recordToken(): void;
96
- recordToolCall(): void;
97
- recordError(): void;
98
- getMetrics(): {
99
- duration: number;
100
- tokenCount: number;
101
- toolCallCount: number;
102
- errorCount: number;
103
- tokensPerSecond: number;
104
- };
105
- reset(): void;
106
- }
@@ -1,237 +0,0 @@
1
- import { ConversationRoleTypes, } from "./generated/graphql-types.js";
2
- export class StreamEventAggregator {
3
- conversationId = "";
4
- messageBuffer = "";
5
- toolCallsBuffer = new Map();
6
- isFirstAssistantMessage = true;
7
- hasReceivedToolCalls = false;
8
- tokenBuffer = [];
9
- /**
10
- * Process a stream event and return any complete messages ready for the UI
11
- */
12
- processEvent(event) {
13
- switch (event.type) {
14
- case "start":
15
- this.conversationId = event.conversationId;
16
- return {
17
- type: "conversationStarted",
18
- conversationId: event.conversationId,
19
- };
20
- case "token":
21
- this.messageBuffer += event.token;
22
- this.tokenBuffer.push(event.token);
23
- return {
24
- type: "token",
25
- token: event.token,
26
- accumulated: this.messageBuffer,
27
- };
28
- case "message":
29
- // SDK provides accumulated message - we can use this instead of our buffer
30
- this.messageBuffer = event.message;
31
- return null; // Don't emit, wait for complete event
32
- case "tool_call_start":
33
- this.hasReceivedToolCalls = true;
34
- this.toolCallsBuffer.set(event.toolCall.id, {
35
- id: event.toolCall.id,
36
- name: event.toolCall.name,
37
- argumentsBuffer: "",
38
- isComplete: false,
39
- startTime: Date.now(),
40
- });
41
- return null; // Buffer until complete
42
- case "tool_call_delta":
43
- const toolCall = this.toolCallsBuffer.get(event.toolCallId);
44
- if (toolCall) {
45
- toolCall.argumentsBuffer += event.argumentDelta;
46
- }
47
- return null; // Buffer until complete
48
- case "tool_call_complete":
49
- const completeToolCall = this.toolCallsBuffer.get(event.toolCall.id);
50
- if (completeToolCall) {
51
- completeToolCall.argumentsBuffer = event.toolCall.arguments;
52
- completeToolCall.isComplete = true;
53
- }
54
- // Check if all tool calls are complete
55
- const allComplete = Array.from(this.toolCallsBuffer.values()).every((tc) => tc.isComplete);
56
- if (allComplete &&
57
- this.hasReceivedToolCalls &&
58
- this.isFirstAssistantMessage) {
59
- // Emit complete assistant message with all tool calls
60
- const toolCalls = Array.from(this.toolCallsBuffer.values()).map((tc) => ({
61
- id: tc.id,
62
- name: tc.name,
63
- arguments: tc.argumentsBuffer,
64
- status: "pending",
65
- }));
66
- this.isFirstAssistantMessage = false;
67
- return {
68
- type: "assistantMessage",
69
- message: {
70
- message: this.messageBuffer,
71
- role: ConversationRoleTypes.Assistant,
72
- toolCalls,
73
- },
74
- isFinal: false,
75
- };
76
- }
77
- return null;
78
- case "complete":
79
- // If we haven't sent a message yet (no tool calls), send it now
80
- if (this.isFirstAssistantMessage && !this.hasReceivedToolCalls) {
81
- return {
82
- type: "assistantMessage",
83
- message: {
84
- message: this.messageBuffer,
85
- role: ConversationRoleTypes.Assistant,
86
- },
87
- isFinal: true,
88
- conversationId: event.conversationId,
89
- };
90
- }
91
- return { type: "streamComplete", conversationId: event.conversationId };
92
- case "error":
93
- return { type: "error", error: event.error };
94
- default:
95
- return null;
96
- }
97
- }
98
- /**
99
- * Reset the aggregator for a new conversation
100
- */
101
- reset() {
102
- this.conversationId = "";
103
- this.messageBuffer = "";
104
- this.toolCallsBuffer.clear();
105
- this.isFirstAssistantMessage = true;
106
- this.hasReceivedToolCalls = false;
107
- this.tokenBuffer = [];
108
- }
109
- /**
110
- * Get current state (useful for debugging)
111
- */
112
- getState() {
113
- return {
114
- conversationId: this.conversationId,
115
- messageBuffer: this.messageBuffer,
116
- toolCallsCount: this.toolCallsBuffer.size,
117
- hasReceivedToolCalls: this.hasReceivedToolCalls,
118
- isFirstAssistantMessage: this.isFirstAssistantMessage,
119
- tokenCount: this.tokenBuffer.length,
120
- };
121
- }
122
- }
123
- /**
124
- * Helper to create an SSE response with proper formatting
125
- */
126
- export function formatSSEEvent(data, eventName = "message") {
127
- if (typeof data === "string") {
128
- return `event: ${eventName}\ndata: ${data}\n\n`;
129
- }
130
- return `event: ${eventName}\ndata: ${JSON.stringify(data)}\n\n`;
131
- }
132
- /**
133
- * Helper to create a TransformStream for SSE with built-in ping support
134
- */
135
- export function createSSEStream(options) {
136
- const encoder = new TextEncoder();
137
- const { readable, writable } = new TransformStream();
138
- const writer = writable.getWriter();
139
- let pingInterval = null;
140
- if (options?.pingInterval) {
141
- pingInterval = globalThis.setInterval(() => {
142
- writer.write(encoder.encode(":\n\n")).catch(() => {
143
- // Ignore errors on ping
144
- });
145
- }, options.pingInterval);
146
- }
147
- const sendEvent = (data, eventName = "message") => {
148
- const formatted = formatSSEEvent(data, eventName);
149
- return writer.write(encoder.encode(formatted));
150
- };
151
- const close = async () => {
152
- if (pingInterval) {
153
- globalThis.clearInterval(pingInterval);
154
- }
155
- await writer.close();
156
- };
157
- return {
158
- readable,
159
- sendEvent,
160
- close,
161
- writer,
162
- };
163
- }
164
- export function wrapToolHandlers(handlers, emitResult) {
165
- const wrapped = {};
166
- Object.entries(handlers).forEach(([name, handler]) => {
167
- wrapped[name] = async (args) => {
168
- const toolCallId = `tool_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
169
- const startTime = Date.now();
170
- try {
171
- const result = await handler(args);
172
- const duration = Date.now() - startTime;
173
- // Emit success result
174
- emitResult(toolCallId, { status: "success", result }, "complete", duration);
175
- return result;
176
- }
177
- catch (error) {
178
- const duration = Date.now() - startTime;
179
- // Emit error result
180
- emitResult(toolCallId, {
181
- status: "error",
182
- error: error instanceof Error ? error.message : String(error),
183
- }, "error", duration);
184
- throw error;
185
- }
186
- };
187
- });
188
- return wrapped;
189
- }
190
- export function enhanceToolCalls(toolCalls, serverMappings) {
191
- const mappingDict = serverMappings.reduce((acc, mapping) => {
192
- acc[mapping.toolName] = {
193
- serverName: mapping.serverName,
194
- serverId: mapping.serverId,
195
- };
196
- return acc;
197
- }, {});
198
- return toolCalls.map((toolCall) => ({
199
- ...toolCall,
200
- serverName: mappingDict[toolCall.name]?.serverName,
201
- serverId: mappingDict[toolCall.name]?.serverId,
202
- }));
203
- }
204
- /**
205
- * Helper to track conversation metrics
206
- */
207
- export class ConversationMetrics {
208
- startTime = Date.now();
209
- tokenCount = 0;
210
- toolCallCount = 0;
211
- errorCount = 0;
212
- recordToken() {
213
- this.tokenCount++;
214
- }
215
- recordToolCall() {
216
- this.toolCallCount++;
217
- }
218
- recordError() {
219
- this.errorCount++;
220
- }
221
- getMetrics() {
222
- const duration = Date.now() - this.startTime;
223
- return {
224
- duration,
225
- tokenCount: this.tokenCount,
226
- toolCallCount: this.toolCallCount,
227
- errorCount: this.errorCount,
228
- tokensPerSecond: this.tokenCount / (duration / 1000),
229
- };
230
- }
231
- reset() {
232
- this.startTime = Date.now();
233
- this.tokenCount = 0;
234
- this.toolCallCount = 0;
235
- this.errorCount = 0;
236
- }
237
- }