graphlit-client 1.0.20250531004 → 1.0.20250610001

This diff shows the published contents of these two package versions as they appear in their public registries. It is provided for informational purposes only.
@@ -0,0 +1,288 @@
+ import { ConversationRoleTypes, } from "../generated/graphql-types.js";
+ import { ChunkBuffer } from "./chunk-buffer.js";
+ /**
+  * Adapter that transforms low-level streaming events into high-level UI events
+  * using GraphQL types for type safety
+  */
+ export class UIEventAdapter {
+   onEvent;
+   conversationId;
+   model;
+   currentMessage = "";
+   isStreaming = false;
+   activeToolCalls = new Map();
+   lastUpdateTime = 0;
+   updateTimer;
+   showTokenStream;
+   chunkBuffer;
+   smoothingDelay = 30;
+   chunkQueue = []; // Queue of chunks waiting to be emitted
+   constructor(onEvent, conversationId, options = {}) {
+     this.onEvent = onEvent;
+     this.conversationId = conversationId;
+     this.showTokenStream = options.showTokenStream ?? true;
+     this.smoothingDelay = options.smoothingDelay ?? 30;
+     if (options.smoothingEnabled) {
+       this.chunkBuffer = new ChunkBuffer(options.chunkingStrategy || "word");
+     }
+   }
+   /**
+    * Process a raw streaming event and emit appropriate UI events
+    */
+   handleEvent(event) {
+     switch (event.type) {
+       case "start":
+         this.handleStart(event.conversationId);
+         break;
+       case "token":
+         if (this.showTokenStream) {
+           this.handleToken(event.token);
+         }
+         break;
+       case "message":
+         this.handleMessage(event.message);
+         break;
+       case "tool_call_start":
+         this.handleToolCallStart(event.toolCall);
+         break;
+       case "tool_call_delta":
+         this.handleToolCallDelta(event.toolCallId, event.argumentDelta);
+         break;
+       case "tool_call_complete":
+         this.handleToolCallComplete(event.toolCall);
+         break;
+       case "complete":
+         this.handleComplete();
+         break;
+       case "error":
+         this.handleError(event.error);
+         break;
+     }
+   }
+   /**
+    * Set tool execution result directly (for tool handlers)
+    */
+   setToolResult(toolCallId, result, error) {
+     const toolData = this.activeToolCalls.get(toolCallId);
+     if (toolData) {
+       if (error) {
+         toolData.status = "failed";
+         this.emitUIEvent({
+           type: "tool_update",
+           toolCall: toolData.toolCall,
+           status: "failed",
+           error,
+         });
+       }
+       else {
+         toolData.status = "completed";
+         this.emitUIEvent({
+           type: "tool_update",
+           toolCall: toolData.toolCall,
+           status: "completed",
+           result,
+         });
+       }
+     }
+   }
+   handleStart(conversationId) {
+     this.conversationId = conversationId;
+     this.isStreaming = true;
+     this.emitUIEvent({
+       type: "conversation_started",
+       conversationId,
+       timestamp: new Date(),
+       model: this.model,
+     });
+   }
+   handleToken(token) {
+     if (this.chunkBuffer) {
+       const chunks = this.chunkBuffer.addToken(token);
+       // Add chunks to queue for all chunking modes (character, word, sentence)
+       this.chunkQueue.push(...chunks);
+       this.scheduleChunkEmission();
+     }
+     else {
+       // No chunking - emit tokens directly
+       this.currentMessage += token;
+       this.scheduleMessageUpdate();
+     }
+   }
+   handleMessage(message) {
+     this.currentMessage = message;
+     this.emitMessageUpdate(false);
+   }
+   handleToolCallStart(toolCall) {
+     const conversationToolCall = {
+       __typename: "ConversationToolCall",
+       id: toolCall.id,
+       name: toolCall.name,
+       arguments: "",
+     };
+     this.activeToolCalls.set(toolCall.id, {
+       toolCall: conversationToolCall,
+       status: "preparing",
+     });
+     this.emitUIEvent({
+       type: "tool_update",
+       toolCall: conversationToolCall,
+       status: "preparing",
+     });
+   }
+   handleToolCallDelta(toolCallId, argumentDelta) {
+     const toolData = this.activeToolCalls.get(toolCallId);
+     if (toolData && toolData.status === "preparing") {
+       toolData.toolCall.arguments += argumentDelta;
+       toolData.status = "executing";
+       this.emitUIEvent({
+         type: "tool_update",
+         toolCall: toolData.toolCall,
+         status: "executing",
+       });
+     }
+   }
+   handleToolCallComplete(toolCall) {
+     const toolData = this.activeToolCalls.get(toolCall.id);
+     if (toolData) {
+       toolData.toolCall.arguments = toolCall.arguments;
+       toolData.status = "completed";
+       this.emitUIEvent({
+         type: "tool_update",
+         toolCall: toolData.toolCall,
+         status: "completed",
+       });
+     }
+   }
+   handleComplete() {
+     // Flush any remaining chunks from buffer
+     if (this.chunkBuffer) {
+       const remaining = this.chunkBuffer.flush();
+       this.chunkQueue.push(...remaining);
+     }
+     // Clear any pending updates
+     if (this.updateTimer) {
+       globalThis.clearTimeout(this.updateTimer);
+       this.updateTimer = undefined;
+     }
+     // Immediately flush all queued chunks
+     while (this.chunkQueue.length > 0) {
+       const chunk = this.chunkQueue.shift();
+       this.currentMessage += chunk;
+     }
+     this.isStreaming = false;
+     // Create final message
+     const finalMessage = {
+       __typename: "ConversationMessage",
+       role: ConversationRoleTypes.Assistant,
+       message: this.currentMessage,
+       timestamp: new Date().toISOString(),
+       tokens: undefined, // Will be set by caller if available
+       toolCalls: Array.from(this.activeToolCalls.values()).map((t) => t.toolCall),
+     };
+     this.emitUIEvent({
+       type: "conversation_completed",
+       message: finalMessage,
+     });
+   }
+   handleError(error) {
+     this.isStreaming = false;
+     this.emitUIEvent({
+       type: "error",
+       error: {
+         message: error,
+         recoverable: false,
+       },
+       conversationId: this.conversationId,
+       timestamp: new Date(),
+     });
+   }
+   scheduleMessageUpdate() {
+     const now = Date.now();
+     const timeSinceLastUpdate = now - this.lastUpdateTime;
+     // If enough time has passed, update immediately
+     if (timeSinceLastUpdate >= this.smoothingDelay) {
+       this.emitMessageUpdate(true);
+       return;
+     }
+     // Otherwise, schedule an update
+     if (!this.updateTimer) {
+       const delay = this.smoothingDelay - timeSinceLastUpdate;
+       this.updateTimer = globalThis.setTimeout(() => {
+         this.emitMessageUpdate(true);
+       }, delay);
+     }
+   }
+   scheduleChunkEmission() {
+     // If timer is already running, let it handle the queue
+     if (this.updateTimer) {
+       return;
+     }
+     // If queue is empty, nothing to do
+     if (this.chunkQueue.length === 0) {
+       return;
+     }
+     const now = Date.now();
+     const timeSinceLastUpdate = now - this.lastUpdateTime;
+     // If enough time has passed, emit a chunk immediately
+     if (timeSinceLastUpdate >= this.smoothingDelay) {
+       this.emitNextChunk();
+       return;
+     }
+     // Otherwise, schedule the next chunk emission
+     const delay = this.smoothingDelay - timeSinceLastUpdate;
+     this.updateTimer = globalThis.setTimeout(() => {
+       this.emitNextChunk();
+     }, delay);
+   }
+   emitNextChunk() {
+     if (this.chunkQueue.length === 0) {
+       this.updateTimer = undefined;
+       return;
+     }
+     // Take one chunk from the queue
+     const chunk = this.chunkQueue.shift();
+     this.currentMessage += chunk;
+     // Emit the update
+     this.emitMessageUpdate(true);
+     // Schedule next chunk if queue is not empty
+     if (this.chunkQueue.length > 0) {
+       this.updateTimer = globalThis.setTimeout(() => {
+         this.emitNextChunk();
+       }, this.smoothingDelay);
+     }
+     else {
+       this.updateTimer = undefined;
+     }
+   }
+   emitMessageUpdate(isStreaming) {
+     this.lastUpdateTime = Date.now();
+     if (this.updateTimer) {
+       globalThis.clearTimeout(this.updateTimer);
+       this.updateTimer = undefined;
+     }
+     const message = {
+       __typename: "ConversationMessage",
+       role: ConversationRoleTypes.Assistant,
+       message: this.currentMessage,
+       timestamp: new Date().toISOString(),
+     };
+     this.emitUIEvent({
+       type: "message_update",
+       message,
+       isStreaming,
+     });
+   }
+   emitUIEvent(event) {
+     this.onEvent(event);
+   }
+   /**
+    * Clean up any pending timers
+    */
+   dispose() {
+     if (this.updateTimer) {
+       globalThis.clearTimeout(this.updateTimer);
+       this.updateTimer = undefined;
+     }
+     this.activeToolCalls.clear();
+   }
+ }
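
The adapter above is the core of the new streaming UX: raw provider events go in through `handleEvent`, and throttled `message_update` / `tool_update` events come out through the `onEvent` callback. A minimal consumption sketch follows; the import path and the raw event payloads are illustrative assumptions, since the diff does not show how the package exports or drives this class:

```ts
// Sketch only: import path and raw event shapes are assumed, not confirmed by this diff.
import { UIEventAdapter } from "graphlit-client/dist/streaming/ui-event-adapter.js";

const adapter = new UIEventAdapter(
  (event) => {
    // UI callback: receives the high-level AgentStreamEvent union
    if (event.type === "message_update") {
      console.log(event.message.message); // replace with a real render call
    }
  },
  "conv-123",
  { smoothingEnabled: true, chunkingStrategy: "word", smoothingDelay: 30 },
);

// Feed low-level events as they arrive from a provider bridge:
adapter.handleEvent({ type: "start", conversationId: "conv-123" });
adapter.handleEvent({ type: "token", token: "Hello" });
adapter.handleEvent({ type: "complete" });
adapter.dispose(); // clear any pending smoothing timer
```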
@@ -0,0 +1,39 @@
+ export type ToolHandler = (args: any) => Promise<any>;
+ export interface AgentOptions {
+   maxToolRounds?: number;
+   timeout?: number;
+ }
+ export interface AgentResult {
+   message: string;
+   conversationId: string;
+   error?: AgentError;
+ }
+ export interface StreamAgentOptions {
+   maxToolRounds?: number;
+   abortSignal?: AbortSignal;
+   showTokenStream?: boolean;
+   smoothingEnabled?: boolean;
+   chunkingStrategy?: 'character' | 'word' | 'sentence';
+   smoothingDelay?: number;
+ }
+ export interface ToolCallResult {
+   id: string;
+   name: string;
+   arguments: any;
+   result?: any;
+   error?: string;
+   duration?: number;
+ }
+ export interface UsageInfo {
+   promptTokens: number;
+   completionTokens: number;
+   totalTokens: number;
+   cost?: number;
+   model?: string;
+ }
+ export interface AgentError {
+   message: string;
+   code?: string;
+   recoverable: boolean;
+   details?: any;
+ }
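
These are the public agent types (a `.d.ts` file, paired with the empty runtime module below). A sketch of how they compose, using only the shapes declared above; the re-export path and the handler registry name are assumptions:

```ts
import type { ToolHandler, StreamAgentOptions, AgentResult } from "graphlit-client"; // re-export path assumed

// A hypothetical tool registry keyed by tool name
const handlers: Record<string, ToolHandler> = {
  webSearch: async (args: { query: string }) => {
    return { results: [`stub result for ${args.query}`] };
  },
};

const controller = new AbortController();

const streamOptions: StreamAgentOptions = {
  maxToolRounds: 5,
  abortSignal: controller.signal, // cancel mid-stream via controller.abort()
  showTokenStream: true,
  smoothingEnabled: true,
  chunkingStrategy: "word",
  smoothingDelay: 30,
};

function summarize(result: AgentResult): string {
  return result.error
    ? `failed (${result.error.code ?? "unknown"}): ${result.error.message}`
    : `#${result.conversationId}: ${result.message}`;
}
```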
@@ -0,0 +1 @@
+ export {};
@@ -0,0 +1,58 @@
+ export declare enum SmoothChunkingStrategy {
+   Word = "word",
+   Sentence = "sentence",
+   Character = "char"
+ }
+ export interface SmoothStreamOptions {
+   /** Enable smooth streaming. Set to false to get raw tokens. Default: true */
+   enabled?: boolean;
+   /** Delay between chunk emissions in milliseconds. Default varies by provider */
+   delay?: number;
+   /** How to break up content into chunks */
+   chunking?: SmoothChunkingStrategy | RegExp | ((buffer: string) => string | null);
+ }
+ /**
+  * Configuration options for UI streaming mode
+  */
+ export interface StreamOptions {
+   /**
+    * Enable UI-focused streaming mode
+    * @default true (simplified events for better DX)
+    */
+   enabled?: boolean;
+   /**
+    * Whether to show real-time token streaming in the UI
+    * @default true
+    */
+   showTokenStream?: boolean;
+   /**
+    * Minimum interval between message updates (in milliseconds)
+    * Helps prevent UI flicker with very fast token streams
+    * @default 30
+    */
+   updateInterval?: number;
+   /**
+    * Whether to include token usage information in metadata
+    * @default false
+    */
+   includeUsage?: boolean;
+   /**
+    * Custom tool descriptions for better UI display
+    * Maps tool names to human-readable descriptions
+    */
+   toolDescriptions?: Record<string, string>;
+   /**
+    * Whether to automatically retry on recoverable errors
+    * @default true
+    */
+   autoRetry?: boolean;
+   /**
+    * Maximum number of retry attempts
+    * @default 3
+    */
+   maxRetries?: number;
+   /**
+    * Smooth streaming options
+    */
+   smooth?: SmoothStreamOptions;
+ }
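
`chunking` accepts an enum value, a `RegExp`, or a custom function. From the signature, the function form plausibly receives the buffered text and returns the next chunk to emit, or `null` to keep buffering; a sketch under that assumption:

```ts
import type { StreamOptions } from "graphlit-client"; // re-export path assumed

// Assumption: the chunking callback returns the next chunk to emit,
// or null when the buffer does not yet contain a complete chunk.
const uiOptions: StreamOptions = {
  showTokenStream: true,
  updateInterval: 30,
  toolDescriptions: { webSearch: "Searching the web" },
  autoRetry: true,
  maxRetries: 3,
  smooth: {
    enabled: true,
    delay: 20,
    chunking: (buffer) => {
      const nl = buffer.indexOf("\n");
      return nl >= 0 ? buffer.slice(0, nl + 1) : null; // emit whole lines
    },
  },
};
```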
@@ -0,0 +1,7 @@
+ // Smooth streaming configuration
+ export var SmoothChunkingStrategy;
+ (function (SmoothChunkingStrategy) {
+   SmoothChunkingStrategy["Word"] = "word";
+   SmoothChunkingStrategy["Sentence"] = "sentence";
+   SmoothChunkingStrategy["Character"] = "char";
+ })(SmoothChunkingStrategy || (SmoothChunkingStrategy = {}));
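
Note that the compiled enum maps `Character` to the string `"char"`, while `StreamAgentOptions.chunkingStrategy` spells it `"character"`; using the enum constants avoids guessing the literal. Import path assumed:

```ts
import { SmoothChunkingStrategy } from "graphlit-client"; // re-export path assumed

const chunking = SmoothChunkingStrategy.Sentence; // === "sentence" at runtime
```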
@@ -0,0 +1,38 @@
+ import { ConversationMessage, ConversationToolCall } from "../generated/graphql-types.js";
+ /**
+  * Tool execution status for streaming
+  */
+ export type ToolExecutionStatus = "preparing" | "executing" | "completed" | "failed";
+ /**
+  * Simplified UI-focused streaming events using GraphQL types
+  */
+ export type AgentStreamEvent = {
+   type: "conversation_started";
+   conversationId: string;
+   timestamp: Date;
+   model?: string;
+ } | {
+   type: "message_update";
+   message: Partial<ConversationMessage> & {
+     message: string;
+   };
+   isStreaming: boolean;
+ } | {
+   type: "tool_update";
+   toolCall: ConversationToolCall;
+   status: ToolExecutionStatus;
+   result?: unknown;
+   error?: string;
+ } | {
+   type: "conversation_completed";
+   message: ConversationMessage;
+ } | {
+   type: "error";
+   error: {
+     message: string;
+     code?: string;
+     recoverable?: boolean;
+   };
+   conversationId: string;
+   timestamp: Date;
+ };
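
Because `AgentStreamEvent` is a discriminated union on `type`, a single `switch` narrows every branch. A sketch of a UI-side handler (import path assumed):

```ts
import type { AgentStreamEvent } from "graphlit-client"; // re-export path assumed

function onAgentEvent(event: AgentStreamEvent): void {
  switch (event.type) {
    case "conversation_started":
      console.log(`started ${event.conversationId} on ${event.model ?? "unknown model"}`);
      break;
    case "message_update":
      // Partial message while streaming; final text arrives with isStreaming=false
      console.log(event.isStreaming ? "partial:" : "final:", event.message.message);
      break;
    case "tool_update":
      console.log(`${event.toolCall.name} -> ${event.status}`, event.error ?? "");
      break;
    case "conversation_completed":
      console.log("completed:", event.message.message);
      break;
    case "error":
      console.error(event.error.message, { recoverable: event.error.recoverable ?? false });
      break;
  }
}
```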
@@ -0,0 +1 @@
+ export {};
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
    "name": "graphlit-client",
-   "version": "1.0.20250531004",
-   "description": "Graphlit API TypeScript Client",
+   "version": "1.0.20250610001",
+   "description": "Graphlit API Client for TypeScript",
    "main": "dist/client.js",
    "types": "dist/client.d.ts",
    "repository": {
@@ -16,7 +16,13 @@
    },
    "scripts": {
      "generate": "graphql-codegen --config codegen.yml",
-     "build": "tsc -p tsconfig.json"
+     "format": "prettier --write .",
+     "build": "tsc -p tsconfig.json",
+     "test": "vitest",
+     "test:watch": "vitest --watch",
+     "test:coverage": "vitest --coverage",
+     "test:ui": "vitest --ui",
+     "test:streaming": "vitest --run src/tests/streaming"
    },
    "keywords": [
      "Graphlit",
@@ -37,11 +43,32 @@
      "@graphql-codegen/typescript": "^4.1.6",
      "@graphql-codegen/typescript-operations": "^4.6.0",
      "graphql": "^16.10.0",
-     "jsonwebtoken": "^9.0.2"
+     "jsonwebtoken": "^9.0.2",
+     "prettier": "^3.5.3"
+   },
+   "peerDependenciesMeta": {
+     "openai": {
+       "optional": true
+     },
+     "@anthropic-ai/sdk": {
+       "optional": true
+     },
+     "@google/generative-ai": {
+       "optional": true
+     }
    },
    "devDependencies": {
      "@graphql-codegen/typescript-document-nodes": "^4.0.16",
      "@types/jsonwebtoken": "^9.0.9",
-     "typescript": "^5.8.2"
+     "@types/node": "^20.0.0",
+     "@vitest/coverage-v8": "^1.0.0",
+     "dotenv": "^16.5.0",
+     "typescript": "^5.8.2",
+     "vitest": "^1.0.0"
+   },
+   "optionalDependencies": {
+     "@anthropic-ai/sdk": "^0.53.0",
+     "@google/generative-ai": "^0.24.1",
+     "openai": "^5.2.0"
    }
  }
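
The provider SDKs (`openai`, `@anthropic-ai/sdk`, `@google/generative-ai`) are now declared both as `optionalDependencies` and as optional peers, so installs succeed without them. A plausible guarded-loading pattern under that assumption; the diff does not show how the client actually imports them:

```ts
// Assumption: provider SDKs are loaded lazily and their absence is tolerated.
async function tryLoadOpenAI(): Promise<unknown | undefined> {
  try {
    const mod = await import("openai"); // resolves only if the optional dep is installed
    return new mod.default({ apiKey: process.env.OPENAI_API_KEY });
  } catch {
    return undefined; // not installed: caller should fall back or disable that provider
  }
}
```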