graphlit-client 1.0.20250613001 → 1.0.20250613003
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/client.js
CHANGED
@@ -1851,6 +1851,12 @@ class Graphlit {
         }
         // Execute tools and prepare for next round
         if (toolHandlers && toolCalls.length > 0) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+                console.log(`\n🔧 [executeStreamingAgent] Round ${currentRound}: Processing ${toolCalls.length} tool calls`);
+                toolCalls.forEach((tc, idx) => {
+                    console.log(`  ${idx + 1}. ${tc.name} (${tc.id}) - Args length: ${tc.arguments.length}`);
+                });
+            }
             // Add assistant message with tool calls to conversation
             const assistantMessage = {
                 __typename: "ConversationMessage",
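All of the new logging in this release is gated behind the DEBUG_GRAPHLIT_SDK_STREAMING environment variable, so output stays silent by default. A minimal sketch of enabling it (the variable name and the Graphlit class come from this diff; the rest is illustrative):

    import { Graphlit } from "graphlit-client";

    // The SDK only checks truthiness, so any non-empty value enables the logs.
    process.env.DEBUG_GRAPHLIT_SDK_STREAMING = "1";

    const client = new Graphlit();
    // Streaming agent rounds now log each tool call's name, ID, and argument length.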
@@ -1944,17 +1950,18 @@ class Graphlit {
                 continue;
             }
         }
-        // Update UI
+        // Update UI - emit the full tool call with arguments
         uiAdapter.handleEvent({
-            type: "
+            type: "tool_call_complete",
             toolCall: {
                 id: toolCall.id,
                 name: toolCall.name,
+                arguments: toolCall.arguments,
             },
         });
         // Execute tool
         const result = await handler(args);
-        // Update UI
+        // Update UI with result
         uiAdapter.setToolResult(toolCall.id, result);
         // Add tool response to messages
         messages.push({
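Because the tool_call_complete event now carries the full accumulated arguments string, UI layers no longer have to reassemble it from streamed deltas. A hypothetical consumer of the enriched event (only the event shape is taken from this diff; the handler itself is illustrative):

    function onToolCallComplete(event: {
        type: "tool_call_complete";
        toolCall: { id: string; name: string; arguments: string };
    }): void {
        // The arguments arrive as one JSON string accumulated during streaming.
        const args = JSON.parse(event.toolCall.arguments || "{}");
        console.log(`tool ${event.toolCall.name} finished with`, args);
    }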
@@ -1983,14 +1990,21 @@ class Graphlit {
         }
         currentRound++;
     }
-    // Complete the conversation
+    // Complete the conversation and get token count
+    let finalTokens;
     if (fullMessage) {
-        await this.completeConversation(fullMessage.trim(), conversationId, correlationId);
+        const completeResponse = await this.completeConversation(fullMessage.trim(), conversationId, correlationId);
+        // Extract token count from the response
+        finalTokens = completeResponse.completeConversation?.message?.tokens ?? undefined;
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+            console.log(`📊 [completeConversation] Tokens used: ${finalTokens || 'unknown'}`);
+        }
     }
-    // Emit completion event
+    // Emit completion event with token count
     uiAdapter.handleEvent({
         type: "complete",
         conversationId,
+        tokens: finalTokens,
     });
 }
 /**
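For reference, the token count now threads from the completeConversation mutation response into the completion event. A sketch of the same extraction pattern against the client (the response shape is taken from the diff; the surrounding variables, and calling completeConversation directly, are assumptions):

    const completeResponse = await client.completeConversation(
        fullMessage.trim(),
        conversationId,
    );
    // Optional chaining guards against a missing message or tokens field.
    const finalTokens =
        completeResponse.completeConversation?.message?.tokens ?? undefined;
    console.log(`LLM reported ${finalTokens ?? "unknown"} tokens`);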
package/dist/model-mapping.js
CHANGED
@@ -5,13 +5,10 @@ import * as Types from "./generated/graphql-types.js";
  */
 // OpenAI model mappings
 const OPENAI_MODEL_MAP = {
-    [Types.OpenAiModels.Gpt4]: "gpt-4",
     [Types.OpenAiModels.Gpt4Turbo_128K]: "gpt-4-turbo",
     [Types.OpenAiModels.Gpt4O_128K]: "gpt-4o",
     [Types.OpenAiModels.Gpt4OMini_128K]: "gpt-4o-mini",
     [Types.OpenAiModels.Gpt4OChat_128K]: "chatgpt-4o-latest",
-    [Types.OpenAiModels.Gpt35Turbo]: "gpt-3.5-turbo",
-    [Types.OpenAiModels.Gpt35Turbo_16K]: "gpt-3.5-turbo-16k",
     [Types.OpenAiModels.Gpt41_1024K]: "gpt-4.1",
     [Types.OpenAiModels.Gpt41Mini_1024K]: "gpt-4.1-mini",
     [Types.OpenAiModels.Gpt41Nano_1024K]: "gpt-4.1-nano",
@@ -36,7 +33,6 @@ const GOOGLE_MODEL_MAP = {
     [Types.GoogleModels.Gemini_1_5Flash]: "gemini-1.5-flash",
     [Types.GoogleModels.Gemini_1_5Flash_8B]: "gemini-1.5-flash-8b",
     [Types.GoogleModels.Gemini_2_0Flash]: "gemini-2.0-flash-exp",
-    [Types.GoogleModels.Gemini_2_0FlashExperimental]: "gemini-2.0-flash-exp",
     [Types.GoogleModels.Gemini_2_5FlashPreview]: "gemini-2.5-flash-preview-05-20",
     [Types.GoogleModels.Gemini_2_5ProPreview]: "gemini-2.5-pro-preview-06-05",
 };
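With these entries gone, a lookup for a retired enum value (GPT-4, GPT-3.5 Turbo, or the duplicate Gemini 2.0 Flash experimental entry) now yields undefined instead of a legacy model name, so callers should handle that case. A sketch of a caller-side guard (the map is private to model-mapping.js; this wrapper is hypothetical):

    // Hypothetical guard for enum values no longer present in the map.
    const modelName = OPENAI_MODEL_MAP[model];
    if (modelName === undefined) {
        throw new Error(`No OpenAI mapping for model enum '${model}'; it may have been retired`);
    }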
@@ -13,9 +13,11 @@ export declare class UIEventAdapter {
     private tokenCount;
     private currentMessage;
     private isStreaming;
+    private conversationStartTime;
     private streamStartTime;
     private firstTokenTime;
     private lastTokenTime;
+    private tokenDelays;
     private activeToolCalls;
     private lastUpdateTime;
     private updateTimer?;
@@ -12,9 +12,11 @@ export class UIEventAdapter {
     tokenCount = 0;
     currentMessage = "";
     isStreaming = false;
-    streamStartTime = 0;
+    conversationStartTime = 0; // When user sent the message
+    streamStartTime = 0; // When streaming actually began
     firstTokenTime = 0;
     lastTokenTime = 0;
+    tokenDelays = [];
     activeToolCalls = new Map();
     lastUpdateTime = 0;
     updateTimer;
@@ -27,6 +29,7 @@ export class UIEventAdapter {
         this.smoothingDelay = options.smoothingDelay ?? 30;
         this.model = options.model;
         this.modelService = options.modelService;
+        this.conversationStartTime = Date.now(); // Capture when conversation began
         if (options.smoothingEnabled) {
             this.chunkBuffer = new ChunkBuffer(options.chunkingStrategy || "word");
         }
@@ -55,7 +58,7 @@ export class UIEventAdapter {
                 this.handleToolCallComplete(event.toolCall);
                 break;
             case "complete":
-                this.handleComplete();
+                this.handleComplete(event.tokens);
                 break;
             case "error":
                 this.handleError(event.error);
@@ -89,11 +92,23 @@ export class UIEventAdapter {
         }
     }
     handleStart(conversationId) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+            console.log(`🚀 [UIEventAdapter] Handle start - Conversation ID: ${conversationId}`);
+            console.log(`🚀 [UIEventAdapter] Active tool calls at start: ${this.activeToolCalls.size}`);
+        }
         this.conversationId = conversationId;
         this.isStreaming = true;
         this.streamStartTime = Date.now();
         this.firstTokenTime = 0;
         this.lastTokenTime = 0;
+        this.tokenCount = 0;
+        this.tokenDelays = [];
+        // Note: We only clear tool calls here if this is truly a new conversation start
+        // For multi-round tool calling, handleStart is only called once at the beginning
+        if (this.activeToolCalls.size > 0) {
+            console.log(`🚀 [UIEventAdapter] Warning: ${this.activeToolCalls.size} tool calls still active at start`);
+        }
+        this.activeToolCalls.clear();
         this.emitUIEvent({
             type: "conversation_started",
             conversationId,
@@ -107,7 +122,12 @@ export class UIEventAdapter {
         if (this.firstTokenTime === 0) {
             this.firstTokenTime = now;
         }
+        // Track inter-token delays
+        if (this.lastTokenTime > 0) {
+            this.tokenDelays.push(now - this.lastTokenTime);
+        }
         this.lastTokenTime = now;
+        this.tokenCount++;
         if (this.chunkBuffer) {
             const chunks = this.chunkBuffer.addToken(token);
             // Add chunks to queue for all chunking modes (character, word, sentence)
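The inter-token delay tracking added here is a simple pairwise difference of arrival times, later averaged for the metrics. Isolated as a sketch (field names mirror the adapter's; the class wrapper is illustrative):

    class TokenTimer {
        private lastTokenTime = 0;
        private tokenDelays: number[] = [];

        onToken(): void {
            const now = Date.now();
            // Only record a delay once a previous token exists to measure from.
            if (this.lastTokenTime > 0) {
                this.tokenDelays.push(now - this.lastTokenTime);
            }
            this.lastTokenTime = now;
        }

        averageDelayMs(): number | undefined {
            if (this.tokenDelays.length === 0) return undefined;
            return Math.round(this.tokenDelays.reduce((a, b) => a + b, 0) / this.tokenDelays.length);
        }
    }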
@@ -125,6 +145,10 @@ export class UIEventAdapter {
         this.emitMessageUpdate(false);
     }
     handleToolCallStart(toolCall) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+            console.log(`🔧 [UIEventAdapter] Tool call start - ID: ${toolCall.id}, Name: ${toolCall.name}`);
+            console.log(`🔧 [UIEventAdapter] Active tool calls before: ${this.activeToolCalls.size}`);
+        }
         const conversationToolCall = {
             __typename: "ConversationToolCall",
             id: toolCall.id,
@@ -135,6 +159,9 @@ export class UIEventAdapter {
             toolCall: conversationToolCall,
             status: "preparing",
         });
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+            console.log(`🔧 [UIEventAdapter] Active tool calls after: ${this.activeToolCalls.size}`);
+        }
         this.emitUIEvent({
             type: "tool_update",
             toolCall: conversationToolCall,
@@ -142,20 +169,38 @@ export class UIEventAdapter {
         });
     }
     handleToolCallDelta(toolCallId, argumentDelta) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+            console.log(`🔧 [UIEventAdapter] Tool call delta - ID: ${toolCallId}, Delta length: ${argumentDelta.length}`);
+            console.log(`🔧 [UIEventAdapter] Delta content: ${argumentDelta.substring(0, 100)}...`);
+        }
         const toolData = this.activeToolCalls.get(toolCallId);
-        if (toolData
+        if (toolData) {
             toolData.toolCall.arguments += argumentDelta;
-
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+                console.log(`🔧 [UIEventAdapter] Tool ${toolCallId} accumulated args length: ${toolData.toolCall.arguments.length}`);
+            }
+            if (toolData.status === "preparing") {
+                toolData.status = "executing";
+            }
             this.emitUIEvent({
                 type: "tool_update",
                 toolCall: toolData.toolCall,
                 status: "executing",
             });
         }
+        else {
+            console.warn(`🔧 [UIEventAdapter] WARNING: Tool call delta for unknown tool ID: ${toolCallId}`);
+        }
     }
     handleToolCallComplete(toolCall) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+            console.log(`🔧 [UIEventAdapter] Tool call complete - ID: ${toolCall.id}, Name: ${toolCall.name}`);
+            console.log(`🔧 [UIEventAdapter] Final arguments length: ${toolCall.arguments.length}`);
+            console.log(`🔧 [UIEventAdapter] Final arguments: ${toolCall.arguments.substring(0, 200)}...`);
+        }
         const toolData = this.activeToolCalls.get(toolCall.id);
         if (toolData) {
+            // Update the arguments with the final complete version
             toolData.toolCall.arguments = toolCall.arguments;
             toolData.status = "completed";
             this.emitUIEvent({
@@ -164,8 +209,33 @@ export class UIEventAdapter {
                 status: "completed",
             });
         }
+        else {
+            // If we don't have this tool call tracked, create it now
+            console.warn(`🔧 [UIEventAdapter] Tool call complete for untracked tool ID: ${toolCall.id}, creating entry`);
+            const conversationToolCall = {
+                __typename: "ConversationToolCall",
+                id: toolCall.id,
+                name: toolCall.name,
+                arguments: toolCall.arguments,
+            };
+            this.activeToolCalls.set(toolCall.id, {
+                toolCall: conversationToolCall,
+                status: "completed",
+            });
+            this.emitUIEvent({
+                type: "tool_update",
+                toolCall: conversationToolCall,
+                status: "completed",
+            });
+        }
     }
-    handleComplete() {
+    handleComplete(tokens) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+            console.log(`🏁 [UIEventAdapter] Handle complete - Active tool calls: ${this.activeToolCalls.size}`);
+            this.activeToolCalls.forEach((toolData, id) => {
+                console.log(`🏁 [UIEventAdapter] Tool ${id}: ${toolData.toolCall.name}, Status: ${toolData.status}, Args length: ${toolData.toolCall.arguments.length}`);
+            });
+        }
         // Clear any pending updates
         if (this.updateTimer) {
             globalThis.clearTimeout(this.updateTimer);
@@ -200,7 +270,7 @@ export class UIEventAdapter {
             role: ConversationRoleTypes.Assistant,
             message: this.currentMessage,
             timestamp: new Date().toISOString(),
-            tokens:
+            tokens: tokens, // Now we have the actual LLM token count!
             toolCalls: Array.from(this.activeToolCalls.values()).map((t) => t.toolCall),
             model: this.model,
             modelService: this.modelService,
@@ -222,9 +292,32 @@ export class UIEventAdapter {
                 }
             }
         }
+        // Build final metrics
+        const completionTime = Date.now();
+        const finalMetrics = {
+            totalTime: this.streamStartTime > 0 ? completionTime - this.streamStartTime : 0,
+            conversationDuration: this.conversationStartTime > 0 ? completionTime - this.conversationStartTime : 0,
+        };
+        // Add TTFT if we have it
+        if (this.firstTokenTime > 0 && this.streamStartTime > 0) {
+            finalMetrics.ttft = this.firstTokenTime - this.streamStartTime;
+        }
+        // Add token counts
+        if (this.tokenCount > 0) {
+            finalMetrics.tokenCount = this.tokenCount; // Streaming chunks
+        }
+        if (tokens) {
+            finalMetrics.llmTokens = tokens; // Actual LLM tokens used
+        }
+        // Calculate average token delay
+        if (this.tokenDelays.length > 0) {
+            const avgDelay = this.tokenDelays.reduce((a, b) => a + b, 0) / this.tokenDelays.length;
+            finalMetrics.avgTokenDelay = Math.round(avgDelay);
+        }
         this.emitUIEvent({
             type: "conversation_completed",
             message: finalMessage,
+            metrics: finalMetrics,
         });
     }
     handleError(error) {
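The finalMetrics object distinguishes tokenCount (client-side streaming chunks) from llmTokens (the count reported by completeConversation), alongside TTFT and average inter-token delay. A small sketch of deriving throughput from it (the interface mirrors the conversation_completed metrics declared in types/internal.d.ts below; the helper itself is illustrative):

    interface CompletionMetrics {
        ttft?: number;                 // ms from stream start to first token
        totalTime: number;             // ms from stream start to completion
        conversationDuration: number;  // ms from user message to completion
        tokenCount?: number;           // streaming chunks counted client-side
        llmTokens?: number;            // tokens reported by completeConversation
        avgTokenDelay?: number;        // mean inter-token gap, ms
    }

    function describeThroughput(m: CompletionMetrics): string {
        const seconds = m.totalTime / 1000;
        const tokens = m.llmTokens ?? m.tokenCount ?? 0;
        const rate = seconds > 0 ? (tokens / seconds).toFixed(1) : "n/a";
        return `${tokens} tokens in ${seconds.toFixed(2)}s (${rate} tok/s), TTFT ${m.ttft ?? "?"}ms`;
    }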
@@ -330,10 +423,30 @@ export class UIEventAdapter {
                 message.completionTime = elapsedTime / 1000;
             }
         }
+        // Build metrics object
+        const now = Date.now();
+        const metrics = {
+            elapsedTime: this.streamStartTime > 0 ? now - this.streamStartTime : 0,
+            conversationDuration: this.conversationStartTime > 0 ? now - this.conversationStartTime : 0,
+        };
+        // Add TTFT if we have it
+        if (this.firstTokenTime > 0 && this.streamStartTime > 0) {
+            metrics.ttft = this.firstTokenTime - this.streamStartTime;
+        }
+        // Add token count if available
+        if (this.tokenCount > 0) {
+            metrics.tokenCount = this.tokenCount;
+        }
+        // Calculate average token delay
+        if (this.tokenDelays.length > 0) {
+            const avgDelay = this.tokenDelays.reduce((a, b) => a + b, 0) / this.tokenDelays.length;
+            metrics.avgTokenDelay = Math.round(avgDelay);
+        }
         this.emitUIEvent({
             type: "message_update",
             message,
             isStreaming,
+            metrics,
         });
     }
     emitUIEvent(event) {
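On the consuming side, these per-update metrics are enough to drive a live status line while a response streams. A hypothetical message_update handler (the metrics fields match the variant declared in types/internal.d.ts below; the rendering is illustrative):

    function onMessageUpdate(event: {
        type: "message_update";
        isStreaming: boolean;
        metrics?: { elapsedTime: number; conversationDuration: number; tokenCount?: number; ttft?: number };
    }): void {
        if (!event.metrics || !event.isStreaming) return;
        const { elapsedTime, tokenCount } = event.metrics;
        // Chunks per second, using the client-side streaming chunk count.
        const rate = tokenCount && elapsedTime > 0
            ? ((tokenCount / elapsedTime) * 1000).toFixed(1)
            : "n/a";
        console.log(`streaming... ${tokenCount ?? 0} chunks at ${rate}/s`);
    }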
package/dist/types/internal.d.ts
CHANGED
@@ -17,6 +17,13 @@ export type AgentStreamEvent = {
         message: string;
     };
     isStreaming: boolean;
+    metrics?: {
+        ttft?: number;
+        elapsedTime: number;
+        conversationDuration: number;
+        tokenCount?: number;
+        avgTokenDelay?: number;
+    };
 } | {
     type: "tool_update";
     toolCall: ConversationToolCall;
@@ -26,6 +33,14 @@ export type AgentStreamEvent = {
 } | {
     type: "conversation_completed";
     message: ConversationMessage;
+    metrics?: {
+        ttft?: number;
+        totalTime: number;
+        conversationDuration: number;
+        tokenCount?: number;
+        llmTokens?: number;
+        avgTokenDelay?: number;
+    };
 } | {
     type: "error";
     error: {