graphlit-client 1.0.20250716002 → 1.0.20250716004
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/client.d.ts
CHANGED
@@ -72,8 +72,8 @@ declare class Graphlit {
      */
     setGroqClient(client: any): void;
     /**
-     * Set a custom Cerebras client instance for streaming
-     * @param client -
+     * Set a custom Cerebras client instance for streaming
+     * @param client - Cerebras client instance (e.g., new Cerebras({ apiKey: "..." }))
      */
     setCerebrasClient(client: any): void;
     /**
@@ -493,7 +493,7 @@ declare class Graphlit {
      */
    private streamWithGroq;
    /**
-     * Stream with Cerebras client (
+     * Stream with Cerebras client (native SDK)
     */
    private streamWithCerebras;
    /**
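For context on the completed doc comment, a minimal usage sketch (assumes @cerebras/cerebras_cloud_sdk is installed; Graphlit constructor arguments are omitted here):

    import Cerebras from "@cerebras/cerebras_cloud_sdk";
    import { Graphlit } from "graphlit-client";

    const graphlit = new Graphlit();
    // Inject a pre-configured client instead of relying on the SDK's
    // optionalRequire() fallback and the CEREBRAS_API_KEY env variable.
    graphlit.setCerebrasClient(new Cerebras({ apiKey: process.env.CEREBRAS_API_KEY }));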
package/dist/client.js
CHANGED
@@ -23,6 +23,7 @@ let CohereClient;
 let CohereClientV2;
 let Mistral;
 let BedrockRuntimeClient;
+let Cerebras;
 try {
     OpenAI = optionalRequire("openai").default || optionalRequire("openai");
     if (process.env.DEBUG_GRAPHLIT_SDK_INITIALIZATION) {
@@ -110,6 +111,20 @@ catch (e) {
         console.log("[SDK Loading] Bedrock SDK not found:", e.message);
     }
 }
+try {
+    Cerebras =
+        optionalRequire("@cerebras/cerebras_cloud_sdk").default ||
+            optionalRequire("@cerebras/cerebras_cloud_sdk");
+    if (process.env.DEBUG_GRAPHLIT_SDK_INITIALIZATION) {
+        console.log("[SDK Loading] Cerebras SDK loaded successfully");
+    }
+}
+catch (e) {
+    // Cerebras SDK not installed
+    if (process.env.DEBUG_GRAPHLIT_SDK_INITIALIZATION) {
+        console.log("[SDK Loading] Cerebras SDK not found:", e.message);
+    }
+}
 const DEFAULT_MAX_TOOL_ROUNDS = 1000;
 // Helper function to validate GUID format
 function isValidGuid(guid) {
@@ -319,8 +334,8 @@ class Graphlit {
         this.groqClient = client;
     }
     /**
-     * Set a custom Cerebras client instance for streaming
-     * @param client -
+     * Set a custom Cerebras client instance for streaming
+     * @param client - Cerebras client instance (e.g., new Cerebras({ apiKey: "..." }))
      */
     setCerebrasClient(client) {
         this.cerebrasClient = client;
@@ -1831,7 +1846,7 @@ class Graphlit {
             case Types.ModelServiceTypes.Groq:
                 return Groq !== undefined || this.groqClient !== undefined;
             case Types.ModelServiceTypes.Cerebras:
-                return
+                return Cerebras !== undefined || this.cerebrasClient !== undefined;
             case Types.ModelServiceTypes.Cohere:
                 return (CohereClient !== undefined ||
                     CohereClientV2 !== undefined ||
@@ -1867,7 +1882,7 @@ class Graphlit {
         const hasAnthropic = Anthropic !== undefined || this.anthropicClient !== undefined;
         const hasGoogle = GoogleGenerativeAI !== undefined || this.googleClient !== undefined;
         const hasGroq = Groq !== undefined || this.groqClient !== undefined;
-        const hasCerebras =
+        const hasCerebras = Cerebras !== undefined || this.cerebrasClient !== undefined;
         const hasCohere = CohereClient !== undefined ||
             CohereClientV2 !== undefined ||
             this.cohereClient !== undefined;
@@ -2406,7 +2421,7 @@ class Graphlit {
             }
         }
         else if (serviceType === Types.ModelServiceTypes.Cerebras &&
-            (
+            (Cerebras || this.cerebrasClient)) {
             if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                 console.log(`\n✅ [Streaming] Using Cerebras native streaming (Round ${currentRound})`);
             }
@@ -2973,24 +2988,23 @@ class Graphlit {
         await streamWithGroq(specification, messages, tools, groqClient, (event) => uiAdapter.handleEvent(event), onComplete, abortSignal);
     }
     /**
-     * Stream with Cerebras client (
+     * Stream with Cerebras client (native SDK)
      */
     async streamWithCerebras(specification, messages, tools, uiAdapter, onComplete, abortSignal) {
-        // Check if we have either the
-        if (!
+        // Check if we have either the Cerebras module or a provided client
+        if (!Cerebras && !this.cerebrasClient) {
             throw new Error("Cerebras client not available");
         }
-        // Use provided client or create a new one
+        // Use provided client or create a new one with Cerebras native SDK
         const cerebrasClient = this.cerebrasClient ||
-            (
-                ? new
+            (Cerebras
+                ? new Cerebras({
                     apiKey: process.env.CEREBRAS_API_KEY || "",
-                    baseURL: "https://api.cerebras.ai/v1",
                     maxRetries: 3,
                     timeout: 60000, // 60 seconds
                 })
                 : (() => {
-                    throw new Error("
+                    throw new Error("Cerebras module not available");
                 })());
         if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
             console.log(`🚀 [Graphlit SDK] Routing to Cerebras streaming provider | Spec: ${specification.name} (${specification.id}) | Messages: ${messages.length} | Tools: ${tools?.length || 0}`);
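The restored lines all follow one precedence rule: an explicitly injected client wins, and the optionally-required module is the fallback. A condensed, self-contained restatement of that rule (the function name is illustrative, not part of the SDK):

    // Cerebras: the optionally-required module (may be undefined).
    // injected: a client supplied via setCerebrasClient (may be undefined).
    function resolveCerebrasClient(Cerebras: any, injected: any) {
        if (Cerebras === undefined && injected === undefined) {
            throw new Error("Cerebras client not available");
        }
        // Prefer the injected client; otherwise construct one from the module,
        // mirroring the defaults in the diff (env API key, retries, timeout).
        return (injected ||
            new Cerebras({
                apiKey: process.env.CEREBRAS_API_KEY || "",
                maxRetries: 3,
                timeout: 60000,
            }));
    }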
@@ -26,9 +26,9 @@ onEvent: (event: StreamEvent) => void, onComplete: (message: string, toolCalls:
 export declare function streamWithGroq(specification: Specification, messages: OpenAIMessage[], tools: ToolDefinitionInput[] | undefined, groqClient: any, // Groq client instance (OpenAI-compatible)
 onEvent: (event: StreamEvent) => void, onComplete: (message: string, toolCalls: ConversationToolCall[], usage?: any) => void, abortSignal?: AbortSignal): Promise<void>;
 /**
- * Stream with Cerebras SDK
+ * Stream with Cerebras native SDK
  */
-export declare function streamWithCerebras(specification: Specification, messages: OpenAIMessage[], tools: ToolDefinitionInput[] | undefined, cerebrasClient: any, //
+export declare function streamWithCerebras(specification: Specification, messages: OpenAIMessage[], tools: ToolDefinitionInput[] | undefined, cerebrasClient: any, // Cerebras native client instance
 onEvent: (event: StreamEvent) => void, onComplete: (message: string, toolCalls: ConversationToolCall[], usage?: any) => void, abortSignal?: AbortSignal): Promise<void>;
 /**
  * Stream with Deepseek SDK (OpenAI-compatible)
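A hedged call sketch against the declared signature (event and callback shapes are taken from the implementation further below; specification, messages, and cerebrasClient are placeholders the caller must supply):

    await streamWithCerebras(
        specification,  // Specification whose serviceType is Cerebras
        messages,       // OpenAIMessage[]
        undefined,      // no tools: keeps stream: true in the request
        cerebrasClient, // e.g. new Cerebras({ apiKey: "..." })
        (event) => {
            if (event.type === "token") process.stdout.write(event.token);
        },
        (message, toolCalls, usage) => {
            console.log("\ndone:", usage?.total_tokens ?? "?", "tokens");
        },
    );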
@@ -1309,14 +1309,24 @@ onEvent, onComplete, abortSignal) {
     }
 }
 /**
- * Stream with Cerebras SDK
+ * Stream with Cerebras native SDK
  */
-export async function streamWithCerebras(specification, messages, tools, cerebrasClient, //
+export async function streamWithCerebras(specification, messages, tools, cerebrasClient, // Cerebras native client instance
 onEvent, onComplete, abortSignal) {
+    let fullMessage = "";
+    let toolCalls = [];
+    let usageData = null;
+    // Performance metrics
+    const startTime = Date.now();
+    let firstTokenTime = 0;
+    let tokenCount = 0;
     try {
         const modelName = getModelName(specification);
+        if (!modelName) {
+            throw new Error(`No model name found for specification: ${specification.name} (service: ${specification.serviceType})`);
+        }
         // Cerebras has very limited tool support
-        let cerebrasTools =
+        let cerebrasTools = undefined;
         let filteredMessages = messages;
         if (modelName) {
             const isQwen = modelName.toLowerCase().includes("qwen-3-32b");
@@ -1326,7 +1336,17 @@ onEvent, onComplete, abortSignal) {
                 if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                     console.log(`⚠️ [Cerebras] Disabling tools for ${modelName} - only qwen-3-32b supports tools`);
                 }
-
+                }
+                else {
+                    // Format tools for Cerebras
+                    cerebrasTools = tools.map((tool) => ({
+                        type: "function",
+                        function: {
+                            name: tool.name,
+                            description: tool.description,
+                            parameters: tool.schema ? JSON.parse(tool.schema) : {},
+                        },
+                    }));
             }
         }
         // For non-qwen models, we need to filter out any assistant messages with tool_calls
@@ -1346,8 +1366,199 @@ onEvent, onComplete, abortSignal) {
             });
         }
     }
-        //
-
+        // Format messages for Cerebras API
+        const cerebrasMessages = filteredMessages.map((msg) => {
+            if (msg.role === "system") {
+                return { role: "system", content: msg.content || "" };
+            }
+            else if (msg.role === "user") {
+                return { role: "user", content: msg.content || "" };
+            }
+            else if (msg.role === "assistant") {
+                if (msg.tool_calls && msg.tool_calls.length > 0) {
+                    return {
+                        role: "assistant",
+                        content: msg.content || null,
+                        tool_calls: msg.tool_calls.map((tc) => ({
+                            id: tc.id,
+                            type: "function",
+                            function: {
+                                name: tc.function.name,
+                                arguments: tc.function.arguments,
+                            },
+                        })),
+                    };
+                }
+                return { role: "assistant", content: msg.content || "" };
+            }
+            else if (msg.role === "tool") {
+                return {
+                    role: "tool",
+                    content: msg.content || "",
+                    tool_call_id: msg.tool_call_id || "",
+                };
+            }
+            return msg;
+        });
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+            console.log(`🤖 [Cerebras] Model Config: Service=Cerebras | Model=${modelName} | Temperature=${specification.cerebras?.temperature} | MaxTokens=${specification.cerebras?.completionTokenLimit || "null"} | Tools=${cerebrasTools?.length || 0} | Spec="${specification.name}"`);
+        }
+        // Cerebras treats tool calling as structured outputs
+        // Their reasoning models don't support streaming with structured outputs
+        const hasTools = cerebrasTools && cerebrasTools.length > 0;
+        const streamConfig = {
+            model: modelName,
+            messages: cerebrasMessages,
+            stream: !hasTools, // Disable streaming when tools are present
+            temperature: specification.cerebras?.temperature,
+        };
+        // Only add max_tokens if it's defined
+        if (specification.cerebras?.completionTokenLimit) {
+            streamConfig.max_tokens = specification.cerebras.completionTokenLimit;
+        }
+        // Add tools if available
+        if (cerebrasTools) {
+            streamConfig.tools = cerebrasTools;
+        }
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+            console.log(`⏱️ [Cerebras] Starting LLM call at: ${new Date().toISOString()}`);
+            console.log(`📦 [Cerebras] Full request config:`, JSON.stringify(streamConfig, null, 2));
+        }
+        if (hasTools) {
+            // Non-streaming response when tools are present
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+                console.log(`🔧 [Cerebras] Using non-streaming mode due to tools`);
+            }
+            const response = await cerebrasClient.chat.completions.create(streamConfig);
+            // Process the complete response
+            if (response.choices && response.choices.length > 0) {
+                const choice = response.choices[0];
+                const message = choice.message;
+                // Handle content
+                if (message.content) {
+                    fullMessage = message.content;
+                    onEvent({ type: "token", token: message.content });
+                    onEvent({ type: "message", message: fullMessage });
+                }
+                // Handle tool calls
+                if (message.tool_calls && message.tool_calls.length > 0) {
+                    for (const toolCall of message.tool_calls) {
+                        const tc = {
+                            id: toolCall.id,
+                            name: toolCall.function.name,
+                            arguments: toolCall.function.arguments,
+                        };
+                        toolCalls.push(tc);
+                        // Emit tool events
+                        onEvent({
+                            type: "tool_call_start",
+                            toolCall: { id: tc.id, name: tc.name },
+                        });
+                        onEvent({
+                            type: "tool_call_parsed",
+                            toolCall: tc,
+                        });
+                    }
+                }
+            }
+            // Capture usage data
+            if (response.usage) {
+                usageData = {
+                    prompt_tokens: response.usage.prompt_tokens,
+                    completion_tokens: response.usage.completion_tokens,
+                    total_tokens: response.usage.total_tokens,
+                };
+            }
+            tokenCount = fullMessage.length; // Approximate for non-streaming
+        }
+        else {
+            // Streaming response when no tools
+            const stream = await cerebrasClient.chat.completions.create(streamConfig);
+            for await (const chunk of stream) {
+                // Handle abort signal
+                if (abortSignal?.aborted) {
+                    if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+                        console.log(`🛑 [Cerebras] Stream aborted by user`);
+                    }
+                    break;
+                }
+                const currentTime = Date.now();
+                tokenCount++;
+                if (tokenCount === 1) {
+                    firstTokenTime = currentTime - startTime;
+                    if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+                        console.log(`⚡ [Cerebras] First token received in ${firstTokenTime}ms`);
+                    }
+                }
+                // Process the chunk
+                if (chunk.choices && chunk.choices.length > 0) {
+                    const delta = chunk.choices[0].delta;
+                    // Handle content delta
+                    if (delta?.content) {
+                        fullMessage += delta.content;
+                        onEvent({ type: "token", token: delta.content });
+                    }
+                    // Handle tool calls (shouldn't happen in streaming mode but just in case)
+                    if (delta?.tool_calls) {
+                        for (const toolCall of delta.tool_calls) {
+                            const index = toolCall.index || 0;
+                            // Initialize tool call if needed
+                            if (!toolCalls[index]) {
+                                toolCalls[index] = {
+                                    id: toolCall.id || `tool_${Date.now()}_${index}`,
+                                    name: toolCall.function?.name || "",
+                                    arguments: "",
+                                };
+                                if (toolCall.function?.name) {
+                                    onEvent({
+                                        type: "tool_call_start",
+                                        toolCall: {
+                                            id: toolCalls[index].id,
+                                            name: toolCall.function.name,
+                                        },
+                                    });
+                                }
+                            }
+                            // Accumulate arguments
+                            if (toolCall.function?.arguments) {
+                                toolCalls[index].arguments += toolCall.function.arguments;
+                            }
+                        }
+                    }
+                    // Check for finish reason
+                    if (chunk.choices[0].finish_reason === "tool_calls" && toolCalls.length > 0) {
+                        // Emit tool_call_parsed events for completed tool calls
+                        for (const toolCall of toolCalls) {
+                            onEvent({
+                                type: "tool_call_parsed",
+                                toolCall: toolCall,
+                            });
+                        }
+                    }
+                }
+                // Capture usage data if available
+                if (chunk.usage) {
+                    usageData = {
+                        prompt_tokens: chunk.usage.prompt_tokens,
+                        completion_tokens: chunk.usage.completion_tokens,
+                        total_tokens: chunk.usage.total_tokens,
+                    };
+                }
+                // Emit current message
+                onEvent({
+                    type: "message",
+                    message: fullMessage,
+                });
+            }
+        }
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+            console.log(`✅ [Cerebras] Complete. Total tokens: ${tokenCount} | Message length: ${fullMessage.length}`);
+        }
+        onEvent({
+            type: "complete",
+            tokens: tokenCount,
+        });
+        onComplete(fullMessage, toolCalls, usageData);
     }
     catch (error) {
         // Handle Cerebras-specific 429 errors
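To make the message-formatting step concrete, a hypothetical one-tool-round history and what the mapping preserves (shapes inferred from the code above, not from Cerebras documentation):

    // Hypothetical input history for one completed tool round:
    const history = [
        { role: "user", content: "Weather in Paris?" },
        {
            role: "assistant",
            content: null,
            tool_calls: [
                { id: "call_1", function: { name: "get_weather", arguments: '{"city":"Paris"}' } },
            ],
        },
        { role: "tool", tool_call_id: "call_1", content: '{"tempC":21}' },
    ];
    // The map keeps each role as-is, re-tags every assistant tool call with
    // type: "function", and carries tool_call_id through on tool messages.
    // On non-qwen models the assistant tool_calls message is filtered out
    // earlier instead, since only qwen-3-32b supports tools here.

Note that when tools survive the filtering, the request goes out with stream: false, so consumers see the entire reply as a single token event followed by the complete event.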
@@ -31,6 +31,8 @@ export declare class UIEventAdapter {
     private reasoningSignature?;
     private isInReasoning;
     private usageData?;
+    private hasToolCallsInProgress;
+    private hadToolCallsBeforeResume;
     constructor(onEvent: (event: AgentStreamEvent) => void, conversationId: string, options?: {
         smoothingEnabled?: boolean;
         chunkingStrategy?: ChunkingStrategy;
@@ -30,6 +30,8 @@ export class UIEventAdapter {
     reasoningSignature;
     isInReasoning = false;
     usageData;
+    hasToolCallsInProgress = false;
+    hadToolCallsBeforeResume = false;
     constructor(onEvent, conversationId, options = {}) {
         this.onEvent = onEvent;
         this.conversationId = conversationId;
@@ -99,6 +101,9 @@ export class UIEventAdapter {
         this.lastTokenTime = 0;
         this.tokenCount = 0;
         this.tokenDelays = [];
+        // Reset tool call tracking flags
+        this.hasToolCallsInProgress = false;
+        this.hadToolCallsBeforeResume = false;
         // Note: We only clear tool calls here if this is truly a new conversation start
         // For multi-round tool calling, handleStart is only called once at the beginning
         if (this.activeToolCalls.size > 0) {
@@ -124,6 +129,19 @@ export class UIEventAdapter {
         }
         this.lastTokenTime = now;
         this.tokenCount++;
+        // Check if we're resuming after tool calls and need to add newlines
+        if (this.hadToolCallsBeforeResume && this.hasToolCallsInProgress === false) {
+            // We had tool calls before and now we're receiving content again
+            // Add double newline to separate the content from tool results
+            if (this.currentMessage.length > 0 && !this.currentMessage.endsWith('\n\n')) {
+                if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+                    console.log(`📝 [UIEventAdapter] Adding newlines after tool calls before resuming content`);
+                }
+                this.currentMessage += '\n\n';
+            }
+            // Reset the flag now that we've added the newlines
+            this.hadToolCallsBeforeResume = false;
+        }
         if (this.chunkBuffer) {
             const chunks = this.chunkBuffer.addToken(token);
             // Add chunks to queue for all chunking modes (character, word, sentence)
@@ -155,6 +173,9 @@ export class UIEventAdapter {
             toolCall: conversationToolCall,
             status: "preparing",
         });
+        // Mark that we have tool calls in progress
+        this.hasToolCallsInProgress = true;
+        this.hadToolCallsBeforeResume = true;
         if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
             console.log(`🔧 [UIEventAdapter] Active tool calls after: ${this.activeToolCalls.size}`);
         }
@@ -219,6 +240,9 @@ export class UIEventAdapter {
             toolCall: conversationToolCall,
             status: "ready",
         });
+        // Mark that we have tool calls
+        this.hasToolCallsInProgress = true;
+        this.hadToolCallsBeforeResume = true;
         this.emitUIEvent({
             type: "tool_update",
             toolCall: conversationToolCall,
@@ -246,6 +270,21 @@ export class UIEventAdapter {
         else {
             console.warn(`🔧 [UIEventAdapter] Tool call complete for unknown tool ID: ${toolCall.id}`);
         }
+        // Check if all tool calls are complete
+        let allComplete = true;
+        for (const [, data] of this.activeToolCalls) {
+            if (data.status !== "completed" && data.status !== "failed") {
+                allComplete = false;
+                break;
+            }
+        }
+        if (allComplete && this.activeToolCalls.size > 0) {
+            // All tool calls are complete, mark that we're no longer processing tools
+            this.hasToolCallsInProgress = false;
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+                console.log(`🔧 [UIEventAdapter] All tool calls complete, ready to resume content streaming`);
+            }
+        }
     }
     handleComplete(tokens) {
         if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
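The two flags added above act as a small state machine around tool rounds; a hypothetical event trace of the intended behavior (inferred from the handlers, not stated in the source):

    // tool_call_start arrives -> hasToolCallsInProgress = true,
    //                            hadToolCallsBeforeResume = true
    // last tool completes     -> hasToolCallsInProgress = false
    // next token arrives      -> hadToolCallsBeforeResume is still true, so
    //                            "\n\n" is appended once before the token and
    //                            the flag resets, separating pre-tool text
    //                            from the post-tool continuation.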
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "graphlit-client",
-  "version": "1.0.
+  "version": "1.0.20250716004",
   "description": "Graphlit API Client for TypeScript",
   "type": "module",
   "main": "./dist/client.js",
@@ -28,11 +28,7 @@
     "format": "prettier --write .",
     "build": "tsc -p tsconfig.json",
     "prepublishOnly": "npm run build",
-    "test": "vitest"
-    "test:watch": "vitest --watch",
-    "test:coverage": "vitest --coverage",
-    "test:ui": "vitest --ui",
-    "test:streaming": "vitest --run src/tests/streaming"
+    "test": "vitest"
   },
   "keywords": [
     "Graphlit",
@@ -49,6 +45,7 @@
   "license": "MIT",
   "dependencies": {
     "@apollo/client": "^3.13.8",
+    "@cerebras/cerebras_cloud_sdk": "^1.35.0",
     "@graphql-codegen/cli": "^5.0.7",
     "@graphql-codegen/typescript": "^4.1.6",
     "@graphql-codegen/typescript-operations": "^4.6.1",