graphlit-client 1.0.20250612003 → 1.0.20250612005

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
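The substantive change in this release is a split of the single DEBUG_GRAPHLIT_STREAMING debug flag into purpose-specific flags — DEBUG_GRAPHLIT_INITIALIZATION (SDK load/detection), DEBUG_GRAPHLIT_STREAMING (routing, timing, fallback), DEBUG_GRAPHLIT_STREAMING_MESSAGES (full message payloads), and DEBUG_GRAPHLIT_METRICS (performance and tool metrics) — along with consolidation of multi-line debug logs into single-line entries. The sketch below is not part of the package; the flag names are taken verbatim from the diff, while the single-argument promptConversation call shape and environment-based credentials are assumptions.

// debug-flags.mjs — a minimal sketch showing which environment variable now
// gates which log stream in graphlit-client (names from the diff below):
//
//   DEBUG_GRAPHLIT_INITIALIZATION     - provider SDK load/detection logs
//   DEBUG_GRAPHLIT_STREAMING          - streaming routing, timing, and fallback logs
//   DEBUG_GRAPHLIT_STREAMING_MESSAGES - full formatted message payloads sent to the LLM
//   DEBUG_GRAPHLIT_METRICS            - consolidated performance and tool-call metrics
//
// Run with e.g.: DEBUG_GRAPHLIT_STREAMING=1 DEBUG_GRAPHLIT_METRICS=1 node debug-flags.mjs
import { Graphlit } from "graphlit-client";

// Assumption: credentials are supplied via the SDK's usual environment variables.
const client = new Graphlit();

// Assumption: trailing promptConversation parameters are optional in the public API;
// the response shape below matches how the diff itself reads the result.
const response = await client.promptConversation("Hello, Graphlit");
console.log(response.promptConversation?.message?.message);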
package/dist/client.js CHANGED
@@ -18,13 +18,13 @@ let Anthropic;
  let GoogleGenerativeAI;
  try {
  OpenAI = optionalRequire("openai").default || optionalRequire("openai");
- if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ if (process.env.DEBUG_GRAPHLIT_INITIALIZATION) {
  console.log("[SDK Loading] OpenAI SDK loaded successfully");
  }
  }
  catch (e) {
  // OpenAI not installed
- if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ if (process.env.DEBUG_GRAPHLIT_INITIALIZATION) {
  console.log("[SDK Loading] OpenAI SDK not found:", e.message);
  }
  }
@@ -32,25 +32,25 @@ try {
  Anthropic =
  optionalRequire("@anthropic-ai/sdk").default ||
  optionalRequire("@anthropic-ai/sdk");
- if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ if (process.env.DEBUG_GRAPHLIT_INITIALIZATION) {
  console.log("[SDK Loading] Anthropic SDK loaded successfully");
  }
  }
  catch (e) {
  // Anthropic SDK not installed
- if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ if (process.env.DEBUG_GRAPHLIT_INITIALIZATION) {
  console.log("[SDK Loading] Anthropic SDK not found:", e.message);
  }
  }
  try {
  GoogleGenerativeAI = optionalRequire("@google/generative-ai").GoogleGenerativeAI;
- if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ if (process.env.DEBUG_GRAPHLIT_INITIALIZATION) {
  console.log("[SDK Loading] Google Generative AI SDK loaded successfully");
  }
  }
  catch (e) {
  // Google Generative AI not installed
- if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ if (process.env.DEBUG_GRAPHLIT_INITIALIZATION) {
  console.log("[SDK Loading] Google Generative AI SDK not found:", e.message);
  }
  }
@@ -1416,7 +1416,7 @@ class Graphlit {
  // If we have a full specification, check its service type
  if (specification) {
  const serviceType = specification.serviceType;
- if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ if (process.env.DEBUG_GRAPHLIT_INITIALIZATION) {
  console.log("[supportsStreaming] Checking support for:", {
  serviceType,
  hasOpenAI: OpenAI !== undefined || this.openaiClient !== undefined,
@@ -1430,7 +1430,7 @@ class Graphlit {
  case Types.ModelServiceTypes.Anthropic:
  return Anthropic !== undefined || this.anthropicClient !== undefined;
  case Types.ModelServiceTypes.Google:
- return GoogleGenerativeAI !== undefined || this.googleClient !== undefined;
+ return (GoogleGenerativeAI !== undefined || this.googleClient !== undefined);
  default:
  return false;
  }
@@ -1728,15 +1728,14 @@ class Graphlit {
  console.log(` Anthropic available: ${!!(Anthropic || this.anthropicClient)}`);
  console.log(` Google available: ${!!(GoogleGenerativeAI || this.googleClient)}`);
  }
- if (serviceType === Types.ModelServiceTypes.OpenAi && (OpenAI || this.openaiClient)) {
+ if (serviceType === Types.ModelServiceTypes.OpenAi &&
+ (OpenAI || this.openaiClient)) {
  if (process.env.DEBUG_GRAPHLIT_STREAMING) {
  console.log(`\nāœ… [Streaming] Using OpenAI native streaming (Round ${currentRound})`);
  }
  const openaiMessages = formatMessagesForOpenAI(messages);
- if (process.env.DEBUG_GRAPHLIT_STREAMING) {
- console.log("\nšŸ” [OpenAI] Formatted messages being sent to LLM:");
- console.log(JSON.stringify(openaiMessages, null, 2));
- console.log("Total messages:", openaiMessages.length);
+ if (process.env.DEBUG_GRAPHLIT_STREAMING_MESSAGES) {
+ console.log(`šŸ” [OpenAI] Sending ${openaiMessages.length} messages to LLM: ${JSON.stringify(openaiMessages)}`);
  }
  await this.streamWithOpenAI(specification, openaiMessages, tools, uiAdapter, (message, calls) => {
  roundMessage = message;
@@ -1752,11 +1751,8 @@ class Graphlit {
  console.log(`\nāœ… [Streaming] Using Anthropic native streaming (Round ${currentRound})`);
  }
  const { system, messages: anthropicMessages } = formatMessagesForAnthropic(messages);
- if (process.env.DEBUG_GRAPHLIT_STREAMING) {
- console.log("\nšŸ” [Anthropic] Formatted messages being sent to LLM:");
- console.log("System prompt:", system);
- console.log(JSON.stringify(anthropicMessages, null, 2));
- console.log("Total messages:", anthropicMessages.length);
+ if (process.env.DEBUG_GRAPHLIT_STREAMING_MESSAGES) {
+ console.log(`šŸ” [Anthropic] Sending ${anthropicMessages.length} messages to LLM (system: ${system ? "yes" : "no"}): ${JSON.stringify(anthropicMessages)}`);
  }
  await this.streamWithAnthropic(specification, anthropicMessages, system, tools, uiAdapter, (message, calls) => {
  roundMessage = message;
@@ -1772,10 +1768,8 @@ class Graphlit {
  console.log(`\nāœ… [Streaming] Using Google native streaming (Round ${currentRound})`);
  }
  const googleMessages = formatMessagesForGoogle(messages);
- if (process.env.DEBUG_GRAPHLIT_STREAMING) {
- console.log("\nšŸ” [Google] Formatted messages being sent to LLM:");
- console.log(JSON.stringify(googleMessages, null, 2));
- console.log("Total messages:", googleMessages.length);
+ if (process.env.DEBUG_GRAPHLIT_STREAMING_MESSAGES) {
+ console.log(`šŸ” [Google] Sending ${googleMessages.length} messages to LLM: ${JSON.stringify(googleMessages)}`);
  }
  // Google doesn't use system prompts separately, they're incorporated into messages
  await this.streamWithGoogle(specification, googleMessages, undefined, // systemPrompt - Google handles this differently
@@ -2042,19 +2036,12 @@ class Graphlit {
  */
  async fallbackToNonStreaming(prompt, conversationId, specification, tools, mimeType, data, uiAdapter, correlationId) {
  if (process.env.DEBUG_GRAPHLIT_STREAMING) {
- console.log(`\nšŸ”„ [Fallback] Starting non-streaming fallback`);
- console.log(` Conversation ID: ${conversationId}`);
- console.log(` Specification: ${specification.name} (${specification.serviceType})`);
- console.log(` Prompt: "${prompt.substring(0, 100)}${prompt.length > 100 ? '...' : ''}"`);
- console.log(` About to call promptConversation...`);
+ console.log(`šŸ”„ [Fallback] Starting non-streaming fallback | ConvID: ${conversationId} | Spec: ${specification.name} (${specification.serviceType}) | Prompt: "${prompt.substring(0, 50)}${prompt.length > 50 ? "..." : ""}"`);
  }
  const response = await this.promptConversation(prompt, conversationId, { id: specification.id }, mimeType, data, tools, false, false, correlationId);
  const message = response.promptConversation?.message;
  if (process.env.DEBUG_GRAPHLIT_STREAMING) {
- console.log(`\nāœ… [Fallback] promptConversation completed`);
- console.log(` Response message length: ${message?.message?.length || 0} chars`);
- console.log(` Response preview: "${message?.message?.substring(0, 100) || 'NO MESSAGE'}${(message?.message?.length || 0) > 100 ? '...' : ''}"`);
- console.log(` Now simulating streaming by splitting into tokens...`);
+ console.log(`āœ… [Fallback] promptConversation completed | Length: ${message?.message?.length || 0} chars | Preview: "${message?.message?.substring(0, 50) || "NO MESSAGE"}${(message?.message?.length || 0) > 50 ? "..." : ""}"`);
  }
  if (message?.message) {
  // Simulate streaming by emitting tokens
@@ -2079,14 +2066,15 @@ class Graphlit {
  }
  // Use provided client or create a new one
  const openaiClient = this.openaiClient ||
- (OpenAI ? new OpenAI({
- apiKey: process.env.OPENAI_API_KEY || "",
- }) : (() => { throw new Error("OpenAI module not available"); })());
+ (OpenAI
+ ? new OpenAI({
+ apiKey: process.env.OPENAI_API_KEY || "",
+ })
+ : (() => {
+ throw new Error("OpenAI module not available");
+ })());
  if (process.env.DEBUG_GRAPHLIT_STREAMING) {
- console.log("\nšŸš€ [Graphlit SDK] Routing to OpenAI streaming provider");
- console.log(` šŸ“‹ Specification: ${specification.name} (${specification.id})`);
- console.log(` šŸ“ Messages: ${messages.length}`);
- console.log(` šŸ”§ Tools: ${tools?.length || 0}`);
+ console.log(`šŸš€ [Graphlit SDK] Routing to OpenAI streaming provider | Spec: ${specification.name} (${specification.id}) | Messages: ${messages.length} | Tools: ${tools?.length || 0}`);
  }
  await streamWithOpenAI(specification, messages, tools, openaiClient, (event) => uiAdapter.handleEvent(event), onComplete);
  }
@@ -2100,15 +2088,15 @@ class Graphlit {
  }
  // Use provided client or create a new one
  const anthropicClient = this.anthropicClient ||
- (Anthropic ? new Anthropic({
- apiKey: process.env.ANTHROPIC_API_KEY || "",
- }) : (() => { throw new Error("Anthropic module not available"); })());
+ (Anthropic
+ ? new Anthropic({
+ apiKey: process.env.ANTHROPIC_API_KEY || "",
+ })
+ : (() => {
+ throw new Error("Anthropic module not available");
+ })());
  if (process.env.DEBUG_GRAPHLIT_STREAMING) {
- console.log("\nšŸš€ [Graphlit SDK] Routing to Anthropic streaming provider");
- console.log(` šŸ“‹ Specification: ${specification.name} (${specification.id})`);
- console.log(` šŸ“ Messages: ${messages.length}`);
- console.log(` šŸ”§ Tools: ${tools?.length || 0}`);
- console.log(` šŸ’¬ System Prompt: ${systemPrompt ? 'Yes' : 'No'}`);
+ console.log(`šŸš€ [Graphlit SDK] Routing to Anthropic streaming provider | Spec: ${specification.name} (${specification.id}) | Messages: ${messages.length} | Tools: ${tools?.length || 0} | SystemPrompt: ${systemPrompt ? "Yes" : "No"}`);
  }
  await streamWithAnthropic(specification, messages, systemPrompt, tools, anthropicClient, (event) => uiAdapter.handleEvent(event), onComplete);
  }
@@ -2122,13 +2110,13 @@ class Graphlit {
  }
  // Use provided client or create a new one
  const googleClient = this.googleClient ||
- (GoogleGenerativeAI ? new GoogleGenerativeAI(process.env.GOOGLE_API_KEY || "") : (() => { throw new Error("Google GenerativeAI module not available"); })());
+ (GoogleGenerativeAI
+ ? new GoogleGenerativeAI(process.env.GOOGLE_API_KEY || "")
+ : (() => {
+ throw new Error("Google GenerativeAI module not available");
+ })());
  if (process.env.DEBUG_GRAPHLIT_STREAMING) {
- console.log("\nšŸš€ [Graphlit SDK] Routing to Google streaming provider");
- console.log(` šŸ“‹ Specification: ${specification.name} (${specification.id})`);
- console.log(` šŸ“ Messages: ${messages.length}`);
- console.log(` šŸ”§ Tools: ${tools?.length || 0}`);
- console.log(` šŸ’¬ System Prompt: ${systemPrompt ? 'Yes' : 'No'}`);
+ console.log(`šŸš€ [Graphlit SDK] Routing to Google streaming provider | Spec: ${specification.name} (${specification.id}) | Messages: ${messages.length} | Tools: ${tools?.length || 0} | SystemPrompt: ${systemPrompt ? "Yes" : "No"}`);
  }
  await streamWithGoogle(specification, messages, systemPrompt, tools, googleClient, (event) => uiAdapter.handleEvent(event), onComplete);
  }
@@ -35,7 +35,7 @@ onEvent, onComplete) {
  currentToolStart: 0,
  roundStartTime: startTime,
  rounds: [],
- currentRound: 1
+ currentRound: 1,
  };
  try {
  const modelName = getModelName(specification);
@@ -43,13 +43,7 @@ onEvent, onComplete) {
  throw new Error(`No model name found for OpenAI specification: ${specification.name}`);
  }
  if (process.env.DEBUG_GRAPHLIT_STREAMING) {
- console.log("\nšŸ¤– [OpenAI] Model Configuration:");
- console.log(" Service: OpenAI");
- console.log(" Model:", modelName);
- console.log(" Temperature:", specification.openAI?.temperature);
- console.log(" Max Tokens:", specification.openAI?.completionTokenLimit);
- console.log(" Tools:", tools?.length || 0);
- console.log(" Specification Name:", specification.name);
+ console.log(`šŸ¤– [OpenAI] Model Config: Service=OpenAI | Model=${modelName} | Temperature=${specification.openAI?.temperature} | MaxTokens=${specification.openAI?.completionTokenLimit || "null"} | Tools=${tools?.length || 0} | Spec="${specification.name}"`);
  }
  const streamConfig = {
  model: modelName,
@@ -75,7 +69,7 @@ onEvent, onComplete) {
  }));
  }
  if (process.env.DEBUG_GRAPHLIT_STREAMING) {
- console.log("\nā±ļø [OpenAI] Starting LLM call at:", new Date().toISOString());
+ console.log(`ā±ļø [OpenAI] Starting LLM call at: ${new Date().toISOString()}`);
  }
  const stream = await openaiClient.chat.completions.create(streamConfig);
  for await (const chunk of stream) {
@@ -142,7 +136,7 @@ onEvent, onComplete) {
  name: toolCallDelta.function?.name || "unknown",
  startTime: toolMetrics.currentToolStart,
  argumentBuildTime: 0,
- totalTime: 0
+ totalTime: 0,
  });
  // Track TTFT for first tool if no content yet
  if (firstTokenTime === 0) {
@@ -240,38 +234,49 @@ onEvent, onComplete) {
  roundNumber: toolMetrics.currentRound,
  llmTime: llmTime,
  toolTime: totalToolTime,
- toolCount: toolCalls.length
+ toolCount: toolCalls.length,
  });
  }
- if (process.env.DEBUG_GRAPHLIT_STREAMING) {
- console.log("\nšŸ“Š [OpenAI] Performance Metrics:");
- console.log(` ā±ļø Total Time: ${totalTime}ms`);
- console.log(` ⚔ Time to First Token (TTFT): ${firstTokenTime}ms`);
- if (firstMeaningfulContentTime > 0) {
- console.log(` šŸŽÆ Time to First Meaningful Content: ${firstMeaningfulContentTime}ms`);
- }
- console.log(` šŸ“ˆ Content Tokens: ${tokenCount}`);
- console.log(` šŸ”§ Tool Argument Tokens: ${toolArgumentTokens}`);
- console.log(` šŸ“Š Total Tokens: ${totalTokens}`);
- console.log(` šŸ’Ø Tokens Per Second (TPS): ${tokensPerSecond.toFixed(2)}`);
+ if (process.env.DEBUG_GRAPHLIT_METRICS) {
+ const metricsData = {
+ totalTime: `${totalTime}ms`,
+ ttft: `${firstTokenTime}ms`,
+ ttfmc: firstMeaningfulContentTime > 0
+ ? `${firstMeaningfulContentTime}ms`
+ : null,
+ contentTokens: tokenCount,
+ toolTokens: toolArgumentTokens,
+ totalTokens: totalTokens,
+ tps: tokensPerSecond.toFixed(2),
+ };
+ console.log(`šŸ“Š [OpenAI] Performance: Total=${metricsData.totalTime} | TTFT=${metricsData.ttft}${metricsData.ttfmc ? ` | TTFMC=${metricsData.ttfmc}` : ""} | Tokens(content/tool/total)=${metricsData.contentTokens}/${metricsData.toolTokens}/${metricsData.totalTokens} | TPS=${metricsData.tps}`);
  // Tool calling metrics
  if (toolCalls.length > 0) {
- console.log(`\nšŸ”§ [OpenAI] Tool Calling Metrics:`);
- console.log(` šŸ› ļø Total Tools Called: ${toolMetrics.totalTools}`);
- console.log(` āœ… Successful Tools: ${toolMetrics.successfulTools}`);
- console.log(` āŒ Failed Tools: ${toolMetrics.failedTools}`);
- console.log(` šŸ“Š Success Rate: ${((toolMetrics.successfulTools / toolMetrics.totalTools) * 100).toFixed(1)}%`);
- // Tool timing details
- toolMetrics.toolTimes.forEach((tool, idx) => {
- console.log(` šŸ”Ø Tool ${idx + 1} (${tool.name}): ${tool.argumentBuildTime}ms`);
- });
- const avgToolTime = toolMetrics.toolTimes.reduce((sum, tool) => sum + tool.totalTime, 0) / toolMetrics.toolTimes.length;
- console.log(` ā±ļø Average Tool Time: ${avgToolTime.toFixed(2)}ms`);
- // Round metrics
- toolMetrics.rounds.forEach(round => {
- const efficiency = round.toolCount > 0 ? (round.llmTime / (round.llmTime + round.toolTime) * 100).toFixed(1) : 100;
- console.log(` šŸ”„ Round ${round.roundNumber}: LLM=${round.llmTime}ms, Tools=${round.toolTime}ms (${round.toolCount} tools), Efficiency=${efficiency}%`);
- });
+ const successRate = ((toolMetrics.successfulTools / toolMetrics.totalTools) *
+ 100).toFixed(1);
+ const avgToolTime = toolMetrics.toolTimes.reduce((sum, tool) => sum + tool.totalTime, 0) /
+ toolMetrics.toolTimes.length;
+ console.log(`šŸ”§ [OpenAI] Tools: Total=${toolMetrics.totalTools} | Success=${toolMetrics.successfulTools} | Failed=${toolMetrics.failedTools} | SuccessRate=${successRate}% | AvgTime=${avgToolTime.toFixed(2)}ms`);
+ // Tool timing details (consolidated)
+ const toolTimings = toolMetrics.toolTimes
+ .map((tool, idx) => `${tool.name}:${tool.argumentBuildTime}ms`)
+ .join(" | ");
+ if (toolTimings) {
+ console.log(`šŸ”Ø [OpenAI] Tool Timings: ${toolTimings}`);
+ }
+ // Round metrics (consolidated)
+ const roundMetrics = toolMetrics.rounds
+ .map((round) => {
+ const efficiency = round.toolCount > 0
+ ? ((round.llmTime / (round.llmTime + round.toolTime)) *
+ 100).toFixed(1)
+ : 100;
+ return `R${round.roundNumber}(LLM:${round.llmTime}ms,Tools:${round.toolTime}ms,Eff:${efficiency}%)`;
+ })
+ .join(" | ");
+ if (roundMetrics) {
+ console.log(`šŸ”„ [OpenAI] Rounds: ${roundMetrics}`);
+ }
  }
  if (interTokenDelays.length > 0) {
  const avgDelay = interTokenDelays.reduce((a, b) => a + b, 0) / interTokenDelays.length;
@@ -279,13 +284,9 @@ onEvent, onComplete) {
  const p50Delay = sortedDelays[Math.floor(sortedDelays.length * 0.5)];
  const p95Delay = sortedDelays[Math.floor(sortedDelays.length * 0.95)];
  const p99Delay = sortedDelays[Math.floor(sortedDelays.length * 0.99)];
- console.log(`\nā³ [OpenAI] Inter-Token Timing:`);
- console.log(` šŸ“Š Average Delay: ${avgDelay.toFixed(2)}ms`);
- console.log(` šŸ“Š P50 Delay: ${p50Delay}ms`);
- console.log(` āš ļø P95 Delay: ${p95Delay}ms`);
- console.log(` 🚨 P99 Delay: ${p99Delay}ms`);
+ console.log(`ā³ [OpenAI] Inter-Token: Avg=${avgDelay.toFixed(2)}ms | P50=${p50Delay}ms | P95=${p95Delay}ms | P99=${p99Delay}ms`);
  }
- console.log(`\nāœ… [OpenAI] Final message (${fullMessage.length} chars): "${fullMessage}"`);
+ console.log(`āœ… [OpenAI] Final message (${fullMessage.length} chars): "${fullMessage}"`);
  }
  onComplete(fullMessage, toolCalls);
  }
@@ -318,7 +319,7 @@ onEvent, onComplete) {
  currentToolStart: 0,
  roundStartTime: startTime,
  rounds: [],
- currentRound: 1
+ currentRound: 1,
  };
  try {
  const modelName = getModelName(specification);
@@ -326,14 +327,7 @@ onEvent, onComplete) {
  throw new Error(`No model name found for Anthropic specification: ${specification.name}`);
  }
  if (process.env.DEBUG_GRAPHLIT_STREAMING) {
- console.log("\nšŸ¤– [Anthropic] Model Configuration:");
- console.log(" Service: Anthropic");
- console.log(" Model:", modelName);
- console.log(" Temperature:", specification.anthropic?.temperature);
- console.log(" Max Tokens:", specification.anthropic?.completionTokenLimit || 8192);
- console.log(" System Prompt:", systemPrompt ? "Yes" : "No");
- console.log(" Tools:", tools?.length || 0);
- console.log(" Specification Name:", specification.name);
+ console.log(`šŸ¤– [Anthropic] Model Config: Service=Anthropic | Model=${modelName} | Temperature=${specification.anthropic?.temperature} | MaxTokens=${specification.anthropic?.completionTokenLimit || 8192} | SystemPrompt=${systemPrompt ? "Yes" : "No"} | Tools=${tools?.length || 0} | Spec="${specification.name}"`);
  }
  const streamConfig = {
  model: modelName,
@@ -355,7 +349,7 @@ onEvent, onComplete) {
  }));
  }
  if (process.env.DEBUG_GRAPHLIT_STREAMING) {
- console.log("\nā±ļø [Anthropic] Starting LLM call at:", new Date().toISOString());
+ console.log(`ā±ļø [Anthropic] Starting LLM call at: ${new Date().toISOString()}`);
  }
  const stream = await anthropicClient.messages.create(streamConfig);
  let activeContentBlock = false;
@@ -380,7 +374,7 @@ onEvent, onComplete) {
  name: toolCall.name,
  startTime: toolMetrics.currentToolStart,
  argumentBuildTime: 0,
- totalTime: 0
+ totalTime: 0,
  });
  // Track TTFT for first tool if no content yet
  if (firstTokenTime === 0) {
@@ -550,38 +544,49 @@ onEvent, onComplete) {
  roundNumber: toolMetrics.currentRound,
  llmTime: llmTime,
  toolTime: totalToolTime,
- toolCount: validToolCalls.length
+ toolCount: validToolCalls.length,
  });
  }
- if (process.env.DEBUG_GRAPHLIT_STREAMING) {
- console.log("\nšŸ“Š [Anthropic] Performance Metrics:");
- console.log(` ā±ļø Total Time: ${totalTime}ms`);
- console.log(` ⚔ Time to First Token (TTFT): ${firstTokenTime}ms`);
- if (firstMeaningfulContentTime > 0) {
- console.log(` šŸŽÆ Time to First Meaningful Content: ${firstMeaningfulContentTime}ms`);
- }
- console.log(` šŸ“ˆ Content Tokens: ${tokenCount}`);
- console.log(` šŸ”§ Tool Argument Tokens: ${toolArgumentTokens}`);
- console.log(` šŸ“Š Total Tokens: ${totalTokens}`);
- console.log(` šŸ’Ø Tokens Per Second (TPS): ${tokensPerSecond.toFixed(2)}`);
+ if (process.env.DEBUG_GRAPHLIT_METRICS) {
+ const metricsData = {
+ totalTime: `${totalTime}ms`,
+ ttft: `${firstTokenTime}ms`,
+ ttfmc: firstMeaningfulContentTime > 0
+ ? `${firstMeaningfulContentTime}ms`
+ : null,
+ contentTokens: tokenCount,
+ toolTokens: toolArgumentTokens,
+ totalTokens: totalTokens,
+ tps: tokensPerSecond.toFixed(2),
+ };
+ console.log(`šŸ“Š [Anthropic] Performance: Total=${metricsData.totalTime} | TTFT=${metricsData.ttft}${metricsData.ttfmc ? ` | TTFMC=${metricsData.ttfmc}` : ""} | Tokens(content/tool/total)=${metricsData.contentTokens}/${metricsData.toolTokens}/${metricsData.totalTokens} | TPS=${metricsData.tps}`);
  // Tool calling metrics
  if (validToolCalls.length > 0) {
- console.log(`\nšŸ”§ [Anthropic] Tool Calling Metrics:`);
- console.log(` šŸ› ļø Total Tools Called: ${toolMetrics.totalTools}`);
- console.log(` āœ… Successful Tools: ${toolMetrics.successfulTools}`);
- console.log(` āŒ Failed Tools: ${toolMetrics.failedTools}`);
- console.log(` šŸ“Š Success Rate: ${((toolMetrics.successfulTools / toolMetrics.totalTools) * 100).toFixed(1)}%`);
- // Tool timing details
- toolMetrics.toolTimes.forEach((tool, idx) => {
- console.log(` šŸ”Ø Tool ${idx + 1} (${tool.name}): ${tool.argumentBuildTime}ms`);
- });
- const avgToolTime = toolMetrics.toolTimes.reduce((sum, tool) => sum + tool.totalTime, 0) / toolMetrics.toolTimes.length;
- console.log(` ā±ļø Average Tool Time: ${avgToolTime.toFixed(2)}ms`);
- // Round metrics
- toolMetrics.rounds.forEach(round => {
- const efficiency = round.toolCount > 0 ? (round.llmTime / (round.llmTime + round.toolTime) * 100).toFixed(1) : 100;
- console.log(` šŸ”„ Round ${round.roundNumber}: LLM=${round.llmTime}ms, Tools=${round.toolTime}ms (${round.toolCount} tools), Efficiency=${efficiency}%`);
- });
+ const successRate = ((toolMetrics.successfulTools / toolMetrics.totalTools) *
+ 100).toFixed(1);
+ const avgToolTime = toolMetrics.toolTimes.reduce((sum, tool) => sum + tool.totalTime, 0) /
+ toolMetrics.toolTimes.length;
+ console.log(`šŸ”§ [Anthropic] Tools: Total=${toolMetrics.totalTools} | Success=${toolMetrics.successfulTools} | Failed=${toolMetrics.failedTools} | SuccessRate=${successRate}% | AvgTime=${avgToolTime.toFixed(2)}ms`);
+ // Tool timing details (consolidated)
+ const toolTimings = toolMetrics.toolTimes
+ .map((tool, idx) => `${tool.name}:${tool.argumentBuildTime}ms`)
+ .join(" | ");
+ if (toolTimings) {
+ console.log(`šŸ”Ø [Anthropic] Tool Timings: ${toolTimings}`);
+ }
+ // Round metrics (consolidated)
+ const roundMetrics = toolMetrics.rounds
+ .map((round) => {
+ const efficiency = round.toolCount > 0
+ ? ((round.llmTime / (round.llmTime + round.toolTime)) *
+ 100).toFixed(1)
+ : 100;
+ return `R${round.roundNumber}(LLM:${round.llmTime}ms,Tools:${round.toolTime}ms,Eff:${efficiency}%)`;
+ })
+ .join(" | ");
+ if (roundMetrics) {
+ console.log(`šŸ”„ [Anthropic] Rounds: ${roundMetrics}`);
+ }
  }
  if (interTokenDelays.length > 0) {
  const avgDelay = interTokenDelays.reduce((a, b) => a + b, 0) / interTokenDelays.length;
@@ -589,13 +594,9 @@ onEvent, onComplete) {
  const p50Delay = sortedDelays[Math.floor(sortedDelays.length * 0.5)];
  const p95Delay = sortedDelays[Math.floor(sortedDelays.length * 0.95)];
  const p99Delay = sortedDelays[Math.floor(sortedDelays.length * 0.99)];
- console.log(`\nā³ [Anthropic] Inter-Token Timing:`);
- console.log(` šŸ“Š Average Delay: ${avgDelay.toFixed(2)}ms`);
- console.log(` šŸ“Š P50 Delay: ${p50Delay}ms`);
- console.log(` āš ļø P95 Delay: ${p95Delay}ms`);
- console.log(` 🚨 P99 Delay: ${p99Delay}ms`);
+ console.log(`ā³ [Anthropic] Inter-Token: Avg=${avgDelay.toFixed(2)}ms | P50=${p50Delay}ms | P95=${p95Delay}ms | P99=${p99Delay}ms`);
  }
- console.log(`\nāœ… [Anthropic] Final message (${fullMessage.length} chars): "${fullMessage}"`);
+ console.log(`āœ… [Anthropic] Final message (${fullMessage.length} chars): "${fullMessage}"`);
  }
  onComplete(fullMessage, validToolCalls);
  }
@@ -628,7 +629,7 @@ onEvent, onComplete) {
  currentToolStart: 0,
  roundStartTime: startTime,
  rounds: [],
- currentRound: 1
+ currentRound: 1,
  };
  try {
  const modelName = getModelName(specification);
@@ -636,14 +637,7 @@ onEvent, onComplete) {
  throw new Error(`No model name found for Google specification: ${specification.name}`);
  }
  if (process.env.DEBUG_GRAPHLIT_STREAMING) {
- console.log("\nšŸ¤– [Google] Model Configuration:");
- console.log(" Service: Google");
- console.log(" Model:", modelName);
- console.log(" Temperature:", specification.google?.temperature);
- console.log(" Max Tokens:", specification.google?.completionTokenLimit);
- console.log(" System Prompt:", systemPrompt ? "Yes" : "No");
- console.log(" Tools:", tools?.length || 0);
- console.log(" Specification Name:", specification.name);
+ console.log(`šŸ¤– [Google] Model Config: Service=Google | Model=${modelName} | Temperature=${specification.google?.temperature} | MaxTokens=${specification.google?.completionTokenLimit || "null"} | SystemPrompt=${systemPrompt ? "Yes" : "No"} | Tools=${tools?.length || 0} | Spec="${specification.name}"`);
  }
  const streamConfig = {
  model: modelName,
@@ -749,7 +743,7 @@ onEvent, onComplete) {
  name: part.functionCall.name,
  startTime: Date.now(),
  argumentBuildTime: 0, // Google returns complete args at once
- totalTime: 0
+ totalTime: 0,
  });
  // Track TTFT for first tool if no content yet
  if (firstTokenTime === 0) {
@@ -892,38 +886,49 @@ onEvent, onComplete) {
  roundNumber: toolMetrics.currentRound,
  llmTime: llmTime,
  toolTime: totalToolTime,
- toolCount: toolCalls.length
+ toolCount: toolCalls.length,
  });
  }
- if (process.env.DEBUG_GRAPHLIT_STREAMING) {
- console.log("\nšŸ“Š [Google] Performance Metrics:");
- console.log(` ā±ļø Total Time: ${totalTime}ms`);
- console.log(` ⚔ Time to First Token (TTFT): ${firstTokenTime}ms`);
- if (firstMeaningfulContentTime > 0) {
- console.log(` šŸŽÆ Time to First Meaningful Content: ${firstMeaningfulContentTime}ms`);
- }
- console.log(` šŸ“ˆ Content Tokens: ${tokenCount}`);
- console.log(` šŸ”§ Tool Argument Tokens: ${toolArgumentTokens}`);
- console.log(` šŸ“Š Total Tokens: ${totalTokens}`);
- console.log(` šŸ’Ø Tokens Per Second (TPS): ${tokensPerSecond.toFixed(2)}`);
+ if (process.env.DEBUG_GRAPHLIT_METRICS) {
+ const metricsData = {
+ totalTime: `${totalTime}ms`,
+ ttft: `${firstTokenTime}ms`,
+ ttfmc: firstMeaningfulContentTime > 0
+ ? `${firstMeaningfulContentTime}ms`
+ : null,
+ contentTokens: tokenCount,
+ toolTokens: toolArgumentTokens,
+ totalTokens: totalTokens,
+ tps: tokensPerSecond.toFixed(2),
+ };
+ console.log(`šŸ“Š [Google] Performance: Total=${metricsData.totalTime} | TTFT=${metricsData.ttft}${metricsData.ttfmc ? ` | TTFMC=${metricsData.ttfmc}` : ""} | Tokens(content/tool/total)=${metricsData.contentTokens}/${metricsData.toolTokens}/${metricsData.totalTokens} | TPS=${metricsData.tps}`);
  // Tool calling metrics
  if (toolCalls.length > 0) {
- console.log(`\nšŸ”§ [Google] Tool Calling Metrics:`);
- console.log(` šŸ› ļø Total Tools Called: ${toolMetrics.totalTools}`);
- console.log(` āœ… Successful Tools: ${toolMetrics.successfulTools}`);
- console.log(` āŒ Failed Tools: ${toolMetrics.failedTools}`);
- console.log(` šŸ“Š Success Rate: ${((toolMetrics.successfulTools / toolMetrics.totalTools) * 100).toFixed(1)}%`);
- // Tool timing details
- toolMetrics.toolTimes.forEach((tool, idx) => {
- console.log(` šŸ”Ø Tool ${idx + 1} (${tool.name}): ${tool.argumentBuildTime}ms`);
- });
- const avgToolTime = toolMetrics.toolTimes.reduce((sum, tool) => sum + tool.totalTime, 0) / toolMetrics.toolTimes.length;
- console.log(` ā±ļø Average Tool Time: ${avgToolTime.toFixed(2)}ms`);
- // Round metrics
- toolMetrics.rounds.forEach(round => {
- const efficiency = round.toolCount > 0 ? (round.llmTime / (round.llmTime + round.toolTime) * 100).toFixed(1) : 100;
- console.log(` šŸ”„ Round ${round.roundNumber}: LLM=${round.llmTime}ms, Tools=${round.toolTime}ms (${round.toolCount} tools), Efficiency=${efficiency}%`);
- });
+ const successRate = ((toolMetrics.successfulTools / toolMetrics.totalTools) *
+ 100).toFixed(1);
+ const avgToolTime = toolMetrics.toolTimes.reduce((sum, tool) => sum + tool.totalTime, 0) /
+ toolMetrics.toolTimes.length;
+ console.log(`šŸ”§ [Google] Tools: Total=${toolMetrics.totalTools} | Success=${toolMetrics.successfulTools} | Failed=${toolMetrics.failedTools} | SuccessRate=${successRate}% | AvgTime=${avgToolTime.toFixed(2)}ms`);
+ // Tool timing details (consolidated)
+ const toolTimings = toolMetrics.toolTimes
+ .map((tool, idx) => `${tool.name}:${tool.argumentBuildTime}ms`)
+ .join(" | ");
+ if (toolTimings) {
+ console.log(`šŸ”Ø [Google] Tool Timings: ${toolTimings}`);
+ }
+ // Round metrics (consolidated)
+ const roundMetrics = toolMetrics.rounds
+ .map((round) => {
+ const efficiency = round.toolCount > 0
+ ? ((round.llmTime / (round.llmTime + round.toolTime)) *
+ 100).toFixed(1)
+ : 100;
+ return `R${round.roundNumber}(LLM:${round.llmTime}ms,Tools:${round.toolTime}ms,Eff:${efficiency}%)`;
+ })
+ .join(" | ");
+ if (roundMetrics) {
+ console.log(`šŸ”„ [Google] Rounds: ${roundMetrics}`);
+ }
  }
  if (interTokenDelays.length > 0) {
  const avgDelay = interTokenDelays.reduce((a, b) => a + b, 0) / interTokenDelays.length;
@@ -931,13 +936,9 @@ onEvent, onComplete) {
  const p50Delay = sortedDelays[Math.floor(sortedDelays.length * 0.5)];
  const p95Delay = sortedDelays[Math.floor(sortedDelays.length * 0.95)];
  const p99Delay = sortedDelays[Math.floor(sortedDelays.length * 0.99)];
- console.log(`\nā³ [Google] Inter-Token Timing:`);
- console.log(` šŸ“Š Average Delay: ${avgDelay.toFixed(2)}ms`);
- console.log(` šŸ“Š P50 Delay: ${p50Delay}ms`);
- console.log(` āš ļø P95 Delay: ${p95Delay}ms`);
- console.log(` 🚨 P99 Delay: ${p99Delay}ms`);
+ console.log(`ā³ [Google] Inter-Token: Avg=${avgDelay.toFixed(2)}ms | P50=${p50Delay}ms | P95=${p95Delay}ms | P99=${p99Delay}ms`);
  }
- console.log(`\nāœ… [Google] Final message (${fullMessage.length} chars): "${fullMessage}"`);
+ console.log(`āœ… [Google] Final message (${fullMessage.length} chars): "${fullMessage}"`);
  }
  onComplete(fullMessage, toolCalls);
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "graphlit-client",
- "version": "1.0.20250612003",
+ "version": "1.0.20250612005",
  "description": "Graphlit API Client for TypeScript",
  "type": "module",
  "main": "./dist/client.js",