graphlit-client 1.0.20250612005 → 1.0.20250612007

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/client.js CHANGED
@@ -18,13 +18,13 @@ let Anthropic;
 let GoogleGenerativeAI;
 try {
     OpenAI = optionalRequire("openai").default || optionalRequire("openai");
-    if (process.env.DEBUG_GRAPHLIT_INITIALIZATION) {
+    if (process.env.DEBUG_GRAPHLIT_SDK_INITIALIZATION) {
         console.log("[SDK Loading] OpenAI SDK loaded successfully");
     }
 }
 catch (e) {
     // OpenAI not installed
-    if (process.env.DEBUG_GRAPHLIT_INITIALIZATION) {
+    if (process.env.DEBUG_GRAPHLIT_SDK_INITIALIZATION) {
         console.log("[SDK Loading] OpenAI SDK not found:", e.message);
     }
 }
@@ -32,25 +32,25 @@ try {
     Anthropic =
         optionalRequire("@anthropic-ai/sdk").default ||
             optionalRequire("@anthropic-ai/sdk");
-    if (process.env.DEBUG_GRAPHLIT_INITIALIZATION) {
+    if (process.env.DEBUG_GRAPHLIT_SDK_INITIALIZATION) {
         console.log("[SDK Loading] Anthropic SDK loaded successfully");
     }
 }
 catch (e) {
     // Anthropic SDK not installed
-    if (process.env.DEBUG_GRAPHLIT_INITIALIZATION) {
+    if (process.env.DEBUG_GRAPHLIT_SDK_INITIALIZATION) {
         console.log("[SDK Loading] Anthropic SDK not found:", e.message);
     }
 }
 try {
     GoogleGenerativeAI = optionalRequire("@google/generative-ai").GoogleGenerativeAI;
-    if (process.env.DEBUG_GRAPHLIT_INITIALIZATION) {
+    if (process.env.DEBUG_GRAPHLIT_SDK_INITIALIZATION) {
         console.log("[SDK Loading] Google Generative AI SDK loaded successfully");
     }
 }
 catch (e) {
     // Google Generative AI not installed
-    if (process.env.DEBUG_GRAPHLIT_INITIALIZATION) {
+    if (process.env.DEBUG_GRAPHLIT_SDK_INITIALIZATION) {
         console.log("[SDK Loading] Google Generative AI SDK not found:", e.message);
     }
 }
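
The changes in this version are confined to debug logging: every DEBUG_GRAPHLIT_* environment variable gains an SDK infix, becoming DEBUG_GRAPHLIT_SDK_*. A minimal sketch of enabling the renamed initialization flag; it must be set before the client module is imported, since the optional provider loading above runs at import time (the dynamic-import shape here is illustrative, not taken from this diff):

    // Any truthy value enables the logging.
    process.env.DEBUG_GRAPHLIT_SDK_INITIALIZATION = "1";
    const { Graphlit } = await import("graphlit-client");
    // Expected console output for each provider SDK that resolves, e.g.:
    //   [SDK Loading] OpenAI SDK loaded successfully
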
@@ -1416,12 +1416,16 @@ class Graphlit {
         // If we have a full specification, check its service type
         if (specification) {
             const serviceType = specification.serviceType;
-            if (process.env.DEBUG_GRAPHLIT_INITIALIZATION) {
-                console.log("[supportsStreaming] Checking support for:", {
+            if (process.env.DEBUG_GRAPHLIT_SDK_INITIALIZATION) {
+                console.log("[supportsStreaming] Checking support for specification:", {
+                    specificationName: specification.name,
                     serviceType,
-                    hasOpenAI: OpenAI !== undefined || this.openaiClient !== undefined,
-                    hasAnthropic: Anthropic !== undefined || this.anthropicClient !== undefined,
-                    hasGoogle: GoogleGenerativeAI !== undefined || this.googleClient !== undefined,
+                    moduleOpenAI: OpenAI !== undefined,
+                    instanceOpenAI: this.openaiClient !== undefined,
+                    moduleAnthropic: Anthropic !== undefined,
+                    instanceAnthropic: this.anthropicClient !== undefined,
+                    moduleGoogle: GoogleGenerativeAI !== undefined,
+                    instanceGoogle: this.googleClient !== undefined,
                 });
             }
             switch (serviceType) {
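
Beyond the rename, the supportsStreaming debug payload now names the specification and splits each provider's availability into its statically loaded module and any per-instance client, so the log shows which of the two detection paths succeeded. An illustrative log entry, with entirely hypothetical values:

    [supportsStreaming] Checking support for specification: {
      specificationName: 'Default GPT-4o',   // hypothetical
      serviceType: 'OPEN_AI',                // hypothetical
      moduleOpenAI: true,
      instanceOpenAI: false,
      moduleAnthropic: false,
      instanceAnthropic: false,
      moduleGoogle: false,
      instanceGoogle: false
    }
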
@@ -1583,7 +1587,7 @@ class Graphlit {
         }
         // Check streaming support - fallback to promptAgent if not supported
         if (fullSpec && !this.supportsStreaming(fullSpec)) {
-            if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                 console.log("\n⚠️ [streamAgent] Streaming not supported, falling back to promptAgent with same conversation");
             }
             // Fallback to promptAgent using the same conversation and parameters
@@ -1677,7 +1681,7 @@ class Graphlit {
         if (!formattedMessage?.message) {
             throw new Error("Failed to format conversation");
         }
-        if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
             console.log("\n📋 [formatConversation] Response", formattedMessage.message);
         }
         // Build message array with conversation history
@@ -1704,7 +1708,7 @@ class Graphlit {
         if (mimeType && data) {
             messageToAdd.mimeType = mimeType;
             messageToAdd.data = data;
-            if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                 console.log(`\n🖼️ [Streaming] Adding image data to message: ${mimeType}, ${data.length} chars`);
             }
         }
@@ -1722,7 +1726,7 @@ class Graphlit {
         let toolCalls = [];
         let roundMessage = "";
         // Stream with appropriate provider
-        if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
             console.log(`\n🔀 [Streaming Decision] Service: ${serviceType}, Round: ${currentRound}`);
             console.log(`  OpenAI available: ${!!(OpenAI || this.openaiClient)}`);
             console.log(`  Anthropic available: ${!!(Anthropic || this.anthropicClient)}`);
@@ -1730,45 +1734,45 @@ class Graphlit {
         }
         if (serviceType === Types.ModelServiceTypes.OpenAi &&
             (OpenAI || this.openaiClient)) {
-            if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                 console.log(`\n✅ [Streaming] Using OpenAI native streaming (Round ${currentRound})`);
             }
             const openaiMessages = formatMessagesForOpenAI(messages);
-            if (process.env.DEBUG_GRAPHLIT_STREAMING_MESSAGES) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING_MESSAGES) {
                 console.log(`🔍 [OpenAI] Sending ${openaiMessages.length} messages to LLM: ${JSON.stringify(openaiMessages)}`);
             }
             await this.streamWithOpenAI(specification, openaiMessages, tools, uiAdapter, (message, calls) => {
                 roundMessage = message;
                 toolCalls = calls;
             });
-            if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                 console.log(`\n🏁 [Streaming] OpenAI native streaming completed (Round ${currentRound})`);
             }
         }
         else if (serviceType === Types.ModelServiceTypes.Anthropic &&
             (Anthropic || this.anthropicClient)) {
-            if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                 console.log(`\n✅ [Streaming] Using Anthropic native streaming (Round ${currentRound})`);
             }
             const { system, messages: anthropicMessages } = formatMessagesForAnthropic(messages);
-            if (process.env.DEBUG_GRAPHLIT_STREAMING_MESSAGES) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING_MESSAGES) {
                 console.log(`🔍 [Anthropic] Sending ${anthropicMessages.length} messages to LLM (system: ${system ? "yes" : "no"}): ${JSON.stringify(anthropicMessages)}`);
             }
             await this.streamWithAnthropic(specification, anthropicMessages, system, tools, uiAdapter, (message, calls) => {
                 roundMessage = message;
                 toolCalls = calls;
             });
-            if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                 console.log(`\n🏁 [Streaming] Anthropic native streaming completed (Round ${currentRound})`);
             }
         }
         else if (serviceType === Types.ModelServiceTypes.Google &&
             (GoogleGenerativeAI || this.googleClient)) {
-            if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                 console.log(`\n✅ [Streaming] Using Google native streaming (Round ${currentRound})`);
             }
             const googleMessages = formatMessagesForGoogle(messages);
-            if (process.env.DEBUG_GRAPHLIT_STREAMING_MESSAGES) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING_MESSAGES) {
                 console.log(`🔍 [Google] Sending ${googleMessages.length} messages to LLM: ${JSON.stringify(googleMessages)}`);
             }
             // Google doesn't use system prompts separately, they're incorporated into messages
@@ -1777,19 +1781,19 @@ class Graphlit {
                 roundMessage = message;
                 toolCalls = calls;
             });
-            if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                 console.log(`\n🏁 [Streaming] Google native streaming completed (Round ${currentRound})`);
             }
         }
         else {
             // Fallback to non-streaming
-            if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                 console.log(`\n⚠️ [Fallback] No native streaming available for ${serviceType} (Round ${currentRound})`);
                 console.log(`  Falling back to non-streaming promptConversation`);
                 console.log(`  This should NOT happen if clients are properly set!`);
             }
             await this.fallbackToNonStreaming(prompt, conversationId, specification, tools, mimeType, data, uiAdapter, correlationId);
-            if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                 console.log(`\n🏁 [Fallback] Non-streaming fallback completed (Round ${currentRound})`);
             }
             break;
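
A minimal sketch of exercising these paths with the renamed streaming flags; the streamAgent call shape below is an assumption for illustration, not taken from this diff:

    process.env.DEBUG_GRAPHLIT_SDK_STREAMING = "1";          // routing, rounds, fallbacks
    process.env.DEBUG_GRAPHLIT_SDK_STREAMING_MESSAGES = "1"; // full LLM message payloads
    const client = new Graphlit(); // credentials assumed to come from environment variables
    await client.streamAgent("Hello!", (event) => {
        // UI events arrive here; the debug output above goes to the console
    });
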
@@ -2035,12 +2039,12 @@ class Graphlit {
      * Fallback to non-streaming when streaming is not available
      */
     async fallbackToNonStreaming(prompt, conversationId, specification, tools, mimeType, data, uiAdapter, correlationId) {
-        if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
             console.log(`🔄 [Fallback] Starting non-streaming fallback | ConvID: ${conversationId} | Spec: ${specification.name} (${specification.serviceType}) | Prompt: "${prompt.substring(0, 50)}${prompt.length > 50 ? "..." : ""}"`);
         }
         const response = await this.promptConversation(prompt, conversationId, { id: specification.id }, mimeType, data, tools, false, false, correlationId);
         const message = response.promptConversation?.message;
-        if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
             console.log(`✅ [Fallback] promptConversation completed | Length: ${message?.message?.length || 0} chars | Preview: "${message?.message?.substring(0, 50) || "NO MESSAGE"}${(message?.message?.length || 0) > 50 ? "..." : ""}"`);
         }
         if (message?.message) {
@@ -2051,7 +2055,7 @@ class Graphlit {
                 uiAdapter.handleEvent({ type: "token", token });
             }
             uiAdapter.handleEvent({ type: "message", message: message.message });
-            if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                 console.log(`\n🎯 [Fallback] Completed token simulation (${words.length} tokens)`);
             }
         }
@@ -2073,7 +2077,7 @@ class Graphlit {
             : (() => {
                 throw new Error("OpenAI module not available");
             })());
-        if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
             console.log(`🚀 [Graphlit SDK] Routing to OpenAI streaming provider | Spec: ${specification.name} (${specification.id}) | Messages: ${messages.length} | Tools: ${tools?.length || 0}`);
         }
         await streamWithOpenAI(specification, messages, tools, openaiClient, (event) => uiAdapter.handleEvent(event), onComplete);
@@ -2095,7 +2099,7 @@ class Graphlit {
             : (() => {
                 throw new Error("Anthropic module not available");
             })());
-        if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
             console.log(`🚀 [Graphlit SDK] Routing to Anthropic streaming provider | Spec: ${specification.name} (${specification.id}) | Messages: ${messages.length} | Tools: ${tools?.length || 0} | SystemPrompt: ${systemPrompt ? "Yes" : "No"}`);
         }
         await streamWithAnthropic(specification, messages, systemPrompt, tools, anthropicClient, (event) => uiAdapter.handleEvent(event), onComplete);
@@ -2115,7 +2119,7 @@ class Graphlit {
             : (() => {
                 throw new Error("Google GenerativeAI module not available");
             })());
-        if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
             console.log(`🚀 [Graphlit SDK] Routing to Google streaming provider | Spec: ${specification.name} (${specification.id}) | Messages: ${messages.length} | Tools: ${tools?.length || 0} | SystemPrompt: ${systemPrompt ? "Yes" : "No"}`);
         }
         await streamWithGoogle(specification, messages, systemPrompt, tools, googleClient, (event) => uiAdapter.handleEvent(event), onComplete);
@@ -42,7 +42,7 @@ onEvent, onComplete) {
     if (!modelName) {
         throw new Error(`No model name found for OpenAI specification: ${specification.name}`);
     }
-    if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+    if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
         console.log(`🤖 [OpenAI] Model Config: Service=OpenAI | Model=${modelName} | Temperature=${specification.openAI?.temperature} | MaxTokens=${specification.openAI?.completionTokenLimit || "null"} | Tools=${tools?.length || 0} | Spec="${specification.name}"`);
     }
     const streamConfig = {
@@ -68,14 +68,14 @@ onEvent, onComplete) {
             },
         }));
     }
-    if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+    if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
         console.log(`⏱️ [OpenAI] Starting LLM call at: ${new Date().toISOString()}`);
     }
     const stream = await openaiClient.chat.completions.create(streamConfig);
     for await (const chunk of stream) {
         const delta = chunk.choices[0]?.delta;
         // Debug log chunk details
-        if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
             console.log(`[OpenAI] Chunk:`, JSON.stringify(chunk, null, 2));
             if (delta?.content) {
                 console.log(`[OpenAI] Content delta: "${delta.content}" (${delta.content.length} chars)`);
@@ -94,14 +94,14 @@ onEvent, onComplete) {
             // Track TTFT (first token regardless of type)
             if (firstTokenTime === 0) {
                 firstTokenTime = currentTime - startTime;
-                if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+                if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                     console.log(`\n⚡ [OpenAI] Time to First Token (TTFT): ${firstTokenTime}ms`);
                 }
             }
             // Track first meaningful content (excludes tool calls)
             if (firstMeaningfulContentTime === 0 && delta.content.trim()) {
                 firstMeaningfulContentTime = currentTime - startTime;
-                if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+                if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                     console.log(`\n🎯 [OpenAI] Time to First Meaningful Content: ${firstMeaningfulContentTime}ms`);
                 }
             }
@@ -111,7 +111,7 @@ onEvent, onComplete) {
                 interTokenDelays.push(delay);
             }
             lastEventTime = currentTime;
-            if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                 console.log(`[OpenAI] Token #${tokenCount}: "${delta.content}" | Accumulated: ${fullMessage.length} chars`);
             }
             onEvent({
@@ -141,11 +141,11 @@ onEvent, onComplete) {
                     // Track TTFT for first tool if no content yet
                     if (firstTokenTime === 0) {
                         firstTokenTime = Date.now() - startTime;
-                        if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+                        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                             console.log(`\n⚡ [OpenAI] Time to First Token (Tool Call): ${firstTokenTime}ms`);
                         }
                     }
-                    if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+                    if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                         console.log(`[OpenAI] Starting new tool call: ${toolCalls[index].id}`);
                     }
                     onEvent({
@@ -158,7 +158,7 @@ onEvent, onComplete) {
                 }
                 if (toolCallDelta.function?.name) {
                     toolCalls[index].name = toolCallDelta.function.name;
-                    if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+                    if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                         console.log(`[OpenAI] Tool name: ${toolCallDelta.function.name}`);
                     }
                 }
@@ -167,7 +167,7 @@ onEvent, onComplete) {
                     // Count tool argument tokens (rough estimate: ~4 chars per token)
                     toolArgumentTokens += Math.ceil(toolCallDelta.function.arguments.length / 4);
                     // Debug logging for partial JSON accumulation
-                    if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+                    if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                         console.log(`[OpenAI] Tool ${toolCalls[index].name} - Partial JSON chunk: "${toolCallDelta.function.arguments}"`);
                         console.log(`[OpenAI] Tool ${toolCalls[index].name} - Total accumulated: ${toolCalls[index].arguments.length} chars`);
                     }
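
As a worked example of the ~4 chars/token estimate above: a hypothetical 10-character arguments chunk adds Math.ceil(10 / 4) = 3 estimated tokens, while a 4-character chunk adds exactly 1.
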
@@ -195,7 +195,7 @@ onEvent, onComplete) {
         try {
             JSON.parse(toolCall.arguments);
             toolMetrics.successfulTools++;
-            if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                 console.log(`[OpenAI] ✅ Valid JSON for ${toolCall.name}`);
             }
         }
@@ -204,7 +204,7 @@ onEvent, onComplete) {
             console.error(`[OpenAI] ❌ Invalid JSON for ${toolCall.name}: ${e}`);
         }
         // Log the final JSON for debugging
-        if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
             console.log(`[OpenAI] Tool ${toolCall.name} complete with arguments (${toolCall.arguments.length} chars):`);
             console.log(toolCall.arguments);
         }
@@ -218,7 +218,7 @@ onEvent, onComplete) {
         });
     }
     // Final summary logging
-    if (process.env.DEBUG_GRAPHLIT_STREAMING && toolCalls.length > 0) {
+    if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING && toolCalls.length > 0) {
         console.log(`[OpenAI] Successfully processed ${toolCalls.length} tool calls`);
     }
     // Calculate final metrics including tool calling insights
@@ -237,7 +237,7 @@ onEvent, onComplete) {
             toolCount: toolCalls.length,
         });
     }
-    if (process.env.DEBUG_GRAPHLIT_METRICS) {
+    if (process.env.DEBUG_GRAPHLIT_SDK_METRICS) {
         const metricsData = {
             totalTime: `${totalTime}ms`,
             ttft: `${firstTokenTime}ms`,
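
The metrics flag is renamed the same way. A minimal sketch of enabling it (the logged fields are the ones assembled into metricsData in this file):

    process.env.DEBUG_GRAPHLIT_SDK_METRICS = "1";
    // After each streamed completion, the provider logs a metrics object
    // including totalTime, ttft, and the tool-call counters shown above.
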
@@ -326,7 +326,7 @@ onEvent, onComplete) {
     if (!modelName) {
         throw new Error(`No model name found for Anthropic specification: ${specification.name}`);
     }
-    if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+    if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
         console.log(`🤖 [Anthropic] Model Config: Service=Anthropic | Model=${modelName} | Temperature=${specification.anthropic?.temperature} | MaxTokens=${specification.anthropic?.completionTokenLimit || 8192} | SystemPrompt=${systemPrompt ? "Yes" : "No"} | Tools=${tools?.length || 0} | Spec="${specification.name}"`);
     }
     const streamConfig = {
@@ -348,14 +348,14 @@ onEvent, onComplete) {
             input_schema: tool.schema ? JSON.parse(tool.schema) : {},
         }));
     }
-    if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+    if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
         console.log(`⏱️ [Anthropic] Starting LLM call at: ${new Date().toISOString()}`);
     }
     const stream = await anthropicClient.messages.create(streamConfig);
     let activeContentBlock = false;
     for await (const chunk of stream) {
         // Debug log all chunk types
-        if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
             console.log(`[Anthropic] Received chunk type: ${chunk.type}`);
         }
         if (chunk.type === "content_block_start") {
@@ -379,7 +379,7 @@ onEvent, onComplete) {
             // Track TTFT for first tool if no content yet
             if (firstTokenTime === 0) {
                 firstTokenTime = Date.now() - startTime;
-                if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+                if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                     console.log(`\n⚡ [Anthropic] Time to First Token (Tool Call): ${firstTokenTime}ms`);
                 }
             }
@@ -400,14 +400,14 @@ onEvent, onComplete) {
             // Track TTFT (first token regardless of type)
             if (firstTokenTime === 0) {
                 firstTokenTime = currentTime - startTime;
-                if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+                if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                     console.log(`\n⚡ [Anthropic] Time to First Token (TTFT): ${firstTokenTime}ms`);
                 }
             }
             // Track first meaningful content (excludes tool calls)
             if (firstMeaningfulContentTime === 0 && chunk.delta.text.trim()) {
                 firstMeaningfulContentTime = currentTime - startTime;
-                if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+                if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                     console.log(`\n🎯 [Anthropic] Time to First Meaningful Content: ${firstMeaningfulContentTime}ms`);
                 }
             }
@@ -417,7 +417,7 @@ onEvent, onComplete) {
                 interTokenDelays.push(delay);
             }
             lastEventTime = currentTime;
-            if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                 console.log(`[Anthropic] Token #${tokenCount}: "${chunk.delta.text}" | Accumulated: ${fullMessage.length} chars`);
             }
             onEvent({
@@ -433,7 +433,7 @@ onEvent, onComplete) {
             // Count tool argument tokens (rough estimate: ~4 chars per token)
             toolArgumentTokens += Math.ceil(chunk.delta.partial_json.length / 4);
             // Debug logging for partial JSON accumulation
-            if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                 console.log(`[Anthropic] Tool ${currentTool.name} - Partial JSON chunk: "${chunk.delta.partial_json}"`);
                 console.log(`[Anthropic] Tool ${currentTool.name} - Total accumulated: ${currentTool.arguments.length} chars`);
             }
@@ -463,7 +463,7 @@ onEvent, onComplete) {
             try {
                 JSON.parse(currentTool.arguments);
                 toolMetrics.successfulTools++;
-                if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+                if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                     console.log(`[Anthropic] ✅ Valid JSON for ${currentTool.name}`);
                 }
             }
@@ -472,7 +472,7 @@ onEvent, onComplete) {
                 console.error(`[Anthropic] ❌ Invalid JSON for ${currentTool.name}: ${e}`);
             }
             // Log the final JSON for debugging
-            if (process.env.DEBUG_GRAPHLIT_STREAMING ||
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING ||
                 !isValidJSON(currentTool.arguments)) {
                 console.log(`[Anthropic] Tool ${currentTool.name} complete with arguments (${currentTool.arguments.length} chars):`);
                 console.log(currentTool.arguments);
@@ -547,7 +547,7 @@ onEvent, onComplete) {
             toolCount: validToolCalls.length,
         });
     }
-    if (process.env.DEBUG_GRAPHLIT_METRICS) {
+    if (process.env.DEBUG_GRAPHLIT_SDK_METRICS) {
         const metricsData = {
             totalTime: `${totalTime}ms`,
             ttft: `${firstTokenTime}ms`,
@@ -636,7 +636,7 @@ onEvent, onComplete) {
     if (!modelName) {
         throw new Error(`No model name found for Google specification: ${specification.name}`);
     }
-    if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+    if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
         console.log(`🤖 [Google] Model Config: Service=Google | Model=${modelName} | Temperature=${specification.google?.temperature} | MaxTokens=${specification.google?.completionTokenLimit || "null"} | SystemPrompt=${systemPrompt ? "Yes" : "No"} | Tools=${tools?.length || 0} | Spec="${specification.name}"`);
     }
     const streamConfig = {
@@ -689,7 +689,7 @@ onEvent, onComplete) {
     for await (const chunk of result.stream) {
         const text = chunk.text();
         // Debug log chunk details
-        if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
             console.log(`[Google] Raw chunk:`, JSON.stringify(chunk, null, 2));
             if (text) {
                 console.log(`[Google] Text delta: "${text}" (${text.length} chars)`);
@@ -702,14 +702,14 @@ onEvent, onComplete) {
             // Track TTFT (first token regardless of type)
             if (firstTokenTime === 0) {
                 firstTokenTime = currentTime - startTime;
-                if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+                if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                     console.log(`\n⚡ [Google] Time to First Token (TTFT): ${firstTokenTime}ms`);
                 }
             }
             // Track first meaningful content
             if (firstMeaningfulContentTime === 0 && text.trim()) {
                 firstMeaningfulContentTime = currentTime - startTime;
-                if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+                if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                     console.log(`\n🎯 [Google] Time to First Meaningful Content: ${firstMeaningfulContentTime}ms`);
                 }
             }
@@ -725,7 +725,7 @@ onEvent, onComplete) {
         if (candidate?.content?.parts) {
             for (const part of candidate.content.parts) {
                 if (part.functionCall) {
-                    if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+                    if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                         console.log(`[Google] Received function call: ${part.functionCall.name}`);
                         console.log(`[Google] Function args:`, JSON.stringify(part.functionCall.args || {}));
                     }
@@ -748,7 +748,7 @@ onEvent, onComplete) {
                     // Track TTFT for first tool if no content yet
                     if (firstTokenTime === 0) {
                         firstTokenTime = Date.now() - startTime;
-                        if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+                        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                             console.log(`\n⚡ [Google] Time to First Token (Tool Call): ${firstTokenTime}ms`);
                         }
                     }
@@ -775,7 +775,7 @@ onEvent, onComplete) {
         try {
             JSON.parse(toolCall.arguments);
             toolMetrics.successfulTools++;
-            if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                 console.log(`[Google] ✅ Valid JSON for ${toolCall.name}`);
             }
         }
@@ -784,7 +784,7 @@ onEvent, onComplete) {
             console.error(`[Google] ❌ Invalid JSON for ${toolCall.name}: ${e}`);
         }
         // Log completion
-        if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
             console.log(`[Google] Tool ${toolCall.name} complete with arguments (${toolCall.arguments.length} chars):`);
             console.log(toolCall.arguments);
         }
@@ -802,7 +802,7 @@ onEvent, onComplete) {
     }
     catch (error) {
         // Silently ignore parsing errors
-        if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
             console.error(`[Google] Error parsing chunk for function calls:`, error);
         }
     }
@@ -811,7 +811,8 @@ onEvent, onComplete) {
     try {
         const response = await result.response;
         const candidate = response.candidates?.[0];
-        if (process.env.DEBUG_GRAPHLIT_STREAMING && candidate?.content?.parts) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING &&
+            candidate?.content?.parts) {
             console.log(`[Google] Processing final response with ${candidate.content.parts.length} parts`);
         }
         if (candidate?.content?.parts) {
@@ -821,7 +822,7 @@ onEvent, onComplete) {
                 const finalText = part.text;
                 // Only add if it's not already included in fullMessage
                 if (!fullMessage.endsWith(finalText)) {
-                    if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+                    if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                         console.log(`[Google] Adding final text: ${finalText.length} chars`);
                     }
                     fullMessage += finalText;
@@ -834,7 +835,7 @@ onEvent, onComplete) {
                 // Check for function calls
                 if (part.functionCall &&
                     !toolCalls.some((tc) => tc.name === part.functionCall.name)) {
-                    if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+                    if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
                         console.log(`[Google] Found function call in final response: ${part.functionCall.name}`);
                     }
                     const toolCall = {
@@ -865,12 +866,12 @@ onEvent, onComplete) {
     }
     catch (error) {
         // Log parsing errors when debugging
-        if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
             console.error(`[Google] Error processing final response:`, error);
         }
     }
     // Final summary logging
-    if (process.env.DEBUG_GRAPHLIT_STREAMING && toolCalls.length > 0) {
+    if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING && toolCalls.length > 0) {
         console.log(`[Google] Successfully processed ${toolCalls.length} tool calls`);
     }
     // Calculate final metrics including tool calling insights
@@ -889,7 +890,7 @@ onEvent, onComplete) {
             toolCount: toolCalls.length,
         });
     }
-    if (process.env.DEBUG_GRAPHLIT_METRICS) {
+    if (process.env.DEBUG_GRAPHLIT_SDK_METRICS) {
         const metricsData = {
             totalTime: `${totalTime}ms`,
             ttft: `${firstTokenTime}ms`,
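
Taken together, all four debug flags gain the SDK infix in this release, and the old DEBUG_GRAPHLIT_* names are no longer referenced anywhere in this diff. A minimal sketch of opting in to every renamed flag:

    process.env.DEBUG_GRAPHLIT_SDK_INITIALIZATION = "1";     // optional provider SDK loading
    process.env.DEBUG_GRAPHLIT_SDK_STREAMING = "1";          // chunk, routing, and TTFT logging
    process.env.DEBUG_GRAPHLIT_SDK_STREAMING_MESSAGES = "1"; // full message payloads per call
    process.env.DEBUG_GRAPHLIT_SDK_METRICS = "1";            // end-of-stream timing and tool metrics
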
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "graphlit-client",
-  "version": "1.0.20250612005",
+  "version": "1.0.20250612007",
   "description": "Graphlit API Client for TypeScript",
   "type": "module",
   "main": "./dist/client.js",