graphlit-client 1.0.20250611016 → 1.0.20250611017

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -8,17 +8,20 @@ The official TypeScript/JavaScript SDK for the [Graphlit Platform](https://www.g
  ## šŸš€ What is Graphlit?
 
  Graphlit is a cloud platform that handles the complex parts of building AI applications:
+
  - **Ingest any content** - PDFs, websites, audio, video, and more
  - **Chat with your data** - Using RAG (Retrieval-Augmented Generation)
  - **Extract insights** - Summaries, entities, and metadata
  - **Build knowledge graphs** - Automatically connect related information
 
  ## ✨ What's New in v1.1.0
+
  - **Real-time streaming** - Watch AI responses appear word-by-word
  - **Tool calling** - Let AI execute functions and retrieve data
  - **Better performance** - Native integration with OpenAI, Anthropic, and Google
 
  ## šŸ“‹ Table of Contents
+
  - [Quick Start](#quick-start)
  - [Basic Examples](#basic-examples)
  - [Common Use Cases](#common-use-cases)
@@ -35,7 +38,7 @@ npm install graphlit-client
 
  # Set your credentials (get free account at https://portal.graphlit.dev)
  export GRAPHLIT_ORGANIZATION_ID=your_org_id
- export GRAPHLIT_ENVIRONMENT_ID=your_env_id
+ export GRAPHLIT_ENVIRONMENT_ID=your_env_id
  export GRAPHLIT_JWT_SECRET=your_secret
  ```
 
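With the credentials above exported, constructing the client is a one-liner. A minimal sketch (that the no-argument constructor picks up the `GRAPHLIT_*` environment variables is an assumption based on the setup above; the explicit-argument form follows the `new Graphlit(organizationId?, environmentId?, jwtSecret?)` signature shown in the API reference section of this README):

```typescript
import { Graphlit } from "graphlit-client";

// Assumes GRAPHLIT_ORGANIZATION_ID, GRAPHLIT_ENVIRONMENT_ID, and
// GRAPHLIT_JWT_SECRET are exported as shown above.
const client = new Graphlit();

// Or pass credentials explicitly:
// const client = new Graphlit(orgId, envId, jwtSecret);
```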
@@ -50,8 +53,8 @@ const spec = await client.createSpecification({
  type: Types.SpecificationTypes.Completion,
  serviceType: Types.ModelServiceTypes.OpenAi,
  openAI: {
- model: Types.OpenAiModels.Gpt4O_128K
- }
+ model: Types.OpenAiModels.Gpt4O_128K,
+ },
  });
 
  // Start chatting with AI
@@ -63,7 +66,7 @@ await client.streamAgent(
  }
  },
  undefined, // conversationId (optional)
- { id: spec.createSpecification.id } // specification
+ { id: spec.createSpecification.id }, // specification
  );
  ```
 
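The Quick Start streams tokens as they arrive. When streaming is not needed, the same conversation can go through `promptAgent`, which resolves once with the complete answer; a hedged sketch reusing the `spec` from the Quick Start (the `result.message` field follows the `promptAgent` example later in this README):

```typescript
// Non-streaming variant of the Quick Start chat (sketch).
const result = await client.promptAgent(
  "Tell me a joke",
  undefined, // conversationId (optional)
  { id: spec.createSpecification.id }, // specification
);
console.log(result.message);
```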
@@ -81,7 +84,7 @@ Install the LLM SDK for streaming responses:
  # For OpenAI streaming
  npm install openai
 
- # For Anthropic streaming
+ # For Anthropic streaming
  npm install @anthropic-ai/sdk
 
  # For Google streaming
@@ -121,8 +124,8 @@ const spec = await client.createSpecification({
  serviceType: Types.ModelServiceTypes.OpenAi,
  openAI: {
  model: Types.OpenAiModels.Gpt4O_128K,
- temperature: 0.7
- }
+ temperature: 0.7,
+ },
  });
 
  // Chat with streaming
@@ -135,7 +138,7 @@ await client.streamAgent(
  }
  },
  undefined, // conversationId
- { id: spec.createSpecification.id } // specification
+ { id: spec.createSpecification.id }, // specification
  );
  ```
 
@@ -154,8 +157,8 @@ const spec = await client.createSpecification({
  type: Types.SpecificationTypes.Completion,
  serviceType: Types.ModelServiceTypes.OpenAi,
  openAI: {
- model: Types.OpenAiModels.Gpt4O_128K
- }
+ model: Types.OpenAiModels.Gpt4O_128K,
+ },
  });
 
  // Upload a PDF synchronously to ensure it's ready
@@ -163,17 +166,17 @@ const content = await client.ingestUri(
  "https://arxiv.org/pdf/1706.03762.pdf", // Attention Is All You Need paper
  "AI Research Paper", // name
  undefined, // id
- true // isSynchronous - waits for processing
+ true, // isSynchronous - waits for processing
  );
 
  console.log(`āœ… Uploaded: ${content.ingestUri.id}`);
 
  // Wait a moment for content to be fully indexed
- await new Promise(resolve => setTimeout(resolve, 5000));
+ await new Promise((resolve) => setTimeout(resolve, 5000));
 
  // Create a conversation that filters to this specific content
  const conversation = await client.createConversation({
- filter: { contents: [{ id: content.ingestUri.id }] }
+ filter: { contents: [{ id: content.ingestUri.id }] },
  });
 
  // Ask questions about the PDF
@@ -185,7 +188,7 @@ await client.streamAgent(
  }
  },
  conversation.createConversation.id, // conversationId with content filter
- { id: spec.createSpecification.id } // specification
+ { id: spec.createSpecification.id }, // specification
  );
  ```
 
@@ -199,22 +202,22 @@ const webpage = await client.ingestUri(
  "https://en.wikipedia.org/wiki/Artificial_intelligence", // uri
  "AI Wikipedia Page", // name
  undefined, // id
- true // isSynchronous
+ true, // isSynchronous
  );
 
  // Wait for content to be indexed
- await new Promise(resolve => setTimeout(resolve, 5000));
+ await new Promise((resolve) => setTimeout(resolve, 5000));
 
  // Create a conversation filtered to this content
  const conversation = await client.createConversation({
- filter: { contents: [{ id: webpage.ingestUri.id }] }
+ filter: { contents: [{ id: webpage.ingestUri.id }] },
  });
 
  // Ask about the specific content
  const response = await client.promptAgent(
  "Summarize the key points about AI from this Wikipedia page",
  conversation.createConversation.id, // conversationId with filter
- { id: spec.createSpecification.id } // specification (create one as shown above)
+ { id: spec.createSpecification.id }, // specification (create one as shown above)
  );
 
  console.log(response.message);
@@ -236,22 +239,22 @@ const weatherTool: Types.ToolDefinitionInput = {
  schema: JSON.stringify({
  type: "object",
  properties: {
- city: { type: "string", description: "City name" }
+ city: { type: "string", description: "City name" },
  },
- required: ["city"]
- })
+ required: ["city"],
+ }),
  };
 
  // Tool implementation
  const toolHandlers = {
  get_weather: async (args: { city: string }) => {
  // Call your weather API here
- return {
+ return {
  city: args.city,
- temperature: 72,
- condition: "sunny"
+ temperature: 72,
+ condition: "sunny",
  };
- }
+ },
  };
 
  // Create a specification for tool calling
@@ -260,8 +263,8 @@ const spec = await client.createSpecification({
  type: Types.SpecificationTypes.Completion,
  serviceType: Types.ModelServiceTypes.OpenAi,
  openAI: {
- model: Types.OpenAiModels.Gpt4O_128K
- }
+ model: Types.OpenAiModels.Gpt4O_128K,
+ },
  });
 
  // Chat with tools
@@ -277,7 +280,7 @@ await client.streamAgent(
  undefined, // conversationId
  { id: spec.createSpecification.id }, // specification
  [weatherTool], // tools
- toolHandlers // handlers
+ toolHandlers, // handlers
  );
  ```
 
@@ -308,40 +311,40 @@ class KnowledgeAssistant {
  serviceType: Types.ModelServiceTypes.OpenAi,
  openAI: {
  model: Types.OpenAiModels.Gpt4O_128K,
- temperature: 0.7
- }
+ temperature: 0.7,
+ },
  });
  this.specificationId = spec.createSpecification?.id;
  }
 
  async uploadDocuments(urls: string[]) {
  console.log("šŸ“š Uploading documents...");
-
+
  for (const url of urls) {
  const content = await this.client.ingestUri(
  url, // uri
- url.split('/').pop() || "Document", // name
+ url.split("/").pop() || "Document", // name
  undefined, // id
- true // isSynchronous - wait for processing
+ true, // isSynchronous - wait for processing
  );
  this.contentIds.push(content.ingestUri.id);
  }
-
+
  console.log("āœ… Documents uploaded!");
-
+
  // Wait for content to be indexed
- await new Promise(resolve => setTimeout(resolve, 5000));
+ await new Promise((resolve) => setTimeout(resolve, 5000));
  }
 
  async ask(question: string) {
  // Create conversation with content filter if not exists
  if (!this.conversationId && this.contentIds.length > 0) {
  const conversation = await this.client.createConversation({
- filter: { contents: this.contentIds.map(id => ({ id })) }
+ filter: { contents: this.contentIds.map((id) => ({ id })) },
  });
  this.conversationId = conversation.createConversation?.id;
  }
-
+
  await this.client.streamAgent(
  question,
  (event) => {
@@ -352,7 +355,7 @@ class KnowledgeAssistant {
  }
  },
  this.conversationId, // Maintains conversation context
- { id: this.specificationId! } // specification
+ { id: this.specificationId! }, // specification
  );
  }
  }
@@ -364,7 +367,7 @@ await assistant.initialize();
 
  // Upload your documents
  await assistant.uploadDocuments([
  "https://arxiv.org/pdf/2103.15348.pdf",
- "https://arxiv.org/pdf/1706.03762.pdf"
+ "https://arxiv.org/pdf/1706.03762.pdf",
  ]);
  // Ask questions
@@ -382,18 +385,18 @@ const document = await client.ingestUri(
  "https://example.com/document.pdf", // uri
  "Document #12345", // name
  undefined, // id
- true // isSynchronous
+ true, // isSynchronous
  );
 
  // Wait for content to be indexed
- await new Promise(resolve => setTimeout(resolve, 5000));
+ await new Promise((resolve) => setTimeout(resolve, 5000));
 
  // Extract specific data
  const extraction = await client.extractContents(
  "Extract the key information from this document",
  undefined, // tools
  undefined, // specification
- { contents: [{ id: document.ingestUri.id }] } // filter
+ { contents: [{ id: document.ingestUri.id }] }, // filter
  );
 
  console.log("Extracted data:", extraction.extractContents);
@@ -410,20 +413,22 @@ const ids: string[] = [];
  for (const url of documentUrls) {
  const content = await client.ingestUri(
  url, // uri
- url.split('/').pop() || "Document", // name
+ url.split("/").pop() || "Document", // name
  undefined, // id
- true // isSynchronous
+ true, // isSynchronous
  );
  ids.push(content.ingestUri.id);
  }
 
  // Generate a summary across all documents
  const summary = await client.summarizeContents(
- [{
- type: Types.SummarizationTypes.Custom,
- prompt: "Create an executive summary of these documents"
- }], // summarizations
- { contents: ids.map(id => ({ id })) } // filter
+ [
+ {
+ type: Types.SummarizationTypes.Custom,
+ prompt: "Create an executive summary of these documents",
+ },
+ ], // summarizations
+ { contents: ids.map((id) => ({ id })) }, // filter
  );
 
  console.log("Summary:", summary.summarizeContents);
@@ -437,13 +442,13 @@ const content = await client.ingestUri(
  "https://example.com/large-document.pdf", // uri
  undefined, // name
  undefined, // id
- true // isSynchronous
+ true, // isSynchronous
  );
  console.log("āœ… Content ready!");
 
  // Option 2: Asynchronous processing (for large files)
  const content = await client.ingestUri(
- "https://example.com/very-large-video.mp4" // uri
+ "https://example.com/very-large-video.mp4", // uri
  // isSynchronous defaults to false
  );
 
@@ -452,15 +457,116 @@ let isReady = false;
  while (!isReady) {
  const status = await client.isContentDone(content.ingestUri.id);
  isReady = status.isContentDone?.result || false;
-
+
  if (!isReady) {
  console.log("ā³ Still processing...");
- await new Promise(resolve => setTimeout(resolve, 2000));
+ await new Promise((resolve) => setTimeout(resolve, 2000));
  }
  }
  console.log("āœ… Content ready!");
  ```
 
+ ## Advanced Agent Features
+
+ ### Using Content Filters
+
+ Control what content the agent can access during conversations:
+
+ ```typescript
+ // Example 1: Chat with specific documents only
+ const result = await client.promptAgent(
+ "What are the main points in these documents?",
+ undefined, // conversationId - will create new
+ { id: specificationId },
+ undefined, // tools
+ undefined, // toolHandlers
+ undefined, // options
+ undefined, // mimeType
+ undefined, // data
+ {
+ // Only allow retrieval from specific content
+ contents: [{ id: "content-id-1" }, { id: "content-id-2" }],
+ },
+ );
+
+ // Example 2: Streaming with content filter
+ await client.streamAgent(
+ "Explain the technical details",
+ (event) => {
+ if (event.type === "message_update") {
+ process.stdout.write(event.message.message);
+ }
+ },
+ undefined, // conversationId
+ { id: specificationId },
+ undefined, // tools
+ undefined, // toolHandlers
+ undefined, // options
+ undefined, // mimeType
+ undefined, // data
+ {
+ // Filter by collection
+ collections: [{ id: "technical-docs-collection" }],
+ },
+ );
+ ```
+
+ ### Using Augmented Filters
+
+ Force specific content into the LLM context without retrieval:
+
+ ```typescript
+ // Example: Chat with a specific file always in context
+ const fileContent = await client.getContent("file-content-id");
+
+ await client.streamAgent(
+ "What patterns do you see in this code?",
+ (event) => {
+ if (event.type === "message_update") {
+ process.stdout.write(event.message.message);
+ }
+ },
+ undefined, // conversationId
+ { id: specificationId },
+ undefined, // tools
+ undefined, // toolHandlers
+ undefined, // options
+ undefined, // mimeType
+ undefined, // data
+ undefined, // contentFilter
+ {
+ // Force this content into context
+ contents: [{ id: fileContent.content.id }],
+ },
+ );
+ ```
+
+ ### Combining Filters
+
+ Use both filters for precise control:
+
+ ```typescript
+ // Chat about specific code with documentation available
+ await client.promptAgent(
+ "How does this code implement the algorithm described in the docs?",
+ undefined,
+ { id: specificationId },
+ undefined,
+ undefined,
+ undefined,
+ undefined,
+ undefined,
+ {
+ // Can retrieve from documentation
+ collections: [{ id: "algorithm-docs" }],
+ },
+ {
+ // Always include the specific code file
+ contents: [{ id: "implementation-file-id" }],
+ },
+ );
+ ```
+
  ## Advanced Workflows
 
  ### Creating Workflows for Content Processing
@@ -478,29 +584,31 @@ const summarizationSpec = await client.createSpecification({
  type: Types.SpecificationTypes.Summarization,
  serviceType: Types.ModelServiceTypes.OpenAi,
  openAI: {
- model: Types.OpenAiModels.Gpt4O_128K
- }
+ model: Types.OpenAiModels.Gpt4O_128K,
+ },
  });
 
  // Create a workflow that summarizes all content
  const workflow = await client.createWorkflow({
  name: "Document Intelligence",
  preparation: {
- summarizations: [{
- type: Types.SummarizationTypes.Summary,
- specification: { id: summarizationSpec.createSpecification.id }
- }]
- }
+ summarizations: [
+ {
+ type: Types.SummarizationTypes.Summary,
+ specification: { id: summarizationSpec.createSpecification.id },
+ },
+ ],
+ },
  });
 
  // Set workflow as default for project
  await client.updateProject({
- workflow: { id: workflow.createWorkflow.id }
+ workflow: { id: workflow.createWorkflow.id },
  });
 
  // Now all content will be automatically summarized
  const content = await client.ingestUri(
- "https://example.com/report.pdf" // uri
+ "https://example.com/report.pdf", // uri
  );
  ```
 
@@ -520,8 +628,8 @@ const conversationSpec = await client.createSpecification({
  openAI: {
  model: Types.OpenAiModels.Gpt4O_128K,
  temperature: 0.7,
- completionTokenLimit: 2000
- }
+ completionTokenLimit: 2000,
+ },
  });
 
  // Use the specification in conversations
@@ -533,7 +641,7 @@ await client.streamAgent(
  }
  },
  undefined,
- { id: conversationSpec.createSpecification.id }
+ { id: conversationSpec.createSpecification.id },
  );
  ```
 
@@ -546,6 +654,7 @@ const client = new Graphlit(organizationId?, environmentId?, jwtSecret?);
  ```
 
  #### Content Operations
+
  - `ingestUri(uri, name?, id?, isSynchronous?, ...)` - Ingest content from URL
  - `ingestText(text, name?, textType?, ...)` - Ingest text content directly
  - `queryContents(filter?)` - Search and query content
@@ -555,18 +664,21 @@ const client = new Graphlit(organizationId?, environmentId?, jwtSecret?);
  - `summarizeContents(summarizations, filter?)` - Summarize content
  - `isContentDone(id)` - Check if content processing is complete
 
- #### Conversation Operations
+ #### Conversation Operations
+
  - `createConversation(input?)` - Create a new conversation
  - `streamAgent(prompt, handler, ...)` - Stream AI responses
  - `promptAgent(prompt, ...)` - Get AI response without streaming
  - `deleteConversation(id)` - Delete conversation
 
  #### Specification Operations
+
  - `createSpecification(input)` - Create AI model configuration
  - `querySpecifications(filter?)` - List specifications
  - `deleteSpecification(id)` - Delete specification
 
  #### Workflow Operations
+
  - `createWorkflow(input)` - Create content processing workflow
  - `queryWorkflows(filter?)` - List workflows
  - `updateProject(input)` - Update project settings
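A short sketch exercising a few of the operations listed in this API reference. Response field names such as `ingestText.id` are assumptions mirroring the `ingestUri.id` pattern used throughout this README, and the exact filter shape accepted by `queryContents` is likewise assumed:

```typescript
// Ingest raw text, then poll its processing status (shapes assumed).
const note = await client.ingestText(
  "Graphlit adds real-time streaming in v1.1.0", // text
  "Release note", // name
);
const status = await client.isContentDone(note.ingestText.id); // field name assumed
console.log("Processed:", status.isContentDone?.result);

// Search previously ingested content (filter shape assumed).
const results = await client.queryContents({ search: "streaming" });
```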
@@ -574,12 +686,12 @@ const client = new Graphlit(organizationId?, environmentId?, jwtSecret?);
  ### Event Types
 
  ```typescript
- type AgentStreamEvent =
+ type AgentStreamEvent =
  | { type: "conversation_started"; conversationId: string }
  | { type: "message_update"; message: { message: string } }
  | { type: "tool_update"; toolCall: any; status: string }
  | { type: "conversation_completed"; message: { message: string } }
- | { type: "error"; error: { message: string; recoverable: boolean } }
+ | { type: "error"; error: { message: string; recoverable: boolean } };
  ```
 
  ## Testing & Examples
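The `AgentStreamEvent` union above can be handled exhaustively with a `switch`; a sketch, assuming the type is exported by the package:

```typescript
import type { AgentStreamEvent } from "graphlit-client"; // export location assumed

const onEvent = (event: AgentStreamEvent) => {
  switch (event.type) {
    case "conversation_started":
      console.log("Conversation:", event.conversationId);
      break;
    case "message_update":
      process.stdout.write(event.message.message); // incremental text
      break;
    case "tool_update":
      console.log("Tool status:", event.status);
      break;
    case "conversation_completed":
      console.log("\nFinal:", event.message.message);
      break;
    case "error":
      console.error(event.error.message, "recoverable:", event.error.recoverable);
      break;
  }
};
```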
@@ -613,4 +725,4 @@ npm test test/readme-simple.test.ts
 
  ## License
 
- MIT License - see LICENSE file for details.
+ MIT License - see LICENSE file for details.
package/dist/client.d.ts CHANGED
@@ -319,7 +319,7 @@ declare class Graphlit {
  * @param specification - Optional specification to check compatibility
  * @returns true if streaming is available, false otherwise
  */
- supportsStreaming(specification?: Types.Specification | Types.EntityReferenceInput): boolean;
+ supportsStreaming(specification?: Types.Specification): boolean;
  /**
  * Execute an agent with non-streaming response
  * @param prompt - The user prompt
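This signature change narrows `supportsStreaming` to a full `Types.Specification`; passing a bare `{ id }` reference no longer type-checks, because the implementation now reads `specification.serviceType` (see the client.js hunks below). A hedged usage sketch, where `fullSpec` stands in for a complete specification object obtained by your own code:

```typescript
// Sketch: prefer streaming when a provider SDK is installed, else fall back.
// `fullSpec` is assumed to be a full Types.Specification, not just { id }.
if (client.supportsStreaming(fullSpec)) {
  await client.streamAgent(
    "Hello!",
    (event) => { /* handle AgentStreamEvent */ },
    undefined, // conversationId
    { id: fullSpec.id }, // specification reference
  );
} else {
  const result = await client.promptAgent("Hello!", undefined, { id: fullSpec.id });
  console.log(result.message);
}
```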
@@ -328,10 +328,15 @@ declare class Graphlit {
  * @param tools - Optional tool definitions
  * @param toolHandlers - Optional tool handler functions
  * @param options - Agent options
+ * @param mimeType - Optional MIME type for multimodal input
+ * @param data - Optional base64 encoded data for multimodal input
+ * @param contentFilter - Optional filter for content retrieval during conversation
+ * @param augmentedFilter - Optional filter to force specific content into LLM context
+ * @param correlationId - Optional correlation ID for tracking
  * @returns Complete agent result with message and tool calls
  */
  promptAgent(prompt: string, conversationId?: string, specification?: Types.EntityReferenceInput, tools?: Types.ToolDefinitionInput[], toolHandlers?: Record<string, ToolHandler>, options?: AgentOptions, mimeType?: string, data?: string, // base64 encoded
- correlationId?: string): Promise<AgentResult>;
+ contentFilter?: Types.ContentCriteriaInput, augmentedFilter?: Types.ContentCriteriaInput, correlationId?: string): Promise<AgentResult>;
  /**
  * Execute an agent with streaming response
  * @param prompt - The user prompt
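The two new filter parameters sit between `data` and `correlationId`, so earlier optional slots must be passed explicitly as `undefined` in positional calls. A compact sketch of the extended signature (all IDs are placeholders):

```typescript
// Sketch: promptAgent with the new contentFilter and augmentedFilter slots.
const result = await client.promptAgent(
  "Summarize the attached material",
  undefined, // conversationId
  { id: "spec-id" }, // specification
  undefined, // tools
  undefined, // toolHandlers
  undefined, // options
  undefined, // mimeType
  undefined, // data (base64)
  { collections: [{ id: "docs-collection" }] }, // contentFilter: retrieval scope
  { contents: [{ id: "pinned-content-id" }] }, // augmentedFilter: forced context
  "trace-123", // correlationId
);
```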
@@ -341,10 +346,15 @@ declare class Graphlit {
  * @param tools - Optional tool definitions
  * @param toolHandlers - Optional tool handler functions
  * @param options - Stream agent options
+ * @param mimeType - Optional MIME type for multimodal input
+ * @param data - Optional base64 encoded data for multimodal input
+ * @param contentFilter - Optional filter for content retrieval during conversation
+ * @param augmentedFilter - Optional filter to force specific content into LLM context
+ * @param correlationId - Optional correlation ID for tracking
  * @throws Error if streaming is not supported
  */
  streamAgent(prompt: string, onEvent: (event: AgentStreamEvent) => void, conversationId?: string, specification?: Types.EntityReferenceInput, tools?: Types.ToolDefinitionInput[], toolHandlers?: Record<string, ToolHandler>, options?: StreamAgentOptions, mimeType?: string, data?: string, // base64 encoded
- correlationId?: string): Promise<void>;
+ contentFilter?: Types.ContentCriteriaInput, augmentedFilter?: Types.ContentCriteriaInput, correlationId?: string): Promise<void>;
  /**
  * Execute the streaming agent workflow with tool calling loop
  */
package/dist/client.js CHANGED
@@ -24,7 +24,8 @@ catch (e) {
  }
  try {
  Anthropic =
- optionalRequire("@anthropic-ai/sdk").default || optionalRequire("@anthropic-ai/sdk");
+ optionalRequire("@anthropic-ai/sdk").default ||
+ optionalRequire("@anthropic-ai/sdk");
  }
  catch (e) {
  // Anthropic SDK not installed
@@ -1421,8 +1422,8 @@ class Graphlit {
  */
  supportsStreaming(specification) {
  // If we have a full specification, check its service type
- if (specification && "modelService" in specification) {
- const serviceType = specification.modelService;
+ if (specification) {
+ const serviceType = specification.serviceType;
  switch (serviceType) {
  case Types.ModelServiceTypes.OpenAi:
  return typeof OpenAI !== "undefined";
@@ -1434,11 +1435,10 @@ class Graphlit {
  return false;
  }
  }
- // If we only have a reference or no specification, check if any client is available
+ // If we have no specification, check if OpenAI client is available
+ // We default to OpenAI GPT-4o if no specification provider.
  const hasOpenAI = typeof OpenAI !== "undefined";
- const hasAnthropic = typeof Anthropic !== "undefined";
- const hasGoogle = typeof GoogleGenerativeAI !== "undefined";
- return hasOpenAI || hasAnthropic || hasGoogle;
+ return hasOpenAI;
  }
  /**
  * Execute an agent with non-streaming response
@@ -1448,10 +1448,15 @@ class Graphlit {
  * @param tools - Optional tool definitions
  * @param toolHandlers - Optional tool handler functions
  * @param options - Agent options
+ * @param mimeType - Optional MIME type for multimodal input
+ * @param data - Optional base64 encoded data for multimodal input
+ * @param contentFilter - Optional filter for content retrieval during conversation
+ * @param augmentedFilter - Optional filter to force specific content into LLM context
+ * @param correlationId - Optional correlation ID for tracking
  * @returns Complete agent result with message and tool calls
  */
  async promptAgent(prompt, conversationId, specification, tools, toolHandlers, options, mimeType, data, // base64 encoded
- correlationId) {
+ contentFilter, augmentedFilter, correlationId) {
  const startTime = Date.now();
  const maxRounds = options?.maxToolRounds || DEFAULT_MAX_TOOL_ROUNDS;
  const timeout = options?.timeout || 300000; // 5 minutes default
@@ -1466,6 +1471,8 @@ class Graphlit {
  name: `Agent conversation`,
  specification: specification,
  tools: tools,
+ filter: contentFilter,
+ augmentedFilter: augmentedFilter,
  }, correlationId);
  actualConversationId = createResponse.createConversation?.id;
  if (!actualConversationId) {
@@ -1535,10 +1542,15 @@ class Graphlit {
  * @param tools - Optional tool definitions
  * @param toolHandlers - Optional tool handler functions
  * @param options - Stream agent options
+ * @param mimeType - Optional MIME type for multimodal input
+ * @param data - Optional base64 encoded data for multimodal input
+ * @param contentFilter - Optional filter for content retrieval during conversation
+ * @param augmentedFilter - Optional filter to force specific content into LLM context
+ * @param correlationId - Optional correlation ID for tracking
  * @throws Error if streaming is not supported
  */
  async streamAgent(prompt, onEvent, conversationId, specification, tools, toolHandlers, options, mimeType, data, // base64 encoded
- correlationId) {
+ contentFilter, augmentedFilter, correlationId) {
  const maxRounds = options?.maxToolRounds || DEFAULT_MAX_TOOL_ROUNDS;
  const abortSignal = options?.abortSignal;
  let uiAdapter;
@@ -1553,7 +1565,7 @@ class Graphlit {
  .specification
  : undefined;
  // Check streaming support
- if (!this.supportsStreaming(fullSpec)) {
+ if (fullSpec && !this.supportsStreaming(fullSpec)) {
  throw new Error("Streaming is not supported for this specification. " +
  "Use promptAgent() instead or configure a streaming client.");
  }
@@ -1564,6 +1576,8 @@ class Graphlit {
  name: `Streaming agent conversation`,
  specification: specification,
  tools: tools,
+ filter: contentFilter,
+ augmentedFilter: augmentedFilter,
  }, correlationId);
  actualConversationId = createResponse.createConversation?.id;
  if (!actualConversationId) {
@@ -1621,10 +1635,15 @@ class Graphlit {
  });
  // Format conversation once at the beginning
  const formatResponse = await this.formatConversation(prompt, conversationId, { id: specification.id }, tools, true, correlationId);
- const formattedPrompt = formatResponse.formatConversation?.message?.message;
- if (!formattedPrompt) {
+ const formattedMessage = formatResponse.formatConversation?.message;
+ if (!formattedMessage?.message) {
  throw new Error("Failed to format conversation");
  }
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log("\nšŸ“‹ [formatConversation] Response:");
+ console.log("Formatted message:", formattedMessage.message);
+ console.log("Full formatConversation response:", JSON.stringify(formatResponse.formatConversation, null, 2));
+ }
  // Build message array with conversation history
  const messages = [];
  // Add system prompt if specified
@@ -1636,23 +1655,19 @@ class Graphlit {
  timestamp: new Date().toISOString(),
  });
  }
- // Get conversation history to maintain context
- const conversationResponse = await this.getConversation(conversationId);
- const conversation = conversationResponse.conversation;
- if (conversation?.messages && conversation.messages.length > 0) {
- // Add all previous messages (formatConversation already added the current prompt)
- const previousMessages = conversation.messages;
- messages.push(...previousMessages);
- }
- else {
- // If no history, just add the current user message
+ // Use the formatted message from formatConversation which already includes
+ // all context, RAG results, and conversation history
+ if (formattedMessage) {
  messages.push({
  __typename: "ConversationMessage",
- role: Types.ConversationRoleTypes.User,
- message: formattedPrompt,
- timestamp: new Date().toISOString(),
+ role: formattedMessage.role || Types.ConversationRoleTypes.User,
+ message: formattedMessage.message,
+ timestamp: formattedMessage.timestamp || new Date().toISOString(),
  });
  }
+ else {
+ throw new Error("No formatted message returned from formatConversation");
+ }
  const serviceType = getServiceType(specification);
  // Handle tool calling loop locally
  while (currentRound < maxRounds) {
@@ -1664,6 +1679,11 @@ class Graphlit {
  // Stream with appropriate provider
  if (serviceType === Types.ModelServiceTypes.OpenAi && OpenAI) {
  const openaiMessages = formatMessagesForOpenAI(messages);
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log("\nšŸ” [OpenAI] Formatted messages being sent to LLM:");
+ console.log(JSON.stringify(openaiMessages, null, 2));
+ console.log("Total messages:", openaiMessages.length);
+ }
  await this.streamWithOpenAI(specification, openaiMessages, tools, uiAdapter, (message, calls) => {
  roundMessage = message;
  toolCalls = calls;
@@ -1672,6 +1692,12 @@ class Graphlit {
  else if (serviceType === Types.ModelServiceTypes.Anthropic &&
  Anthropic) {
  const { system, messages: anthropicMessages } = formatMessagesForAnthropic(messages);
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log("\nšŸ” [Anthropic] Formatted messages being sent to LLM:");
+ console.log("System prompt:", system);
+ console.log(JSON.stringify(anthropicMessages, null, 2));
+ console.log("Total messages:", anthropicMessages.length);
+ }
  await this.streamWithAnthropic(specification, anthropicMessages, system, tools, uiAdapter, (message, calls) => {
  roundMessage = message;
  toolCalls = calls;
@@ -1680,6 +1706,11 @@ class Graphlit {
  else if (serviceType === Types.ModelServiceTypes.Google &&
  GoogleGenerativeAI) {
  const googleMessages = formatMessagesForGoogle(messages);
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log("\nšŸ” [Google] Formatted messages being sent to LLM:");
+ console.log(JSON.stringify(googleMessages, null, 2));
+ console.log("Total messages:", googleMessages.length);
+ }
  // Google doesn't use system prompts separately, they're incorporated into messages
  await this.streamWithGoogle(specification, googleMessages, undefined, // systemPrompt - Google handles this differently
  tools, uiAdapter, (message, calls) => {
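All of the logging added in these hunks is gated behind the `DEBUG_GRAPHLIT_STREAMING` environment variable, which also replaces the older `DEBUG_STREAMING` checks changed further down. A sketch of turning it on for a run (placeholder specification ID):

```typescript
// Enable the gated diagnostics; equivalent to `export DEBUG_GRAPHLIT_STREAMING=1`.
process.env.DEBUG_GRAPHLIT_STREAMING = "1";

await client.streamAgent(
  "Why is the sky blue?",
  (event) => {
    if (event.type === "message_update") process.stdout.write(event.message.message);
  },
  undefined, // conversationId
  { id: "spec-id" }, // specification (placeholder)
);
```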
@@ -2065,8 +2096,7 @@ class Graphlit {
  return result.data;
  }
  catch (error) {
- if (error instanceof ApolloError &&
- error.graphQLErrors.length > 0) {
+ if (error instanceof ApolloError && error.graphQLErrors.length > 0) {
  const errorMessage = error.graphQLErrors
  .map((err) => this.prettyPrintGraphQLError(err))
  .join("\n");
@@ -2102,8 +2132,7 @@ class Graphlit {
  return result.data;
  }
  catch (error) {
- if (error instanceof ApolloError &&
- error.graphQLErrors.length > 0) {
+ if (error instanceof ApolloError && error.graphQLErrors.length > 0) {
  const errorMessage = error.graphQLErrors
  .map((err) => this.prettyPrintGraphQLError(err))
  .join("\n");
@@ -23,6 +23,15 @@ onEvent, onComplete) {
  if (!modelName) {
  throw new Error(`No model name found for OpenAI specification: ${specification.name}`);
  }
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log("\nšŸ¤– [OpenAI] Model Configuration:");
+ console.log(" Service: OpenAI");
+ console.log(" Model:", modelName);
+ console.log(" Temperature:", specification.openAI?.temperature);
+ console.log(" Max Tokens:", specification.openAI?.completionTokenLimit);
+ console.log(" Tools:", tools?.length || 0);
+ console.log(" Specification Name:", specification.name);
+ }
  const streamConfig = {
  model: modelName,
  messages,
@@ -49,8 +58,25 @@ onEvent, onComplete) {
  const stream = await openaiClient.chat.completions.create(streamConfig);
  for await (const chunk of stream) {
  const delta = chunk.choices[0]?.delta;
+ // Debug log chunk details
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log(`[OpenAI] Chunk:`, JSON.stringify(chunk, null, 2));
+ if (delta?.content) {
+ console.log(`[OpenAI] Content delta: "${delta.content}" (${delta.content.length} chars)`);
+ }
+ if (delta?.tool_calls) {
+ console.log(`[OpenAI] Tool calls:`, delta.tool_calls);
+ }
+ if (chunk.choices[0]?.finish_reason) {
+ console.log(`[OpenAI] Finish reason: ${chunk.choices[0].finish_reason}`);
+ }
+ }
  if (delta?.content) {
  fullMessage += delta.content;
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log(`[OpenAI] Message accumulated: ${fullMessage.length} chars total`);
+ console.log(`[OpenAI] Current full message: "${fullMessage}"`);
+ }
  onEvent({
  type: "token",
  token: delta.content,
@@ -66,6 +92,9 @@ onEvent, onComplete) {
  name: "",
  arguments: "",
  };
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log(`[OpenAI] Starting new tool call: ${toolCalls[index].id}`);
+ }
  onEvent({
  type: "tool_call_start",
  toolCall: {
@@ -76,9 +105,17 @@ onEvent, onComplete) {
  }
  if (toolCallDelta.function?.name) {
  toolCalls[index].name = toolCallDelta.function.name;
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log(`[OpenAI] Tool name: ${toolCallDelta.function.name}`);
+ }
  }
  if (toolCallDelta.function?.arguments) {
  toolCalls[index].arguments += toolCallDelta.function.arguments;
+ // Debug logging for partial JSON accumulation
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log(`[OpenAI] Tool ${toolCalls[index].name} - Partial JSON chunk: "${toolCallDelta.function.arguments}"`);
+ console.log(`[OpenAI] Tool ${toolCalls[index].name} - Total accumulated: ${toolCalls[index].arguments.length} chars`);
+ }
  onEvent({
  type: "tool_call_delta",
  toolCallId: toolCalls[index].id,
@@ -90,6 +127,19 @@ onEvent, onComplete) {
  }
  // Emit complete events for tool calls
  for (const toolCall of toolCalls) {
+ // Log the final JSON for debugging
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log(`[OpenAI] Tool ${toolCall.name} complete with arguments (${toolCall.arguments.length} chars):`);
+ console.log(toolCall.arguments);
+ // Validate JSON
+ try {
+ JSON.parse(toolCall.arguments);
+ console.log(`[OpenAI] āœ… Valid JSON for ${toolCall.name}`);
+ }
+ catch (e) {
+ console.error(`[OpenAI] āŒ Invalid JSON for ${toolCall.name}: ${e}`);
+ }
+ }
  onEvent({
  type: "tool_call_complete",
  toolCall: {
@@ -99,6 +149,13 @@ onEvent, onComplete) {
  },
  });
  }
+ // Final summary logging
+ if (process.env.DEBUG_GRAPHLIT_STREAMING && toolCalls.length > 0) {
+ console.log(`[OpenAI] Successfully processed ${toolCalls.length} tool calls`);
+ }
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log(`[OpenAI] Streaming complete. Final message: "${fullMessage}" (${fullMessage.length} chars)`);
+ }
  onComplete(fullMessage, toolCalls);
  }
  catch (error) {
@@ -121,6 +178,16 @@ onEvent, onComplete) {
  if (!modelName) {
  throw new Error(`No model name found for Anthropic specification: ${specification.name}`);
  }
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log("\nšŸ¤– [Anthropic] Model Configuration:");
+ console.log(" Service: Anthropic");
+ console.log(" Model:", modelName);
+ console.log(" Temperature:", specification.anthropic?.temperature);
+ console.log(" Max Tokens:", specification.anthropic?.completionTokenLimit || 8192);
+ console.log(" System Prompt:", systemPrompt ? "Yes" : "No");
+ console.log(" Tools:", tools?.length || 0);
+ console.log(" Specification Name:", specification.name);
+ }
  const streamConfig = {
  model: modelName,
  messages,
@@ -144,7 +211,7 @@ onEvent, onComplete) {
  let activeContentBlock = false;
  for await (const chunk of stream) {
  // Debug log all chunk types
- if (process.env.DEBUG_STREAMING) {
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
  console.log(`[Anthropic] Received chunk type: ${chunk.type}`);
  }
  if (chunk.type === "content_block_start") {
@@ -167,6 +234,9 @@ onEvent, onComplete) {
  }
  else if (chunk.type === "content_block_delta") {
  if (chunk.delta.type === "text_delta") {
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log(`[Anthropic] Text delta: "${chunk.delta.text}"`);
+ }
  fullMessage += chunk.delta.text;
  onEvent({
  type: "token",
@@ -179,7 +249,7 @@ onEvent, onComplete) {
  if (currentTool) {
  currentTool.arguments += chunk.delta.partial_json;
  // Debug logging for partial JSON accumulation
- if (process.env.DEBUG_STREAMING) {
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
  console.log(`[Anthropic] Tool ${currentTool.name} - Partial JSON chunk: "${chunk.delta.partial_json}"`);
  console.log(`[Anthropic] Tool ${currentTool.name} - Total accumulated: ${currentTool.arguments.length} chars`);
  }
@@ -197,7 +267,7 @@ onEvent, onComplete) {
  const currentTool = toolCalls[toolCalls.length - 1];
  if (currentTool) {
  // Log the final JSON for debugging
- if (process.env.DEBUG_STREAMING ||
+ if (process.env.DEBUG_GRAPHLIT_STREAMING ||
  !isValidJSON(currentTool.arguments)) {
  console.log(`[Anthropic] Tool ${currentTool.name} complete with arguments (${currentTool.arguments.length} chars):`);
  console.log(currentTool.arguments);
@@ -210,7 +280,7 @@ onEvent, onComplete) {
  // Validate JSON
  try {
  JSON.parse(currentTool.arguments);
- if (process.env.DEBUG_STREAMING) {
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
  console.log(`[Anthropic] āœ… Valid JSON for ${currentTool.name}`);
  }
  }
@@ -288,6 +358,16 @@ onEvent, onComplete) {
  if (!modelName) {
  throw new Error(`No model name found for Google specification: ${specification.name}`);
  }
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log("\nšŸ¤– [Google] Model Configuration:");
+ console.log(" Service: Google");
+ console.log(" Model:", modelName);
+ console.log(" Temperature:", specification.google?.temperature);
+ console.log(" Max Tokens:", specification.google?.completionTokenLimit);
+ console.log(" System Prompt:", systemPrompt ? "Yes" : "No");
+ console.log(" Tools:", tools?.length || 0);
+ console.log(" Specification Name:", specification.name);
+ }
  const streamConfig = {
  model: modelName,
  messages,
@@ -337,6 +417,13 @@ onEvent, onComplete) {
  const result = await chat.sendMessageStream(prompt);
  for await (const chunk of result.stream) {
  const text = chunk.text();
+ // Debug log chunk details
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log(`[Google] Raw chunk:`, JSON.stringify(chunk, null, 2));
+ if (text) {
+ console.log(`[Google] Text delta: "${text}" (${text.length} chars)`);
+ }
+ }
  if (text) {
  fullMessage += text;
  onEvent({
@@ -351,6 +438,10 @@ onEvent, onComplete) {
  if (candidate?.content?.parts) {
  for (const part of candidate.content.parts) {
  if (part.functionCall) {
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log(`[Google] Received function call: ${part.functionCall.name}`);
+ console.log(`[Google] Function args:`, JSON.stringify(part.functionCall.args || {}));
+ }
  const toolCall = {
  id: `google_tool_${Date.now()}_${toolCalls.length}`,
  name: part.functionCall.name,
@@ -370,6 +461,19 @@ onEvent, onComplete) {
  toolCallId: toolCall.id,
  argumentDelta: toolCall.arguments,
  });
+ // Log completion and validate JSON
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log(`[Google] Tool ${toolCall.name} complete with arguments (${toolCall.arguments.length} chars):`);
+ console.log(toolCall.arguments);
+ // Validate JSON
+ try {
+ JSON.parse(toolCall.arguments);
+ console.log(`[Google] āœ… Valid JSON for ${toolCall.name}`);
+ }
+ catch (e) {
+ console.error(`[Google] āŒ Invalid JSON for ${toolCall.name}: ${e}`);
+ }
+ }
  onEvent({
  type: "tool_call_complete",
  toolCall: {
@@ -384,12 +488,18 @@ onEvent, onComplete) {
  }
  catch (error) {
  // Silently ignore parsing errors
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.error(`[Google] Error parsing chunk for function calls:`, error);
+ }
  }
  }
  // Google might also return function calls or additional text in the final response
  try {
  const response = await result.response;
  const candidate = response.candidates?.[0];
+ if (process.env.DEBUG_GRAPHLIT_STREAMING && candidate?.content?.parts) {
+ console.log(`[Google] Processing final response with ${candidate.content.parts.length} parts`);
+ }
  if (candidate?.content?.parts) {
  for (const part of candidate.content.parts) {
  // Check for any final text we might have missed
@@ -397,6 +507,9 @@ onEvent, onComplete) {
  const finalText = part.text;
  // Only add if it's not already included in fullMessage
  if (!fullMessage.endsWith(finalText)) {
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log(`[Google] Adding final text: ${finalText.length} chars`);
+ }
  fullMessage += finalText;
  onEvent({
  type: "token",
@@ -407,6 +520,9 @@ onEvent, onComplete) {
  // Check for function calls
  if (part.functionCall &&
  !toolCalls.some((tc) => tc.name === part.functionCall.name)) {
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.log(`[Google] Found function call in final response: ${part.functionCall.name}`);
+ }
  const toolCall = {
  id: `google_tool_${Date.now()}_${toolCalls.length}`,
  name: part.functionCall.name,
@@ -434,7 +550,14 @@ onEvent, onComplete) {
  }
  }
  catch (error) {
- // Silently ignore parsing errors
+ // Log parsing errors when debugging
+ if (process.env.DEBUG_GRAPHLIT_STREAMING) {
+ console.error(`[Google] Error processing final response:`, error);
+ }
+ }
+ // Final summary logging
+ if (process.env.DEBUG_GRAPHLIT_STREAMING && toolCalls.length > 0) {
+ console.log(`[Google] Successfully processed ${toolCalls.length} tool calls`);
  }
  onComplete(fullMessage, toolCalls);
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "graphlit-client",
- "version": "1.0.20250611016",
+ "version": "1.0.20250611017",
  "description": "Graphlit API Client for TypeScript",
  "type": "module",
  "main": "./dist/client.js",