graphlit-client 1.0.20250622007 → 1.0.20250627001

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -14,7 +14,15 @@ Graphlit is a cloud platform that handles the complex parts of building AI appli
14
14
  - **Extract insights** - Summaries, entities, and metadata
15
15
  - **Build knowledge graphs** - Automatically connect related information
16
16
 
17
- ## ✨ What's New in v1.1.0
17
+ ## ✨ What's New
18
+
19
+ ### v1.2.0 - Reasoning & Cancellation Support 🧠
20
+
21
+ - **Reasoning/Thinking Detection** - See how AI models think through problems (Bedrock Nova, Deepseek, Anthropic)
22
+ - **Stream Cancellation** - Stop long-running generations instantly with AbortSignal support
23
+ - **Enhanced Streaming Events** - New `reasoning_update` events expose model thought processes
24
+
25
+ ### v1.1.0 - Streaming & Resilience
18
26
 
19
27
  - **Real-time streaming** - Watch AI responses appear word-by-word across 9 different providers
20
28
  - **Tool calling** - Let AI execute functions and retrieve data
@@ -27,7 +35,9 @@ Graphlit is a cloud platform that handles the complex parts of building AI appli
27
35
  - [Quick Start](#quick-start)
28
36
  - [Installation](#installation)
29
37
  - [Setting Up](#setting-up)
30
- - [Network Resilience](#network-resilience-new-in-v111)
38
+ - [Reasoning Support (New!)](#reasoning-support-new) 🧠
39
+ - [Stream Cancellation (New!)](#stream-cancellation-new) 🛑
40
+ - [Network Resilience](#network-resilience)
31
41
  - [Streaming Provider Support](#streaming-provider-support)
32
42
  - [Basic Examples](#basic-examples)
33
43
  - [Common Use Cases](#common-use-cases)
@@ -145,9 +155,153 @@ AWS_ACCESS_KEY_ID=your_key
145
155
  AWS_SECRET_ACCESS_KEY=your_secret
146
156
  ```
147
157
 
148
- ## Network Resilience (New in v1.1.1)
158
+ ## Reasoning Support (New!) 🧠
159
+
160
+ The SDK can detect and expose AI reasoning processes, showing you how models "think" through problems. This feature works with models that support reasoning output.
161
+
162
+ ### Quick Example
163
+
164
+ ```typescript
165
+ await client.streamAgent(
166
+ "What's 15% of 240? Think step by step.",
167
+ (event) => {
168
+ if (event.type === "reasoning_update") {
169
+ console.log("🤔 Model thinking:", event.content);
170
+ } else if (event.type === "message_update") {
171
+ console.log("💬 Answer:", event.message.message);
172
+ }
173
+ },
174
+ undefined,
175
+ { id: specificationId },
176
+ );
177
+ ```
178
+
179
+ ### Supported Models
180
+
181
+ | Provider | Models | Format | Example Output |
182
+ | --------------- | ---------------------------- | -------------- | ------------------------------------------ |
183
+ | **AWS Bedrock** | Nova Premier | `thinking_tag` | `<thinking>Let me calculate...</thinking>` |
184
+ | **Deepseek** | Chat, Reasoner | `markdown` | `**Step 1:** First, I need to...` |
185
+ | **Anthropic** | Claude (with special access) | `thinking_tag` | Internal thinking blocks |
186
+
187
+ ### Using Reasoning Detection
188
+
189
+ ```typescript
190
+ // Create a specification with a reasoning-capable model
191
+ const spec = await client.createSpecification({
192
+ name: "Reasoning Assistant",
193
+ serviceType: Types.ModelServiceTypes.Bedrock,
194
+ bedrock: {
195
+ model: Types.BedrockModels.NovaPremier,
196
+ temperature: 0.7,
197
+ },
198
+ });
149
199
 
150
- The SDK now includes automatic retry logic for network errors and transient failures:
200
+ // Track reasoning steps
201
+ const reasoningSteps: string[] = [];
202
+
203
+ await client.streamAgent(
204
+ "Analyze the pros and cons of remote work. Think carefully.",
205
+ (event) => {
206
+ switch (event.type) {
207
+ case "reasoning_update":
208
+ // Capture model's thinking process
209
+ reasoningSteps.push(event.content);
210
+ console.log(`🧠 Thinking (${event.format}):`, event.content);
211
+
212
+ if (event.isComplete) {
213
+ console.log("✅ Reasoning complete!");
214
+ }
215
+ break;
216
+
217
+ case "message_update":
218
+ // The actual answer (reasoning removed)
219
+ console.log("Answer:", event.message.message);
220
+ break;
221
+ }
222
+ },
223
+ undefined,
224
+ { id: spec.createSpecification!.id },
225
+ );
226
+ ```
227
+
228
+ ### Key Features
229
+
230
+ - **Automatic Detection**: Reasoning content is automatically detected and separated
231
+ - **Format Preservation**: Maintains original formatting (markdown, tags, etc.)
232
+ - **Real-time Streaming**: Reasoning streams as it's generated
233
+ - **Clean Separation**: Final answers don't include thinking content
234
+
235
+ ## Stream Cancellation (New!) 🛑
236
+
237
+ Cancel long-running AI generations instantly using the standard Web API `AbortController`.
238
+
239
+ ### Quick Example
240
+
241
+ ```typescript
242
+ const controller = new AbortController();
243
+
244
+ // Add a stop button
245
+ document.getElementById("stop").onclick = () => controller.abort();
246
+
247
+ try {
248
+ await client.streamAgent(
249
+ "Write a 10,000 word essay about quantum computing...",
250
+ (event) => {
251
+ if (event.type === "message_update") {
252
+ console.log(event.message.message);
253
+ }
254
+ },
255
+ undefined,
256
+ { id: specificationId },
257
+ undefined, // tools
258
+ undefined, // toolHandlers
259
+ { abortSignal: controller.signal }, // Pass the signal
260
+ );
261
+ } catch (error) {
262
+ if (controller.signal.aborted) {
263
+ console.log("✋ Generation stopped by user");
264
+ }
265
+ }
266
+ ```
267
+
268
+ ### Advanced Cancellation
269
+
270
+ ```typescript
271
+ // Cancel after timeout
272
+ const controller = new AbortController();
273
+ setTimeout(() => controller.abort(), 30000); // 30 second timeout
274
+
275
+ // Cancel multiple streams at once
276
+ const sharedController = new AbortController();
277
+
278
+ const streams = [
279
+ client.streamAgent("Query 1", handler1, undefined, spec1, null, null, {
280
+ abortSignal: sharedController.signal,
281
+ }),
282
+ client.streamAgent("Query 2", handler2, undefined, spec2, null, null, {
283
+ abortSignal: sharedController.signal,
284
+ }),
285
+ client.streamAgent("Query 3", handler3, undefined, spec3, null, null, {
286
+ abortSignal: sharedController.signal,
287
+ }),
288
+ ];
289
+
290
+ // Cancel all streams
291
+ sharedController.abort();
292
+ await Promise.allSettled(streams);
293
+ ```
294
+
295
+ ### Features
296
+
297
+ - **Instant Response**: Cancellation happens immediately
298
+ - **Provider Support**: Works with all streaming providers
299
+ - **Tool Interruption**: Stops tool execution between rounds
300
+ - **Clean Cleanup**: Resources are properly released
301
+
302
+ ## Network Resilience
303
+
304
+ The SDK includes automatic retry logic for network errors and transient failures:
151
305
 
152
306
  ### Default Retry Configuration
153
307
 
@@ -466,7 +620,73 @@ for (const provider of providers) {
466
620
  }
467
621
  ```
468
622
 
469
- ### 5. Tool Calling
623
+ ### 5. Reasoning + Cancellation Example
624
+
625
+ Combine reasoning detection with cancellable streams:
626
+
627
+ ```typescript
628
+ import { Graphlit, Types } from "graphlit-client";
629
+
630
+ const client = new Graphlit();
631
+ const controller = new AbortController();
632
+
633
+ // Create spec for reasoning model
634
+ const spec = await client.createSpecification({
635
+ name: "Reasoning Demo",
636
+ serviceType: Types.ModelServiceTypes.Bedrock,
637
+ bedrock: {
638
+ model: Types.BedrockModels.NovaPremier,
639
+ },
640
+ });
641
+
642
+ // UI elements
643
+ const stopButton = document.getElementById("stop-reasoning");
644
+ const reasoningDiv = document.getElementById("reasoning");
645
+ const answerDiv = document.getElementById("answer");
646
+
647
+ stopButton.onclick = () => {
648
+ controller.abort();
649
+ console.log("🛑 Cancelled!");
650
+ };
651
+
652
+ try {
653
+ await client.streamAgent(
654
+ "Solve this puzzle: If it takes 5 machines 5 minutes to make 5 widgets, how long does it take 100 machines to make 100 widgets? Think through this step-by-step.",
655
+ (event) => {
656
+ switch (event.type) {
657
+ case "reasoning_update":
658
+ // Show the AI's thought process
659
+ reasoningDiv.textContent = event.content;
660
+ if (event.isComplete) {
661
+ reasoningDiv.classList.add("complete");
662
+ }
663
+ break;
664
+
665
+ case "message_update":
666
+ // Show the final answer
667
+ answerDiv.textContent = event.message.message;
668
+ break;
669
+
670
+ case "conversation_completed":
671
+ stopButton.disabled = true;
672
+ console.log("✅ Complete!");
673
+ break;
674
+ }
675
+ },
676
+ undefined,
677
+ { id: spec.createSpecification!.id },
678
+ undefined,
679
+ undefined,
680
+ { abortSignal: controller.signal },
681
+ );
682
+ } catch (error) {
683
+ if (controller.signal.aborted) {
684
+ console.log("Reasoning cancelled by user");
685
+ }
686
+ }
687
+ ```
688
+
689
+ ### 6. Tool Calling
470
690
 
471
691
  Let AI call functions to get real-time data:
472
692
 
@@ -933,6 +1153,16 @@ type AgentStreamEvent =
933
1153
  | { type: "conversation_started"; conversationId: string }
934
1154
  | { type: "message_update"; message: { message: string } }
935
1155
  | { type: "tool_update"; toolCall: any; status: string }
1156
+ | {
1157
+ type: "reasoning_update";
1158
+ content: string;
1159
+ format: "thinking_tag" | "markdown" | "custom";
1160
+ isComplete: boolean;
1161
+ }
1162
+ | {
1163
+ type: "context_window";
1164
+ usage: { usedTokens: number; maxTokens: number; percentage: number };
1165
+ }
936
1166
  | { type: "conversation_completed"; message: { message: string } }
937
1167
  | { type: "error"; error: { message: string; recoverable: boolean } };
938
1168
  ```
package/dist/client.d.ts CHANGED
@@ -183,6 +183,11 @@ declare class Graphlit {
183
183
  continueConversation(id: string, responses: Types.ConversationToolResponseInput[], correlationId?: string): Promise<Types.ContinueConversationMutation>;
184
184
  publishConversation(id: string, connector: Types.ContentPublishingConnectorInput, name?: string, workflow?: Types.EntityReferenceInput, isSynchronous?: boolean, correlationId?: string): Promise<Types.PublishConversationMutation>;
185
185
  suggestConversation(id: string, count?: number, correlationId?: string): Promise<Types.SuggestConversationMutation>;
186
+ queryMicrosoftCalendars(properties: Types.MicrosoftCalendarsInput): Promise<Types.QueryMicrosoftCalendarsQuery>;
187
+ queryGoogleCalendars(properties: Types.GoogleCalendarsInput): Promise<Types.QueryGoogleCalendarsQuery>;
188
+ queryBoxFolders(properties: Types.BoxFoldersInput, folderId?: string): Promise<Types.QueryBoxFoldersQuery>;
189
+ queryDropboxFolders(properties: Types.DropboxFoldersInput, folderPath?: string): Promise<Types.QueryDropboxFoldersQuery>;
190
+ queryGoogleDriveFolders(properties: Types.GoogleDriveFoldersInput, folderId?: string): Promise<Types.QueryGoogleDriveFoldersQuery>;
186
191
  queryOneDriveFolders(properties: Types.OneDriveFoldersInput, folderId?: string): Promise<Types.QueryOneDriveFoldersQuery>;
187
192
  querySharePointFolders(properties: Types.SharePointFoldersInput, libraryId: string, folderId?: string): Promise<Types.QuerySharePointFoldersQuery>;
188
193
  querySharePointLibraries(properties: Types.SharePointLibrariesInput): Promise<Types.QuerySharePointLibrariesQuery>;
@@ -216,6 +221,22 @@ declare class Graphlit {
216
221
  countSpecifications(filter?: Types.SpecificationFilter): Promise<Types.CountSpecificationsQuery>;
217
222
  specificationExists(filter?: Types.SpecificationFilter): Promise<Types.SpecificationExistsQuery>;
218
223
  queryModels(filter?: Types.ModelFilter): Promise<Types.QueryModelsQuery>;
224
+ createConnector(connector: Types.ConnectorInput): Promise<Types.CreateConnectorMutation>;
225
+ updateConnector(connector: Types.ConnectorUpdateInput): Promise<Types.UpdateConnectorMutation>;
226
+ deleteConnector(id: string): Promise<Types.DeleteConnectorMutation>;
227
+ getConnector(id: string): Promise<Types.GetConnectorQuery>;
228
+ queryConnectors(filter?: Types.ConnectorFilter): Promise<Types.QueryConnectorsQuery>;
229
+ countConnectors(filter?: Types.ConnectorFilter): Promise<Types.CountConnectorsQuery>;
230
+ createView(view: Types.ViewInput): Promise<Types.CreateViewMutation>;
231
+ updateView(view: Types.ViewUpdateInput): Promise<Types.UpdateViewMutation>;
232
+ upsertView(view: Types.ViewInput): Promise<Types.UpsertViewMutation>;
233
+ deleteView(id: string): Promise<Types.DeleteViewMutation>;
234
+ deleteViews(ids: string[], isSynchronous?: boolean): Promise<Types.DeleteViewsMutation>;
235
+ deleteAllViews(filter?: Types.ViewFilter, isSynchronous?: boolean, correlationId?: string): Promise<Types.DeleteAllViewsMutation>;
236
+ getView(id: string): Promise<Types.GetViewQuery>;
237
+ queryViews(filter?: Types.ViewFilter): Promise<Types.QueryViewsQuery>;
238
+ countViews(filter?: Types.ViewFilter): Promise<Types.CountViewsQuery>;
239
+ viewExists(filter?: Types.ViewFilter): Promise<Types.ViewExistsQuery>;
219
240
  createWorkflow(workflow: Types.WorkflowInput): Promise<Types.CreateWorkflowMutation>;
220
241
  updateWorkflow(workflow: Types.WorkflowUpdateInput): Promise<Types.UpdateWorkflowMutation>;
221
242
  upsertWorkflow(workflow: Types.WorkflowInput): Promise<Types.UpsertWorkflowMutation>;
@@ -389,7 +410,7 @@ declare class Graphlit {
389
410
  * @param specification - Optional specification to check compatibility
390
411
  * @returns true if streaming is available, false otherwise
391
412
  */
392
- supportsStreaming(specification?: Types.Specification): boolean;
413
+ supportsStreaming(specification?: Types.Specification, tools?: Types.ToolDefinitionInput[]): boolean;
393
414
  /**
394
415
  * Execute an agent with non-streaming response
395
416
  * @param prompt - The user prompt
@@ -453,6 +474,10 @@ declare class Graphlit {
453
474
  * Stream with Anthropic client
454
475
  */
455
476
  private streamWithAnthropic;
477
+ /**
478
+ * Extract thinking configuration from specification
479
+ */
480
+ private getThinkingConfig;
456
481
  /**
457
482
  * Stream with Google client
458
483
  */