graphlit-client 1.0.20250625001 → 1.0.20250627002

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -14,7 +14,15 @@ Graphlit is a cloud platform that handles the complex parts of building AI appli
14
14
  - **Extract insights** - Summaries, entities, and metadata
15
15
  - **Build knowledge graphs** - Automatically connect related information
16
16
 
17
- ## ✨ What's New in v1.1.0
17
+ ## ✨ What's New
18
+
19
+ ### v1.2.0 - Reasoning & Cancellation Support 🧠
20
+
21
+ - **Reasoning/Thinking Detection** - See how AI models think through problems (Bedrock Nova, Deepseek, Anthropic)
22
+ - **Stream Cancellation** - Stop long-running generations instantly with AbortSignal support
23
+ - **Enhanced Streaming Events** - New `reasoning_update` events expose model thought processes
24
+
25
+ ### v1.1.0 - Streaming & Resilience
18
26
 
19
27
  - **Real-time streaming** - Watch AI responses appear word-by-word across 9 different providers
20
28
  - **Tool calling** - Let AI execute functions and retrieve data
@@ -27,7 +35,9 @@ Graphlit is a cloud platform that handles the complex parts of building AI appli
27
35
  - [Quick Start](#quick-start)
28
36
  - [Installation](#installation)
29
37
  - [Setting Up](#setting-up)
30
- - [Network Resilience](#network-resilience-new-in-v111)
38
+ - [Reasoning Support (New!)](#reasoning-support-new) 🧠
39
+ - [Stream Cancellation (New!)](#stream-cancellation-new) 🛑
40
+ - [Network Resilience](#network-resilience)
31
41
  - [Streaming Provider Support](#streaming-provider-support)
32
42
  - [Basic Examples](#basic-examples)
33
43
  - [Common Use Cases](#common-use-cases)
@@ -145,9 +155,153 @@ AWS_ACCESS_KEY_ID=your_key
145
155
  AWS_SECRET_ACCESS_KEY=your_secret
146
156
  ```
147
157
 
148
- ## Network Resilience (New in v1.1.1)
158
+ ## Reasoning Support (New!) 🧠
159
+
160
+ The SDK can detect and expose AI reasoning processes, showing you how models "think" through problems. This feature works with models that support reasoning output.
161
+
162
+ ### Quick Example
163
+
164
+ ```typescript
165
+ await client.streamAgent(
166
+ "What's 15% of 240? Think step by step.",
167
+ (event) => {
168
+ if (event.type === "reasoning_update") {
169
+ console.log("🤔 Model thinking:", event.content);
170
+ } else if (event.type === "message_update") {
171
+ console.log("💬 Answer:", event.message.message);
172
+ }
173
+ },
174
+ undefined,
175
+ { id: specificationId },
176
+ );
177
+ ```
178
+
179
+ ### Supported Models
180
+
181
+ | Provider | Models | Format | Example Output |
182
+ | --------------- | ---------------------------- | -------------- | ------------------------------------------ |
183
+ | **AWS Bedrock** | Nova Premier | `thinking_tag` | `<thinking>Let me calculate...</thinking>` |
184
+ | **Deepseek** | Chat, Reasoner | `markdown` | `**Step 1:** First, I need to...` |
185
+ | **Anthropic** | Claude (with special access) | `thinking_tag` | Internal thinking blocks |
186
+
187
+ ### Using Reasoning Detection
188
+
189
+ ```typescript
190
+ // Create a specification with a reasoning-capable model
191
+ const spec = await client.createSpecification({
192
+ name: "Reasoning Assistant",
193
+ serviceType: Types.ModelServiceTypes.Bedrock,
194
+ bedrock: {
195
+ model: Types.BedrockModels.NovaPremier,
196
+ temperature: 0.7,
197
+ },
198
+ });
149
199
 
150
- The SDK now includes automatic retry logic for network errors and transient failures:
200
+ // Track reasoning steps
201
+ const reasoningSteps: string[] = [];
202
+
203
+ await client.streamAgent(
204
+ "Analyze the pros and cons of remote work. Think carefully.",
205
+ (event) => {
206
+ switch (event.type) {
207
+ case "reasoning_update":
208
+ // Capture model's thinking process
209
+ reasoningSteps.push(event.content);
210
+ console.log(`🧠 Thinking (${event.format}):`, event.content);
211
+
212
+ if (event.isComplete) {
213
+ console.log("✅ Reasoning complete!");
214
+ }
215
+ break;
216
+
217
+ case "message_update":
218
+ // The actual answer (reasoning removed)
219
+ console.log("Answer:", event.message.message);
220
+ break;
221
+ }
222
+ },
223
+ undefined,
224
+ { id: spec.createSpecification!.id },
225
+ );
226
+ ```
227
+
228
+ ### Key Features
229
+
230
+ - **Automatic Detection**: Reasoning content is automatically detected and separated
231
+ - **Format Preservation**: Maintains original formatting (markdown, tags, etc.)
232
+ - **Real-time Streaming**: Reasoning streams as it's generated
233
+ - **Clean Separation**: Final answers don't include thinking content
234
+
235
+ ## Stream Cancellation (New!) 🛑
236
+
237
+ Cancel long-running AI generations instantly using the standard Web API `AbortController`.
238
+
239
+ ### Quick Example
240
+
241
+ ```typescript
242
+ const controller = new AbortController();
243
+
244
+ // Add a stop button
245
+ document.getElementById("stop").onclick = () => controller.abort();
246
+
247
+ try {
248
+ await client.streamAgent(
249
+ "Write a 10,000 word essay about quantum computing...",
250
+ (event) => {
251
+ if (event.type === "message_update") {
252
+ console.log(event.message.message);
253
+ }
254
+ },
255
+ undefined,
256
+ { id: specificationId },
257
+ undefined, // tools
258
+ undefined, // toolHandlers
259
+ { abortSignal: controller.signal }, // Pass the signal
260
+ );
261
+ } catch (error) {
262
+ if (controller.signal.aborted) {
263
+ console.log("✋ Generation stopped by user");
264
+ }
265
+ }
266
+ ```
267
+
268
+ ### Advanced Cancellation
269
+
270
+ ```typescript
271
+ // Cancel after timeout
272
+ const controller = new AbortController();
273
+ setTimeout(() => controller.abort(), 30000); // 30 second timeout
274
+
275
+ // Cancel multiple streams at once
276
+ const batchController = new AbortController();
277
+
278
+ const streams = [
279
+ client.streamAgent("Query 1", handler1, undefined, spec1, undefined, undefined, {
280
+ abortSignal: batchController.signal,
281
+ }),
282
+ client.streamAgent("Query 2", handler2, undefined, spec2, undefined, undefined, {
283
+ abortSignal: batchController.signal,
284
+ }),
285
+ client.streamAgent("Query 3", handler3, undefined, spec3, undefined, undefined, {
286
+ abortSignal: batchController.signal,
287
+ }),
288
+ ];
289
+
290
+ // Cancel all streams
291
+ batchController.abort();
292
+ await Promise.allSettled(streams);
293
+ ```
294
+
295
+ ### Features
296
+
297
+ - **Instant Response**: Cancellation happens immediately
298
+ - **Provider Support**: Works with all streaming providers
299
+ - **Tool Interruption**: Stops tool execution between rounds
300
+ - **Clean Cleanup**: Resources are properly released
301
+
302
+ ## Network Resilience
303
+
304
+ The SDK includes automatic retry logic for network errors and transient failures:
151
305
 
152
306
  ### Default Retry Configuration
153
307
 
@@ -466,7 +620,73 @@ for (const provider of providers) {
466
620
  }
467
621
  ```
468
622
 
469
- ### 5. Tool Calling
623
+ ### 5. Reasoning + Cancellation Example
624
+
625
+ Combine reasoning detection with cancellable streams:
626
+
627
+ ```typescript
628
+ import { Graphlit, Types } from "graphlit-client";
629
+
630
+ const client = new Graphlit();
631
+ const controller = new AbortController();
632
+
633
+ // Create spec for reasoning model
634
+ const spec = await client.createSpecification({
635
+ name: "Reasoning Demo",
636
+ serviceType: Types.ModelServiceTypes.Bedrock,
637
+ bedrock: {
638
+ model: Types.BedrockModels.NovaPremier,
639
+ },
640
+ });
641
+
642
+ // UI elements
643
+ const stopButton = document.getElementById("stop-reasoning");
644
+ const reasoningDiv = document.getElementById("reasoning");
645
+ const answerDiv = document.getElementById("answer");
646
+
647
+ stopButton.onclick = () => {
648
+ controller.abort();
649
+ console.log("🛑 Cancelled!");
650
+ };
651
+
652
+ try {
653
+ await client.streamAgent(
654
+ "Solve this puzzle: If it takes 5 machines 5 minutes to make 5 widgets, how long does it take 100 machines to make 100 widgets? Think through this step-by-step.",
655
+ (event) => {
656
+ switch (event.type) {
657
+ case "reasoning_update":
658
+ // Show the AI's thought process
659
+ reasoningDiv.textContent = event.content;
660
+ if (event.isComplete) {
661
+ reasoningDiv.classList.add("complete");
662
+ }
663
+ break;
664
+
665
+ case "message_update":
666
+ // Show the final answer
667
+ answerDiv.textContent = event.message.message;
668
+ break;
669
+
670
+ case "conversation_completed":
671
+ stopButton.disabled = true;
672
+ console.log("✅ Complete!");
673
+ break;
674
+ }
675
+ },
676
+ undefined,
677
+ { id: spec.createSpecification!.id },
678
+ undefined,
679
+ undefined,
680
+ { abortSignal: controller.signal },
681
+ );
682
+ } catch (error) {
683
+ if (controller.signal.aborted) {
684
+ console.log("Reasoning cancelled by user");
685
+ }
686
+ }
687
+ ```
688
+
689
+ ### 6. Tool Calling
470
690
 
471
691
  Let AI call functions to get real-time data:
472
692
 
@@ -933,6 +1153,16 @@ type AgentStreamEvent =
933
1153
  | { type: "conversation_started"; conversationId: string }
934
1154
  | { type: "message_update"; message: { message: string } }
935
1155
  | { type: "tool_update"; toolCall: any; status: string }
1156
+ | {
1157
+ type: "reasoning_update";
1158
+ content: string;
1159
+ format: "thinking_tag" | "markdown" | "custom";
1160
+ isComplete: boolean;
1161
+ }
1162
+ | {
1163
+ type: "context_window";
1164
+ usage: { usedTokens: number; maxTokens: number; percentage: number };
1165
+ }
936
1166
  | { type: "conversation_completed"; message: { message: string } }
937
1167
  | { type: "error"; error: { message: string; recoverable: boolean } };
938
1168
  ```
package/dist/client.d.ts CHANGED
@@ -174,6 +174,7 @@ declare class Graphlit {
174
174
  reviseText(prompt: string, text: string, id?: string, specification?: Types.EntityReferenceInput, correlationId?: string): Promise<Types.ReviseTextMutation>;
175
175
  reviseContent(prompt: string, content: Types.EntityReferenceInput, id?: string, specification?: Types.EntityReferenceInput, correlationId?: string): Promise<Types.ReviseContentMutation>;
176
176
  prompt(prompt?: string, mimeType?: string, data?: string, specification?: Types.EntityReferenceInput, messages?: Types.ConversationMessageInput[], correlationId?: string): Promise<Types.PromptMutation>;
177
+ retrieveView(prompt: string, id: string, retrievalStrategy?: Types.RetrievalStrategyInput, rerankingStrategy?: Types.RerankingStrategyInput, correlationId?: string): Promise<Types.RetrieveViewMutation>;
177
178
  retrieveSources(prompt: string, filter?: Types.ContentFilter, augmentedFilter?: Types.ContentFilter, retrievalStrategy?: Types.RetrievalStrategyInput, rerankingStrategy?: Types.RerankingStrategyInput, correlationId?: string): Promise<Types.RetrieveSourcesMutation>;
178
179
  formatConversation(prompt: string, id?: string, specification?: Types.EntityReferenceInput, tools?: Types.ToolDefinitionInput[], systemPrompt?: string, includeDetails?: boolean, correlationId?: string): Promise<Types.FormatConversationMutation>;
179
180
  completeConversation(completion: string, id: string, completionTime?: Types.Scalars["TimeSpan"]["input"], ttft?: Types.Scalars["TimeSpan"]["input"], throughput?: Types.Scalars["Float"]["input"], correlationId?: string): Promise<Types.CompleteConversationMutation>;
@@ -193,6 +194,8 @@ declare class Graphlit {
193
194
  querySharePointLibraries(properties: Types.SharePointLibrariesInput): Promise<Types.QuerySharePointLibrariesQuery>;
194
195
  queryMicrosoftTeamsTeams(properties: Types.MicrosoftTeamsTeamsInput): Promise<Types.QueryMicrosoftTeamsTeamsQuery>;
195
196
  queryMicrosoftTeamsChannels(properties: Types.MicrosoftTeamsChannelsInput, teamId: string): Promise<Types.QueryMicrosoftTeamsChannelsQuery>;
197
+ queryDiscordGuilds(properties: Types.DiscordGuildsInput): Promise<Types.QueryDiscordGuildsQuery>;
198
+ queryDiscordChannels(properties: Types.DiscordChannelsInput): Promise<Types.QueryDiscordChannelsQuery>;
196
199
  querySlackChannels(properties: Types.SlackChannelsInput): Promise<Types.QuerySlackChannelsQuery>;
197
200
  queryLinearProjects(properties: Types.LinearProjectsInput): Promise<Types.QueryLinearProjectsQuery>;
198
201
  queryNotionDatabases(properties: Types.NotionDatabasesInput): Promise<Types.QueryNotionDatabasesQuery>;
@@ -221,6 +224,22 @@ declare class Graphlit {
221
224
  countSpecifications(filter?: Types.SpecificationFilter): Promise<Types.CountSpecificationsQuery>;
222
225
  specificationExists(filter?: Types.SpecificationFilter): Promise<Types.SpecificationExistsQuery>;
223
226
  queryModels(filter?: Types.ModelFilter): Promise<Types.QueryModelsQuery>;
227
+ createConnector(connector: Types.ConnectorInput): Promise<Types.CreateConnectorMutation>;
228
+ updateConnector(connector: Types.ConnectorUpdateInput): Promise<Types.UpdateConnectorMutation>;
229
+ deleteConnector(id: string): Promise<Types.DeleteConnectorMutation>;
230
+ getConnector(id: string): Promise<Types.GetConnectorQuery>;
231
+ queryConnectors(filter?: Types.ConnectorFilter): Promise<Types.QueryConnectorsQuery>;
232
+ countConnectors(filter?: Types.ConnectorFilter): Promise<Types.CountConnectorsQuery>;
233
+ createView(view: Types.ViewInput): Promise<Types.CreateViewMutation>;
234
+ updateView(view: Types.ViewUpdateInput): Promise<Types.UpdateViewMutation>;
235
+ upsertView(view: Types.ViewInput): Promise<Types.UpsertViewMutation>;
236
+ deleteView(id: string): Promise<Types.DeleteViewMutation>;
237
+ deleteViews(ids: string[], isSynchronous?: boolean): Promise<Types.DeleteViewsMutation>;
238
+ deleteAllViews(filter?: Types.ViewFilter, isSynchronous?: boolean, correlationId?: string): Promise<Types.DeleteAllViewsMutation>;
239
+ getView(id: string): Promise<Types.GetViewQuery>;
240
+ queryViews(filter?: Types.ViewFilter): Promise<Types.QueryViewsQuery>;
241
+ countViews(filter?: Types.ViewFilter): Promise<Types.CountViewsQuery>;
242
+ viewExists(filter?: Types.ViewFilter): Promise<Types.ViewExistsQuery>;
224
243
  createWorkflow(workflow: Types.WorkflowInput): Promise<Types.CreateWorkflowMutation>;
225
244
  updateWorkflow(workflow: Types.WorkflowUpdateInput): Promise<Types.UpdateWorkflowMutation>;
226
245
  upsertWorkflow(workflow: Types.WorkflowInput): Promise<Types.UpsertWorkflowMutation>;
@@ -394,7 +413,7 @@ declare class Graphlit {
394
413
  * @param specification - Optional specification to check compatibility
395
414
  * @returns true if streaming is available, false otherwise
396
415
  */
397
- supportsStreaming(specification?: Types.Specification): boolean;
416
+ supportsStreaming(specification?: Types.Specification, tools?: Types.ToolDefinitionInput[]): boolean;
398
417
  /**
399
418
  * Execute an agent with non-streaming response
400
419
  * @param prompt - The user prompt
@@ -458,6 +477,10 @@ declare class Graphlit {
458
477
  * Stream with Anthropic client
459
478
  */
460
479
  private streamWithAnthropic;
480
+ /**
481
+ * Extract thinking configuration from specification
482
+ */
483
+ private getThinkingConfig;
461
484
  /**
462
485
  * Stream with Google client
463
486
  */