graphlit-client 1.0.20250610010 → 1.0.20250611002
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +427 -553
- package/dist/client.d.ts +3 -2
- package/dist/client.js +21 -10
- package/package.json +2 -1
package/README.md
CHANGED
The README was rewritten for this release (+427/−553 lines). Sections dropped from the previous version include Prerequisites, Configuration, custom LLM client setup, streaming options and cancellation, multi-round tool calling, the stream event reference, a React chat component example, best practices, the migration guide, and troubleshooting. The updated README follows.

# Graphlit TypeScript Client SDK

[](https://badge.fury.io/js/graphlit-client)
[](https://opensource.org/licenses/MIT)

The official TypeScript/JavaScript SDK for the [Graphlit Platform](https://www.graphlit.com) - build AI-powered applications with knowledge retrieval in minutes.

## 🚀 What is Graphlit?

Graphlit is a cloud platform that handles the complex parts of building AI applications:

- **Ingest any content** - PDFs, websites, audio, video, and more
- **Chat with your data** - Using RAG (Retrieval-Augmented Generation)
- **Extract insights** - Summaries, entities, and metadata
- **Build knowledge graphs** - Automatically connect related information

## ✨ What's New in v1.1.0

- **Real-time streaming** - Watch AI responses appear word-by-word
- **Tool calling** - Let AI execute functions and retrieve data
- **Better performance** - Native integration with OpenAI, Anthropic, and Google

## 📋 Table of Contents

- [Quick Start](#quick-start)
- [Basic Examples](#basic-examples)
- [Common Use Cases](#common-use-cases)
- [API Reference](#api-reference)
- [Support](#support)

## Quick Start

Get started in 2 minutes:

```bash
# Install the SDK
npm install graphlit-client

# Set your credentials (get a free account at https://portal.graphlit.dev)
export GRAPHLIT_ORGANIZATION_ID=your_org_id
export GRAPHLIT_ENVIRONMENT_ID=your_env_id
export GRAPHLIT_JWT_SECRET=your_secret
```

```typescript
import { Graphlit, Types } from "graphlit-client";

const client = new Graphlit(); // Uses env vars: GRAPHLIT_ORGANIZATION_ID, GRAPHLIT_ENVIRONMENT_ID, GRAPHLIT_JWT_SECRET

// First, create a specification (or use your project default)
const spec = await client.createSpecification({
  name: "Assistant",
  type: Types.SpecificationTypes.Completion,
  serviceType: Types.ModelServiceTypes.OpenAi,
  openAI: {
    model: Types.OpenAiModels.Gpt4O_128K
  }
});

// Start chatting with AI
await client.streamAgent(
  "Tell me a joke",
  (event) => {
    if (event.type === "message_update") {
      console.log(event.message.message);
    }
  },
  undefined, // conversationId (optional)
  { id: spec.createSpecification.id } // specification
);
```
## Installation

```bash
npm install graphlit-client
```

### Want Real-time Streaming?

Install the LLM SDK for streaming responses:

```bash
# For OpenAI streaming
npm install openai

# For Anthropic streaming
npm install @anthropic-ai/sdk

# For Google streaming
npm install @google/generative-ai
```

## Setting Up

Create a `.env` file in your project:

```env
GRAPHLIT_ORGANIZATION_ID=your_org_id
GRAPHLIT_ENVIRONMENT_ID=your_env_id
GRAPHLIT_JWT_SECRET=your_secret

# Optional: For streaming
OPENAI_API_KEY=your_key
ANTHROPIC_API_KEY=your_key
GOOGLE_API_KEY=your_key
```

## Basic Examples

### 1. Chat with AI

Simple conversation with streaming responses:

```typescript
import { Graphlit, Types } from "graphlit-client";

const client = new Graphlit(); // Uses env vars: GRAPHLIT_ORGANIZATION_ID, GRAPHLIT_ENVIRONMENT_ID, GRAPHLIT_JWT_SECRET

// Create a specification for the AI model
const spec = await client.createSpecification({
  name: "Assistant",
  type: Types.SpecificationTypes.Completion,
  serviceType: Types.ModelServiceTypes.OpenAi,
  openAI: {
    model: Types.OpenAiModels.Gpt4O_128K,
    temperature: 0.7
  }
});

// Chat with streaming
await client.streamAgent(
  "What can you help me with?",
  (event) => {
    if (event.type === "message_update") {
      // Print the AI's response as it streams
      process.stdout.write(event.message.message);
    }
  },
  undefined, // conversationId
  { id: spec.createSpecification.id } // specification
);
```

### 2. Ingest and Query Documents

Upload a PDF and ask questions about it:

```typescript
import { Graphlit, Types } from "graphlit-client";

const client = new Graphlit(); // Uses env vars: GRAPHLIT_ORGANIZATION_ID, GRAPHLIT_ENVIRONMENT_ID, GRAPHLIT_JWT_SECRET

// Create a specification
const spec = await client.createSpecification({
  name: "Document Q&A",
  type: Types.SpecificationTypes.Completion,
  serviceType: Types.ModelServiceTypes.OpenAi,
  openAI: {
    model: Types.OpenAiModels.Gpt4O_128K
  }
});

// Upload a PDF synchronously to ensure it's ready
const content = await client.ingestUri(
  "https://arxiv.org/pdf/1706.03762.pdf", // Attention Is All You Need paper
  "AI Research Paper", // name
  undefined, // id
  true // isSynchronous - waits for processing
);

console.log(`✅ Uploaded: ${content.ingestUri.id}`);

// Wait a moment for content to be fully indexed
await new Promise(resolve => setTimeout(resolve, 5000));

// Create a conversation that filters to this specific content
const conversation = await client.createConversation({
  filter: { contents: [{ id: content.ingestUri.id }] }
});

// Ask questions about the PDF
await client.streamAgent(
  "What are the key innovations in this paper?",
  (event) => {
    if (event.type === "message_update") {
      console.log(event.message.message);
    }
  },
  conversation.createConversation.id, // conversationId with content filter
  { id: spec.createSpecification.id } // specification
);
```

### 3. Web Scraping

Extract content from websites:

```typescript
// Scrape a website (waits for processing to complete)
const webpage = await client.ingestUri(
  "https://en.wikipedia.org/wiki/Artificial_intelligence", // uri
  "AI Wikipedia Page", // name
  undefined, // id
  true // isSynchronous
);

// Wait for content to be indexed
await new Promise(resolve => setTimeout(resolve, 5000));

// Create a conversation filtered to this content
const conversation = await client.createConversation({
  filter: { contents: [{ id: webpage.ingestUri.id }] }
});

// Ask about the specific content
const response = await client.promptAgent(
  "Summarize the key points about AI from this Wikipedia page",
  conversation.createConversation.id, // conversationId with filter
  { id: spec.createSpecification.id } // specification (create one as shown above)
);

console.log(response.message);
```

### 4. Tool Calling

Let AI call functions to get real-time data:

```typescript
import { Graphlit, Types } from "graphlit-client";

const client = new Graphlit(); // Uses env vars: GRAPHLIT_ORGANIZATION_ID, GRAPHLIT_ENVIRONMENT_ID, GRAPHLIT_JWT_SECRET

// Define a weather tool
const weatherTool: Types.ToolDefinitionInput = {
  name: "get_weather",
  description: "Get current weather for a city",
  schema: JSON.stringify({
    type: "object",
    properties: {
      city: { type: "string" }
    },
    required: ["city"]
  })
};

// Tool implementation
const toolHandlers = {
  get_weather: async (args: { city: string }) => {
    // Call your weather API here
    return {
      city: args.city,
      temperature: 72,
      condition: "sunny"
    };
  }
};

// Create a specification for tool calling
const spec = await client.createSpecification({
  name: "Weather Assistant",
  type: Types.SpecificationTypes.Completion,
  serviceType: Types.ModelServiceTypes.OpenAi,
  openAI: {
    model: Types.OpenAiModels.Gpt4O_128K
  }
});

// Chat with tools
await client.streamAgent(
  "What's the weather in San Francisco?",
  (event) => {
    if (event.type === "tool_update" && event.status === "completed") {
      console.log(`🔧 Called ${event.toolCall.name}`);
    } else if (event.type === "message_update") {
      console.log(event.message.message);
    }
  },
  undefined, // conversationId
  { id: spec.createSpecification.id }, // specification
  [weatherTool], // tools
  toolHandlers // handlers
);
```
## Common Use Cases

### Build a Knowledge Base Assistant

Create an AI that answers questions from your documents:

```typescript
import { Graphlit, Types } from "graphlit-client";

class KnowledgeAssistant {
  private client: Graphlit;
  private conversationId?: string;
  private specificationId?: string;
  private contentIds: string[] = [];

  constructor() {
    this.client = new Graphlit(); // Uses env vars: GRAPHLIT_ORGANIZATION_ID, GRAPHLIT_ENVIRONMENT_ID, GRAPHLIT_JWT_SECRET
  }

  async initialize() {
    // Create a specification for the assistant
    const spec = await this.client.createSpecification({
      name: "Knowledge Assistant",
      type: Types.SpecificationTypes.Completion,
      serviceType: Types.ModelServiceTypes.OpenAi,
      openAI: {
        model: Types.OpenAiModels.Gpt4O_128K,
        temperature: 0.7
      }
    });
    this.specificationId = spec.createSpecification?.id;
  }

  async uploadDocuments(urls: string[]) {
    console.log("📚 Uploading documents...");

    for (const url of urls) {
      const content = await this.client.ingestUri(
        url, // uri
        url.split('/').pop() || "Document", // name
        undefined, // id
        true // isSynchronous - wait for processing
      );
      this.contentIds.push(content.ingestUri.id);
    }

    console.log("✅ Documents uploaded!");

    // Wait for content to be indexed
    await new Promise(resolve => setTimeout(resolve, 5000));
  }

  async ask(question: string) {
    // Create a conversation with a content filter if one doesn't exist yet
    if (!this.conversationId && this.contentIds.length > 0) {
      const conversation = await this.client.createConversation({
        filter: { contents: this.contentIds.map(id => ({ id })) }
      });
      this.conversationId = conversation.createConversation?.id;
    }

    await this.client.streamAgent(
      question,
      (event) => {
        if (event.type === "conversation_started" && !this.conversationId) {
          this.conversationId = event.conversationId;
        } else if (event.type === "message_update") {
          process.stdout.write(event.message.message);
        }
      },
      this.conversationId, // Maintains conversation context
      { id: this.specificationId! } // specification
    );
  }
}

// Usage
const assistant = new KnowledgeAssistant();
await assistant.initialize();

// Upload your documents
await assistant.uploadDocuments([
  "https://arxiv.org/pdf/2103.15348.pdf",
  "https://arxiv.org/pdf/1706.03762.pdf"
]);

// Ask questions
await assistant.ask("What are these papers about?");
await assistant.ask("How do they relate to each other?");
```

### Extract Data from Documents

Extract specific information from uploaded content:

```typescript
// Upload a document synchronously
const document = await client.ingestUri(
  "https://example.com/document.pdf", // uri
  "Document #12345", // name
  undefined, // id
  true // isSynchronous
);

// Wait for content to be indexed
await new Promise(resolve => setTimeout(resolve, 5000));

// Extract specific data
const extraction = await client.extractContents(
  "Extract the key information from this document",
  undefined, // tools
  undefined, // specification
  { contents: [{ id: document.ingestUri.id }] } // filter
);

console.log("Extracted data:", extraction.extractContents);
```

### Summarize Multiple Documents

Create summaries across multiple files:

```typescript
// Upload multiple documents synchronously
const ids: string[] = [];

for (const url of documentUrls) {
  const content = await client.ingestUri(
    url, // uri
    url.split('/').pop() || "Document", // name
    undefined, // id
    true // isSynchronous
  );
  ids.push(content.ingestUri.id);
}

// Generate a summary across all documents
const summary = await client.summarizeContents(
  [{
    type: Types.SummarizationTypes.Custom,
    prompt: "Create an executive summary of these documents"
  }], // summarizations
  { contents: ids.map(id => ({ id })) } // filter
);

console.log("Summary:", summary.summarizeContents);
```

### Processing Options

```typescript
// Option 1: Synchronous processing (simpler)
const content = await client.ingestUri(
  "https://example.com/large-document.pdf", // uri
  undefined, // name
  undefined, // id
  true // isSynchronous
);
console.log("✅ Content ready!");

// Option 2: Asynchronous processing (for large files)
const asyncContent = await client.ingestUri(
  "https://example.com/very-large-video.mp4" // uri
  // isSynchronous defaults to false
);

// Check status later
let isReady = false;
while (!isReady) {
  const status = await client.isContentDone(asyncContent.ingestUri.id);
  isReady = status.isContentDone?.result || false;

  if (!isReady) {
    console.log("⏳ Still processing...");
    await new Promise(resolve => setTimeout(resolve, 2000));
  }
}
console.log("✅ Content ready!");
```
## Advanced Workflows

### Creating Workflows for Content Processing

Workflows automatically process content when ingested:

```typescript
import { Graphlit, Types } from "graphlit-client";

const client = new Graphlit(); // Uses env vars: GRAPHLIT_ORGANIZATION_ID, GRAPHLIT_ENVIRONMENT_ID, GRAPHLIT_JWT_SECRET

// Create specifications for AI models
const summarizationSpec = await client.createSpecification({
  name: "Summarizer",
  type: Types.SpecificationTypes.Summarization,
  serviceType: Types.ModelServiceTypes.OpenAi,
  openAI: {
    model: Types.OpenAiModels.Gpt4O_128K
  }
});

// Create a workflow that summarizes all content
const workflow = await client.createWorkflow({
  name: "Document Intelligence",
  preparation: {
    summarizations: [{
      type: Types.SummarizationTypes.Summary,
      specification: { id: summarizationSpec.createSpecification.id }
    }]
  }
});

// Set workflow as default for project
await client.updateProject({
  workflow: { id: workflow.createWorkflow.id }
});

// Now all content will be automatically summarized
const content = await client.ingestUri(
  "https://example.com/report.pdf" // uri
);
```

### Creating Specifications

Specifications configure how AI models behave:

```typescript
import { Graphlit, Types } from "graphlit-client";

// Create a conversational AI specification
const conversationSpec = await client.createSpecification({
  name: "Customer Support AI",
  type: Types.SpecificationTypes.Completion,
  serviceType: Types.ModelServiceTypes.OpenAi,
  systemPrompt: "You are a helpful customer support assistant.",
  openAI: {
    model: Types.OpenAiModels.Gpt4O_128K,
    temperature: 0.7,
    completionTokenLimit: 2000
  }
});

// Use the specification in conversations
await client.streamAgent(
  "How do I reset my password?",
  (event) => {
    if (event.type === "message_update") {
      console.log(event.message.message);
    }
  },
  undefined,
  { id: conversationSpec.createSpecification.id }
);
```
## API Reference

### Client Methods

```typescript
const client = new Graphlit(organizationId?, environmentId?, jwtSecret?);
```
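If environment variables are inconvenient, credentials can also be passed explicitly. This is a minimal sketch; the parameter order follows the signature above, and the values are placeholders:

```typescript
// Placeholder values - replace with the IDs and secret from your
// Graphlit project's API settings.
const client = new Graphlit(
  "your_org_id",   // organizationId
  "your_env_id",   // environmentId
  "your_secret"    // jwtSecret
);
```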
#### Content Operations

- `ingestUri(uri, name?, id?, isSynchronous?, ...)` - Ingest content from URL
- `ingestText(text, name?, textType?, ...)` - Ingest text content directly
- `queryContents(filter?)` - Search and query content
- `getContent(id)` - Get content by ID
- `deleteContent(id)` - Delete content
- `extractContents(prompt, tools, specification?, filter?)` - Extract data from content
- `summarizeContents(summarizations, filter?)` - Summarize content
- `isContentDone(id)` - Check if content processing is complete
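`ingestText` and `queryContents` are listed above but not exercised in the examples. A minimal sketch might look like the following; the optional name argument, the filter field, and the response shapes shown here are assumptions based on the method list and the patterns used elsewhere in this README, not documented behavior:

```typescript
// Ingest a short text snippet directly (only the text argument is required
// according to the method list above).
const note = await client.ingestText(
  "Graphlit's TypeScript SDK now supports streaming agents and tool calling.",
  "Release note" // name
);
console.log(note.ingestText?.id); // assumed to mirror the content.ingestUri.id pattern

// Query content back; the name-based filter shape is an assumption.
const results = await client.queryContents({
  name: "Release note"
});
console.log(results); // inspect the returned content metadata
```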
#### Conversation Operations

- `createConversation(input?)` - Create a new conversation
- `streamAgent(prompt, handler, ...)` - Stream AI responses
- `promptAgent(prompt, ...)` - Get AI response without streaming
- `deleteConversation(id)` - Delete conversation

#### Specification Operations

- `createSpecification(input)` - Create AI model configuration
- `querySpecifications(filter?)` - List specifications
- `deleteSpecification(id)` - Delete specification

#### Workflow Operations

- `createWorkflow(input)` - Create content processing workflow
- `queryWorkflows(filter?)` - List workflows
- `updateProject(input)` - Update project settings
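A short cleanup sketch using the delete operations listed above, reusing the `content`, `conversation`, and `spec` objects from the earlier examples (the `id` access paths follow the patterns shown there):

```typescript
// Remove the resources created while experimenting.
await client.deleteConversation(conversation.createConversation.id);
await client.deleteSpecification(spec.createSpecification.id);
await client.deleteContent(content.ingestUri.id);
```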
### Event Types

```typescript
type AgentStreamEvent =
  | { type: "conversation_started"; conversationId: string }
  | { type: "message_update"; message: { message: string } }
  | { type: "tool_update"; toolCall: any; status: string }
  | { type: "conversation_completed"; message: { message: string } }
  | { type: "error"; error: { message: string; recoverable: boolean } }
```
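The examples above branch on one or two event types; a handler that covers the whole `AgentStreamEvent` union shown above could look like this sketch (field access follows the union as declared, and `spec` comes from the earlier examples):

```typescript
import type { AgentStreamEvent } from "graphlit-client";

const handler = (event: AgentStreamEvent) => {
  switch (event.type) {
    case "conversation_started":
      console.log(`Started conversation ${event.conversationId}`);
      break;
    case "message_update":
      process.stdout.write(event.message.message);
      break;
    case "tool_update":
      console.log(`Tool ${event.toolCall?.name}: ${event.status}`);
      break;
    case "conversation_completed":
      console.log(`\nDone: ${event.message.message}`);
      break;
    case "error":
      console.error(
        `${event.error.recoverable ? "Recoverable" : "Fatal"} error: ${event.error.message}`
      );
      break;
  }
};

// Pass the handler to streamAgent as in the earlier examples.
await client.streamAgent("Hello!", handler, undefined, { id: spec.createSpecification.id });
```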
## Testing & Examples

All examples in this README are tested and verified. See [`test/readme-simple.test.ts`](test/readme-simple.test.ts) for runnable versions of these examples.

To run the examples yourself:

```bash
# Clone the repository
git clone https://github.com/graphlit/graphlit-client-typescript.git
cd graphlit-client-typescript

# Install dependencies
npm install

# Set up your environment variables
cp .env.example .env
# Edit .env with your Graphlit credentials

# Run the examples
npm test test/readme-simple.test.ts
```

## Support

- 📖 **Documentation**: [https://docs.graphlit.dev/](https://docs.graphlit.dev/)
- 💬 **Discord Community**: [Join our Discord](https://discord.gg/ygFmfjy3Qx)
- 🐛 **Issues**: [GitHub Issues](https://github.com/graphlit/graphlit-client-typescript/issues)
- 📧 **Email**: support@graphlit.com

## License

MIT License - see LICENSE file for details.
package/dist/client.d.ts
CHANGED
In the hunk `@@ -1,11 +1,12 @@`, the declaration file adds `any`-typed runtime re-exports of the Apollo symbols, a type-only import of the Apollo client types, and declares the `client` property on the `Graphlit` class as `ApolloClientType<NormalizedCacheObject> | undefined`. The first 12 lines of the updated file:

```typescript
export declare const ApolloClient: any, InMemoryCache: any, createHttpLink: any, ApolloLink: any;
import type { ApolloClient as ApolloClientType, NormalizedCacheObject } from "@apollo/client";
import * as Types from "./generated/graphql-types.js";
import { AgentOptions, AgentResult, StreamAgentOptions, ToolHandler } from "./types/agent.js";
import { AgentStreamEvent } from "./types/ui-events.js";
export type { AgentOptions, AgentResult, StreamAgentOptions, ToolCallResult, UsageInfo, AgentError, } from "./types/agent.js";
export type { AgentStreamEvent } from "./types/ui-events.js";
declare class Graphlit {
    client: ApolloClientType<NormalizedCacheObject> | undefined;
    token: string | undefined;
    private apiUri;
    private organizationId;
```
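A minimal consumer-side sketch of what the typed `client` property allows (illustrative only; it assumes the SDK has already initialized its Apollo client, which may not happen until a request is made):

```typescript
import { Graphlit } from "graphlit-client";
import type { ApolloClient, NormalizedCacheObject } from "@apollo/client";

const graphlit = new Graphlit();

// graphlit.client is declared as ApolloClient<NormalizedCacheObject> | undefined,
// so narrow it before touching the underlying Apollo client.
const apollo: ApolloClient<NormalizedCacheObject> | undefined = graphlit.client;
if (apollo) {
  // For example, inspect the normalized cache the SDK maintains.
  console.log(Object.keys(apollo.cache.extract()));
}
```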
package/dist/client.js
CHANGED
The Apollo Client import moves from a static ESM import of `@apollo/client/core` to a runtime `createRequire`-based require of `@apollo/client`, with the Apollo symbols re-exported; the later hunks touch the truncated-JSON repair logic and the ApolloError handling (several removed lines appear truncated in this diff view):

```diff
@@ -1,9 +1,15 @@
+// Runtime import via createRequire
+import { createRequire } from "node:module";
+const require = createRequire(import.meta.url);
+const apollo = require("@apollo/client");
+// Runtime value import
+const { ApolloError } = require("@apollo/client");
+export const { ApolloClient, InMemoryCache, createHttpLink, ApolloLink } = apollo;
 import * as jwt from "jsonwebtoken";
-import { ApolloClient, InMemoryCache, createHttpLink, ApolloLink, ApolloError, } from "@apollo/client/core";
 import * as Types from "./generated/graphql-types.js";
 import * as Documents from "./generated/graphql-documents.js";
 import * as dotenv from "dotenv";
-import { getServiceType
+import { getServiceType } from "./model-mapping.js";
 import { UIEventAdapter } from "./streaming/ui-event-adapter.js";
 import { formatMessagesForOpenAI, formatMessagesForAnthropic, formatMessagesForGoogle, } from "./streaming/llm-formatters.js";
 import { streamWithOpenAI, streamWithAnthropic, streamWithGoogle, } from "./streaming/providers.js";
@@ -1724,7 +1730,8 @@ class Graphlit {
 // Check for common truncation patterns
 const lastChars = toolCall.arguments.slice(-20);
 let isTruncated = false;
-if (!toolCall.arguments.includes(
+if (!toolCall.arguments.includes("}") ||
+    !lastChars.includes("}")) {
 console.error(`Possible truncation detected - arguments don't end with '}': ...${lastChars}`);
 isTruncated = true;
 }
@@ -1737,7 +1744,8 @@ class Graphlit {
 const missingBraces = openBraces - closeBraces;
 if (missingBraces > 0) {
 // Check if we're mid-value (ends with number or boolean)
-if (fixedJson.match(/:\s*\d+$/) ||
+if (fixedJson.match(/:\s*\d+$/) ||
+    fixedJson.match(/:\s*(true|false)$/)) {
 // Complete the current property and close
 fixedJson += ', "content": ""'; // Add empty content field
 }
@@ -1747,7 +1755,8 @@ class Graphlit {
 fixedJson += ', "content": ""';
 }
 // Add missing closing quote if the string ends with an unfinished string
-else if (fixedJson.endsWith('"') === false &&
+else if (fixedJson.endsWith('"') === false &&
+    fixedJson.includes('"')) {
 const lastQuoteIndex = fixedJson.lastIndexOf('"');
 const afterLastQuote = fixedJson.slice(lastQuoteIndex + 1);
 if (!afterLastQuote.includes('"')) {
@@ -1755,7 +1764,7 @@ class Graphlit {
 }
 }
 // Add missing closing braces
-fixedJson +=
+fixedJson += "}".repeat(missingBraces);
 console.log(`Attempting to fix truncated JSON by adding ${missingBraces} closing brace(s):`);
 console.log(fixedJson);
 try {
@@ -1771,7 +1780,7 @@ class Graphlit {
 // If we couldn't parse or fix the JSON, log details and continue
 if (!args) {
 // Log position mentioned in error if available
-const errorMsg = parseError instanceof Error ? parseError.message :
+const errorMsg = parseError instanceof Error ? parseError.message : "";
 const posMatch = errorMsg.match(/position (\d+)/);
 if (posMatch) {
 const pos = parseInt(posMatch[1]);
@@ -1781,7 +1790,7 @@ class Graphlit {
 // Update UI with error - use StreamEvent error type
 uiAdapter.handleEvent({
 type: "error",
-error: `Tool ${toolCall.name} failed: Invalid JSON arguments: ${parseError instanceof Error ? parseError.message :
+error: `Tool ${toolCall.name} failed: Invalid JSON arguments: ${parseError instanceof Error ? parseError.message : "Unknown error"}`,
 });
 continue;
 }
@@ -2058,7 +2067,8 @@ class Graphlit {
 return result.data;
 }
 catch (error) {
-if (error instanceof ApolloError &&
+if (error instanceof ApolloError &&
+    error.graphQLErrors.length > 0) {
 const errorMessage = error.graphQLErrors
 .map((err) => this.prettyPrintGraphQLError(err))
 .join("\n");
@@ -2094,7 +2104,8 @@ class Graphlit {
 return result.data;
 }
 catch (error) {
-if (error instanceof ApolloError &&
+if (error instanceof ApolloError &&
+    error.graphQLErrors.length > 0) {
 const errorMessage = error.graphQLErrors
 .map((err) => this.prettyPrintGraphQLError(err))
 .join("\n");
```
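A standalone sketch of the interop pattern used in the first hunk above (independent of Graphlit's code; it only assumes `@apollo/client` is installed and the file runs as an ES module under Node.js):

```typescript
// interop-sketch.ts - illustrative only
import { createRequire } from "node:module";

// createRequire builds a CommonJS-style require bound to this module's URL,
// which lets an ES module load a CJS entry point at runtime.
const require = createRequire(import.meta.url);

// Load the CommonJS build of a dual-mode package.
const apollo = require("@apollo/client");

// Re-export selected values so downstream ESM consumers can import them
// from this module instead of reaching into the CJS package directly.
export const { InMemoryCache } = apollo;

const cache = new InMemoryCache();
console.log(typeof cache.extract()); // "object"
```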
package/package.json
CHANGED
```diff
@@ -1,6 +1,6 @@
 {
   "name": "graphlit-client",
-  "version": "1.0.20250610010",
+  "version": "1.0.20250611002",
   "description": "Graphlit API Client for TypeScript",
   "main": "dist/client.js",
   "types": "dist/client.d.ts",
@@ -63,6 +63,7 @@
     "@types/node": "^20.0.0",
     "@vitest/coverage-v8": "^1.0.0",
     "dotenv": "^16.5.0",
+    "react": "^19.1.0",
     "typescript": "^5.8.2",
     "vitest": "^1.0.0"
   },
```