lumnisai 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.md +21 -0
- package/README.md +479 -0
- package/dist/index.cjs +1110 -0
- package/dist/index.d.cts +1065 -0
- package/dist/index.d.mts +1064 -0
- package/dist/index.d.ts +1065 -0
- package/dist/index.mjs +1098 -0
- package/package.json +67 -0
package/LICENSE.md
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025-PRESENT Anthony Fu <https://github.com/antfu>
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,479 @@
|
|
|
1
|
+
# Lumnis AI Node.js SDK
|
|
2
|
+
|
|
3
|
+
[![npm version][npm-version-src]][npm-version-href]
|
|
4
|
+
[![npm downloads][npm-downloads-src]][npm-downloads-href]
|
|
5
|
+
[![bundle][bundle-src]][bundle-href]
|
|
6
|
+
[![JSDocs][jsdocs-src]][jsdocs-href]
|
|
7
|
+
[![License][license-src]][license-href]
|
|
8
|
+
|
|
9
|
+
Official Node.js/TypeScript SDK for the Lumnis AI API. Build AI-powered applications with ease.
|
|
10
|
+
|
|
11
|
+
## Features
|
|
12
|
+
|
|
13
|
+
- 🚀 **Full API Coverage** - All 60+ endpoints across 9 resources
|
|
14
|
+
- 📦 **TypeScript First** - Complete type safety and autocompletion
|
|
15
|
+
- ⚡ **Modern Architecture** - Built with ES modules and async/await
|
|
16
|
+
- 📁 **File Management** - Upload, search, and manage files with semantic search
|
|
17
|
+
- 🔄 **Automatic Retries** - Smart retry logic with exponential backoff
|
|
18
|
+
- 🎯 **Idempotent Requests** - Built-in idempotency for safe retries
|
|
19
|
+
- 📊 **Response Polling** - Easy helpers for async response handling
|
|
20
|
+
- 🔐 **Secure by Default** - API key authentication with secure storage
|
|
21
|
+
- 🔌 **MCP Integration** - Full support for Model Context Protocol servers
|
|
22
|
+
- 🎨 **Advanced Agent Config** - Granular control over agent behavior
|
|
23
|
+
|
|
24
|
+
## Installation
|
|
25
|
+
|
|
26
|
+
```bash
|
|
27
|
+
npm install lumnisai
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
```bash
|
|
31
|
+
yarn add lumnisai
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
```bash
|
|
35
|
+
pnpm add lumnisai
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
## Quick Start
|
|
39
|
+
|
|
40
|
+
```typescript
|
|
41
|
+
import LumnisAI from 'lumnisai'
|
|
42
|
+
|
|
43
|
+
// Initialize the client
|
|
44
|
+
const client = new LumnisAI({
|
|
45
|
+
apiKey: process.env.LUMNISAI_API_KEY!
|
|
46
|
+
})
|
|
47
|
+
|
|
48
|
+
// Or use named import
|
|
49
|
+
// import { LumnisClient } from 'lumnisai'
|
|
50
|
+
// const client = new LumnisClient({ apiKey: '...' })
|
|
51
|
+
|
|
52
|
+
// Or use environment variables (no options needed)
|
|
53
|
+
// Set LUMNISAI_API_KEY and optionally LUMNISAI_TENANT_ID
|
|
54
|
+
// const client = new LumnisAI()
|
|
55
|
+
|
|
56
|
+
// Create a simple response
|
|
57
|
+
const response = await client.createResponse('What is the meaning of life?')
|
|
58
|
+
console.log(response.outputText)
|
|
59
|
+
|
|
60
|
+
// Create and wait for response completion
|
|
61
|
+
const completedResponse = await client.createResponseAndWait(
|
|
62
|
+
'Explain quantum computing',
|
|
63
|
+
{
|
|
64
|
+
agentEffort: 'high',
|
|
65
|
+
responseFormat: {
|
|
66
|
+
type: 'object',
|
|
67
|
+
properties: {
|
|
68
|
+
explanation: { type: 'string' },
|
|
69
|
+
keyTerms: { type: 'array', items: { type: 'string' } }
|
|
70
|
+
}
|
|
71
|
+
}
|
|
72
|
+
}
|
|
73
|
+
)
|
|
74
|
+
console.log(completedResponse.structuredResponse)
|
|
75
|
+
```
|
|
76
|
+
|
|
77
|
+
## Core Features
|
|
78
|
+
|
|
79
|
+
### Creating AI Responses
|
|
80
|
+
|
|
81
|
+
```typescript
|
|
82
|
+
// Simple message
|
|
83
|
+
const response = await client.responses.create({
|
|
84
|
+
messages: [{ role: 'user', content: 'Hello!' }]
|
|
85
|
+
})
|
|
86
|
+
|
|
87
|
+
// With conversation history
|
|
88
|
+
const response = await client.responses.create({
|
|
89
|
+
threadId: 'existing-thread-id',
|
|
90
|
+
messages: [
|
|
91
|
+
{ role: 'system', content: 'You are a helpful assistant' },
|
|
92
|
+
{ role: 'user', content: 'What can you help me with?' }
|
|
93
|
+
],
|
|
94
|
+
agentEffort: 'medium',
|
|
95
|
+
costCapUsd: 0.50
|
|
96
|
+
})
|
|
97
|
+
|
|
98
|
+
// With structured output
|
|
99
|
+
const response = await client.responses.create({
|
|
100
|
+
messages: [{ role: 'user', content: 'List 5 programming languages' }],
|
|
101
|
+
responseFormat: {
|
|
102
|
+
type: 'object',
|
|
103
|
+
properties: {
|
|
104
|
+
languages: {
|
|
105
|
+
type: 'array',
|
|
106
|
+
items: { type: 'string' }
|
|
107
|
+
}
|
|
108
|
+
}
|
|
109
|
+
}
|
|
110
|
+
})
|
|
111
|
+
|
|
112
|
+
// With advanced agent configuration
|
|
113
|
+
const response = await client.responses.create({
|
|
114
|
+
messages: [{ role: 'user', content: 'Analyze this data' }],
|
|
115
|
+
agentConfig: {
|
|
116
|
+
planStrategy: 'llm_io',
|
|
117
|
+
plannerModelType: 'SMART_MODEL',
|
|
118
|
+
coordinatorModelType: 'REASONING_MODEL',
|
|
119
|
+
orchestratorModelType: 'SMART_MODEL',
|
|
120
|
+
// Model name overrides
|
|
121
|
+
plannerModelName: 'openai:gpt-4o',
|
|
122
|
+
coordinatorModelName: 'anthropic:claude-3-7-sonnet-20250219',
|
|
123
|
+
// Feature flags
|
|
124
|
+
useCognitiveTools: true,
|
|
125
|
+
enableTaskValidation: true,
|
|
126
|
+
generateComprehensiveOutput: false
|
|
127
|
+
}
|
|
128
|
+
})
|
|
129
|
+
|
|
130
|
+
// List responses with filters
|
|
131
|
+
const responses = await client.responses.list({
|
|
132
|
+
userId: 'user@example.com',
|
|
133
|
+
status: 'succeeded',
|
|
134
|
+
startDate: '2025-01-01',
|
|
135
|
+
endDate: '2025-01-31',
|
|
136
|
+
limit: 50,
|
|
137
|
+
offset: 0
|
|
138
|
+
})
|
|
139
|
+
```
|
|
140
|
+
|
|
141
|
+
### Polling for Response Completion
|
|
142
|
+
|
|
143
|
+
```typescript
|
|
144
|
+
// Manual polling
|
|
145
|
+
let response = await client.responses.create({ messages })
|
|
146
|
+
while (response.status === 'in_progress') {
|
|
147
|
+
await new Promise(resolve => setTimeout(resolve, 1000))
|
|
148
|
+
response = await client.responses.get(response.responseId)
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
// Using the helper method
|
|
152
|
+
const completedResponse = await client.createResponseAndWait(
|
|
153
|
+
messages,
|
|
154
|
+
{ pollIntervalMs: 2000, maxWaitMs: 60000 }
|
|
155
|
+
)
|
|
156
|
+
|
|
157
|
+
// Long polling
|
|
158
|
+
const response = await client.responses.get(responseId, { wait: 30 })
|
|
159
|
+
```
|
|
160
|
+
|
|
161
|
+
### Managing Threads
|
|
162
|
+
|
|
163
|
+
```typescript
|
|
164
|
+
// List threads
|
|
165
|
+
const threads = await client.threads.list({
|
|
166
|
+
userId: 'user@example.com',
|
|
167
|
+
limit: 20
|
|
168
|
+
})
|
|
169
|
+
|
|
170
|
+
// Get thread with responses
|
|
171
|
+
const thread = await client.threads.get(threadId)
|
|
172
|
+
const responses = await client.threads.getResponses(threadId)
|
|
173
|
+
|
|
174
|
+
// Update thread title
|
|
175
|
+
await client.threads.update(threadId, {
|
|
176
|
+
title: 'Quantum Physics Discussion'
|
|
177
|
+
})
|
|
178
|
+
|
|
179
|
+
// Delete thread
|
|
180
|
+
await client.threads.delete(threadId)
|
|
181
|
+
```
|
|
182
|
+
|
|
183
|
+
### User Management
|
|
184
|
+
|
|
185
|
+
```typescript
|
|
186
|
+
// Create or get user
|
|
187
|
+
const user = await client.users.create({
|
|
188
|
+
email: 'user@example.com',
|
|
189
|
+
firstName: 'John',
|
|
190
|
+
lastName: 'Doe'
|
|
191
|
+
})
|
|
192
|
+
|
|
193
|
+
// List users with pagination
|
|
194
|
+
const users = await client.users.list({
|
|
195
|
+
page: 1,
|
|
196
|
+
pageSize: 50
|
|
197
|
+
})
|
|
198
|
+
|
|
199
|
+
// Get user by email or ID
|
|
200
|
+
const user = await client.users.get('user@example.com')
|
|
201
|
+
const user = await client.users.get('550e8400-e29b-41d4-a716-446655440000')
|
|
202
|
+
|
|
203
|
+
// Get user's responses and threads
|
|
204
|
+
const userResponses = await client.users.getResponses('user@example.com')
|
|
205
|
+
const userThreads = await client.users.getThreads('user@example.com')
|
|
206
|
+
```
|
|
207
|
+
|
|
208
|
+
### External API Keys (BYO Keys)
|
|
209
|
+
|
|
210
|
+
```typescript
|
|
211
|
+
// Store an external API key
|
|
212
|
+
await client.externalApiKeys.store({
|
|
213
|
+
provider: 'OPENAI_API_KEY',
|
|
214
|
+
apiKey: 'sk-...'
|
|
215
|
+
})
|
|
216
|
+
|
|
217
|
+
// List stored keys (metadata only)
|
|
218
|
+
const keys = await client.externalApiKeys.list()
|
|
219
|
+
|
|
220
|
+
// Set API key mode
|
|
221
|
+
await client.externalApiKeys.updateMode({ mode: 'byo_keys' })
|
|
222
|
+
```
|
|
223
|
+
|
|
224
|
+
### Model Preferences
|
|
225
|
+
|
|
226
|
+
```typescript
|
|
227
|
+
// Get current preferences
|
|
228
|
+
const prefs = await client.modelPreferences.get()
|
|
229
|
+
|
|
230
|
+
// Update specific model type
|
|
231
|
+
await client.modelPreferences.update('SMART_MODEL', {
|
|
232
|
+
modelType: 'SMART_MODEL',
|
|
233
|
+
provider: 'anthropic',
|
|
234
|
+
modelName: 'claude-3-opus'
|
|
235
|
+
})
|
|
236
|
+
|
|
237
|
+
// Bulk update
|
|
238
|
+
await client.modelPreferences.updateBulk({
|
|
239
|
+
preferences: {
|
|
240
|
+
SMART_MODEL: { provider: 'anthropic', modelName: 'claude-3-opus' },
|
|
241
|
+
FAST_MODEL: { provider: 'openai', modelName: 'gpt-4o-mini' }
|
|
242
|
+
}
|
|
243
|
+
})
|
|
244
|
+
|
|
245
|
+
// Check availability
|
|
246
|
+
const availability = await client.modelPreferences.checkAvailability([
|
|
247
|
+
{ modelType: 'SMART_MODEL', provider: 'openai', modelName: 'gpt-4o' }
|
|
248
|
+
])
|
|
249
|
+
```
|
|
250
|
+
|
|
251
|
+
### Integrations
|
|
252
|
+
|
|
253
|
+
```typescript
|
|
254
|
+
// Initiate OAuth connection
|
|
255
|
+
const { redirectUrl } = await client.integrations.initiateConnection({
|
|
256
|
+
userId: 'user@example.com',
|
|
257
|
+
appName: 'GITHUB',
|
|
258
|
+
redirectUrl: 'https://myapp.com/callback'
|
|
259
|
+
})
|
|
260
|
+
|
|
261
|
+
// Check connection status
|
|
262
|
+
const status = await client.integrations.getConnectionStatus(
|
|
263
|
+
'user@example.com',
|
|
264
|
+
'GITHUB'
|
|
265
|
+
)
|
|
266
|
+
|
|
267
|
+
// Get available tools
|
|
268
|
+
const { tools } = await client.integrations.getTools({
|
|
269
|
+
userId: 'user@example.com',
|
|
270
|
+
appFilter: ['GITHUB', 'SLACK']
|
|
271
|
+
})
|
|
272
|
+
|
|
273
|
+
// Disconnect app
|
|
274
|
+
await client.integrations.disconnect({
|
|
275
|
+
userId: 'user@example.com',
|
|
276
|
+
appName: 'GITHUB'
|
|
277
|
+
})
|
|
278
|
+
```
|
|
279
|
+
|
|
280
|
+
### MCP Servers
|
|
281
|
+
|
|
282
|
+
```typescript
|
|
283
|
+
// Test configuration before saving
|
|
284
|
+
const testResult = await client.mcpServers.testConfig({
|
|
285
|
+
transport: 'stdio',
|
|
286
|
+
command: 'python',
|
|
287
|
+
args: ['mcp_server.py'],
|
|
288
|
+
env: { API_KEY: 'secret' }
|
|
289
|
+
})
|
|
290
|
+
|
|
291
|
+
// Create MCP server configuration
|
|
292
|
+
const server = await client.mcpServers.create({
|
|
293
|
+
name: 'github-tools',
|
|
294
|
+
description: 'GitHub API tools',
|
|
295
|
+
transport: 'streamable_http',
|
|
296
|
+
scope: 'tenant',
|
|
297
|
+
url: 'https://github-mcp.example.com/api',
|
|
298
|
+
headers: {
|
|
299
|
+
Authorization: 'Bearer token'
|
|
300
|
+
}
|
|
301
|
+
})
|
|
302
|
+
|
|
303
|
+
// List servers
|
|
304
|
+
const servers = await client.mcpServers.list({
|
|
305
|
+
scope: 'all',
|
|
306
|
+
isActive: true
|
|
307
|
+
})
|
|
308
|
+
|
|
309
|
+
// Test existing server connection
|
|
310
|
+
const connectionTest = await client.mcpServers.testConnection(server.id)
|
|
311
|
+
```
|
|
312
|
+
|
|
313
|
+
### File Management
|
|
314
|
+
|
|
315
|
+
```typescript
|
|
316
|
+
// Upload a file
|
|
317
|
+
const uploadResult = await client.files.upload(file, {
|
|
318
|
+
scope: 'user',
|
|
319
|
+
userId: 'user@example.com',
|
|
320
|
+
tags: 'documentation,important',
|
|
321
|
+
duplicateHandling: 'suffix'
|
|
322
|
+
})
|
|
323
|
+
|
|
324
|
+
// Upload multiple files
|
|
325
|
+
const bulkResult = await client.files.bulkUpload([file1, file2, file3], {
|
|
326
|
+
scope: 'tenant',
|
|
327
|
+
tags: 'batch-upload'
|
|
328
|
+
})
|
|
329
|
+
|
|
330
|
+
// List files with filters
|
|
331
|
+
const files = await client.files.list({
|
|
332
|
+
scope: 'tenant',
|
|
333
|
+
fileType: 'pdf',
|
|
334
|
+
status: 'completed',
|
|
335
|
+
tags: 'important',
|
|
336
|
+
page: 1,
|
|
337
|
+
limit: 20
|
|
338
|
+
})
|
|
339
|
+
|
|
340
|
+
// Semantic search across files
|
|
341
|
+
const searchResults = await client.files.search({
|
|
342
|
+
query: 'machine learning algorithms',
|
|
343
|
+
limit: 10,
|
|
344
|
+
minScore: 0.7,
|
|
345
|
+
fileTypes: ['pdf', 'md'],
|
|
346
|
+
userId: 'user@example.com'
|
|
347
|
+
})
|
|
348
|
+
|
|
349
|
+
// Get file content
|
|
350
|
+
const content = await client.files.getContent(fileId, {
|
|
351
|
+
contentType: 'text',
|
|
352
|
+
startLine: 1,
|
|
353
|
+
endLine: 100,
|
|
354
|
+
userId: 'user@example.com'
|
|
355
|
+
})
|
|
356
|
+
|
|
357
|
+
// Check processing status
|
|
358
|
+
const status = await client.files.getStatus(fileId)
|
|
359
|
+
console.log(`Progress: ${status.progressPercentage}%`)
|
|
360
|
+
|
|
361
|
+
// Delete files
|
|
362
|
+
await client.files.delete(fileId, { hardDelete: true })
|
|
363
|
+
|
|
364
|
+
// Bulk delete
|
|
365
|
+
await client.files.bulkDelete({
|
|
366
|
+
fileIds: ['id1', 'id2', 'id3']
|
|
367
|
+
}, { hardDelete: true })
|
|
368
|
+
```
|
|
369
|
+
|
|
370
|
+
## Error Handling
|
|
371
|
+
|
|
372
|
+
The SDK provides typed error classes for different scenarios:
|
|
373
|
+
|
|
374
|
+
```typescript
|
|
375
|
+
import {
|
|
376
|
+
AuthenticationError,
|
|
377
|
+
NotFoundError,
|
|
378
|
+
RateLimitError,
|
|
379
|
+
ValidationError
|
|
380
|
+
} from 'lumnisai'
|
|
381
|
+
|
|
382
|
+
try {
|
|
383
|
+
await client.responses.create({ messages })
|
|
384
|
+
}
|
|
385
|
+
catch (error) {
|
|
386
|
+
if (error instanceof AuthenticationError) {
|
|
387
|
+
console.error('Invalid API key')
|
|
388
|
+
}
|
|
389
|
+
else if (error instanceof RateLimitError) {
|
|
390
|
+
console.error(`Rate limited. Retry after ${error.retryAfter} seconds`)
|
|
391
|
+
}
|
|
392
|
+
else if (error instanceof ValidationError) {
|
|
393
|
+
console.error('Invalid request:', error.details)
|
|
394
|
+
}
|
|
395
|
+
}
|
|
396
|
+
```
|
|
397
|
+
|
|
398
|
+
## Advanced Configuration
|
|
399
|
+
|
|
400
|
+
```typescript
|
|
401
|
+
const client = new LumnisAI({
|
|
402
|
+
apiKey: process.env.LUMNISAI_API_KEY!,
|
|
403
|
+
baseUrl: 'https://custom.api.url/v1', // Custom API endpoint
|
|
404
|
+
timeoutMs: 60000, // 60 second timeout
|
|
405
|
+
maxRetries: 3 // Retry up to 3 times
|
|
406
|
+
})
|
|
407
|
+
```
|
|
408
|
+
|
|
409
|
+
## Environment Variables
|
|
410
|
+
|
|
411
|
+
The SDK supports configuration via environment variables:
|
|
412
|
+
|
|
413
|
+
- `LUMNISAI_API_KEY` - Your API key (if not passed to constructor)
|
|
414
|
+
- `LUMNISAI_TENANT_ID` - Your tenant ID (optional)
|
|
415
|
+
- `LUMNISAI_BASE_URL` - Custom API base URL (optional)
|
|
416
|
+
|
|
417
|
+
## API Resources
|
|
418
|
+
|
|
419
|
+
The SDK provides access to all Lumnis AI API resources:
|
|
420
|
+
|
|
421
|
+
| Resource | Description | Endpoints |
|
|
422
|
+
|----------|-------------|-----------|
|
|
423
|
+
| `client.responses` | AI response generation and management | 5 |
|
|
424
|
+
| `client.threads` | Conversation thread management | 6 |
|
|
425
|
+
| `client.users` | User management within tenant | 7 |
|
|
426
|
+
| `client.files` | File upload, search, and management | 15 |
|
|
427
|
+
| `client.integrations` | OAuth integrations (GitHub, Slack, etc.) | 10 |
|
|
428
|
+
| `client.mcpServers` | Model Context Protocol server management | 8 |
|
|
429
|
+
| `client.modelPreferences` | Configure preferred AI models | 5 |
|
|
430
|
+
| `client.externalApiKeys` | Manage external API keys (BYO keys) | 6 |
|
|
431
|
+
| `client.tenantInfo` | Read tenant information | 1 |
|
|
432
|
+
|
|
433
|
+
**Total: 63 endpoints** with full TypeScript support.
|
|
434
|
+
|
|
435
|
+
## TypeScript Support
|
|
436
|
+
|
|
437
|
+
The SDK is written in TypeScript and provides comprehensive type definitions:
|
|
438
|
+
|
|
439
|
+
```typescript
|
|
440
|
+
import type {
|
|
441
|
+
AgentConfig,
|
|
442
|
+
AgentEffort,
|
|
443
|
+
FileMetadata,
|
|
444
|
+
FileScope,
|
|
445
|
+
Message,
|
|
446
|
+
ModelType,
|
|
447
|
+
ResponseObject,
|
|
448
|
+
ThreadObject,
|
|
449
|
+
UserResponse
|
|
450
|
+
} from 'lumnisai'
|
|
451
|
+
```
|
|
452
|
+
|
|
453
|
+
## Best Practices
|
|
454
|
+
|
|
455
|
+
1. **Use Idempotency Keys** - The SDK automatically adds idempotency keys to non-GET requests
|
|
456
|
+
2. **Handle Rate Limits** - Implement exponential backoff when receiving 429 errors
|
|
457
|
+
3. **Poll Efficiently** - Use long polling with the `wait` parameter for real-time updates
|
|
458
|
+
4. **Scope to Users** - Use user-specific operations for multi-tenant applications
|
|
459
|
+
5. **Manage Costs** - Set `costCapUsd` to control spending on expensive operations
|
|
460
|
+
6. **Tag Your Files** - Use tags for better file organization and filtering
|
|
461
|
+
7. **Monitor Processing** - Check file processing status with `files.getStatus()`
|
|
462
|
+
8. **Use Semantic Search** - Leverage `files.search()` for powerful content discovery
|
|
463
|
+
|
|
464
|
+
## License
|
|
465
|
+
|
|
466
|
+
[MIT](./LICENSE.md) License © Lumnis AI
|
|
467
|
+
|
|
468
|
+
<!-- Badges -->
|
|
469
|
+
|
|
470
|
+
[npm-version-src]: https://img.shields.io/npm/v/lumnisai?style=flat&colorA=080f12&colorB=1fa669
|
|
471
|
+
[npm-version-href]: https://npmjs.com/package/lumnisai
|
|
472
|
+
[npm-downloads-src]: https://img.shields.io/npm/dm/lumnisai?style=flat&colorA=080f12&colorB=1fa669
|
|
473
|
+
[npm-downloads-href]: https://npmjs.com/package/lumnisai
|
|
474
|
+
[bundle-src]: https://img.shields.io/bundlephobia/minzip/lumnisai?style=flat&colorA=080f12&colorB=1fa669&label=minzip
|
|
475
|
+
[bundle-href]: https://bundlephobia.com/result?p=lumnisai
|
|
476
|
+
[license-src]: https://img.shields.io/github/license/Lumnis-AI/lumnisai-node.svg?style=flat&colorA=080f12&colorB=1fa669
|
|
477
|
+
[license-href]: https://github.com/Lumnis-AI/lumnisai-node/blob/main/LICENSE
|
|
478
|
+
[jsdocs-src]: https://img.shields.io/badge/jsdocs-reference-080f12?style=flat&colorA=080f12&colorB=1fa669
|
|
479
|
+
[jsdocs-href]: https://www.jsdocs.io/package/lumnisai
|