@llamaindex/llama-cloud 1.6.0 → 1.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +55 -0
- package/README.md +61 -357
- package/client.d.mts +6 -3
- package/client.d.mts.map +1 -1
- package/client.d.ts +6 -3
- package/client.d.ts.map +1 -1
- package/client.js +11 -8
- package/client.js.map +1 -1
- package/client.mjs +11 -8
- package/client.mjs.map +1 -1
- package/internal/tslib.js +17 -17
- package/internal/utils/query.d.mts +2 -0
- package/internal/utils/query.d.mts.map +1 -0
- package/internal/utils/query.d.ts +2 -0
- package/internal/utils/query.d.ts.map +1 -0
- package/internal/utils/query.js +10 -0
- package/internal/utils/query.js.map +1 -0
- package/internal/utils/query.mjs +6 -0
- package/internal/utils/query.mjs.map +1 -0
- package/internal/utils.d.mts +1 -0
- package/internal/utils.d.ts +1 -0
- package/internal/utils.js +1 -0
- package/internal/utils.js.map +1 -1
- package/internal/utils.mjs +1 -0
- package/package.json +1 -1
- package/resources/beta/sheets.d.mts +6 -0
- package/resources/beta/sheets.d.mts.map +1 -1
- package/resources/beta/sheets.d.ts +6 -0
- package/resources/beta/sheets.d.ts.map +1 -1
- package/resources/beta/split.d.mts +5 -3
- package/resources/beta/split.d.mts.map +1 -1
- package/resources/beta/split.d.ts +5 -3
- package/resources/beta/split.d.ts.map +1 -1
- package/resources/classifier/jobs.d.mts +8 -0
- package/resources/classifier/jobs.d.mts.map +1 -1
- package/resources/classifier/jobs.d.ts +8 -0
- package/resources/classifier/jobs.d.ts.map +1 -1
- package/resources/classifier/jobs.js +8 -0
- package/resources/classifier/jobs.js.map +1 -1
- package/resources/classifier/jobs.mjs +8 -0
- package/resources/classifier/jobs.mjs.map +1 -1
- package/resources/classify.d.mts +326 -0
- package/resources/classify.d.mts.map +1 -0
- package/resources/classify.d.ts +326 -0
- package/resources/classify.d.ts.map +1 -0
- package/resources/classify.js +37 -0
- package/resources/classify.js.map +1 -0
- package/resources/classify.mjs +33 -0
- package/resources/classify.mjs.map +1 -0
- package/resources/index.d.mts +1 -0
- package/resources/index.d.mts.map +1 -1
- package/resources/index.d.ts +1 -0
- package/resources/index.d.ts.map +1 -1
- package/resources/index.js +3 -1
- package/resources/index.js.map +1 -1
- package/resources/index.mjs +1 -0
- package/resources/index.mjs.map +1 -1
- package/resources/parsing.d.mts +52 -2
- package/resources/parsing.d.mts.map +1 -1
- package/resources/parsing.d.ts +52 -2
- package/resources/parsing.d.ts.map +1 -1
- package/resources/pipelines/files.d.mts +5 -6
- package/resources/pipelines/files.d.mts.map +1 -1
- package/resources/pipelines/files.d.ts +5 -6
- package/resources/pipelines/files.d.ts.map +1 -1
- package/resources/pipelines/files.js +1 -6
- package/resources/pipelines/files.js.map +1 -1
- package/resources/pipelines/files.mjs +1 -6
- package/resources/pipelines/files.mjs.map +1 -1
- package/src/client.ts +41 -12
- package/src/internal/utils/query.ts +7 -0
- package/src/internal/utils.ts +1 -0
- package/src/resources/beta/sheets.ts +7 -0
- package/src/resources/beta/split.ts +5 -3
- package/src/resources/classifier/jobs.ts +8 -0
- package/src/resources/classify.ts +433 -0
- package/src/resources/index.ts +13 -0
- package/src/resources/parsing.ts +73 -0
- package/src/resources/pipelines/files.ts +6 -6
- package/src/version.ts +1 -1
- package/version.d.mts +1 -1
- package/version.d.ts +1 -1
- package/version.js +1 -1
- package/version.mjs +1 -1
package/CHANGELOG.md
CHANGED
|
@@ -1,5 +1,60 @@
|
|
|
1
1
|
# Changelog
|
|
2
2
|
|
|
3
|
+
## 1.8.0 (2026-03-05)
|
|
4
|
+
|
|
5
|
+
Full Changelog: [v1.7.0...v1.8.0](https://github.com/run-llama/llama-cloud-ts/compare/v1.7.0...v1.8.0)
|
|
6
|
+
|
|
7
|
+
### Features
|
|
8
|
+
|
|
9
|
+
* **api:** add better deprecation messages ([249c29c](https://github.com/run-llama/llama-cloud-ts/commit/249c29c7eae0343136d84180305dfa4c9c833200))
|
|
10
|
+
* **api:** Add Classify V2 API ([466e946](https://github.com/run-llama/llama-cloud-ts/commit/466e946aa23b0d00ede11608a3d1b136560ed9bc))
|
|
11
|
+
* **api:** api update ([fffb54f](https://github.com/run-llama/llama-cloud-ts/commit/fffb54f32b7a4e31cd3631d8a150e6aecd0509c4))
|
|
12
|
+
* **api:** better deprecation config ([222f946](https://github.com/run-llama/llama-cloud-ts/commit/222f946dda7d71c29788c64c58f89d70e2c84800))
|
|
13
|
+
* support LLAMA_PARSE_API_KEY as fallback env var for authentication ([756b8de](https://github.com/run-llama/llama-cloud-ts/commit/756b8dedcf354cb28a981bf341eb0a442b1d39e5))
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
### Bug Fixes
|
|
17
|
+
|
|
18
|
+
* fix request delays for retrying to be more respectful of high requested delays ([b191726](https://github.com/run-llama/llama-cloud-ts/commit/b191726c37e8eaf7870151a4e6cdebc1f27294c8))
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
### Chores
|
|
22
|
+
|
|
23
|
+
* **internal:** use x-stainless-mcp-client-envs header for MCP remote code tool calls ([43a1bf2](https://github.com/run-llama/llama-cloud-ts/commit/43a1bf2fe8d25742442815b42e3b22305b89a33b))
|
|
24
|
+
|
|
25
|
+
## 1.7.0 (2026-03-03)
|
|
26
|
+
|
|
27
|
+
Full Changelog: [v1.6.0...v1.7.0](https://github.com/run-llama/llama-cloud-ts/compare/v1.6.0...v1.7.0)
|
|
28
|
+
|
|
29
|
+
### Features
|
|
30
|
+
|
|
31
|
+
* **api:** api update ([1118a71](https://github.com/run-llama/llama-cloud-ts/commit/1118a71e1a81b008a0f010171055363fff91d029))
|
|
32
|
+
* **api:** api update ([3e86820](https://github.com/run-llama/llama-cloud-ts/commit/3e868203b1802b41fab57d8c23a8b929c7a31abe))
|
|
33
|
+
* **api:** api update ([e53cdc7](https://github.com/run-llama/llama-cloud-ts/commit/e53cdc779e95a2b7a3cfb0e1cff5603f6dfbd8ec))
|
|
34
|
+
* **mcp:** add an option to disable code tool ([59a4382](https://github.com/run-llama/llama-cloud-ts/commit/59a43823a951cf488f29da4af3136a71eec9d437))
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
### Bug Fixes
|
|
38
|
+
|
|
39
|
+
* **docs/contributing:** correct pnpm link command ([8ebcd1e](https://github.com/run-llama/llama-cloud-ts/commit/8ebcd1eb8a1276b5d774d4a7c35a99dc58de767e))
|
|
40
|
+
* **mcp:** initialize SDK lazily to avoid failing the connection on init errors ([586ef99](https://github.com/run-llama/llama-cloud-ts/commit/586ef99cc38543cae92003a6ad3c076b97b5e15a))
|
|
41
|
+
* **mcp:** update prompt ([51ff157](https://github.com/run-llama/llama-cloud-ts/commit/51ff1578bb202b50c9df2d900ae868f918bec825))
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
### Chores
|
|
45
|
+
|
|
46
|
+
* **internal:** cache fetch instruction calls in MCP server ([1910716](https://github.com/run-llama/llama-cloud-ts/commit/1910716b6965286951b05e2cbae6839117df7d81))
|
|
47
|
+
* **internal:** fix MCP Dockerfiles so they can be built without buildkit ([c71507a](https://github.com/run-llama/llama-cloud-ts/commit/c71507adf02a899adb554e0feadfa161477253c9))
|
|
48
|
+
* **internal:** fix MCP Dockerfiles so they can be built without buildkit ([8cafa3b](https://github.com/run-llama/llama-cloud-ts/commit/8cafa3b5ebdcf3cde636edacb5a8fe81c1537df7))
|
|
49
|
+
* **internal:** make MCP code execution location configurable via a flag ([753240c](https://github.com/run-llama/llama-cloud-ts/commit/753240c9d3e0e813ca22b999076cd86bcb923107))
|
|
50
|
+
* **internal:** move stringifyQuery implementation to internal function ([c5931c5](https://github.com/run-llama/llama-cloud-ts/commit/c5931c55cd5e961173c4ffbf7485496bc6280579))
|
|
51
|
+
* **internal:** remove mock server code ([28fd07f](https://github.com/run-llama/llama-cloud-ts/commit/28fd07f362276fbda68955d0ef27ea3e53f2380e))
|
|
52
|
+
* **internal:** switch MCP servers to use pino for logging ([f4b2326](https://github.com/run-llama/llama-cloud-ts/commit/f4b232620c447f2043b3ccc4a7eda0799f6e38dc))
|
|
53
|
+
* **internal:** upgrade @modelcontextprotocol/sdk and hono ([4161193](https://github.com/run-llama/llama-cloud-ts/commit/4161193bf554873976f442c0f62511af9a2dfbca))
|
|
54
|
+
* **mcp-server:** return access instructions for 404 without API key ([fe1526d](https://github.com/run-llama/llama-cloud-ts/commit/fe1526d4d9e09feb074affe5c2690bc0ca0167d8))
|
|
55
|
+
* **mcp:** correctly update version in sync with sdk ([89f6113](https://github.com/run-llama/llama-cloud-ts/commit/89f6113e42525fd8533b18e58a77d8b859b85d39))
|
|
56
|
+
* update mock server docs ([9536239](https://github.com/run-llama/llama-cloud-ts/commit/9536239d4291dac002823d687ba74055ed7e175d))
|
|
57
|
+
|
|
3
58
|
## 1.6.0 (2026-02-18)
|
|
4
59
|
|
|
5
60
|
Full Changelog: [v1.5.0...v1.6.0](https://github.com/run-llama/llama-cloud-ts/compare/v1.5.0...v1.6.0)
|
package/README.md
CHANGED
|
@@ -1,21 +1,22 @@
|
|
|
1
|
-
# Llama Cloud TypeScript
|
|
1
|
+
# Llama Cloud TypeScript SDK
|
|
2
2
|
|
|
3
3
|
[npm version](https://npmjs.org/package/@llamaindex/llama-cloud) 
|
|
4
4
|
|
|
5
|
-
|
|
5
|
+
The official TypeScript SDK for [LlamaParse](https://cloud.llamaindex.ai) - the enterprise platform for agentic OCR and document processing.
|
|
6
6
|
|
|
7
|
-
|
|
7
|
+
With this SDK, create powerful workflows across many features:
|
|
8
8
|
|
|
9
|
-
|
|
9
|
+
- **Parse** - Agentic OCR and parsing for 130+ formats
|
|
10
|
+
- **Extract** - Structured data extraction with custom schemas
|
|
11
|
+
- **Classify** - Document categorization with natural-language rules
|
|
12
|
+
- **Agents** - Deploy document agents as APIs
|
|
13
|
+
- **Index** - Document ingestion and embedding for RAG
|
|
10
14
|
|
|
11
|
-
##
|
|
12
|
-
|
|
13
|
-
Use the Llama Cloud MCP Server to enable AI assistants to interact with this API, allowing them to explore endpoints, make test requests, and use documentation to help integrate this SDK into your application.
|
|
14
|
-
|
|
15
|
-
[](https://cursor.com/en-US/install-mcp?name=%40llamaindex%2Fllama-cloud-mcp&config=eyJuYW1lIjoiQGxsYW1haW5kZXgvbGxhbWEtY2xvdWQtbWNwIiwidHJhbnNwb3J0IjoiaHR0cCIsInVybCI6Imh0dHBzOi8vbGxhbWFjbG91ZC1wcm9kLnN0bG1jcC5jb20iLCJoZWFkZXJzIjp7IngtbGxhbWEtY2xvdWQtYXBpLWtleSI6Ik15IEFQSSBLZXkifX0)
|
|
16
|
-
[](https://vscode.stainless.com/mcp/%7B%22name%22%3A%22%40llamaindex%2Fllama-cloud-mcp%22%2C%22type%22%3A%22http%22%2C%22url%22%3A%22https%3A%2F%2Fllamacloud-prod.stlmcp.com%22%2C%22headers%22%3A%7B%22x-llama-cloud-api-key%22%3A%22My%20API%20Key%22%7D%7D)
|
|
15
|
+
## Documentation
|
|
17
16
|
|
|
18
|
-
|
|
17
|
+
- [Get an API Key](https://cloud.llamaindex.ai)
|
|
18
|
+
- [Getting Started Guide](https://developers.llamaindex.ai/typescript/cloud/)
|
|
19
|
+
- [Full API Reference](https://developers.api.llamaindex.ai/api/typescript)
|
|
19
20
|
|
|
20
21
|
## Installation
|
|
21
22
|
|
|
@@ -23,103 +24,66 @@ Use the Llama Cloud MCP Server to enable AI assistants to interact with this API
|
|
|
23
24
|
npm install @llamaindex/llama-cloud
|
|
24
25
|
```
|
|
25
26
|
|
|
26
|
-
##
|
|
27
|
+
## Quick Start
|
|
27
28
|
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
<!-- prettier-ignore -->
|
|
31
|
-
```js
|
|
29
|
+
```ts
|
|
32
30
|
import LlamaCloud from '@llamaindex/llama-cloud';
|
|
33
31
|
|
|
34
32
|
const client = new LlamaCloud({
|
|
35
33
|
apiKey: process.env['LLAMA_CLOUD_API_KEY'], // This is the default and can be omitted
|
|
36
34
|
});
|
|
37
35
|
|
|
38
|
-
|
|
36
|
+
// Parse a document
|
|
37
|
+
const job = await client.parsing.create({
|
|
39
38
|
tier: 'agentic',
|
|
40
39
|
version: 'latest',
|
|
41
|
-
file_id: '
|
|
40
|
+
file_id: 'your-file-id',
|
|
42
41
|
});
|
|
43
42
|
|
|
44
|
-
console.log(
|
|
43
|
+
console.log(job.id);
|
|
45
44
|
```
|
|
46
45
|
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
This library includes TypeScript definitions for all request params and response fields. You may import and use them like so:
|
|
50
|
-
|
|
51
|
-
<!-- prettier-ignore -->
|
|
52
|
-
```ts
|
|
53
|
-
import LlamaCloud from '@llamaindex/llama-cloud';
|
|
54
|
-
|
|
55
|
-
const client = new LlamaCloud({
|
|
56
|
-
apiKey: process.env['LLAMA_CLOUD_API_KEY'], // This is the default and can be omitted
|
|
57
|
-
});
|
|
58
|
-
|
|
59
|
-
const params: LlamaCloud.PipelineListParams = { project_id: 'my-project-id' };
|
|
60
|
-
const pipelines: LlamaCloud.PipelineListResponse = await client.pipelines.list(params);
|
|
61
|
-
```
|
|
62
|
-
|
|
63
|
-
Documentation for each method, request param, and response field are available in docstrings and will appear on hover in most modern editors.
|
|
64
|
-
|
|
65
|
-
## File uploads
|
|
66
|
-
|
|
67
|
-
Request parameters that correspond to file uploads can be passed in many different forms:
|
|
68
|
-
|
|
69
|
-
- `File` (or an object with the same structure)
|
|
70
|
-
- a `fetch` `Response` (or an object with the same structure)
|
|
71
|
-
- an `fs.ReadStream`
|
|
72
|
-
- the return value of our `toFile` helper
|
|
46
|
+
## File Uploads
|
|
73
47
|
|
|
74
48
|
```ts
|
|
75
49
|
import fs from 'fs';
|
|
76
|
-
import LlamaCloud
|
|
50
|
+
import LlamaCloud from '@llamaindex/llama-cloud';
|
|
77
51
|
|
|
78
52
|
const client = new LlamaCloud();
|
|
79
53
|
|
|
80
|
-
//
|
|
81
|
-
await client.files.create({ file: fs.createReadStream('/path/to/file'), purpose: 'purpose' });
|
|
82
|
-
|
|
83
|
-
// Or if you have the web `File` API you can pass a `File` instance:
|
|
84
|
-
await client.files.create({ file: new File(['my bytes'], 'file'), purpose: 'purpose' });
|
|
85
|
-
|
|
86
|
-
// You can also pass a `fetch` `Response`:
|
|
87
|
-
await client.files.create({ file: await fetch('https://somesite/file'), purpose: 'purpose' });
|
|
88
|
-
|
|
89
|
-
// Finally, if none of the above are convenient, you can use our `toFile` helper:
|
|
54
|
+
// Upload using a file stream
|
|
90
55
|
await client.files.create({
|
|
91
|
-
file:
|
|
56
|
+
file: fs.createReadStream('/path/to/document.pdf'),
|
|
92
57
|
purpose: 'purpose',
|
|
93
58
|
});
|
|
59
|
+
|
|
60
|
+
// Or using a File object
|
|
94
61
|
await client.files.create({
|
|
95
|
-
file:
|
|
62
|
+
file: new File(['content'], 'document.txt'),
|
|
96
63
|
purpose: 'purpose',
|
|
97
64
|
});
|
|
98
65
|
```
|
|
99
66
|
|
|
100
|
-
##
|
|
67
|
+
## MCP Server
|
|
68
|
+
|
|
69
|
+
Use the Llama Cloud MCP Server to enable AI assistants to interact with the API:
|
|
101
70
|
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
71
|
+
[](https://cursor.com/en-US/install-mcp?name=%40llamaindex%2Fllama-cloud-mcp&config=eyJuYW1lIjoiQGxsYW1haW5kZXgvbGxhbWEtY2xvdWQtbWNwIiwidHJhbnNwb3J0IjoiaHR0cCIsInVybCI6Imh0dHBzOi8vbGxhbWFjbG91ZC1wcm9kLnN0bG1jcC5jb20iLCJoZWFkZXJzIjp7IngtbGxhbWEtY2xvdWQtYXBpLWtleSI6Ik15IEFQSSBLZXkifX0)
|
|
72
|
+
[](https://vscode.stainless.com/mcp/%7B%22name%22%3A%22%40llamaindex%2Fllama-cloud-mcp%22%2C%22type%22%3A%22http%22%2C%22url%22%3A%22https%3A%2F%2Fllamacloud-prod.stlmcp.com%22%2C%22headers%22%3A%7B%22x-llama-cloud-api-key%22%3A%22My%20API%20Key%22%7D%7D)
|
|
73
|
+
|
|
74
|
+
## Error Handling
|
|
75
|
+
|
|
76
|
+
When the API returns a non-success status code, an `APIError` subclass is thrown:
|
|
105
77
|
|
|
106
|
-
<!-- prettier-ignore -->
|
|
107
78
|
```ts
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
console.log(err.headers); // {server: 'nginx', ...}
|
|
115
|
-
} else {
|
|
116
|
-
throw err;
|
|
117
|
-
}
|
|
118
|
-
});
|
|
79
|
+
await client.pipelines.list({ project_id: 'my-project-id' }).catch((err) => {
|
|
80
|
+
if (err instanceof LlamaCloud.APIError) {
|
|
81
|
+
console.log(err.status); // 400
|
|
82
|
+
console.log(err.name); // BadRequestError
|
|
83
|
+
}
|
|
84
|
+
});
|
|
119
85
|
```
|
|
120
86
|
|
|
121
|
-
Error codes are as follows:
|
|
122
|
-
|
|
123
87
|
| Status Code | Error Type |
|
|
124
88
|
| ----------- | -------------------------- |
|
|
125
89
|
| 400 | `BadRequestError` |
|
|
@@ -131,318 +95,58 @@ Error codes are as follows:
|
|
|
131
95
|
| >=500 | `InternalServerError` |
|
|
132
96
|
| N/A | `APIConnectionError` |
|
|
133
97
|
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
Certain errors will be automatically retried 2 times by default, with a short exponential backoff.
|
|
137
|
-
Connection errors (for example, due to a network connectivity problem), 408 Request Timeout, 409 Conflict,
|
|
138
|
-
429 Rate Limit, and >=500 Internal errors will all be retried by default.
|
|
139
|
-
|
|
140
|
-
You can use the `maxRetries` option to configure or disable this:
|
|
141
|
-
|
|
142
|
-
<!-- prettier-ignore -->
|
|
143
|
-
```js
|
|
144
|
-
// Configure the default for all requests:
|
|
145
|
-
const client = new LlamaCloud({
|
|
146
|
-
maxRetries: 0, // default is 2
|
|
147
|
-
});
|
|
148
|
-
|
|
149
|
-
// Or, configure per-request:
|
|
150
|
-
await client.pipelines.list({ project_id: 'my-project-id' }, {
|
|
151
|
-
maxRetries: 5,
|
|
152
|
-
});
|
|
153
|
-
```
|
|
154
|
-
|
|
155
|
-
### Timeouts
|
|
98
|
+
## Retries and Timeouts
|
|
156
99
|
|
|
157
|
-
Requests
|
|
100
|
+
The SDK automatically retries requests 2 times on connection errors, timeouts, rate limits, and 5xx errors. Requests timeout after 1 minute by default. Functions that combine multiple API calls (e.g. `client.parsing.parse()`) will have larger timeouts by default to account for the multiple requests and polling.
|
|
158
101
|
|
|
159
|
-
<!-- prettier-ignore -->
|
|
160
102
|
```ts
|
|
161
|
-
// Configure the default for all requests:
|
|
162
103
|
const client = new LlamaCloud({
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
// Override per-request:
|
|
167
|
-
await client.pipelines.list({ project_id: 'my-project-id' }, {
|
|
168
|
-
timeout: 5 * 1000,
|
|
104
|
+
maxRetries: 0, // Disable retries (default: 2)
|
|
105
|
+
timeout: 30 * 1000, // 30 second timeout (default: 1 minute)
|
|
169
106
|
});
|
|
170
107
|
```
|
|
171
108
|
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
Note that requests which time out will be [retried twice by default](#retries).
|
|
109
|
+
## Pagination
|
|
175
110
|
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
List methods in the LlamaCloud API are paginated.
|
|
179
|
-
You can use the `for await … of` syntax to iterate through items across all pages:
|
|
111
|
+
List methods support auto-pagination with `for await...of`:
|
|
180
112
|
|
|
181
113
|
```ts
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
limit: 20,
|
|
188
|
-
})) {
|
|
189
|
-
allExtractRuns.push(extractRun);
|
|
190
|
-
}
|
|
191
|
-
return allExtractRuns;
|
|
114
|
+
for await (const run of client.extraction.runs.list({
|
|
115
|
+
extraction_agent_id: 'agent-id',
|
|
116
|
+
limit: 20,
|
|
117
|
+
})) {
|
|
118
|
+
console.log(run);
|
|
192
119
|
}
|
|
193
120
|
```
|
|
194
121
|
|
|
195
|
-
|
|
122
|
+
Or fetch one page at a time:
|
|
196
123
|
|
|
197
124
|
```ts
|
|
198
|
-
let page = await client.extraction.runs.list({
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
});
|
|
202
|
-
for (const extractRun of page.items) {
|
|
203
|
-
console.log(extractRun);
|
|
125
|
+
let page = await client.extraction.runs.list({ extraction_agent_id: 'agent-id', limit: 20 });
|
|
126
|
+
for (const run of page.items) {
|
|
127
|
+
console.log(run);
|
|
204
128
|
}
|
|
205
129
|
|
|
206
|
-
// Convenience methods are provided for manually paginating:
|
|
207
130
|
while (page.hasNextPage()) {
|
|
208
131
|
page = await page.getNextPage();
|
|
209
|
-
// ...
|
|
210
132
|
}
|
|
211
133
|
```
|
|
212
134
|
|
|
213
|
-
##
|
|
214
|
-
|
|
215
|
-
### Accessing raw Response data (e.g., headers)
|
|
216
|
-
|
|
217
|
-
The "raw" `Response` returned by `fetch()` can be accessed through the `.asResponse()` method on the `APIPromise` type that all methods return.
|
|
218
|
-
This method returns as soon as the headers for a successful response are received and does not consume the response body, so you are free to write custom parsing or streaming logic.
|
|
219
|
-
|
|
220
|
-
You can also use the `.withResponse()` method to get the raw `Response` along with the parsed data.
|
|
221
|
-
Unlike `.asResponse()` this method consumes the body, returning once it is parsed.
|
|
222
|
-
|
|
223
|
-
<!-- prettier-ignore -->
|
|
224
|
-
```ts
|
|
225
|
-
const client = new LlamaCloud();
|
|
226
|
-
|
|
227
|
-
const response = await client.pipelines.list({ project_id: 'my-project-id' }).asResponse();
|
|
228
|
-
console.log(response.headers.get('X-My-Header'));
|
|
229
|
-
console.log(response.statusText); // access the underlying Response object
|
|
230
|
-
|
|
231
|
-
const { data: pipelines, response: raw } = await client.pipelines
|
|
232
|
-
.list({ project_id: 'my-project-id' })
|
|
233
|
-
.withResponse();
|
|
234
|
-
console.log(raw.headers.get('X-My-Header'));
|
|
235
|
-
console.log(pipelines);
|
|
236
|
-
```
|
|
237
|
-
|
|
238
|
-
### Logging
|
|
239
|
-
|
|
240
|
-
> [!IMPORTANT]
|
|
241
|
-
> All log messages are intended for debugging only. The format and content of log messages
|
|
242
|
-
> may change between releases.
|
|
243
|
-
|
|
244
|
-
#### Log levels
|
|
245
|
-
|
|
246
|
-
The log level can be configured in two ways:
|
|
247
|
-
|
|
248
|
-
1. Via the `LLAMA_CLOUD_LOG` environment variable
|
|
249
|
-
2. Using the `logLevel` client option (overrides the environment variable if set)
|
|
250
|
-
|
|
251
|
-
```ts
|
|
252
|
-
import LlamaCloud from '@llamaindex/llama-cloud';
|
|
253
|
-
|
|
254
|
-
const client = new LlamaCloud({
|
|
255
|
-
logLevel: 'debug', // Show all log messages
|
|
256
|
-
});
|
|
257
|
-
```
|
|
258
|
-
|
|
259
|
-
Available log levels, from most to least verbose:
|
|
260
|
-
|
|
261
|
-
- `'debug'` - Show debug messages, info, warnings, and errors
|
|
262
|
-
- `'info'` - Show info messages, warnings, and errors
|
|
263
|
-
- `'warn'` - Show warnings and errors (default)
|
|
264
|
-
- `'error'` - Show only errors
|
|
265
|
-
- `'off'` - Disable all logging
|
|
266
|
-
|
|
267
|
-
At the `'debug'` level, all HTTP requests and responses are logged, including headers and bodies.
|
|
268
|
-
Some authentication-related headers are redacted, but sensitive data in request and response bodies
|
|
269
|
-
may still be visible.
|
|
270
|
-
|
|
271
|
-
#### Custom logger
|
|
272
|
-
|
|
273
|
-
By default, this library logs to `globalThis.console`. You can also provide a custom logger.
|
|
274
|
-
Most logging libraries are supported, including [pino](https://www.npmjs.com/package/pino), [winston](https://www.npmjs.com/package/winston), [bunyan](https://www.npmjs.com/package/bunyan), [consola](https://www.npmjs.com/package/consola), [signale](https://www.npmjs.com/package/signale), and [@std/log](https://jsr.io/@std/log). If your logger doesn't work, please open an issue.
|
|
275
|
-
|
|
276
|
-
When providing a custom logger, the `logLevel` option still controls which messages are emitted, messages
|
|
277
|
-
below the configured level will not be sent to your logger.
|
|
278
|
-
|
|
279
|
-
```ts
|
|
280
|
-
import LlamaCloud from '@llamaindex/llama-cloud';
|
|
281
|
-
import pino from 'pino';
|
|
282
|
-
|
|
283
|
-
const logger = pino();
|
|
284
|
-
|
|
285
|
-
const client = new LlamaCloud({
|
|
286
|
-
logger: logger.child({ name: 'LlamaCloud' }),
|
|
287
|
-
logLevel: 'debug', // Send all messages to pino, allowing it to filter
|
|
288
|
-
});
|
|
289
|
-
```
|
|
290
|
-
|
|
291
|
-
### Making custom/undocumented requests
|
|
292
|
-
|
|
293
|
-
This library is typed for convenient access to the documented API. If you need to access undocumented
|
|
294
|
-
endpoints, params, or response properties, the library can still be used.
|
|
295
|
-
|
|
296
|
-
#### Undocumented endpoints
|
|
297
|
-
|
|
298
|
-
To make requests to undocumented endpoints, you can use `client.get`, `client.post`, and other HTTP verbs.
|
|
299
|
-
Options on the client, such as retries, will be respected when making these requests.
|
|
300
|
-
|
|
301
|
-
```ts
|
|
302
|
-
await client.post('/some/path', {
|
|
303
|
-
body: { some_prop: 'foo' },
|
|
304
|
-
query: { some_query_arg: 'bar' },
|
|
305
|
-
});
|
|
306
|
-
```
|
|
307
|
-
|
|
308
|
-
#### Undocumented request params
|
|
309
|
-
|
|
310
|
-
To make requests using undocumented parameters, you may use `// @ts-expect-error` on the undocumented
|
|
311
|
-
parameter. This library doesn't validate at runtime that the request matches the type, so any extra values you
|
|
312
|
-
send will be sent as-is.
|
|
313
|
-
|
|
314
|
-
```ts
|
|
315
|
-
client.parsing.create({
|
|
316
|
-
// ...
|
|
317
|
-
// @ts-expect-error baz is not yet public
|
|
318
|
-
baz: 'undocumented option',
|
|
319
|
-
});
|
|
320
|
-
```
|
|
321
|
-
|
|
322
|
-
For requests with the `GET` verb, any extra params will be in the query, all other requests will send the
|
|
323
|
-
extra param in the body.
|
|
324
|
-
|
|
325
|
-
If you want to explicitly send an extra argument, you can do so with the `query`, `body`, and `headers` request
|
|
326
|
-
options.
|
|
327
|
-
|
|
328
|
-
#### Undocumented response properties
|
|
329
|
-
|
|
330
|
-
To access undocumented response properties, you may access the response object with `// @ts-expect-error` on
|
|
331
|
-
the response object, or cast the response object to the requisite type. Like the request params, we do not
|
|
332
|
-
validate or strip extra properties from the response from the API.
|
|
333
|
-
|
|
334
|
-
### Customizing the fetch client
|
|
335
|
-
|
|
336
|
-
By default, this library expects a global `fetch` function is defined.
|
|
337
|
-
|
|
338
|
-
If you want to use a different `fetch` function, you can either polyfill the global:
|
|
339
|
-
|
|
340
|
-
```ts
|
|
341
|
-
import fetch from 'my-fetch';
|
|
342
|
-
|
|
343
|
-
globalThis.fetch = fetch;
|
|
344
|
-
```
|
|
345
|
-
|
|
346
|
-
Or pass it to the client:
|
|
347
|
-
|
|
348
|
-
```ts
|
|
349
|
-
import LlamaCloud from '@llamaindex/llama-cloud';
|
|
350
|
-
import fetch from 'my-fetch';
|
|
351
|
-
|
|
352
|
-
const client = new LlamaCloud({ fetch });
|
|
353
|
-
```
|
|
354
|
-
|
|
355
|
-
### Fetch options
|
|
356
|
-
|
|
357
|
-
If you want to set custom `fetch` options without overriding the `fetch` function, you can provide a `fetchOptions` object when instantiating the client or making a request. (Request-specific options override client options.)
|
|
358
|
-
|
|
359
|
-
```ts
|
|
360
|
-
import LlamaCloud from '@llamaindex/llama-cloud';
|
|
361
|
-
|
|
362
|
-
const client = new LlamaCloud({
|
|
363
|
-
fetchOptions: {
|
|
364
|
-
// `RequestInit` options
|
|
365
|
-
},
|
|
366
|
-
});
|
|
367
|
-
```
|
|
368
|
-
|
|
369
|
-
#### Configuring proxies
|
|
370
|
-
|
|
371
|
-
To modify proxy behavior, you can provide custom `fetchOptions` that add runtime-specific proxy
|
|
372
|
-
options to requests:
|
|
135
|
+
## Logging
|
|
373
136
|
|
|
374
|
-
|
|
137
|
+
Configure logging via the `LLAMA_CLOUD_LOG` environment variable or the `logLevel` option:
|
|
375
138
|
|
|
376
139
|
```ts
|
|
377
|
-
import LlamaCloud from '@llamaindex/llama-cloud';
|
|
378
|
-
import * as undici from 'undici';
|
|
379
|
-
|
|
380
|
-
const proxyAgent = new undici.ProxyAgent('http://localhost:8888');
|
|
381
140
|
const client = new LlamaCloud({
|
|
382
|
-
|
|
383
|
-
dispatcher: proxyAgent,
|
|
384
|
-
},
|
|
141
|
+
logLevel: 'debug', // 'debug' | 'info' | 'warn' | 'error' | 'off'
|
|
385
142
|
});
|
|
386
143
|
```
|
|
387
144
|
|
|
388
|
-
<img src="https://raw.githubusercontent.com/stainless-api/sdk-assets/refs/heads/main/bun.svg" align="top" width="18" height="21"> **Bun** <sup>[[docs](https://bun.sh/guides/http/proxy)]</sup>
|
|
389
|
-
|
|
390
|
-
```ts
|
|
391
|
-
import LlamaCloud from '@llamaindex/llama-cloud';
|
|
392
|
-
|
|
393
|
-
const client = new LlamaCloud({
|
|
394
|
-
fetchOptions: {
|
|
395
|
-
proxy: 'http://localhost:8888',
|
|
396
|
-
},
|
|
397
|
-
});
|
|
398
|
-
```
|
|
399
|
-
|
|
400
|
-
<img src="https://raw.githubusercontent.com/stainless-api/sdk-assets/refs/heads/main/deno.svg" align="top" width="18" height="21"> **Deno** <sup>[[docs](https://docs.deno.com/api/deno/~/Deno.createHttpClient)]</sup>
|
|
401
|
-
|
|
402
|
-
```ts
|
|
403
|
-
import LlamaCloud from 'npm:@llamaindex/llama-cloud';
|
|
404
|
-
|
|
405
|
-
const httpClient = Deno.createHttpClient({ proxy: { url: 'http://localhost:8888' } });
|
|
406
|
-
const client = new LlamaCloud({
|
|
407
|
-
fetchOptions: {
|
|
408
|
-
client: httpClient,
|
|
409
|
-
},
|
|
410
|
-
});
|
|
411
|
-
```
|
|
412
|
-
|
|
413
|
-
## Frequently Asked Questions
|
|
414
|
-
|
|
415
|
-
## Semantic versioning
|
|
416
|
-
|
|
417
|
-
This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions:
|
|
418
|
-
|
|
419
|
-
1. Changes that only affect static types, without breaking runtime behavior.
|
|
420
|
-
2. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals.)_
|
|
421
|
-
3. Changes that we do not expect to impact the vast majority of users in practice.
|
|
422
|
-
|
|
423
|
-
We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience.
|
|
424
|
-
|
|
425
|
-
We are keen for your feedback; please open an [issue](https://www.github.com/run-llama/llama-cloud-ts/issues) with questions, bugs, or suggestions.
|
|
426
|
-
|
|
427
145
|
## Requirements
|
|
428
146
|
|
|
429
|
-
TypeScript >= 4.9
|
|
430
|
-
|
|
431
|
-
The following runtimes are supported:
|
|
432
|
-
|
|
433
|
-
- Web browsers (Up-to-date Chrome, Firefox, Safari, Edge, and more)
|
|
434
|
-
- Node.js 20 LTS or later ([non-EOL](https://endoflife.date/nodejs)) versions.
|
|
435
|
-
- Deno v1.28.0 or higher.
|
|
436
|
-
- Bun 1.0 or later.
|
|
437
|
-
- Cloudflare Workers.
|
|
438
|
-
- Vercel Edge Runtime.
|
|
439
|
-
- Jest 28 or greater with the `"node"` environment (`"jsdom"` is not supported at this time).
|
|
440
|
-
- Nitro v2.6 or greater.
|
|
441
|
-
|
|
442
|
-
Note that React Native is not supported at this time.
|
|
443
|
-
|
|
444
|
-
If you are interested in other runtime environments, please open or upvote an issue on GitHub.
|
|
147
|
+
- TypeScript >= 4.9
|
|
148
|
+
- Node.js 20+, Deno 1.28+, Bun 1.0+, or modern browsers
|
|
445
149
|
|
|
446
150
|
## Contributing
|
|
447
151
|
|
|
448
|
-
See [
|
|
152
|
+
See [CONTRIBUTING.md](./CONTRIBUTING.md).
|
package/client.d.mts
CHANGED
|
@@ -8,6 +8,7 @@ import { type PaginatedBatchItemsParams, PaginatedBatchItemsResponse, type Pagin
|
|
|
8
8
|
import * as Uploads from "./core/uploads.mjs";
|
|
9
9
|
import * as API from "./resources/index.mjs";
|
|
10
10
|
import { APIPromise } from "./core/api-promise.mjs";
|
|
11
|
+
import { Classify, ClassifyConfiguration, ClassifyCreateParams, ClassifyCreateRequest, ClassifyCreateResponse, ClassifyGetParams, ClassifyGetResponse, ClassifyListParams, ClassifyListResponse, ClassifyListResponsesPaginatedCursor, ClassifyResult } from "./resources/classify.mjs";
|
|
11
12
|
import { DataSink, DataSinkCreateParams, DataSinkListParams, DataSinkListResponse, DataSinkUpdateParams, DataSinks } from "./resources/data-sinks.mjs";
|
|
12
13
|
import { DataSource, DataSourceCreateParams, DataSourceListParams, DataSourceListResponse, DataSourceReaderVersionMetadata, DataSourceUpdateParams, DataSources } from "./resources/data-sources.mjs";
|
|
13
14
|
import { File, FileCreate, FileCreateParams, FileCreateResponse, FileDeleteParams, FileGetParams, FileListParams, FileListResponse, FileListResponsesPaginatedCursor, FileQueryParams, FileQueryResponse, Files, PresignedURL } from "./resources/files.mjs";
|
|
@@ -24,7 +25,7 @@ import { FinalRequestOptions, RequestOptions } from "./internal/request-options.
|
|
|
24
25
|
import { type LogLevel, type Logger } from "./internal/utils/log.mjs";
|
|
25
26
|
export interface ClientOptions {
|
|
26
27
|
/**
|
|
27
|
-
* Defaults to process.env['LLAMA_CLOUD_API_KEY'].
|
|
28
|
+
* Defaults to process.env['LLAMA_CLOUD_API_KEY'] or process.env['LLAMA_PARSE_API_KEY'].
|
|
28
29
|
*/
|
|
29
30
|
apiKey?: string | undefined;
|
|
30
31
|
/**
|
|
@@ -106,7 +107,7 @@ export declare class LlamaCloud {
|
|
|
106
107
|
/**
|
|
107
108
|
* API Client for interfacing with the Llama Cloud API.
|
|
108
109
|
*
|
|
109
|
-
* @param {string | undefined} [opts.apiKey=process.env['LLAMA_CLOUD_API_KEY'] ?? undefined]
|
|
110
|
+
* @param {string | undefined} [opts.apiKey=process.env['LLAMA_CLOUD_API_KEY'] ?? process.env['LLAMA_PARSE_API_KEY'] ?? undefined]
|
|
110
111
|
* @param {string} [opts.baseURL=process.env['LLAMA_CLOUD_BASE_URL'] ?? https://api.cloud.llamaindex.ai] - Override the default base URL for the API.
|
|
111
112
|
* @param {number} [opts.timeout=1 minute] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
|
|
112
113
|
* @param {MergedRequestInit} [opts.fetchOptions] - Additional `RequestInit` options to be passed to `fetch` calls.
|
|
@@ -123,7 +124,7 @@ export declare class LlamaCloud {
|
|
|
123
124
|
protected defaultQuery(): Record<string, string | undefined> | undefined;
|
|
124
125
|
protected validateHeaders({ values, nulls }: NullableHeaders): void;
|
|
125
126
|
protected authHeaders(opts: FinalRequestOptions): Promise<NullableHeaders | undefined>;
|
|
126
|
-
protected stringifyQuery(query: Record<string, unknown>): string;
|
|
127
|
+
protected stringifyQuery(query: object | Record<string, unknown>): string;
|
|
127
128
|
private getUserAgent;
|
|
128
129
|
protected defaultIdempotencyKey(): string;
|
|
129
130
|
protected makeStatusError(status: number, error: Object, message: string | undefined, headers: Headers): Errors.APIError;
|
|
@@ -186,6 +187,7 @@ export declare class LlamaCloud {
|
|
|
186
187
|
parsing: API.Parsing;
|
|
187
188
|
extraction: API.Extraction;
|
|
188
189
|
classifier: API.Classifier;
|
|
190
|
+
classify: API.Classify;
|
|
189
191
|
projects: API.Projects;
|
|
190
192
|
dataSinks: API.DataSinks;
|
|
191
193
|
dataSources: API.DataSources;
|
|
@@ -215,6 +217,7 @@ export declare namespace LlamaCloud {
|
|
|
215
217
|
export { Parsing as Parsing, type BBox as BBox, type CodeItem as CodeItem, type FailPageMode as FailPageMode, type FooterItem as FooterItem, type HeaderItem as HeaderItem, type HeadingItem as HeadingItem, type ImageItem as ImageItem, type LinkItem as LinkItem, type ListItem as ListItem, type LlamaParseSupportedFileExtensions as LlamaParseSupportedFileExtensions, type ParsingJob as ParsingJob, type ParsingLanguages as ParsingLanguages, type ParsingMode as ParsingMode, type StatusEnum as StatusEnum, type TableItem as TableItem, type TextItem as TextItem, type ParsingCreateResponse as ParsingCreateResponse, type ParsingListResponse as ParsingListResponse, type ParsingGetResponse as ParsingGetResponse, type ParsingListResponsesPaginatedCursor as ParsingListResponsesPaginatedCursor, type ParsingCreateParams as ParsingCreateParams, type ParsingListParams as ParsingListParams, type ParsingGetParams as ParsingGetParams, };
|
|
216
218
|
export { Extraction as Extraction, type ExtractionRunParams as ExtractionRunParams };
|
|
217
219
|
export { Classifier as Classifier };
|
|
220
|
+
export { Classify as Classify, type ClassifyConfiguration as ClassifyConfiguration, type ClassifyCreateRequest as ClassifyCreateRequest, type ClassifyResult as ClassifyResult, type ClassifyCreateResponse as ClassifyCreateResponse, type ClassifyListResponse as ClassifyListResponse, type ClassifyGetResponse as ClassifyGetResponse, type ClassifyListResponsesPaginatedCursor as ClassifyListResponsesPaginatedCursor, type ClassifyCreateParams as ClassifyCreateParams, type ClassifyListParams as ClassifyListParams, type ClassifyGetParams as ClassifyGetParams, };
|
|
218
221
|
export { Projects as Projects, type Project as Project, type ProjectListResponse as ProjectListResponse, type ProjectListParams as ProjectListParams, type ProjectGetParams as ProjectGetParams, };
|
|
219
222
|
export { DataSinks as DataSinks, type DataSink as DataSink, type DataSinkListResponse as DataSinkListResponse, type DataSinkCreateParams as DataSinkCreateParams, type DataSinkUpdateParams as DataSinkUpdateParams, type DataSinkListParams as DataSinkListParams, };
|
|
220
223
|
export { DataSources as DataSources, type DataSource as DataSource, type DataSourceReaderVersionMetadata as DataSourceReaderVersionMetadata, type DataSourceListResponse as DataSourceListResponse, type DataSourceCreateParams as DataSourceCreateParams, type DataSourceUpdateParams as DataSourceUpdateParams, type DataSourceListParams as DataSourceListParams, };
|