@antfly/sdk 0.0.6 → 0.0.8
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- package/dist/index.cjs +79 -6
- package/dist/index.d.cts +524 -80
- package/dist/index.d.ts +524 -80
- package/dist/index.js +79 -6
- package/package.json +35 -26
package/dist/index.d.ts
CHANGED

@@ -26,6 +26,97 @@ interface paths {
         patch?: never;
         trace?: never;
     };
+    "/backup": {
+        parameters: {
+            query?: never;
+            header?: never;
+            path?: never;
+            cookie?: never;
+        };
+        get?: never;
+        put?: never;
+        /**
+         * Backup all tables or selected tables
+         * @description Creates a backup of all tables or specified tables. Each table's backup includes:
+         * - Table metadata (schema, indexes, shard configuration)
+         * - All shard data (compressed with zstd)
+         *
+         * The backup creates a cluster-level manifest that tracks all included tables
+         * and their individual backup locations.
+         *
+         * **Storage Locations:**
+         * - Local filesystem: `file:///path/to/backup`
+         * - Amazon S3: `s3://bucket-name/path/to/backup`
+         *
+         * **Backup Structure:**
+         * ```
+         * {location}/
+         * ├── {backup_id}-cluster-metadata.json (cluster manifest)
+         * ├── {table1}-{backup_id}-metadata.json (table metadata)
+         * ├── shard-1-{table1}-{backup_id}.tar.zst
+         * ├── shard-2-{table1}-{backup_id}.tar.zst
+         * ├── {table2}-{backup_id}-metadata.json
+         * └── ...
+         * ```
+         */
+        post: operations["backup"];
+        delete?: never;
+        options?: never;
+        head?: never;
+        patch?: never;
+        trace?: never;
+    };
+    "/restore": {
+        parameters: {
+            query?: never;
+            header?: never;
+            path?: never;
+            cookie?: never;
+        };
+        get?: never;
+        put?: never;
+        /**
+         * Restore multiple tables from a backup
+         * @description Restores tables from a cluster backup. Can restore all tables or a subset.
+         *
+         * **Restore Modes:**
+         * - `fail_if_exists`: Abort if any target table already exists (default)
+         * - `skip_if_exists`: Skip existing tables and restore the rest
+         * - `overwrite`: Drop existing tables and restore from backup
+         *
+         * The restore is asynchronous - this endpoint triggers the restore process
+         * and returns immediately. The actual data restoration happens via the
+         * reconciliation loop as shards are started.
+         */
+        post: operations["restore"];
+        delete?: never;
+        options?: never;
+        head?: never;
+        patch?: never;
+        trace?: never;
+    };
+    "/backups": {
+        parameters: {
+            query?: never;
+            header?: never;
+            path?: never;
+            cookie?: never;
+        };
+        /**
+         * List available backups
+         * @description Lists all cluster-level backups available at the specified location.
+         * Returns metadata about each backup including the tables included,
+         * timestamp, and Antfly version.
+         */
+        get: operations["listBackups"];
+        put?: never;
+        post?: never;
+        delete?: never;
+        options?: never;
+        head?: never;
+        patch?: never;
+        trace?: never;
+    };
     "/query": {
         parameters: {
             query?: never;
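
The three paths added above are backed by `operations["backup"]`, `operations["restore"]`, and `operations["listBackups"]`. A minimal sketch of driving them through the generated `paths` interface, assuming the types are consumed with `openapi-fetch` and an Antfly server reachable at a placeholder base URL (neither assumption comes from this diff):

```ts
import createClient from "openapi-fetch";
import type { paths } from "@antfly/sdk";

// Hypothetical endpoint; substitute your own Antfly cluster URL.
const api = createClient<paths>({ baseUrl: "http://localhost:8080" });

async function backupEverything() {
  // POST /backup takes a ClusterBackupRequest; table_names is optional.
  const { data, error } = await api.POST("/backup", {
    body: {
      backup_id: "cluster-backup-2025-01-15",
      location: "s3://mybucket/antfly-backups/cluster/2025-01-15",
    },
  });
  if (error) throw error;
  // ClusterBackupResponse: overall status plus per-table statuses.
  console.log(data.status, data.tables);

  // GET /backups lists cluster-level backups at a storage location.
  const list = await api.GET("/backups", {
    params: { query: { location: "s3://mybucket/antfly-backups/" } },
  });
  console.log(list.data?.backups);
}
```
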
@@ -435,7 +526,7 @@ interface paths {
         get?: never;
         put?: never;
         /** Perform batch inserts and deletes on a table */
-        post: operations["
+        post: operations["batchWrite"];
         delete?: never;
         options?: never;
         head?: never;

@@ -1208,6 +1299,14 @@ interface components$1 {
             transforms?: components$1["schemas"]["Transform"][];
             sync_level?: components$1["schemas"]["SyncLevel"];
         };
+        BatchResponse: {
+            /** @description Number of documents successfully inserted */
+            inserted?: number;
+            /** @description Number of documents successfully deleted */
+            deleted?: number;
+            /** @description Number of documents successfully transformed */
+            transformed?: number;
+        };
         BackupRequest: {
             /**
              * @description Unique identifier for this backup. Used to reference the backup for restore operations.

@@ -1226,6 +1325,151 @@ interface components$1 {
             location: string;
         };
         RestoreRequest: components$1["schemas"]["BackupRequest"];
+        ClusterBackupRequest: {
+            /**
+             * @description Unique identifier for this backup. Used to reference the backup for restore operations.
+             * Choose a meaningful name that includes date/version information.
+             * @example cluster-backup-2025-01-15
+             */
+            backup_id: string;
+            /**
+             * @description Storage location for the backup. Supports multiple backends:
+             * - Local filesystem: `file:///path/to/backup`
+             * - Amazon S3: `s3://bucket-name/path/to/backup`
+             *
+             * The backup includes all table data, indexes, and metadata.
+             * @example s3://mybucket/antfly-backups/cluster/2025-01-15
+             */
+            location: string;
+            /**
+             * @description Optional list of tables to backup. If omitted, all tables are backed up.
+             * @example [
+             *     "users",
+             *     "products"
+             *   ]
+             */
+            table_names?: string[];
+        };
+        ClusterBackupResponse: {
+            /**
+             * @description The backup identifier
+             * @example cluster-backup-2025-01-15
+             */
+            backup_id: string;
+            /** @description Status of each table backup */
+            tables: components$1["schemas"]["TableBackupStatus"][];
+            /**
+             * @description Overall backup status
+             * @example completed
+             * @enum {string}
+             */
+            status: "completed" | "partial" | "failed";
+        };
+        TableBackupStatus: {
+            /**
+             * @description Table name
+             * @example users
+             */
+            name: string;
+            /**
+             * @description Backup status for this table
+             * @example completed
+             * @enum {string}
+             */
+            status: "completed" | "failed" | "skipped";
+            /** @description Error message if backup failed */
+            error?: string;
+        };
+        ClusterRestoreRequest: {
+            /**
+             * @description Unique identifier of the backup to restore from.
+             * @example cluster-backup-2025-01-15
+             */
+            backup_id: string;
+            /**
+             * @description Storage location where the backup is stored.
+             * @example s3://mybucket/antfly-backups/cluster/2025-01-15
+             */
+            location: string;
+            /**
+             * @description Optional list of tables to restore. If omitted, all tables in the backup are restored.
+             * @example [
+             *     "users",
+             *     "products"
+             *   ]
+             */
+            table_names?: string[];
+            /**
+             * @description How to handle existing tables:
+             * - `fail_if_exists`: Abort if any table already exists (default)
+             * - `skip_if_exists`: Skip existing tables, restore others
+             * - `overwrite`: Drop and recreate existing tables
+             * @default fail_if_exists
+             * @example skip_if_exists
+             * @enum {string}
+             */
+            restore_mode?: "fail_if_exists" | "skip_if_exists" | "overwrite";
+        };
+        ClusterRestoreResponse: {
+            /** @description Status of each table restore */
+            tables: components$1["schemas"]["TableRestoreStatus"][];
+            /**
+             * @description Overall restore status
+             * @example triggered
+             * @enum {string}
+             */
+            status: "triggered" | "partial" | "failed";
+        };
+        TableRestoreStatus: {
+            /**
+             * @description Table name
+             * @example users
+             */
+            name: string;
+            /**
+             * @description Restore status for this table
+             * @example triggered
+             * @enum {string}
+             */
+            status: "triggered" | "skipped" | "failed";
+            /** @description Error message if restore failed */
+            error?: string;
+        };
+        BackupInfo: {
+            /**
+             * @description The backup identifier
+             * @example cluster-backup-2025-01-15
+             */
+            backup_id: string;
+            /**
+             * Format: date-time
+             * @description When the backup was created
+             * @example 2025-01-15T10:30:00Z
+             */
+            timestamp: string;
+            /**
+             * @description Tables included in the backup
+             * @example [
+             *     "users",
+             *     "products"
+             *   ]
+             */
+            tables: string[];
+            /**
+             * @description Storage location of the backup
+             * @example s3://mybucket/antfly-backups/cluster/2025-01-15
+             */
+            location: string;
+            /**
+             * @description Antfly version that created the backup
+             * @example v1.0.0
+             */
+            antfly_version?: string;
+        };
+        BackupListResponse: {
+            /** @description List of available backups */
+            backups: components$1["schemas"]["BackupInfo"][];
+        };
         RAGRequest: {
             /**
              * @description Array of retrieval queries to execute. Each query must specify a table and can specify its own limit and document_renderer.
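
The new cluster-level schemas are reachable through the exported `components` alias (see the updated export list at the end of this diff). A small sketch of well-typed request literals, using only values drawn from the schemas' own `@example` annotations:

```ts
import type { components } from "@antfly/sdk";

type ClusterBackupRequest = components["schemas"]["ClusterBackupRequest"];
type ClusterRestoreRequest = components["schemas"]["ClusterRestoreRequest"];

// Back up two tables to S3 under a dated backup_id.
const backupReq: ClusterBackupRequest = {
  backup_id: "cluster-backup-2025-01-15",
  location: "s3://mybucket/antfly-backups/cluster/2025-01-15",
  table_names: ["users", "products"], // omit to back up all tables
};

// Restore from the same backup, skipping tables that already exist.
const restoreReq: ClusterRestoreRequest = {
  backup_id: backupReq.backup_id,
  location: backupReq.location,
  restore_mode: "skip_if_exists", // default is "fail_if_exists"
};
```
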
@@ -1290,11 +1534,11 @@ interface components$1 {
             */
            eval?: components$1["schemas"]["EvalConfig"];
        };
-        /** @description RAG result with individual query results and
+        /** @description RAG result with individual query results and generation/evaluation outcome */
         RAGResult: {
             /** @description Results from each query. Check each result's status and error fields for failures. */
             query_results?: components$1["schemas"]["QueryResult"][];
-
+            generate_result?: components$1["schemas"]["GenerateResult"];
             /** @description Evaluation results when eval config was provided in the request */
             eval_result?: components$1["schemas"]["EvalResult"];
         };

@@ -1366,6 +1610,17 @@ interface components$1 {
             * @example What are the best gaming laptops under $2000?
             */
            query: string;
+            /**
+             * @description Background knowledge that guides the agent's understanding of the domain.
+             * Similar to CLAUDE.md, this provides context that applies to all steps
+             * (classification, retrieval, and answer generation).
+             *
+             * Examples:
+             * - "This data contains medical records. Use clinical terminology and be precise about diagnoses."
+             * - "This is a software engineering knowledge base. Assume a technical audience."
+             * - "This table stores legal documents. Reference laws and regulations accurately."
+             */
+            agent_knowledge?: string;
            /**
             * @description Default generator configuration used for all pipeline steps unless overridden in `steps`.
             * This is the simple configuration - just set this and everything works with sensible defaults.
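
The new `agent_knowledge` field sits next to `query` and the default generator configuration, which suggests it belongs to the answer-agent request schema; the schema name itself is outside this hunk, so treat that as an assumption. A fragment showing how it might be supplied, typed as a `Partial` so that required members not visible in this diff are not guessed at:

```ts
import type { AnswerAgentRequest } from "@antfly/sdk";

// Only the fields visible in this hunk; any other required members of
// AnswerAgentRequest are outside this diff and omitted here.
const agentFields: Partial<AnswerAgentRequest> = {
  query: "What are the best gaming laptops under $2000?",
  agent_knowledge:
    "This is a software engineering knowledge base. Assume a technical audience.",
};
```
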
@@ -2916,6 +3171,68 @@ interface components$1 {
             */
            presence_penalty?: number;
        };
+        /**
+         * @description Configuration for the OpenRouter generative AI provider.
+         *
+         * OpenRouter provides a unified API for multiple LLM providers with automatic fallback routing.
+         * API key via `api_key` field or `OPENROUTER_API_KEY` environment variable.
+         *
+         * **Model Selection:**
+         * - Use `model` for a single model (e.g., "openai/gpt-4.1", "anthropic/claude-sonnet-4-5-20250929")
+         * - Use `models` array for fallback routing - OpenRouter tries models in order until one succeeds
+         *
+         * **Example Models:** openai/gpt-4.1, anthropic/claude-sonnet-4-5-20250929, google/gemini-2.5-flash,
+         * meta-llama/llama-3.3-70b-instruct
+         *
+         * **Docs:** https://openrouter.ai/docs/api/api-reference/chat/send-chat-completion-request
+         * @example {
+         *     "provider": "openrouter",
+         *     "model": "openai/gpt-4.1",
+         *     "temperature": 0.7,
+         *     "max_tokens": 4096
+         *   }
+         */
+        OpenRouterGeneratorConfig: {
+            /**
+             * @description Single model identifier (e.g., 'openai/gpt-4.1'). Either model or models must be provided.
+             * @example openai/gpt-4.1
+             */
+            model?: string;
+            /**
+             * @description Array of model identifiers for fallback routing. OpenRouter tries each model in order
+             * until one succeeds. Either model or models must be provided.
+             * @example [
+             *     "openai/gpt-4.1",
+             *     "anthropic/claude-sonnet-4-5-20250929",
+             *     "google/gemini-2.5-flash"
+             *   ]
+             */
+            models?: string[];
+            /** @description The OpenRouter API key. Can also be set via OPENROUTER_API_KEY environment variable. */
+            api_key?: string;
+            /**
+             * Format: float
+             * @description Controls randomness in generation (0.0-2.0). Higher values make output more random.
+             */
+            temperature?: number;
+            /** @description Maximum number of tokens to generate in the response. */
+            max_tokens?: number;
+            /**
+             * Format: float
+             * @description Nucleus sampling parameter (0.0-1.0). Alternative to temperature.
+             */
+            top_p?: number;
+            /**
+             * Format: float
+             * @description Penalty for token frequency (-2.0 to 2.0).
+             */
+            frequency_penalty?: number;
+            /**
+             * Format: float
+             * @description Penalty for token presence (-2.0 to 2.0).
+             */
+            presence_penalty?: number;
+        };
         /**
          * @description Configuration for the AWS Bedrock generative AI provider.
          *
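
`OpenRouterGeneratorConfig` also joins the `GeneratorConfig` union and the `GeneratorProvider` enum (see the hunks that follow), so an OpenRouter generator is configured like any other provider. A sketch built from the values in the schema's own `@example` blocks:

```ts
import type { GeneratorConfig } from "@antfly/sdk";

// Single model, mirroring the schema's @example.
const generator: GeneratorConfig = {
  provider: "openrouter",
  model: "openai/gpt-4.1",
  temperature: 0.7,
  max_tokens: 4096,
};

// Fallback routing: OpenRouter tries each model in order until one succeeds.
const generatorWithFallback: GeneratorConfig = {
  provider: "openrouter",
  models: [
    "openai/gpt-4.1",
    "anthropic/claude-sonnet-4-5-20250929",
    "google/gemini-2.5-flash",
  ],
};
```
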
@@ -3045,7 +3362,7 @@ interface components$1 {
         * @description The generative AI provider to use.
         * @enum {string}
         */
-        GeneratorProvider: "gemini" | "vertex" | "ollama" | "openai" | "bedrock" | "anthropic" | "cohere" | "mock";
+        GeneratorProvider: "gemini" | "vertex" | "ollama" | "openai" | "openrouter" | "bedrock" | "anthropic" | "cohere" | "mock";
         /**
          * @description A unified configuration for a generative AI provider.
          *

@@ -3215,7 +3532,7 @@ interface components$1 {
         * "max_tokens": 2048
         * }
         */
-        GeneratorConfig: (components$1["schemas"]["GoogleGeneratorConfig"] | components$1["schemas"]["VertexGeneratorConfig"] | components$1["schemas"]["OllamaGeneratorConfig"] | components$1["schemas"]["OpenAIGeneratorConfig"] | components$1["schemas"]["BedrockGeneratorConfig"] | components$1["schemas"]["AnthropicGeneratorConfig"] | components$1["schemas"]["CohereGeneratorConfig"]) & {
+        GeneratorConfig: (components$1["schemas"]["GoogleGeneratorConfig"] | components$1["schemas"]["VertexGeneratorConfig"] | components$1["schemas"]["OllamaGeneratorConfig"] | components$1["schemas"]["OpenAIGeneratorConfig"] | components$1["schemas"]["OpenRouterGeneratorConfig"] | components$1["schemas"]["BedrockGeneratorConfig"] | components$1["schemas"]["AnthropicGeneratorConfig"] | components$1["schemas"]["CohereGeneratorConfig"]) & {
            provider: components$1["schemas"]["GeneratorProvider"];
        };
         /** @description Retry configuration for generator calls */

@@ -3328,10 +3645,10 @@ interface components$1 {
            /** @description Evaluation options (k, thresholds, etc.) */
            options?: components$1["schemas"]["EvalOptions"];
        };
-        /** @description Result of a
-
-        /** @description The generated
-
+        /** @description Result of a generate operation. Formatted as markdown by default with inline resource references using [resource_id <id>] or [resource_id <id1>, <id2>] format. */
+        GenerateResult: {
+            /** @description The generated text in markdown format with inline resource references like [resource_id res1] or [resource_id res1, res2] */
+            text: string;
         };
         /** @description Result from a single evaluator */
         EvaluatorScore: {

@@ -3837,6 +4154,33 @@ interface components$1 {
         * @enum {string}
         */
        WebSearchProvider: "google" | "bing" | "serper" | "tavily" | "brave" | "duckduckgo";
+        Credentials: {
+            /**
+             * @description S3-compatible endpoint (e.g., 's3.amazonaws.com' or 'localhost:9000' for MinIO)
+             * @example s3.amazonaws.com
+             */
+            endpoint?: string;
+            /**
+             * @description Enable SSL/TLS for S3 connections (default: true for AWS, false for local MinIO)
+             * @default true
+             */
+            use_ssl?: boolean;
+            /**
+             * @description AWS access key ID. Supports keystore syntax for secret lookup. Falls back to AWS_ACCESS_KEY_ID environment variable if not set.
+             * @example your-access-key-id
+             */
+            access_key_id?: string;
+            /**
+             * @description AWS secret access key. Supports keystore syntax for secret lookup. Falls back to AWS_SECRET_ACCESS_KEY environment variable if not set.
+             * @example your-secret-access-key
+             */
+            secret_access_key?: string;
+            /**
+             * @description Optional AWS session token for temporary credentials. Supports keystore syntax for secret lookup.
+             * @example your-session-token
+             */
+            session_token?: string;
+        };
         /**
          * @description Configuration for URL content fetching.
          *

@@ -3846,6 +4190,7 @@ interface components$1 {
         * - PDF files (extracts text)
         * - Images (returns as data URIs)
         * - Plain text files
+         * - S3 URLs (requires s3_credentials)
         *
         * Security features (from lib/scraping.ContentSecurityConfig):
         * - Allowed host whitelist

@@ -3854,6 +4199,8 @@ interface components$1 {
         * - Timeout controls
         */
        FetchConfig: {
+            /** @description S3 credentials for fetching S3 URLs. If not set, uses package-level defaults. */
+            s3_credentials?: components$1["schemas"]["Credentials"];
            /**
             * @description Maximum content length in characters (truncated if exceeded)
             * @default 50000
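
Together, the new `Credentials` schema and the `s3_credentials` field on `FetchConfig` let URL fetching resolve `s3://` sources, including S3-compatible stores such as MinIO. A sketch using the exported types; the endpoint and key placeholders are the schema's own examples, and the remaining `FetchConfig` members are left to their defaults:

```ts
import type { components, FetchConfig } from "@antfly/sdk";

// Credentials for an S3-compatible store (MinIO values from the schema examples).
const s3: components["schemas"]["Credentials"] = {
  endpoint: "localhost:9000",
  use_ssl: false, // default is true for AWS; false suits local MinIO
  access_key_id: "your-access-key-id",
  secret_access_key: "your-secret-access-key",
};

// Attach to a fetch config; other FetchConfig fields keep their defaults.
const fetchConfig: Partial<FetchConfig> = { s3_credentials: s3 };
```
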
@@ -4151,6 +4498,34 @@ interface components$1 {
            /** @description Output dimension for the embedding (uses MRL for dimension reduction). Recommended: 256, 512, 1024, 1536, or 3072. */
            dimensions?: number;
        };
+        /**
+         * @description Configuration for the OpenRouter embedding provider.
+         *
+         * OpenRouter provides a unified API for multiple embedding models from different providers.
+         * API key via `api_key` field or `OPENROUTER_API_KEY` environment variable.
+         *
+         * **Example Models:** openai/text-embedding-3-small (default), openai/text-embedding-3-large,
+         * google/gemini-embedding-001, qwen/qwen3-embedding-8b
+         *
+         * **Docs:** https://openrouter.ai/docs/api/reference/embeddings
+         * @example {
+         *     "provider": "openrouter",
+         *     "model": "openai/text-embedding-3-small",
+         *     "api_key": "sk-or-..."
+         *   }
+         */
+        OpenRouterEmbedderConfig: {
+            /**
+             * @description The OpenRouter model identifier (e.g., 'openai/text-embedding-3-small', 'google/gemini-embedding-001').
+             * @default openai/text-embedding-3-small
+             * @example openai/text-embedding-3-small
+             */
+            model: string;
+            /** @description The OpenRouter API key. Can also be set via OPENROUTER_API_KEY environment variable. */
+            api_key?: string;
+            /** @description Output dimension for the embedding (if supported by the model). */
+            dimensions?: number;
+        };
         /**
          * @description Configuration for the AWS Bedrock embedding provider.
          *
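
The embedding side gets the matching `OpenRouterEmbedderConfig`, which joins the `EmbedderConfig` union and `EmbedderProvider` enum in the hunks below. A sketch mirroring the schema's `@example`; the API key can instead come from the `OPENROUTER_API_KEY` environment variable:

```ts
import type { EmbedderConfig } from "@antfly/sdk";

// OpenRouter embedder; model defaults to openai/text-embedding-3-small.
const embedder: EmbedderConfig = {
  provider: "openrouter",
  model: "openai/text-embedding-3-small",
  api_key: "sk-or-...", // placeholder from the schema example
};
```
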
@@ -4227,7 +4602,7 @@ interface components$1 {
         * @description The embedding provider to use.
         * @enum {string}
         */
-        EmbedderProvider: "gemini" | "vertex" | "ollama" | "openai" | "bedrock" | "cohere" | "mock";
+        EmbedderProvider: "gemini" | "vertex" | "ollama" | "openai" | "openrouter" | "bedrock" | "cohere" | "mock";
         /**
          * @description A unified configuration for an embedding provider.
          *

@@ -4401,9 +4776,25 @@ interface components$1 {
         * "model": "text-embedding-3-small"
         * }
         */
-        EmbedderConfig: (components$1["schemas"]["GoogleEmbedderConfig"] | components$1["schemas"]["VertexEmbedderConfig"] | components$1["schemas"]["OllamaEmbedderConfig"] | components$1["schemas"]["OpenAIEmbedderConfig"] | components$1["schemas"]["BedrockEmbedderConfig"] | components$1["schemas"]["CohereEmbedderConfig"]) & {
+        EmbedderConfig: (components$1["schemas"]["GoogleEmbedderConfig"] | components$1["schemas"]["VertexEmbedderConfig"] | components$1["schemas"]["OllamaEmbedderConfig"] | components$1["schemas"]["OpenAIEmbedderConfig"] | components$1["schemas"]["OpenRouterEmbedderConfig"] | components$1["schemas"]["BedrockEmbedderConfig"] | components$1["schemas"]["CohereEmbedderConfig"]) & {
            provider: components$1["schemas"]["EmbedderProvider"];
        };
+        /** @description Per-request configuration for chunking. All fields are optional - zero/omitted values use chunker defaults. */
+        ChunkOptions: {
+            /** @description Maximum number of chunks to generate per document. */
+            max_chunks?: number;
+            /** @description Number of tokens to overlap between consecutive chunks. Helps maintain context across chunk boundaries. Only used by fixed-size chunkers. */
+            overlap_tokens?: number;
+            /** @description Separator string for splitting (e.g., '\n\n' for paragraphs). Only used by fixed-size chunkers. */
+            separator?: string;
+            /**
+             * Format: float
+             * @description Minimum confidence threshold for separator detection (0.0-1.0). Only used by ONNX models.
+             */
+            threshold?: number;
+            /** @description Target number of tokens per chunk. */
+            target_tokens?: number;
+        };
         /**
          * @description Configuration for the Termite chunking provider.
          *

@@ -4429,7 +4820,7 @@ interface components$1 {
         * "full_text": {}
         * }
         */
-        TermiteChunkerConfig: {
+        TermiteChunkerConfig: components$1["schemas"]["ChunkOptions"] & {
            /**
             * Format: uri
             * @description The URL of the Termite API endpoint (e.g., 'http://localhost:8080'). Can also be set via ANTFLY_TERMITE_URL environment variable.

@@ -4442,32 +4833,6 @@ interface components$1 {
             * @example fixed
             */
            model: string;
-            /**
-             * @description Target number of tokens per chunk. Chunker will aim for chunks around this size.
-             * @default 500
-             */
-            target_tokens?: number;
-            /**
-             * @description Number of tokens to overlap between consecutive chunks. Helps maintain context across chunk boundaries.
-             * @default 50
-             */
-            overlap_tokens?: number;
-            /**
-             * @description Separator string for splitting (e.g., '\n\n' for paragraphs). Only used with fixed strategy.
-             * @default
-             */
-            separator?: string;
-            /**
-             * @description Maximum number of chunks to generate per document. Prevents excessive chunking of very large documents.
-             * @default 50
-             */
-            max_chunks?: number;
-            /**
-             * Format: float
-             * @description Minimum confidence threshold for separator detection. Only used with ONNX-based models.
-             * @default 0.5
-             */
-            threshold?: number;
            /**
             * @description Configuration for full-text indexing of chunks in Bleve.
             * When present (even if empty), chunks will be stored with :cft: suffix and indexed in Bleve's _chunks field.

@@ -4503,27 +4868,7 @@ interface components$1 {
         * "max_chunks": 50
         * }
         */
-        AntflyChunkerConfig: {
-            /**
-             * @description Target number of tokens per chunk. Chunker will aim for chunks around this size.
-             * @default 500
-             */
-            target_tokens?: number;
-            /**
-             * @description Number of tokens to overlap between consecutive chunks. Helps maintain context across chunk boundaries.
-             * @default 50
-             */
-            overlap_tokens?: number;
-            /**
-             * @description Separator string for splitting (e.g., '\n\n' for paragraphs).
-             * @default
-             */
-            separator?: string;
-            /**
-             * @description Maximum number of chunks to generate per document. Prevents excessive chunking of very large documents.
-             * @default 50
-             */
-            max_chunks?: number;
+        AntflyChunkerConfig: components$1["schemas"]["ChunkOptions"] & {
            /**
             * @description Configuration for full-text indexing of chunks in Bleve.
             * When present (even if empty), chunks will be stored with :cft: suffix and indexed in Bleve's _chunks field.
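
Both chunker configs now extend the shared `ChunkOptions` schema instead of redeclaring the same tuning fields. A sketch of a `ChunkOptions` literal via the exported `components` alias; the values are the former per-chunker defaults removed above (500/50/0.5), used purely as illustration, and the same fields can be spread into either `TermiteChunkerConfig` or `AntflyChunkerConfig`:

```ts
import type { components } from "@antfly/sdk";

type ChunkOptions = components["schemas"]["ChunkOptions"];

// All fields are optional; zero/omitted values fall back to chunker defaults.
const chunking: ChunkOptions = {
  target_tokens: 500,
  overlap_tokens: 50, // only used by fixed-size chunkers
  separator: "\n\n", // only used by fixed-size chunkers
  max_chunks: 50,
  threshold: 0.5, // only used by ONNX models
};
```
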
@@ -4840,6 +5185,88 @@ interface operations {
            500: components$1["responses"]["InternalServerError"];
        };
    };
+    backup: {
+        parameters: {
+            query?: never;
+            header?: never;
+            path?: never;
+            cookie?: never;
+        };
+        requestBody: {
+            content: {
+                "application/json": components$1["schemas"]["ClusterBackupRequest"];
+            };
+        };
+        responses: {
+            /** @description Backup completed successfully */
+            200: {
+                headers: {
+                    [name: string]: unknown;
+                };
+                content: {
+                    "application/json": components$1["schemas"]["ClusterBackupResponse"];
+                };
+            };
+            400: components$1["responses"]["BadRequest"];
+            500: components$1["responses"]["InternalServerError"];
+        };
+    };
+    restore: {
+        parameters: {
+            query?: never;
+            header?: never;
+            path?: never;
+            cookie?: never;
+        };
+        requestBody: {
+            content: {
+                "application/json": components$1["schemas"]["ClusterRestoreRequest"];
+            };
+        };
+        responses: {
+            /** @description Restore triggered successfully */
+            202: {
+                headers: {
+                    [name: string]: unknown;
+                };
+                content: {
+                    "application/json": components$1["schemas"]["ClusterRestoreResponse"];
+                };
+            };
+            400: components$1["responses"]["BadRequest"];
+            500: components$1["responses"]["InternalServerError"];
+        };
+    };
+    listBackups: {
+        parameters: {
+            query: {
+                /**
+                 * @description Storage location to search for backups.
+                 * - Local filesystem: `file:///path/to/backup`
+                 * - Amazon S3: `s3://bucket-name/path/to/backup`
+                 * @example s3://mybucket/antfly-backups/
+                 */
+                location: string;
+            };
+            header?: never;
+            path?: never;
+            cookie?: never;
+        };
+        requestBody?: never;
+        responses: {
+            /** @description List of available backups */
+            200: {
+                headers: {
+                    [name: string]: unknown;
+                };
+                content: {
+                    "application/json": components$1["schemas"]["BackupListResponse"];
+                };
+            };
+            400: components$1["responses"]["BadRequest"];
+            500: components$1["responses"]["InternalServerError"];
+        };
+    };
     globalQuery: {
        parameters: {
            query?: never;

@@ -5291,7 +5718,7 @@ interface operations {
            };
        };
    };
-
+    batchWrite: {
        parameters: {
            query?: never;
            header?: never;

@@ -5313,19 +5740,7 @@ interface operations {
                    [name: string]: unknown;
                };
                content: {
-                    "application/json":
-                        /** @description Number of documents successfully inserted */
-                        inserted?: number;
-                        /** @description Number of documents successfully deleted */
-                        deleted?: number;
-                        /** @description List of failed operations with error details */
-                        failed?: {
-                            /** @description The document ID that failed */
-                            id?: string;
-                            /** @description Error message for this failure */
-                            error?: string;
-                        }[];
-                    };
+                    "application/json": components$1["schemas"]["BatchResponse"];
                };
            };
            400: components$1["responses"]["BadRequest"];
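
The `batchWrite` operation now returns the named `BatchResponse` schema (inserted/deleted/transformed counters) instead of the previous inline object with a `failed` list, and `AntflyClient.batch` mirrors that shape in the client hunks further down. A usage sketch, assuming an already-constructed `AntflyClient` and a hypothetical `docs` table; the `BatchRequest` body shape is not part of this diff, so it is only declared here:

```ts
import type { AntflyClient, BatchRequest } from "@antfly/sdk";

declare const client: AntflyClient;  // constructed elsewhere
declare const request: BatchRequest; // inserts/deletes; shape not shown in this diff

async function runBatch() {
  // batch() now resolves to the BatchResponse counters.
  const result = await client.batch("docs", request);
  console.log(
    `inserted=${result.inserted ?? 0} deleted=${result.deleted ?? 0} transformed=${result.transformed ?? 0}`,
  );
}
```
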
@@ -6325,6 +6740,10 @@ type ResourceType = components$1["schemas"]["ResourceType"];
 type PermissionType = components$1["schemas"]["PermissionType"];
 type BackupRequest = components$1["schemas"]["BackupRequest"];
 type RestoreRequest = components$1["schemas"]["RestoreRequest"];
+type ScanKeysRequest = Omit<components$1["schemas"]["ScanKeysRequest"], "filter_query"> & {
+    /** Full JSON Bleve filter query with proper type checking */
+    filter_query?: BleveQuery;
+};
 type DocumentSchema = components$1["schemas"]["DocumentSchema"];
 type FacetOption = components$1["schemas"]["FacetOption"];
 type FacetResult = components$1["schemas"]["FacetResult"];

@@ -6337,7 +6756,7 @@ type EmbedderProvider = components$1["schemas"]["EmbedderProvider"];
 declare const embedderProviders: components$1["schemas"]["EmbedderProvider"][];
 type GeneratorProvider = components$1["schemas"]["GeneratorProvider"];
 declare const generatorProviders: components$1["schemas"]["GeneratorProvider"][];
-type
+type GenerateResult = components$1["schemas"]["GenerateResult"];
 type RAGResult = components$1["schemas"]["RAGResult"];
 type AnswerAgentRequest = components$1["schemas"]["AnswerAgentRequest"];
 type AnswerAgentResult = components$1["schemas"]["AnswerAgentResult"];

@@ -6623,10 +7042,7 @@ declare class AntflyClient {
     batch: (tableName: string, request: BatchRequest) => Promise<{
         inserted?: number;
         deleted?: number;
-
-        id?: string;
-        error?: string;
-        }[];
+        transformed?: number;
     }>;
     /**
      * Backup a table

@@ -6642,10 +7058,38 @@ declare class AntflyClient {
     }>;
     /**
      * Lookup a specific key in a table
+     * @param tableName - Name of the table
+     * @param key - Key of the record to lookup
+     * @param options - Optional parameters
+     * @param options.fields - Comma-separated list of fields to include (e.g., "title,author,metadata.tags")
+     */
+    lookup: (tableName: string, key: string, options?: {
+        fields?: string;
+    }) => Promise<{
+        [key: string]: unknown;
+    }>;
+    /**
+     * Scan keys in a table within a key range
+     * Returns documents as an async iterable, streaming results as NDJSON.
+     * @param tableName - Name of the table
+     * @param request - Scan request with optional key range, field projection, and filtering
+     * @returns AsyncGenerator yielding documents with their keys
      */
-
+    scan: (tableName: string, request?: ScanKeysRequest) => AsyncGenerator<{
+        _key: string;
         [key: string]: unknown;
     }>;
+    /**
+     * Scan keys in a table and collect all results into an array
+     * Convenience method that consumes the scan AsyncGenerator
+     * @param tableName - Name of the table
+     * @param request - Scan request with optional key range, field projection, and filtering
+     * @returns Promise with array of all matching documents
+     */
+    scanAll: (tableName: string, request?: ScanKeysRequest) => Promise<Array<{
+        _key: string;
+        [key: string]: unknown;
+    }>>;
     /**
      * RAG (Retrieval-Augmented Generation) query on a specific table with streaming or citations
      * @param tableName - Name of the table to query
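
The client grows `lookup`, `scan`, and `scanAll`: `scan` streams NDJSON and surfaces it as an `AsyncGenerator`, while `scanAll` collects everything into an array. A sketch assuming an already-constructed client and a hypothetical `products` table and key; `ScanKeysRequest`'s key-range and filter fields are not shown in this diff, so the scans below pass no request and take the defaults:

```ts
import type { AntflyClient } from "@antfly/sdk";

declare const client: AntflyClient; // constructed elsewhere

async function browseProducts() {
  // Fetch a single record, projecting only a few fields.
  const doc = await client.lookup("products", "prod-123", {
    fields: "title,author,metadata.tags",
  });
  console.log(doc);

  // Stream documents one at a time; each item carries its _key.
  for await (const row of client.scan("products")) {
    console.log(row._key, row);
  }

  // Or collect the whole result set at once.
  const all = await client.scanAll("products");
  console.log(`scanned ${all.length} documents`);
}
```
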
@@ -6987,4 +7431,4 @@ declare function geoBoundingBox(field: string, bounds: {
 * ```
 */

-export { type AnswerAgentRequest, type AnswerAgentResult, type AnswerAgentStreamCallbacks, type AnswerConfidence, AntflyClient, type AntflyConfig, type AntflyError, type AntflyType, type BackupRequest, type BatchRequest, type ChatAgentRequest, type ChatAgentResult, type ChatAgentSteps, type ChatAgentStreamCallbacks, type ChatMessage, type ChatMessageRole, type ChatToolCall, type ChatToolName, type ChatToolResult, type ChatToolsConfig, type ClarificationRequest, type ClassificationTransformationResult, type CreateTableRequest, type CreateUserRequest, type DocumentSchema, type EmbedderConfig, type EmbedderProvider, type FacetOption, type FacetResult, type FetchConfig, type FilterSpec, type GeneratorConfig, type GeneratorProvider, type IndexConfig, type IndexStatus, type Permission, type PermissionType, type QueryBuilderRequest, type QueryBuilderResult, type QueryHit, type QueryOptions, type QueryRequest, type QueryResponses, type QueryResult, type QueryStrategy, type RAGRequest, type RAGResult, type RAGStreamCallbacks, type RerankerConfig, type ResourceType, type ResponseData, type RestoreRequest, type RouteType, type SemanticQueryMode, type
+export { type AnswerAgentRequest, type AnswerAgentResult, type AnswerAgentStreamCallbacks, type AnswerConfidence, AntflyClient, type AntflyConfig, type AntflyError, type AntflyType, type BackupRequest, type BatchRequest, type ChatAgentRequest, type ChatAgentResult, type ChatAgentSteps, type ChatAgentStreamCallbacks, type ChatMessage, type ChatMessageRole, type ChatToolCall, type ChatToolName, type ChatToolResult, type ChatToolsConfig, type ClarificationRequest, type ClassificationTransformationResult, type CreateTableRequest, type CreateUserRequest, type DocumentSchema, type EmbedderConfig, type EmbedderProvider, type FacetOption, type FacetResult, type FetchConfig, type FilterSpec, type GenerateResult, type GeneratorConfig, type GeneratorProvider, type IndexConfig, type IndexStatus, type Permission, type PermissionType, type QueryBuilderRequest, type QueryBuilderResult, type QueryHit, type QueryOptions, type QueryRequest, type QueryResponses, type QueryResult, type QueryStrategy, type RAGRequest, type RAGResult, type RAGStreamCallbacks, type RerankerConfig, type ResourceType, type ResponseData, type RestoreRequest, type RouteType, type SemanticQueryMode, type Table, type TableSchema, type TableStatus, type TermFacetResult, type UpdatePasswordRequest, type User, type WebSearchConfig, type WebSearchResultItem, type components as bleve_components, boolean, type components$1 as components, conjunction, dateRange, AntflyClient as default, disjunction, docIds, embedderProviders, fuzzy, generatorProviders, geoBoundingBox, geoDistance, match, matchAll, matchNone, matchPhrase, numericRange, type operations, type paths, prefix, queryString, term };