@robosystems/client 0.2.1 → 0.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/sdk/sdk.gen.js CHANGED
@@ -2,7 +2,7 @@
2
2
  // This file is auto-generated by @hey-api/openapi-ts
3
3
  Object.defineProperty(exports, "__esModule", { value: true });
4
4
  exports.batchProcessQueries = exports.executeSpecificAgent = exports.autoSelectAgent = exports.syncConnection = exports.getConnection = exports.deleteConnection = exports.oauthCallback = exports.initOAuth = exports.createLinkToken = exports.exchangeLinkToken = exports.getConnectionOptions = exports.createConnection = exports.listConnections = exports.getRepositoryCredits = exports.getSharedRepositoryCredits = exports.cancelSharedRepositorySubscription = exports.upgradeSharedRepositorySubscription = exports.subscribeToSharedRepository = exports.getUserSharedSubscriptions = exports.getDetailedUserAnalytics = exports.getUserUsageOverview = exports.getSharedRepositoryLimits = exports.getAllSharedRepositoryLimits = exports.getUserUsage = exports.getUserLimits = exports.updateUserApiKey = exports.revokeUserApiKey = exports.createUserApiKey = exports.listUserApiKeys = exports.updateUserPassword = exports.getAllCreditSummaries = exports.updateUser = exports.getCurrentUser = exports.getServiceStatus = exports.getCaptchaConfig = exports.completeSsoAuth = exports.ssoTokenExchange = exports.generateSsoToken = exports.resetPassword = exports.validateResetToken = exports.forgotPassword = exports.checkPasswordStrength = exports.getPasswordPolicy = exports.verifyEmail = exports.resendVerificationEmail = exports.refreshAuthSession = exports.getCurrentAuthUser = exports.logoutUser = exports.loginUser = exports.registerUser = void 0;
5
- exports.cancelOperation = exports.getOperationStatus = exports.streamOperationEvents = exports.getServiceOfferings = exports.selectGraph = exports.getAvailableExtensions = exports.createGraph = exports.getGraphs = exports.queryTablesV1GraphsGraphIdTablesQueryPost = exports.ingestTablesV1GraphsGraphIdTablesIngestPost = exports.updateFileV1GraphsGraphIdTablesFilesFileIdPatch = exports.getFileInfoV1GraphsGraphIdTablesFilesFileIdGet = exports.deleteFileV1GraphsGraphIdTablesFilesFileIdDelete = exports.getUploadUrlV1GraphsGraphIdTablesTableNameFilesPost = exports.listTableFilesV1GraphsGraphIdTablesTableNameFilesGet = exports.listTablesV1GraphsGraphIdTablesGet = exports.getSubgraphQuota = exports.getSubgraphInfo = exports.deleteSubgraph = exports.createSubgraph = exports.listSubgraphs = exports.getGraphLimits = exports.getDatabaseInfo = exports.getDatabaseHealth = exports.checkStorageLimits = exports.getStorageUsage = exports.checkCreditBalance = exports.listCreditTransactions = exports.getCreditSummary = exports.getGraphMonthlyBill = exports.getGraphBillingHistory = exports.getGraphUsageDetails = exports.getCurrentGraphBill = exports.validateSchema = exports.exportGraphSchema = exports.getGraphSchema = exports.executeCypherQuery = exports.getGraphUsageStats = exports.getGraphMetrics = exports.getBackupStats = exports.restoreBackup = exports.getBackupDownloadUrl = exports.createBackup = exports.listBackups = exports.callMcpTool = exports.listMcpTools = exports.recommendAgent = exports.getAgentMetadata = exports.listAgents = void 0;
5
+ exports.cancelOperation = exports.getOperationStatus = exports.streamOperationEvents = exports.getServiceOfferings = exports.selectGraph = exports.getAvailableExtensions = exports.createGraph = exports.getGraphs = exports.queryTables = exports.ingestTables = exports.updateFileStatus = exports.getFileInfo = exports.deleteFile = exports.getUploadUrl = exports.listTableFiles = exports.listTables = exports.getSubgraphQuota = exports.getSubgraphInfo = exports.deleteSubgraph = exports.createSubgraph = exports.listSubgraphs = exports.getGraphLimits = exports.getDatabaseInfo = exports.getDatabaseHealth = exports.checkStorageLimits = exports.getStorageUsage = exports.checkCreditBalance = exports.listCreditTransactions = exports.getCreditSummary = exports.getGraphMonthlyBill = exports.getGraphBillingHistory = exports.getGraphUsageDetails = exports.getCurrentGraphBill = exports.validateSchema = exports.exportGraphSchema = exports.getGraphSchema = exports.executeCypherQuery = exports.getGraphUsageStats = exports.getGraphMetrics = exports.getBackupStats = exports.restoreBackup = exports.getBackupDownloadUrl = exports.createBackup = exports.listBackups = exports.callMcpTool = exports.listMcpTools = exports.recommendAgent = exports.getAgentMetadata = exports.listAgents = void 0;
6
6
  const client_gen_1 = require("./client.gen");
7
7
  /**
8
8
  * Register New User
@@ -1277,9 +1277,8 @@ exports.recommendAgent = recommendAgent;
1277
1277
  * - User permissions and subscription tier
1278
1278
  * - Backend capabilities (Kuzu, Neo4j, etc.)
1279
1279
  *
1280
- * Credit consumption:
1281
- * - Listing tools is included to encourage exploration
1282
- * - Tool execution costs vary by operation complexity
1280
+ * **Note:**
1281
+ * MCP tool listing is included - no credit consumption required.
1283
1282
  */
1284
1283
  const listMcpTools = (options) => {
1285
1284
  return (options.client ?? client_gen_1.client).get({
@@ -1332,8 +1331,11 @@ exports.listMcpTools = listMcpTools;
1332
1331
  * - `408 Request Timeout`: Tool execution exceeded timeout
1333
1332
  * - Clients should implement exponential backoff on errors
1334
1333
  *
1335
- * **Note:**
1336
- * MCP tool calls are included and do not consume credits.
1334
+ * **Credit Model:**
1335
+ * MCP tool execution is included - no credit consumption required. Database
1336
+ * operations (queries, schema inspection, analytics) are completely free.
1337
+ * Only AI operations that invoke Claude or other LLM APIs consume credits,
1338
+ * which happens at the AI agent layer, not the MCP tool layer.
1337
1339
  */
1338
1340
  const callMcpTool = (options) => {
1339
1341
  return (options.client ?? client_gen_1.client).post({
@@ -1640,6 +1642,13 @@ exports.getGraphUsageStats = getGraphUsageStats;
1640
1642
  * 1. Create file upload: `POST /v1/graphs/{graph_id}/tables/{table_name}/files`
1641
1643
  * 2. Ingest to graph: `POST /v1/graphs/{graph_id}/tables/ingest`
1642
1644
  *
1645
+ * **Security Best Practice - Use Parameterized Queries:**
1646
+ * ALWAYS use query parameters instead of string interpolation to prevent injection attacks:
1647
+ * - ✅ SAFE: `MATCH (n:Entity {type: $entity_type}) RETURN n` with `parameters: {"entity_type": "Company"}`
1648
+ * - ❌ UNSAFE: `MATCH (n:Entity {type: "Company"}) RETURN n` with user input concatenated into query string
1649
+ *
1650
+ * Query parameters provide automatic escaping and type safety. All examples in this API use parameterized queries.
1651
+ *
1643
1652
  * This endpoint automatically selects the best execution strategy based on:
1644
1653
  * - Query characteristics (size, complexity)
1645
1654
  * - Client capabilities (SSE, NDJSON, JSON)
@@ -1711,13 +1720,38 @@ exports.executeCypherQuery = executeCypherQuery;
1711
1720
  * Get Runtime Graph Schema
1712
1721
  * Get runtime schema information for the specified graph database.
1713
1722
  *
1714
- * This endpoint inspects the actual graph database structure and returns:
1723
+ * ## What This Returns
1724
+ *
1725
+ * This endpoint inspects the **actual current state** of the graph database and returns:
1715
1726
  * - **Node Labels**: All node types currently in the database
1716
1727
  * - **Relationship Types**: All relationship types currently in the database
1717
- * - **Node Properties**: Properties for each node type (limited to first 10 for performance)
1728
+ * - **Node Properties**: Properties discovered from actual data (up to 10 properties per node type)
1729
+ *
1730
+ * ## Runtime vs Declared Schema
1731
+ *
1732
+ * **Use this endpoint** (`/schema`) when you need to know:
1733
+ * - What data is ACTUALLY in the database right now
1734
+ * - What properties exist on real nodes
1735
+ * - What relationships have been created
1736
+ * - Current database structure for querying
1718
1737
  *
1719
- * This shows what actually exists in the database right now - the runtime state.
1720
- * For the declared schema definition, use GET /schema/export instead.
1738
+ * **Use `/schema/export` instead** when you need:
1739
+ * - The original schema definition used to create the graph
1740
+ * - Schema in a specific format (JSON, YAML, Cypher DDL)
1741
+ * - Schema for documentation or version control
1742
+ * - Schema to replicate in another graph
1743
+ *
1744
+ * ## Example Use Cases
1745
+ *
1746
+ * - **Building queries**: See what node labels and properties exist to write accurate Cypher
1747
+ * - **Data exploration**: Discover what's in an unfamiliar graph
1748
+ * - **Schema drift detection**: Compare runtime vs declared schema
1749
+ * - **API integration**: Dynamically adapt to current graph structure
1750
+ *
1751
+ * ## Performance Note
1752
+ *
1753
+ * Property discovery is limited to 10 properties per node type for performance.
1754
+ * For complete schema definitions, use `/schema/export`.
1721
1755
  *
1722
1756
  * This operation is included - no credit consumption required.
1723
1757
  */
@@ -1739,8 +1773,54 @@ const getGraphSchema = (options) => {
1739
1773
  };
1740
1774
  exports.getGraphSchema = getGraphSchema;
1741
1775
  /**
1742
- * Export Graph Schema
1743
- * Export the schema of an existing graph in JSON, YAML, or Cypher format
1776
+ * Export Declared Graph Schema
1777
+ * Export the declared schema definition of an existing graph.
1778
+ *
1779
+ * ## What This Returns
1780
+ *
1781
+ * This endpoint returns the **original schema definition** that was used to create the graph:
1782
+ * - The schema as it was **declared** during graph creation
1783
+ * - Complete node and relationship definitions
1784
+ * - Property types and constraints
1785
+ * - Schema metadata (name, version, type)
1786
+ *
1787
+ * ## Runtime vs Declared Schema
1788
+ *
1789
+ * **Use this endpoint** (`/schema/export`) when you need:
1790
+ * - The original schema definition used to create the graph
1791
+ * - Schema in a specific format (JSON, YAML, Cypher DDL)
1792
+ * - Schema for documentation or version control
1793
+ * - Schema to replicate in another graph
1794
+ *
1795
+ * **Use `/schema` instead** when you need:
1796
+ * - What data is ACTUALLY in the database right now
1797
+ * - What properties exist on real nodes (discovered from data)
1798
+ * - Current runtime database structure for querying
1799
+ *
1800
+ * ## Export Formats
1801
+ *
1802
+ * ### JSON Format (`format=json`)
1803
+ * Returns structured JSON with nodes, relationships, and properties.
1804
+ * Best for programmatic access and API integration.
1805
+ *
1806
+ * ### YAML Format (`format=yaml`)
1807
+ * Returns human-readable YAML with comments.
1808
+ * Best for documentation and configuration management.
1809
+ *
1810
+ * ### Cypher DDL Format (`format=cypher`)
1811
+ * Returns Cypher CREATE statements for recreating the schema.
1812
+ * Best for database migration and replication.
1813
+ *
1814
+ * ## Data Statistics
1815
+ *
1816
+ * Set `include_data_stats=true` to include:
1817
+ * - Node counts by label
1818
+ * - Relationship counts by type
1819
+ * - Total nodes and relationships
1820
+ *
1821
+ * This combines declared schema with runtime statistics.
1822
+ *
1823
+ * This operation is included - no credit consumption required.
1744
1824
  */
1745
1825
  const exportGraphSchema = (options) => {
1746
1826
  return (options.client ?? client_gen_1.client).get({
@@ -2389,9 +2469,42 @@ const getSubgraphQuota = (options) => {
2389
2469
  exports.getSubgraphQuota = getSubgraphQuota;
2390
2470
  /**
2391
2471
  * List Staging Tables
2392
- * List all DuckDB staging tables for a graph
2472
+ * List all DuckDB staging tables with comprehensive metrics and status.
2473
+ *
2474
+ * Get a complete inventory of all staging tables for a graph, including
2475
+ * file counts, storage sizes, and row estimates. Essential for monitoring
2476
+ * the data pipeline and determining which tables are ready for ingestion.
2477
+ *
2478
+ * **Returned Metrics:**
2479
+ * - Table name and type (node/relationship)
2480
+ * - File count per table
2481
+ * - Total storage size in bytes
2482
+ * - Estimated row count
2483
+ * - S3 location pattern
2484
+ * - Ready-for-ingestion status
2485
+ *
2486
+ * **Use Cases:**
2487
+ * - Monitor data upload progress
2488
+ * - Check which tables have files ready
2489
+ * - Track storage consumption
2490
+ * - Validate pipeline before ingestion
2491
+ * - Capacity planning
2492
+ *
2493
+ * **Workflow:**
2494
+ * 1. List tables to see current state
2495
+ * 2. Upload files to empty tables
2496
+ * 3. Re-list to verify uploads
2497
+ * 4. Check file counts and sizes
2498
+ * 5. Ingest when ready
2499
+ *
2500
+ * **Important Notes:**
2501
+ * - Tables with `file_count > 0` have data ready
2502
+ * - Check `total_size_bytes` for storage monitoring
2503
+ * - Use `s3_location` to verify upload paths
2504
+ * - Empty tables (file_count=0) are skipped during ingestion
2505
+ * - Table queries are included - no credit consumption
2393
2506
  */
2394
- const listTablesV1GraphsGraphIdTablesGet = (options) => {
2507
+ const listTables = (options) => {
2395
2508
  return (options.client ?? client_gen_1.client).get({
2396
2509
  security: [
2397
2510
  {
@@ -2407,12 +2520,45 @@ const listTablesV1GraphsGraphIdTablesGet = (options) => {
2407
2520
  ...options
2408
2521
  });
2409
2522
  };
2410
- exports.listTablesV1GraphsGraphIdTablesGet = listTablesV1GraphsGraphIdTablesGet;
2523
+ exports.listTables = listTables;
2411
2524
  /**
2412
- * List Files in Table
2413
- * List all files uploaded to a staging table
2525
+ * List Files in Staging Table
2526
+ * List all files uploaded to a staging table with comprehensive metadata.
2527
+ *
2528
+ * Get a complete inventory of all files in a staging table, including upload status,
2529
+ * file sizes, row counts, and S3 locations. Essential for monitoring upload progress
2530
+ * and validating data before ingestion.
2531
+ *
2532
+ * **Use Cases:**
2533
+ * - Monitor file upload progress
2534
+ * - Verify files are ready for ingestion
2535
+ * - Check file formats and sizes
2536
+ * - Track storage usage per table
2537
+ * - Identify failed or incomplete uploads
2538
+ * - Pre-ingestion validation
2539
+ *
2540
+ * **Returned Metadata:**
2541
+ * - File ID, name, and format (parquet, csv, json)
2542
+ * - Size in bytes and row count (if available)
2543
+ * - Upload status and method
2544
+ * - Creation and upload timestamps
2545
+ * - S3 key for reference
2546
+ *
2547
+ * **Upload Status Values:**
2548
+ * - `pending`: Upload URL generated, awaiting upload
2549
+ * - `uploaded`: Successfully uploaded, ready for ingestion
2550
+ * - `disabled`: Excluded from ingestion
2551
+ * - `archived`: Soft deleted
2552
+ * - `failed`: Upload failed
2553
+ *
2554
+ * **Important Notes:**
2555
+ * - Only `uploaded` files are ingested
2556
+ * - Check `row_count` to estimate data volume
2557
+ * - Use `total_size_bytes` for storage monitoring
2558
+ * - Files with `failed` status should be deleted and re-uploaded
2559
+ * - File listing is included - no credit consumption
2414
2560
  */
2415
- const listTableFilesV1GraphsGraphIdTablesTableNameFilesGet = (options) => {
2561
+ const listTableFiles = (options) => {
2416
2562
  return (options.client ?? client_gen_1.client).get({
2417
2563
  security: [
2418
2564
  {
@@ -2428,12 +2574,44 @@ const listTableFilesV1GraphsGraphIdTablesTableNameFilesGet = (options) => {
2428
2574
  ...options
2429
2575
  });
2430
2576
  };
2431
- exports.listTableFilesV1GraphsGraphIdTablesTableNameFilesGet = listTableFilesV1GraphsGraphIdTablesTableNameFilesGet;
2577
+ exports.listTableFiles = listTableFiles;
2432
2578
  /**
2433
- * Create File Upload
2434
- * Create a new file upload for a table and get a presigned S3 URL
2579
+ * Get File Upload URL
2580
+ * Generate a presigned S3 URL for secure file upload.
2581
+ *
2582
+ * Initiates file upload to a staging table by generating a secure, time-limited
2583
+ * presigned S3 URL. Files are uploaded directly to S3, bypassing the API for
2584
+ * optimal performance.
2585
+ *
2586
+ * **Upload Workflow:**
2587
+ * 1. Call this endpoint to get presigned URL
2588
+ * 2. PUT file directly to S3 URL
2589
+ * 3. Call PATCH /tables/files/{file_id} with status='uploaded'
2590
+ * 4. Backend validates file and calculates metrics
2591
+ * 5. File ready for ingestion
2592
+ *
2593
+ * **Supported Formats:**
2594
+ * - Parquet (`application/x-parquet` with `.parquet` extension)
2595
+ * - CSV (`text/csv` with `.csv` extension)
2596
+ * - JSON (`application/json` with `.json` extension)
2597
+ *
2598
+ * **Validation:**
2599
+ * - File extension must match content type
2600
+ * - File name 1-255 characters
2601
+ * - No path traversal characters (.. / \)
2602
+ * - Auto-creates table if it doesn't exist
2603
+ *
2604
+ * **Auto-Table Creation:**
2605
+ * Tables are automatically created on first file upload with type inferred from name
2606
+ * (e.g., "Transaction" → relationship) and empty schema populated during ingestion.
2607
+ *
2608
+ * **Important Notes:**
2609
+ * - Presigned URLs expire (default: 1 hour)
2610
+ * - Use appropriate Content-Type header when uploading to S3
2611
+ * - File extension must match content type
2612
+ * - Upload URL generation is included - no credit consumption
2435
2613
  */
2436
- const getUploadUrlV1GraphsGraphIdTablesTableNameFilesPost = (options) => {
2614
+ const getUploadUrl = (options) => {
2437
2615
  return (options.client ?? client_gen_1.client).post({
2438
2616
  security: [
2439
2617
  {
@@ -2453,12 +2631,42 @@ const getUploadUrlV1GraphsGraphIdTablesTableNameFilesPost = (options) => {
2453
2631
  }
2454
2632
  });
2455
2633
  };
2456
- exports.getUploadUrlV1GraphsGraphIdTablesTableNameFilesPost = getUploadUrlV1GraphsGraphIdTablesTableNameFilesPost;
2634
+ exports.getUploadUrl = getUploadUrl;
2457
2635
  /**
2458
- * Delete File
2459
- * Delete a specific file from S3 and database tracking. DuckDB will automatically exclude it from queries.
2636
+ * Delete File from Staging
2637
+ * Delete a file from S3 storage and database tracking.
2638
+ *
2639
+ * Remove unwanted, duplicate, or incorrect files from staging tables before ingestion.
2640
+ * The file is deleted from both S3 and database tracking, and table statistics
2641
+ * are automatically recalculated.
2642
+ *
2643
+ * **Use Cases:**
2644
+ * - Remove duplicate uploads
2645
+ * - Delete files with incorrect data
2646
+ * - Clean up failed uploads
2647
+ * - Fix data quality issues before ingestion
2648
+ * - Manage storage usage
2649
+ *
2650
+ * **What Happens:**
2651
+ * 1. File deleted from S3 storage
2652
+ * 2. Database tracking record removed
2653
+ * 3. Table statistics recalculated (file count, size, row count)
2654
+ * 4. DuckDB automatically excludes file from future queries
2655
+ *
2656
+ * **Security:**
2657
+ * - Write access required (verified via auth)
2658
+ * - Shared repositories block file deletions
2659
+ * - Full audit trail of deletion operations
2660
+ * - Cannot delete after ingestion to graph
2661
+ *
2662
+ * **Important Notes:**
2663
+ * - Delete files before ingestion for best results
2664
+ * - Table statistics update automatically
2665
+ * - No need to refresh DuckDB - exclusion is automatic
2666
+ * - Consider re-uploading corrected version after deletion
2667
+ * - File deletion is included - no credit consumption
2460
2668
  */
2461
- const deleteFileV1GraphsGraphIdTablesFilesFileIdDelete = (options) => {
2669
+ const deleteFile = (options) => {
2462
2670
  return (options.client ?? client_gen_1.client).delete({
2463
2671
  security: [
2464
2672
  {
@@ -2474,12 +2682,26 @@ const deleteFileV1GraphsGraphIdTablesFilesFileIdDelete = (options) => {
2474
2682
  ...options
2475
2683
  });
2476
2684
  };
2477
- exports.deleteFileV1GraphsGraphIdTablesFilesFileIdDelete = deleteFileV1GraphsGraphIdTablesFilesFileIdDelete;
2685
+ exports.deleteFile = deleteFile;
2478
2686
  /**
2479
- * Get File Info
2480
- * Get detailed information about a specific file
2687
+ * Get File Information
2688
+ * Get detailed information about a specific file.
2689
+ *
2690
+ * Retrieve comprehensive metadata for a single file, including upload status,
2691
+ * size, row count, and timestamps. Useful for validating individual files
2692
+ * before ingestion.
2693
+ *
2694
+ * **Use Cases:**
2695
+ * - Validate file upload completion
2696
+ * - Check file metadata before ingestion
2697
+ * - Debug upload issues
2698
+ * - Verify file format and size
2699
+ * - Track file lifecycle
2700
+ *
2701
+ * **Note:**
2702
+ * File info retrieval is included - no credit consumption
2481
2703
  */
2482
- const getFileInfoV1GraphsGraphIdTablesFilesFileIdGet = (options) => {
2704
+ const getFileInfo = (options) => {
2483
2705
  return (options.client ?? client_gen_1.client).get({
2484
2706
  security: [
2485
2707
  {
@@ -2495,12 +2717,47 @@ const getFileInfoV1GraphsGraphIdTablesFilesFileIdGet = (options) => {
2495
2717
  ...options
2496
2718
  });
2497
2719
  };
2498
- exports.getFileInfoV1GraphsGraphIdTablesFilesFileIdGet = getFileInfoV1GraphsGraphIdTablesFilesFileIdGet;
2720
+ exports.getFileInfo = getFileInfo;
2499
2721
  /**
2500
- * Update File
2501
- * Update file metadata after upload (size, row count). Marks file as completed.
2722
+ * Update File Upload Status
2723
+ * Update file status after upload completes.
2724
+ *
2725
+ * Marks files as uploaded after successful S3 upload. The backend validates
2726
+ * the file, calculates size and row count, enforces storage limits, and
2727
+ * registers the DuckDB table for queries.
2728
+ *
2729
+ * **Status Values:**
2730
+ * - `uploaded`: File successfully uploaded to S3 (triggers validation)
2731
+ * - `disabled`: Exclude file from ingestion
2732
+ * - `archived`: Soft delete file
2733
+ *
2734
+ * **What Happens on 'uploaded' Status:**
2735
+ * 1. Verify file exists in S3
2736
+ * 2. Calculate actual file size
2737
+ * 3. Enforce tier storage limits
2738
+ * 4. Calculate or estimate row count
2739
+ * 5. Update table statistics
2740
+ * 6. Register DuckDB external table
2741
+ * 7. File ready for ingestion
2742
+ *
2743
+ * **Row Count Calculation:**
2744
+ * - **Parquet**: Exact count from file metadata
2745
+ * - **CSV**: Count rows (minus header)
2746
+ * - **JSON**: Count array elements
2747
+ * - **Fallback**: Estimate from file size if reading fails
2748
+ *
2749
+ * **Storage Limits:**
2750
+ * Enforced per subscription tier. Returns HTTP 413 if limit exceeded.
2751
+ * Check current usage before large uploads.
2752
+ *
2753
+ * **Important Notes:**
2754
+ * - Always call this after S3 upload completes
2755
+ * - Check response for actual row count
2756
+ * - Storage limit errors (413) mean tier upgrade needed
2757
+ * - DuckDB registration failures are non-fatal (retried later)
2758
+ * - Status updates are included - no credit consumption
2502
2759
  */
2503
- const updateFileV1GraphsGraphIdTablesFilesFileIdPatch = (options) => {
2760
+ const updateFileStatus = (options) => {
2504
2761
  return (options.client ?? client_gen_1.client).patch({
2505
2762
  security: [
2506
2763
  {
@@ -2520,12 +2777,66 @@ const updateFileV1GraphsGraphIdTablesFilesFileIdPatch = (options) => {
2520
2777
  }
2521
2778
  });
2522
2779
  };
2523
- exports.updateFileV1GraphsGraphIdTablesFilesFileIdPatch = updateFileV1GraphsGraphIdTablesFilesFileIdPatch;
2780
+ exports.updateFileStatus = updateFileStatus;
2524
2781
  /**
2525
2782
  * Ingest Tables to Graph
2526
- * Load all files from S3 into DuckDB staging tables and ingest into Kuzu graph database. Use rebuild=true to regenerate the entire graph from scratch (safe operation - S3 is source of truth).
2527
- */
2528
- const ingestTablesV1GraphsGraphIdTablesIngestPost = (options) => {
2783
+ * Load all files from S3 into DuckDB staging tables and ingest into Kuzu graph database.
2784
+ *
2785
+ * Orchestrates the complete data pipeline from S3 staging files into the Kuzu graph database.
2786
+ * Processes all tables in a single bulk operation with comprehensive error handling and metrics.
2787
+ *
2788
+ * **Use Cases:**
2789
+ * - Initial graph population from uploaded data
2790
+ * - Incremental data updates with new files
2791
+ * - Complete database rebuild from source files
2792
+ * - Recovery from failed ingestion attempts
2793
+ *
2794
+ * **Workflow:**
2795
+ * 1. Upload data files via `POST /tables/{table_name}/files`
2796
+ * 2. Files are validated and marked as 'uploaded'
2797
+ * 3. Trigger ingestion: `POST /tables/ingest`
2798
+ * 4. DuckDB staging tables created from S3 patterns
2799
+ * 5. Data copied row-by-row from DuckDB to Kuzu
2800
+ * 6. Per-table results and metrics returned
2801
+ *
2802
+ * **Rebuild Feature:**
2803
+ * Setting `rebuild=true` regenerates the entire graph database from scratch:
2804
+ * - Deletes existing Kuzu database
2805
+ * - Recreates with fresh schema from active GraphSchema
2806
+ * - Ingests all data files
2807
+ * - Safe operation - S3 is source of truth
2808
+ * - Useful for schema changes or data corrections
2809
+ * - Graph marked as 'rebuilding' during process
2810
+ *
2811
+ * **Error Handling:**
2812
+ * - Per-table error isolation with `ignore_errors` flag
2813
+ * - Partial success support (some tables succeed, some fail)
2814
+ * - Detailed error reporting per table
2815
+ * - Graph status tracking throughout process
2816
+ * - Automatic failure recovery and cleanup
2817
+ *
2818
+ * **Performance:**
2819
+ * - Processes all tables in sequence
2820
+ * - Each table timed independently
2821
+ * - Total execution metrics provided
2822
+ * - Scales to thousands of files
2823
+ * - Optimized for large datasets
2824
+ *
2825
+ * **Concurrency Control:**
2826
+ * Only one ingestion can run per graph at a time. If another ingestion is in progress,
2827
+ * you'll receive a 409 Conflict error. The distributed lock automatically expires after
2828
+ * the configured TTL (default: 1 hour) to prevent deadlocks from failed ingestions.
2829
+ *
2830
+ * **Important Notes:**
2831
+ * - Only files with 'uploaded' status are processed
2832
+ * - Tables with no uploaded files are skipped
2833
+ * - Use `ignore_errors=false` for strict validation
2834
+ * - Monitor progress via per-table results
2835
+ * - Check graph metadata for rebuild status
2836
+ * - Wait for current ingestion to complete before starting another
2837
+ * - Table ingestion is included - no credit consumption
2838
+ */
2839
+ const ingestTables = (options) => {
2529
2840
  return (options.client ?? client_gen_1.client).post({
2530
2841
  security: [
2531
2842
  {
@@ -2545,12 +2856,61 @@ const ingestTablesV1GraphsGraphIdTablesIngestPost = (options) => {
2545
2856
  }
2546
2857
  });
2547
2858
  };
2548
- exports.ingestTablesV1GraphsGraphIdTablesIngestPost = ingestTablesV1GraphsGraphIdTablesIngestPost;
2859
+ exports.ingestTables = ingestTables;
2549
2860
  /**
2550
2861
  * Query Staging Tables with SQL
2551
- * Execute SQL queries on DuckDB staging tables
2862
+ * Execute SQL queries on DuckDB staging tables for data inspection and validation.
2863
+ *
2864
+ * Query raw staging data directly with SQL before ingestion into the graph database.
2865
+ * Useful for data quality checks, validation, and exploratory analysis.
2866
+ *
2867
+ * **Security Best Practice - Use Parameterized Queries:**
2868
+ * ALWAYS use query parameters instead of string concatenation to prevent SQL injection:
2869
+ * - ✅ SAFE: `SELECT * FROM Entity WHERE type = ? LIMIT ?` with `parameters: ["Company", 100]`
2870
+ * - ❌ UNSAFE: `SELECT * FROM Entity WHERE type = 'Company' LIMIT 100` with user input concatenated into SQL string
2871
+ *
2872
+ * Query parameters provide automatic escaping and type safety. Use `?` placeholders with parameters array.
2873
+ *
2874
+ * **Use Cases:**
2875
+ * - Validate data quality before graph ingestion
2876
+ * - Inspect row-level data for debugging
2877
+ * - Run analytics on staging tables
2878
+ * - Check for duplicates, nulls, or data issues
2879
+ * - Preview data transformations
2880
+ *
2881
+ * **Workflow:**
2882
+ * 1. Upload data files via `POST /tables/{table_name}/files`
2883
+ * 2. Query staging tables to validate: `POST /tables/query`
2884
+ * 3. Fix any data issues by re-uploading
2885
+ * 4. Ingest validated data: `POST /tables/ingest`
2886
+ *
2887
+ * **Supported SQL:**
2888
+ * - Full DuckDB SQL syntax
2889
+ * - SELECT, JOIN, WHERE, GROUP BY, ORDER BY
2890
+ * - Aggregations, window functions, CTEs
2891
+ * - Multiple table joins across staging area
2892
+ *
2893
+ * **Common Operations:**
2894
+ * - Count rows: `SELECT COUNT(*) FROM Entity`
2895
+ * - Filter by type: `SELECT * FROM Entity WHERE entity_type = ? LIMIT ?` with `parameters: ["Company", 100]`
2896
+ * - Check for nulls: `SELECT * FROM Entity WHERE name IS NULL LIMIT 10`
2897
+ * - Find duplicates: `SELECT identifier, COUNT(*) as cnt FROM Entity GROUP BY identifier HAVING COUNT(*) > 1`
2898
+ * - Filter amounts: `SELECT * FROM Transaction WHERE amount > ? AND date >= ?` with `parameters: [1000, "2024-01-01"]`
2899
+ *
2900
+ * **Limits:**
2901
+ * - Query timeout: 30 seconds
2902
+ * - Result limit: 10,000 rows (use LIMIT clause)
2903
+ * - Read-only: No INSERT, UPDATE, DELETE
2904
+ * - User's tables only: Cannot query other users' data
2905
+ *
2906
+ * **Shared Repositories:**
2907
+ * Shared repositories (SEC, etc.) do not allow direct SQL queries.
2908
+ * Use the graph query endpoint instead: `POST /v1/graphs/{graph_id}/query`
2909
+ *
2910
+ * **Note:**
2911
+ * Staging table queries are included - no credit consumption
2552
2912
  */
2553
- const queryTablesV1GraphsGraphIdTablesQueryPost = (options) => {
2913
+ const queryTables = (options) => {
2554
2914
  return (options.client ?? client_gen_1.client).post({
2555
2915
  security: [
2556
2916
  {
@@ -2570,10 +2930,43 @@ const queryTablesV1GraphsGraphIdTablesQueryPost = (options) => {
2570
2930
  }
2571
2931
  });
2572
2932
  };
2573
- exports.queryTablesV1GraphsGraphIdTablesQueryPost = queryTablesV1GraphsGraphIdTablesQueryPost;
2933
+ exports.queryTables = queryTables;
2574
2934
  /**
2575
2935
  * Get User Graphs
2576
- * Get all graph databases accessible to the current user.
2936
+ * List all graph databases accessible to the current user with roles and selection status.
2937
+ *
2938
+ * Returns a comprehensive list of all graphs the user can access, including their
2939
+ * role in each graph (admin or member) and which graph is currently selected as
2940
+ * the active workspace.
2941
+ *
2942
+ * **Returned Information:**
2943
+ * - Graph ID and display name for each accessible graph
2944
+ * - User's role (admin/member) indicating permission level
2945
+ * - Selection status (one graph can be marked as "selected")
2946
+ * - Creation timestamp for each graph
2947
+ *
2948
+ * **Graph Roles:**
2949
+ * - `admin`: Full access - can manage graph settings, invite users, delete graph
2950
+ * - `member`: Read/write access - can query and modify data, cannot manage settings
2951
+ *
2952
+ * **Selected Graph Concept:**
2953
+ * The "selected" graph is the user's currently active workspace. Many API operations
2954
+ * default to the selected graph if no graph_id is provided. Users can change their
2955
+ * selected graph via the `POST /v1/graphs/{graph_id}/select` endpoint.
2956
+ *
2957
+ * **Use Cases:**
2958
+ * - Display graph selector in UI
2959
+ * - Show user's accessible workspaces
2960
+ * - Identify which graph is currently active
2961
+ * - Filter graphs by role for permission-based features
2962
+ *
2963
+ * **Empty Response:**
2964
+ * New users or users without graph access will receive an empty list with
2965
+ * `selectedGraphId: null`. Users should create a new graph or request access
2966
+ * to an existing graph.
2967
+ *
2968
+ * **Note:**
2969
+ * Graph listing is included - no credit consumption required.
2577
2970
  */
2578
2971
  const getGraphs = (options) => {
2579
2972
  return (options?.client ?? client_gen_1.client).get({
@@ -2658,7 +3051,34 @@ const createGraph = (options) => {
2658
3051
  exports.createGraph = createGraph;
2659
3052
  /**
2660
3053
  * Get Available Schema Extensions
2661
- * List all available schema extensions for graph creation
3054
+ * List all available schema extensions for graph creation.
3055
+ *
3056
+ * Schema extensions provide pre-built industry-specific data models that extend
3057
+ * the base graph schema with specialized nodes, relationships, and properties.
3058
+ *
3059
+ * **Available Extensions:**
3060
+ * - **RoboLedger**: Complete accounting system with XBRL reporting, general ledger, and financial statements
3061
+ * - **RoboInvestor**: Investment portfolio management and tracking
3062
+ * - **RoboSCM**: Supply chain management and logistics
3063
+ * - **RoboFO**: Front office operations and CRM
3064
+ * - **RoboHRM**: Human resources management
3065
+ * - **RoboEPM**: Enterprise performance management
3066
+ * - **RoboReport**: Business intelligence and reporting
3067
+ *
3068
+ * **Extension Information:**
3069
+ * Each extension includes:
3070
+ * - Display name and description
3071
+ * - Node and relationship counts
3072
+ * - Context-aware capabilities (e.g., SEC repositories get different features than entity graphs)
3073
+ *
3074
+ * **Use Cases:**
3075
+ * - Browse available extensions before creating a graph
3076
+ * - Understand extension capabilities and data models
3077
+ * - Plan graph schema based on business requirements
3078
+ * - Combine multiple extensions for comprehensive data modeling
3079
+ *
3080
+ * **Note:**
3081
+ * Extension listing is a free operation - no credits are consumed.
2662
3082
  */
2663
3083
  const getAvailableExtensions = (options) => {
2664
3084
  return (options?.client ?? client_gen_1.client).get({
@@ -2679,7 +3099,35 @@ const getAvailableExtensions = (options) => {
2679
3099
  exports.getAvailableExtensions = getAvailableExtensions;
2680
3100
  /**
2681
3101
  * Select Graph
2682
- * Select a specific graph as the active graph for the user.
3102
+ * Select a specific graph as the active workspace for the user.
3103
+ *
3104
+ * The selected graph becomes the default context for operations in client applications
3105
+ * and can be used to maintain user workspace preferences across sessions.
3106
+ *
3107
+ * **Functionality:**
3108
+ * - Sets the specified graph as the user's currently selected graph
3109
+ * - Deselects any previously selected graph (only one can be selected at a time)
3110
+ * - Persists selection across sessions until changed
3111
+ * - Returns confirmation with the selected graph ID
3112
+ *
3113
+ * **Requirements:**
3114
+ * - User must have access to the graph (as admin or member)
3115
+ * - Graph must exist and not be deleted
3116
+ * - User can only select graphs they have permission to access
3117
+ *
3118
+ * **Use Cases:**
3119
+ * - Switch between multiple graphs in a multi-graph environment
3120
+ * - Set default workspace after creating a new graph
3121
+ * - Restore user's preferred workspace on login
3122
+ * - Support graph context switching in client applications
3123
+ *
3124
+ * **Client Integration:**
3125
+ * Many client operations can default to the selected graph, simplifying API calls
3126
+ * by eliminating the need to specify graph_id repeatedly. Check the selected
3127
+ * graph with `GET /v1/graphs` which returns `selectedGraphId`.
3128
+ *
3129
+ * **Note:**
3130
+ * Graph selection is a free operation - no credits are consumed.
2683
3131
  */
2684
3132
  const selectGraph = (options) => {
2685
3133
  return (options.client ?? client_gen_1.client).post({