@robosystems/client 0.2.2 → 0.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/sdk.gen.js CHANGED
@@ -1277,9 +1277,8 @@ exports.recommendAgent = recommendAgent;
1277
1277
  * - User permissions and subscription tier
1278
1278
  * - Backend capabilities (Kuzu, Neo4j, etc.)
1279
1279
  *
1280
- * Credit consumption:
1281
- * - Listing tools is included to encourage exploration
1282
- * - Tool execution costs vary by operation complexity
1280
+ * **Note:**
1281
+ * MCP tool listing is included - no credit consumption required.
1283
1282
  */
1284
1283
  const listMcpTools = (options) => {
1285
1284
  return (options.client ?? client_gen_1.client).get({
@@ -1332,8 +1331,11 @@ exports.listMcpTools = listMcpTools;
1332
1331
  * - `408 Request Timeout`: Tool execution exceeded timeout
1333
1332
  * - Clients should implement exponential backoff on errors
1334
1333
  *
1335
- * **Note:**
1336
- * MCP tool calls are included and do not consume credits.
1334
+ * **Credit Model:**
1335
+ * MCP tool execution is included - no credit consumption required. Database
1336
+ * operations (queries, schema inspection, analytics) are completely free.
1337
+ * Only AI operations that invoke Claude or other LLM APIs consume credits,
1338
+ * which happens at the AI agent layer, not the MCP tool layer.
1337
1339
  */
1338
1340
  const callMcpTool = (options) => {
1339
1341
  return (options.client ?? client_gen_1.client).post({
@@ -1640,6 +1642,13 @@ exports.getGraphUsageStats = getGraphUsageStats;
1640
1642
  * 1. Create file upload: `POST /v1/graphs/{graph_id}/tables/{table_name}/files`
1641
1643
  * 2. Ingest to graph: `POST /v1/graphs/{graph_id}/tables/ingest`
1642
1644
  *
1645
+ * **Security Best Practice - Use Parameterized Queries:**
1646
+ * ALWAYS use query parameters instead of string interpolation to prevent injection attacks:
1647
+ * - ✅ SAFE: `MATCH (n:Entity {type: $entity_type}) RETURN n` with `parameters: {"entity_type": "Company"}`
1648
+ * - ❌ UNSAFE: `MATCH (n:Entity {type: "Company"}) RETURN n` with user input concatenated into query string
1649
+ *
1650
+ * Query parameters provide automatic escaping and type safety. All examples in this API use parameterized queries.
1651
+ *
1643
1652
  * This endpoint automatically selects the best execution strategy based on:
1644
1653
  * - Query characteristics (size, complexity)
1645
1654
  * - Client capabilities (SSE, NDJSON, JSON)
@@ -1711,13 +1720,38 @@ exports.executeCypherQuery = executeCypherQuery;
1711
1720
  * Get Runtime Graph Schema
1712
1721
  * Get runtime schema information for the specified graph database.
1713
1722
  *
1714
- * This endpoint inspects the actual graph database structure and returns:
1723
+ * ## What This Returns
1724
+ *
1725
+ * This endpoint inspects the **actual current state** of the graph database and returns:
1715
1726
  * - **Node Labels**: All node types currently in the database
1716
1727
  * - **Relationship Types**: All relationship types currently in the database
1717
- * - **Node Properties**: Properties for each node type (limited to first 10 for performance)
1728
+ * - **Node Properties**: Properties discovered from actual data (up to 10 properties per node type)
1729
+ *
1730
+ * ## Runtime vs Declared Schema
1718
1731
  *
1719
- * This shows what actually exists in the database right now - the runtime state.
1720
- * For the declared schema definition, use GET /schema/export instead.
1732
+ * **Use this endpoint** (`/schema`) when you need to know:
1733
+ * - What data is ACTUALLY in the database right now
1734
+ * - What properties exist on real nodes
1735
+ * - What relationships have been created
1736
+ * - Current database structure for querying
1737
+ *
1738
+ * **Use `/schema/export` instead** when you need:
1739
+ * - The original schema definition used to create the graph
1740
+ * - Schema in a specific format (JSON, YAML, Cypher DDL)
1741
+ * - Schema for documentation or version control
1742
+ * - Schema to replicate in another graph
1743
+ *
1744
+ * ## Example Use Cases
1745
+ *
1746
+ * - **Building queries**: See what node labels and properties exist to write accurate Cypher
1747
+ * - **Data exploration**: Discover what's in an unfamiliar graph
1748
+ * - **Schema drift detection**: Compare runtime vs declared schema
1749
+ * - **API integration**: Dynamically adapt to current graph structure
1750
+ *
1751
+ * ## Performance Note
1752
+ *
1753
+ * Property discovery is limited to 10 properties per node type for performance.
1754
+ * For complete schema definitions, use `/schema/export`.
1721
1755
  *
1722
1756
  * This operation is included - no credit consumption required.
1723
1757
  */
@@ -1739,8 +1773,54 @@ const getGraphSchema = (options) => {
1739
1773
  };
1740
1774
  exports.getGraphSchema = getGraphSchema;
1741
1775
  /**
1742
- * Export Graph Schema
1743
- * Export the schema of an existing graph in JSON, YAML, or Cypher format
1776
+ * Export Declared Graph Schema
1777
+ * Export the declared schema definition of an existing graph.
1778
+ *
1779
+ * ## What This Returns
1780
+ *
1781
+ * This endpoint returns the **original schema definition** that was used to create the graph:
1782
+ * - The schema as it was **declared** during graph creation
1783
+ * - Complete node and relationship definitions
1784
+ * - Property types and constraints
1785
+ * - Schema metadata (name, version, type)
1786
+ *
1787
+ * ## Runtime vs Declared Schema
1788
+ *
1789
+ * **Use this endpoint** (`/schema/export`) when you need:
1790
+ * - The original schema definition used to create the graph
1791
+ * - Schema in a specific format (JSON, YAML, Cypher DDL)
1792
+ * - Schema for documentation or version control
1793
+ * - Schema to replicate in another graph
1794
+ *
1795
+ * **Use `/schema` instead** when you need:
1796
+ * - What data is ACTUALLY in the database right now
1797
+ * - What properties exist on real nodes (discovered from data)
1798
+ * - Current runtime database structure for querying
1799
+ *
1800
+ * ## Export Formats
1801
+ *
1802
+ * ### JSON Format (`format=json`)
1803
+ * Returns structured JSON with nodes, relationships, and properties.
1804
+ * Best for programmatic access and API integration.
1805
+ *
1806
+ * ### YAML Format (`format=yaml`)
1807
+ * Returns human-readable YAML with comments.
1808
+ * Best for documentation and configuration management.
1809
+ *
1810
+ * ### Cypher DDL Format (`format=cypher`)
1811
+ * Returns Cypher CREATE statements for recreating the schema.
1812
+ * Best for database migration and replication.
1813
+ *
1814
+ * ## Data Statistics
1815
+ *
1816
+ * Set `include_data_stats=true` to include:
1817
+ * - Node counts by label
1818
+ * - Relationship counts by type
1819
+ * - Total nodes and relationships
1820
+ *
1821
+ * This combines declared schema with runtime statistics.
1822
+ *
1823
+ * This operation is included - no credit consumption required.
1744
1824
  */
1745
1825
  const exportGraphSchema = (options) => {
1746
1826
  return (options.client ?? client_gen_1.client).get({
@@ -2391,12 +2471,11 @@ exports.getSubgraphQuota = getSubgraphQuota;
2391
2471
  * List Staging Tables
2392
2472
  * List all DuckDB staging tables with comprehensive metrics and status.
2393
2473
  *
2394
- * **Purpose:**
2395
2474
  * Get a complete inventory of all staging tables for a graph, including
2396
2475
  * file counts, storage sizes, and row estimates. Essential for monitoring
2397
2476
  * the data pipeline and determining which tables are ready for ingestion.
2398
2477
  *
2399
- * **What You Get:**
2478
+ * **Returned Metrics:**
2400
2479
  * - Table name and type (node/relationship)
2401
2480
  * - File count per table
2402
2481
  * - Total storage size in bytes
@@ -2418,43 +2497,12 @@ exports.getSubgraphQuota = getSubgraphQuota;
2418
2497
  * 4. Check file counts and sizes
2419
2498
  * 5. Ingest when ready
2420
2499
  *
2421
- * **Example Response:**
2422
- * ```json
2423
- * {
2424
- * "tables": [
2425
- * {
2426
- * "table_name": "Entity",
2427
- * "row_count": 5000,
2428
- * "file_count": 3,
2429
- * "total_size_bytes": 2457600,
2430
- * "s3_location": "s3://bucket/user-staging/user123/graph456/Entity/*.parquet"
2431
- * },
2432
- * {
2433
- * "table_name": "Transaction",
2434
- * "row_count": 15000,
2435
- * "file_count": 5,
2436
- * "total_size_bytes": 8192000,
2437
- * "s3_location": "s3://bucket/user-staging/user123/graph456/Transaction/*.parquet"
2438
- * }
2439
- * ],
2440
- * "total_count": 2
2441
- * }
2442
- * ```
2443
- *
2444
- * **Example Usage:**
2445
- * ```bash
2446
- * curl -H "Authorization: Bearer YOUR_TOKEN" \
2447
- * https://api.robosystems.ai/v1/graphs/kg123/tables
2448
- * ```
2449
- *
2450
- * **Tips:**
2500
+ * **Important Notes:**
2451
2501
  * - Tables with `file_count > 0` have data ready
2452
2502
  * - Check `total_size_bytes` for storage monitoring
2453
2503
  * - Use `s3_location` to verify upload paths
2454
2504
  * - Empty tables (file_count=0) are skipped during ingestion
2455
- *
2456
- * **Note:**
2457
- * Table queries are included - no credit consumption.
2505
+ * - Table queries are included - no credit consumption
2458
2506
  */
2459
2507
  const listTables = (options) => {
2460
2508
  return (options.client ?? client_gen_1.client).get({
@@ -2477,7 +2525,6 @@ exports.listTables = listTables;
2477
2525
  * List Files in Staging Table
2478
2526
  * List all files uploaded to a staging table with comprehensive metadata.
2479
2527
  *
2480
- * **Purpose:**
2481
2528
  * Get a complete inventory of all files in a staging table, including upload status,
2482
2529
  * file sizes, row counts, and S3 locations. Essential for monitoring upload progress
2483
2530
  * and validating data before ingestion.
@@ -2490,59 +2537,26 @@ exports.listTables = listTables;
2490
2537
  * - Identify failed or incomplete uploads
2491
2538
  * - Pre-ingestion validation
2492
2539
  *
2493
- * **What You Get:**
2494
- * - File ID and name
2495
- * - File format (parquet, csv, etc.)
2496
- * - Size in bytes
2497
- * - Row count (if available)
2540
+ * **Returned Metadata:**
2541
+ * - File ID, name, and format (parquet, csv, json)
2542
+ * - Size in bytes and row count (if available)
2498
2543
  * - Upload status and method
2499
2544
  * - Creation and upload timestamps
2500
2545
  * - S3 key for reference
2501
2546
  *
2502
2547
  * **Upload Status Values:**
2503
- * - `created`: File record created, not yet uploaded
2504
- * - `uploading`: Upload in progress
2548
+ * - `pending`: Upload URL generated, awaiting upload
2505
2549
  * - `uploaded`: Successfully uploaded, ready for ingestion
2550
+ * - `disabled`: Excluded from ingestion
2551
+ * - `archived`: Soft deleted
2506
2552
  * - `failed`: Upload failed
2507
2553
  *
2508
- * **Example Response:**
2509
- * ```json
2510
- * {
2511
- * "graph_id": "kg123",
2512
- * "table_name": "Entity",
2513
- * "files": [
2514
- * {
2515
- * "file_id": "f123",
2516
- * "file_name": "entities_batch1.parquet",
2517
- * "file_format": "parquet",
2518
- * "size_bytes": 1048576,
2519
- * "row_count": 5000,
2520
- * "upload_status": "uploaded",
2521
- * "upload_method": "presigned_url",
2522
- * "created_at": "2025-10-28T10:00:00Z",
2523
- * "uploaded_at": "2025-10-28T10:01:30Z",
2524
- * "s3_key": "user-staging/user123/kg123/Entity/entities_batch1.parquet"
2525
- * }
2526
- * ],
2527
- * "total_files": 1,
2528
- * "total_size_bytes": 1048576
2529
- * }
2530
- * ```
2531
- *
2532
- * **Example Usage:**
2533
- * ```bash
2534
- * curl -H "Authorization: Bearer YOUR_TOKEN" \
2535
- * https://api.robosystems.ai/v1/graphs/kg123/tables/Entity/files
2536
- * ```
2537
- *
2538
- * **Tips:**
2554
+ * **Important Notes:**
2539
2555
  * - Only `uploaded` files are ingested
2540
2556
  * - Check `row_count` to estimate data volume
2541
2557
  * - Use `total_size_bytes` for storage monitoring
2542
2558
  * - Files with `failed` status should be deleted and re-uploaded
2543
- *
2544
- * **Note:**
2545
- * File listing is included - no credit consumption.
2559
+ * - File listing is included - no credit consumption
2546
2560
  */
2547
2561
  const listTableFiles = (options) => {
2548
2562
  return (options.client ?? client_gen_1.client).get({
@@ -2565,14 +2579,13 @@ exports.listTableFiles = listTableFiles;
2565
2579
  * Get File Upload URL
2566
2580
  * Generate a presigned S3 URL for secure file upload.
2567
2581
  *
2568
- * **Purpose:**
2569
- * Initiate file upload to a staging table by generating a secure, time-limited
2582
+ * Initiates file upload to a staging table by generating a secure, time-limited
2570
2583
  * presigned S3 URL. Files are uploaded directly to S3, bypassing the API for
2571
2584
  * optimal performance.
2572
2585
  *
2573
2586
  * **Upload Workflow:**
2574
2587
  * 1. Call this endpoint to get presigned URL
2575
- * 2. PUT file directly to S3 URL (using curl, axios, etc.)
2588
+ * 2. PUT file directly to S3 URL
2576
2589
  * 3. Call PATCH /tables/files/{file_id} with status='uploaded'
2577
2590
  * 4. Backend validates file and calculates metrics
2578
2591
  * 5. File ready for ingestion
@@ -2589,52 +2602,14 @@ exports.listTableFiles = listTableFiles;
2589
2602
  * - Auto-creates table if it doesn't exist
2590
2603
  *
2591
2604
  * **Auto-Table Creation:**
2592
- * If the table doesn't exist, it's automatically created with:
2593
- * - Type inferred from name (e.g., "Transaction" → relationship)
2594
- * - Empty schema (populated on ingestion)
2595
- * - Ready for file uploads
2596
- *
2597
- * **Example Response:**
2598
- * ```json
2599
- * {
2600
- * "upload_url": "https://bucket.s3.amazonaws.com/path?X-Amz-Algorithm=...",
2601
- * "expires_in": 3600,
2602
- * "file_id": "f123-456-789",
2603
- * "s3_key": "user-staging/user123/kg456/Entity/f123.../data.parquet"
2604
- * }
2605
- * ```
2606
- *
2607
- * **Example Usage:**
2608
- * ```bash
2609
- * # Step 1: Get upload URL
2610
- * curl -X POST "https://api.robosystems.ai/v1/graphs/kg123/tables/Entity/files" \
2611
- * -H "Authorization: Bearer YOUR_TOKEN" \
2612
- * -H "Content-Type: application/json" \
2613
- * -d '{
2614
- * "file_name": "entities.parquet",
2615
- * "content_type": "application/x-parquet"
2616
- * }'
2617
- *
2618
- * # Step 2: Upload file directly to S3
2619
- * curl -X PUT "$UPLOAD_URL" \
2620
- * -H "Content-Type: application/x-parquet" \
2621
- * --data-binary "@entities.parquet"
2622
- *
2623
- * # Step 3: Mark as uploaded
2624
- * curl -X PATCH "https://api.robosystems.ai/v1/graphs/kg123/tables/files/$FILE_ID" \
2625
- * -H "Authorization: Bearer YOUR_TOKEN" \
2626
- * -H "Content-Type: application/json" \
2627
- * -d '{"status": "uploaded"}'
2628
- * ```
2605
+ * Tables are automatically created on first file upload with type inferred from name
2606
+ * (e.g., "Transaction" → relationship) and empty schema populated during ingestion.
2629
2607
  *
2630
- * **Tips:**
2608
+ * **Important Notes:**
2631
2609
  * - Presigned URLs expire (default: 1 hour)
2632
2610
  * - Use appropriate Content-Type header when uploading to S3
2633
2611
  * - File extension must match content type
2634
- * - Large files benefit from direct S3 upload
2635
- *
2636
- * **Note:**
2637
- * Upload URL generation is included - no credit consumption.
2612
+ * - Upload URL generation is included - no credit consumption
2638
2613
  */
2639
2614
  const getUploadUrl = (options) => {
2640
2615
  return (options.client ?? client_gen_1.client).post({
@@ -2661,7 +2636,6 @@ exports.getUploadUrl = getUploadUrl;
2661
2636
  * Delete File from Staging
2662
2637
  * Delete a file from S3 storage and database tracking.
2663
2638
  *
2664
- * **Purpose:**
2665
2639
  * Remove unwanted, duplicate, or incorrect files from staging tables before ingestion.
2666
2640
  * The file is deleted from both S3 and database tracking, and table statistics
2667
2641
  * are automatically recalculated.
@@ -2685,30 +2659,12 @@ exports.getUploadUrl = getUploadUrl;
2685
2659
  * - Full audit trail of deletion operations
2686
2660
  * - Cannot delete after ingestion to graph
2687
2661
  *
2688
- * **Example Response:**
2689
- * ```json
2690
- * {
2691
- * "status": "deleted",
2692
- * "file_id": "f123",
2693
- * "file_name": "entities_batch1.parquet",
2694
- * "message": "File deleted successfully. DuckDB will automatically exclude it from queries."
2695
- * }
2696
- * ```
2697
- *
2698
- * **Example Usage:**
2699
- * ```bash
2700
- * curl -X DELETE -H "Authorization: Bearer YOUR_TOKEN" \
2701
- * https://api.robosystems.ai/v1/graphs/kg123/tables/files/f123
2702
- * ```
2703
- *
2704
- * **Tips:**
2662
+ * **Important Notes:**
2705
2663
  * - Delete files before ingestion for best results
2706
2664
  * - Table statistics update automatically
2707
2665
  * - No need to refresh DuckDB - exclusion is automatic
2708
2666
  * - Consider re-uploading corrected version after deletion
2709
- *
2710
- * **Note:**
2711
- * File deletion is included - no credit consumption.
2667
+ * - File deletion is included - no credit consumption
2712
2668
  */
2713
2669
  const deleteFile = (options) => {
2714
2670
  return (options.client ?? client_gen_1.client).delete({
@@ -2731,7 +2687,6 @@ exports.deleteFile = deleteFile;
2731
2687
  * Get File Information
2732
2688
  * Get detailed information about a specific file.
2733
2689
  *
2734
- * **Purpose:**
2735
2690
  * Retrieve comprehensive metadata for a single file, including upload status,
2736
2691
  * size, row count, and timestamps. Useful for validating individual files
2737
2692
  * before ingestion.
@@ -2743,33 +2698,8 @@ exports.deleteFile = deleteFile;
2743
2698
  * - Verify file format and size
2744
2699
  * - Track file lifecycle
2745
2700
  *
2746
- * **Example Response:**
2747
- * ```json
2748
- * {
2749
- * "file_id": "f123",
2750
- * "graph_id": "kg123",
2751
- * "table_id": "t456",
2752
- * "table_name": "Entity",
2753
- * "file_name": "entities_batch1.parquet",
2754
- * "file_format": "parquet",
2755
- * "size_bytes": 1048576,
2756
- * "row_count": 5000,
2757
- * "upload_status": "uploaded",
2758
- * "upload_method": "presigned_url",
2759
- * "created_at": "2025-10-28T10:00:00Z",
2760
- * "uploaded_at": "2025-10-28T10:01:30Z",
2761
- * "s3_key": "user-staging/user123/kg123/Entity/entities_batch1.parquet"
2762
- * }
2763
- * ```
2764
- *
2765
- * **Example Usage:**
2766
- * ```bash
2767
- * curl -H "Authorization: Bearer YOUR_TOKEN" \
2768
- * https://api.robosystems.ai/v1/graphs/kg123/tables/files/f123
2769
- * ```
2770
- *
2771
2701
  * **Note:**
2772
- * File info retrieval is included - no credit consumption.
2702
+ * File info retrieval is included - no credit consumption
2773
2703
  */
2774
2704
  const getFileInfo = (options) => {
2775
2705
  return (options.client ?? client_gen_1.client).get({
@@ -2792,8 +2722,7 @@ exports.getFileInfo = getFileInfo;
2792
2722
  * Update File Upload Status
2793
2723
  * Update file status after upload completes.
2794
2724
  *
2795
- * **Purpose:**
2796
- * Mark files as uploaded after successful S3 upload. The backend validates
2725
+ * Marks files as uploaded after successful S3 upload. The backend validates
2797
2726
  * the file, calculates size and row count, enforces storage limits, and
2798
2727
  * registers the DuckDB table for queries.
2799
2728
  *
@@ -2818,40 +2747,15 @@ exports.getFileInfo = getFileInfo;
2818
2747
  * - **Fallback**: Estimate from file size if reading fails
2819
2748
  *
2820
2749
  * **Storage Limits:**
2821
- * Enforced per subscription tier:
2822
- * - Prevents uploads exceeding tier limit
2823
- * - Returns HTTP 413 if limit exceeded
2824
- * - Check current usage before large uploads
2825
- *
2826
- * **Example Response:**
2827
- * ```json
2828
- * {
2829
- * "status": "success",
2830
- * "file_id": "f123",
2831
- * "upload_status": "uploaded",
2832
- * "file_size_bytes": 1048576,
2833
- * "row_count": 5000,
2834
- * "message": "File validated and ready for ingestion"
2835
- * }
2836
- * ```
2837
- *
2838
- * **Example Usage:**
2839
- * ```bash
2840
- * # After uploading file to S3 presigned URL
2841
- * curl -X PATCH "https://api.robosystems.ai/v1/graphs/kg123/tables/files/f123" \
2842
- * -H "Authorization: Bearer YOUR_TOKEN" \
2843
- * -H "Content-Type: application/json" \
2844
- * -d '{"status": "uploaded"}'
2845
- * ```
2750
+ * Enforced per subscription tier. Returns HTTP 413 if limit exceeded.
2751
+ * Check current usage before large uploads.
2846
2752
  *
2847
- * **Tips:**
2753
+ * **Important Notes:**
2848
2754
  * - Always call this after S3 upload completes
2849
2755
  * - Check response for actual row count
2850
2756
  * - Storage limit errors (413) mean tier upgrade needed
2851
2757
  * - DuckDB registration failures are non-fatal (retried later)
2852
- *
2853
- * **Note:**
2854
- * Status updates are included - no credit consumption.
2758
+ * - Status updates are included - no credit consumption
2855
2759
  */
2856
2760
  const updateFileStatus = (options) => {
2857
2761
  return (options.client ?? client_gen_1.client).patch({
@@ -2878,7 +2782,6 @@ exports.updateFileStatus = updateFileStatus;
2878
2782
  * Ingest Tables to Graph
2879
2783
  * Load all files from S3 into DuckDB staging tables and ingest into Kuzu graph database.
2880
2784
  *
2881
- * **Purpose:**
2882
2785
  * Orchestrates the complete data pipeline from S3 staging files into the Kuzu graph database.
2883
2786
  * Processes all tables in a single bulk operation with comprehensive error handling and metrics.
2884
2787
  *
@@ -2919,55 +2822,19 @@ exports.updateFileStatus = updateFileStatus;
2919
2822
  * - Scales to thousands of files
2920
2823
  * - Optimized for large datasets
2921
2824
  *
2922
- * **Example Request:**
2923
- * ```bash
2924
- * curl -X POST "https://api.robosystems.ai/v1/graphs/kg123/tables/ingest" \
2925
- * -H "Authorization: Bearer YOUR_TOKEN" \
2926
- * -H "Content-Type: application/json" \
2927
- * -d '{
2928
- * "ignore_errors": true,
2929
- * "rebuild": false
2930
- * }'
2931
- * ```
2932
- *
2933
- * **Example Response:**
2934
- * ```json
2935
- * {
2936
- * "status": "success",
2937
- * "graph_id": "kg123",
2938
- * "total_tables": 5,
2939
- * "successful_tables": 5,
2940
- * "failed_tables": 0,
2941
- * "skipped_tables": 0,
2942
- * "total_rows_ingested": 25000,
2943
- * "total_execution_time_ms": 15420.5,
2944
- * "results": [
2945
- * {
2946
- * "table_name": "Entity",
2947
- * "status": "success",
2948
- * "rows_ingested": 5000,
2949
- * "execution_time_ms": 3200.1,
2950
- * "error": null
2951
- * }
2952
- * ]
2953
- * }
2954
- * ```
2955
- *
2956
2825
  * **Concurrency Control:**
2957
2826
  * Only one ingestion can run per graph at a time. If another ingestion is in progress,
2958
2827
  * you'll receive a 409 Conflict error. The distributed lock automatically expires after
2959
2828
  * the configured TTL (default: 1 hour) to prevent deadlocks from failed ingestions.
2960
2829
  *
2961
- * **Tips:**
2830
+ * **Important Notes:**
2962
2831
  * - Only files with 'uploaded' status are processed
2963
2832
  * - Tables with no uploaded files are skipped
2964
2833
  * - Use `ignore_errors=false` for strict validation
2965
2834
  * - Monitor progress via per-table results
2966
2835
  * - Check graph metadata for rebuild status
2967
2836
  * - Wait for current ingestion to complete before starting another
2968
- *
2969
- * **Note:**
2970
- * Table ingestion is included - no credit consumption.
2837
+ * - Table ingestion is included - no credit consumption
2971
2838
  */
2972
2839
  const ingestTables = (options) => {
2973
2840
  return (options.client ?? client_gen_1.client).post({
@@ -2994,10 +2861,16 @@ exports.ingestTables = ingestTables;
2994
2861
  * Query Staging Tables with SQL
2995
2862
  * Execute SQL queries on DuckDB staging tables for data inspection and validation.
2996
2863
  *
2997
- * **Purpose:**
2998
2864
  * Query raw staging data directly with SQL before ingestion into the graph database.
2999
2865
  * Useful for data quality checks, validation, and exploratory analysis.
3000
2866
  *
2867
+ * **Security Best Practice - Use Parameterized Queries:**
2868
+ * ALWAYS use query parameters instead of string concatenation to prevent SQL injection:
2869
+ * - ✅ SAFE: `SELECT * FROM Entity WHERE type = ? LIMIT ?` with `parameters: ["Company", 100]`
2870
+ * - ❌ UNSAFE: `SELECT * FROM Entity WHERE type = 'Company' LIMIT 100` with user input concatenated into SQL string
2871
+ *
2872
+ * Query parameters provide automatic escaping and type safety. Use `?` placeholders with parameters array.
2873
+ *
3001
2874
  * **Use Cases:**
3002
2875
  * - Validate data quality before graph ingestion
3003
2876
  * - Inspect row-level data for debugging
@@ -3017,27 +2890,12 @@ exports.ingestTables = ingestTables;
3017
2890
  * - Aggregations, window functions, CTEs
3018
2891
  * - Multiple table joins across staging area
3019
2892
  *
3020
- * **Example Queries:**
3021
- * ```sql
3022
- * -- Count rows in staging table
3023
- * SELECT COUNT(*) FROM Entity;
3024
- *
3025
- * -- Check for nulls
3026
- * SELECT * FROM Entity WHERE name IS NULL LIMIT 10;
3027
- *
3028
- * -- Find duplicates
3029
- * SELECT identifier, COUNT(*) as cnt
3030
- * FROM Entity
3031
- * GROUP BY identifier
3032
- * HAVING COUNT(*) > 1;
3033
- *
3034
- * -- Join across tables
3035
- * SELECT e.name, COUNT(t.id) as transaction_count
3036
- * FROM Entity e
3037
- * LEFT JOIN Transaction t ON e.identifier = t.entity_id
3038
- * GROUP BY e.name
3039
- * ORDER BY transaction_count DESC;
3040
- * ```
2893
+ * **Common Operations:**
2894
+ * - Count rows: `SELECT COUNT(*) FROM Entity`
2895
+ * - Filter by type: `SELECT * FROM Entity WHERE entity_type = ? LIMIT ?` with `parameters: ["Company", 100]`
2896
+ * - Check for nulls: `SELECT * FROM Entity WHERE name IS NULL LIMIT 10`
2897
+ * - Find duplicates: `SELECT identifier, COUNT(*) as cnt FROM Entity GROUP BY identifier HAVING COUNT(*) > 1`
2898
+ * - Filter amounts: `SELECT * FROM Transaction WHERE amount > ? AND date >= ?` with `parameters: [1000, "2024-01-01"]`
3041
2899
  *
3042
2900
  * **Limits:**
3043
2901
  * - Query timeout: 30 seconds
@@ -3050,7 +2908,7 @@ exports.ingestTables = ingestTables;
3050
2908
  * Use the graph query endpoint instead: `POST /v1/graphs/{graph_id}/query`
3051
2909
  *
3052
2910
  * **Note:**
3053
- * Staging table queries are included - no credit consumption.
2911
+ * Staging table queries are included - no credit consumption
3054
2912
  */
3055
2913
  const queryTables = (options) => {
3056
2914
  return (options.client ?? client_gen_1.client).post({
@@ -3075,7 +2933,40 @@ const queryTables = (options) => {
3075
2933
  exports.queryTables = queryTables;
3076
2934
  /**
3077
2935
  * Get User Graphs
3078
- * Get all graph databases accessible to the current user.
2936
+ * List all graph databases accessible to the current user with roles and selection status.
2937
+ *
2938
+ * Returns a comprehensive list of all graphs the user can access, including their
2939
+ * role in each graph (admin or member) and which graph is currently selected as
2940
+ * the active workspace.
2941
+ *
2942
+ * **Returned Information:**
2943
+ * - Graph ID and display name for each accessible graph
2944
+ * - User's role (admin/member) indicating permission level
2945
+ * - Selection status (one graph can be marked as "selected")
2946
+ * - Creation timestamp for each graph
2947
+ *
2948
+ * **Graph Roles:**
2949
+ * - `admin`: Full access - can manage graph settings, invite users, delete graph
2950
+ * - `member`: Read/write access - can query and modify data, cannot manage settings
2951
+ *
2952
+ * **Selected Graph Concept:**
2953
+ * The "selected" graph is the user's currently active workspace. Many API operations
2954
+ * default to the selected graph if no graph_id is provided. Users can change their
2955
+ * selected graph via the `POST /v1/graphs/{graph_id}/select` endpoint.
2956
+ *
2957
+ * **Use Cases:**
2958
+ * - Display graph selector in UI
2959
+ * - Show user's accessible workspaces
2960
+ * - Identify which graph is currently active
2961
+ * - Filter graphs by role for permission-based features
2962
+ *
2963
+ * **Empty Response:**
2964
+ * New users or users without graph access will receive an empty list with
2965
+ * `selectedGraphId: null`. Users should create a new graph or request access
2966
+ * to an existing graph.
2967
+ *
2968
+ * **Note:**
2969
+ * Graph listing is included - no credit consumption required.
3079
2970
  */
3080
2971
  const getGraphs = (options) => {
3081
2972
  return (options?.client ?? client_gen_1.client).get({
@@ -3160,7 +3051,34 @@ const createGraph = (options) => {
3160
3051
  exports.createGraph = createGraph;
3161
3052
  /**
3162
3053
  * Get Available Schema Extensions
3163
- * List all available schema extensions for graph creation
3054
+ * List all available schema extensions for graph creation.
3055
+ *
3056
+ * Schema extensions provide pre-built industry-specific data models that extend
3057
+ * the base graph schema with specialized nodes, relationships, and properties.
3058
+ *
3059
+ * **Available Extensions:**
3060
+ * - **RoboLedger**: Complete accounting system with XBRL reporting, general ledger, and financial statements
3061
+ * - **RoboInvestor**: Investment portfolio management and tracking
3062
+ * - **RoboSCM**: Supply chain management and logistics
3063
+ * - **RoboFO**: Front office operations and CRM
3064
+ * - **RoboHRM**: Human resources management
3065
+ * - **RoboEPM**: Enterprise performance management
3066
+ * - **RoboReport**: Business intelligence and reporting
3067
+ *
3068
+ * **Extension Information:**
3069
+ * Each extension includes:
3070
+ * - Display name and description
3071
+ * - Node and relationship counts
3072
+ * - Context-aware capabilities (e.g., SEC repositories get different features than entity graphs)
3073
+ *
3074
+ * **Use Cases:**
3075
+ * - Browse available extensions before creating a graph
3076
+ * - Understand extension capabilities and data models
3077
+ * - Plan graph schema based on business requirements
3078
+ * - Combine multiple extensions for comprehensive data modeling
3079
+ *
3080
+ * **Note:**
3081
+ * Extension listing is included - no credit consumption required.
3164
3082
  */
3165
3083
  const getAvailableExtensions = (options) => {
3166
3084
  return (options?.client ?? client_gen_1.client).get({
@@ -3181,7 +3099,35 @@ const getAvailableExtensions = (options) => {
3181
3099
  exports.getAvailableExtensions = getAvailableExtensions;
3182
3100
  /**
3183
3101
  * Select Graph
3184
- * Select a specific graph as the active graph for the user.
3102
+ * Select a specific graph as the active workspace for the user.
3103
+ *
3104
+ * The selected graph becomes the default context for operations in client applications
3105
+ * and can be used to maintain user workspace preferences across sessions.
3106
+ *
3107
+ * **Functionality:**
3108
+ * - Sets the specified graph as the user's currently selected graph
3109
+ * - Deselects any previously selected graph (only one can be selected at a time)
3110
+ * - Persists selection across sessions until changed
3111
+ * - Returns confirmation with the selected graph ID
3112
+ *
3113
+ * **Requirements:**
3114
+ * - User must have access to the graph (as admin or member)
3115
+ * - Graph must exist and not be deleted
3116
+ * - User can only select graphs they have permission to access
3117
+ *
3118
+ * **Use Cases:**
3119
+ * - Switch between multiple graphs in a multi-graph environment
3120
+ * - Set default workspace after creating a new graph
3121
+ * - Restore user's preferred workspace on login
3122
+ * - Support graph context switching in client applications
3123
+ *
3124
+ * **Client Integration:**
3125
+ * Many client operations can default to the selected graph, simplifying API calls
3126
+ * by eliminating the need to specify graph_id repeatedly. Check the selected
3127
+ * graph with `GET /v1/graphs` which returns `selectedGraphId`.
3128
+ *
3129
+ * **Note:**
3130
+ * Graph selection is included - no credit consumption required.
3185
3131
  */
3186
3132
  const selectGraph = (options) => {
3187
3133
  return (options.client ?? client_gen_1.client).post({