@arbidocs/sdk 0.3.5 → 0.3.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3) hide show
  1. package/dist/index.d.cts +542 -199
  2. package/dist/index.d.ts +542 -199
  3. package/package.json +1 -1
package/dist/index.d.cts CHANGED
@@ -226,6 +226,26 @@ interface paths {
226
226
  patch?: never;
227
227
  trace?: never;
228
228
  };
229
+ '/api/user/sso-send-verification-email': {
230
+ parameters: {
231
+ query?: never;
232
+ header?: never;
233
+ path?: never;
234
+ cookie?: never;
235
+ };
236
+ get?: never;
237
+ put?: never;
238
+ /**
239
+ * Sso Send Verification Email
240
+ * @description Prompt Auth0 to send a verification email to the user's email address.
241
+ */
242
+ post: operations['sso_send_verification_email_api_user_sso_send_verification_email_post'];
243
+ delete?: never;
244
+ options?: never;
245
+ head?: never;
246
+ patch?: never;
247
+ trace?: never;
248
+ };
229
249
  '/api/user/workspaces': {
230
250
  parameters: {
231
251
  query?: never;
@@ -603,6 +623,15 @@ interface paths {
603
623
  * @description Upload multiple documents to a workspace with encryption.
604
624
  * Documents are queued for processing, parsed, and indexed for vector search.
605
625
  *
626
+ * Supports both file uploads and programmatic document creation:
627
+ * - For file uploads: standard multipart form with files
628
+ * - For artifacts/content: create a file from text content on the client side
629
+ *
630
+ * Optional parameters for version tracking and document typing:
631
+ * - parent_ext_id: Links new document as child of parent (version chain)
632
+ * - wp_type: Document type (source, skill, memory, artifact). Defaults to 'source'.
633
+ * - tag_ext_id: Links document to a tag
634
+ *
606
635
  * Requires active subscription (paid/trial/dev) if Stripe is configured.
607
636
  */
608
637
  post: operations['upload_documents'];
@@ -724,29 +753,6 @@ interface paths {
724
753
  patch?: never;
725
754
  trace?: never;
726
755
  };
727
- '/api/document/upload-email': {
728
- parameters: {
729
- query?: never;
730
- header?: never;
731
- path?: never;
732
- cookie?: never;
733
- };
734
- get?: never;
735
- put?: never;
736
- /**
737
- * Upload Email
738
- * @description Upload files via email gateway to a new timestamped workspace.
739
- *
740
- * Authentication is via SUPPORT_API_KEY (X-API-Key header), not user JWT.
741
- * Creates a new workspace named with the current timestamp and uploads files to it.
742
- */
743
- post: operations['upload_email'];
744
- delete?: never;
745
- options?: never;
746
- head?: never;
747
- patch?: never;
748
- trace?: never;
749
- };
750
756
  '/api/conversation/message/{message_ext_id}': {
751
757
  parameters: {
752
758
  query?: never;
@@ -943,10 +949,10 @@ interface paths {
943
949
  put?: never;
944
950
  /**
945
951
  * Submit human input to an active agent workflow
946
- * @description Submit the user's response to an agent's ask_user request.
952
+ * @description Send a user message to an active agent workflow.
947
953
  *
948
- * The agent pauses when it calls ask_user, emitting a user_input_request SSE event.
949
- * The frontend collects the user's choice and POSTs it here to resume the workflow.
954
+ * Works for both ask_user responses (agent is paused) and interjections
955
+ * (agent is actively working). Both use the same unified message queue.
950
956
  */
951
957
  post: operations['respond_to_agent'];
952
958
  delete?: never;
@@ -1325,6 +1331,12 @@ interface components {
1325
1331
  * "detail": [
1326
1332
  * {"tool": "get_document_passages", "doc_ext_id": "doc-abc12345", "from_ref": "5", "to_ref": "8"}
1327
1333
  * ]}
1334
+ * {"type": "agent_step", "index": 3, "detail": [
1335
+ * {"tool": "web_search", "query": "latest arbitration developments 2026"}
1336
+ * ]}
1337
+ * {"type": "agent_step", "index": 3, "detail": [
1338
+ * {"tool": "delegate_to_agent", "task": "Execute Python code to compute the SHA256 hash..."}
1339
+ * ]}
1328
1340
  * {"type": "agent_step", "index": 4, "step": "evaluation", "detail": [
1329
1341
  * {"statement": "Newton prosecuted 28 coiners", "chunk_ids": ["chunk-abc"]},
1330
1342
  * {"statement": "Counterfeiting was high treason", "chunk_ids": ["chunk-def"]}
@@ -1336,6 +1348,11 @@ interface components {
1336
1348
  * {"type": "agent_step", "index": 7, "step": "reviewing"}
1337
1349
  */
1338
1350
  AgentStepEvent: {
1351
+ /**
1352
+ * T
1353
+ * @description Seconds elapsed since stream start.
1354
+ */
1355
+ t?: number | null;
1339
1356
  /**
1340
1357
  * Type
1341
1358
  * @default agent_step
@@ -1387,12 +1404,8 @@ interface components {
1387
1404
  * @default true
1388
1405
  */
1389
1406
  HUMAN_IN_THE_LOOP: boolean;
1390
- /**
1391
- * Web Search
1392
- * @description Enable web tools: web_search (find URLs) and read_url (fetch and process webpages).
1393
- * @default false
1394
- */
1395
- WEB_SEARCH: boolean;
1407
+ /** @description Web search tool configuration. */
1408
+ WEB_SEARCH?: components['schemas']['WebSearchConfig'];
1396
1409
  /**
1397
1410
  * Mcp Tools
1398
1411
  * @description MCP tool names to enable from LiteLLM (e.g. ['read_wiki_contents', 'ask_question']).
@@ -1401,22 +1414,34 @@ interface components {
1401
1414
  MCP_TOOLS: string[];
1402
1415
  /**
1403
1416
  * Planning Enabled
1404
- * @description Run a planning step before the first agent decision. The agent generates a text plan that guides subsequent tool calls.
1417
+ * @description Include the create_plan tool so the agent can generate a research plan when it deems a task complex enough. Plan approval is configured on PlanningLLM.
1405
1418
  * @default false
1406
1419
  */
1407
1420
  PLANNING_ENABLED: boolean;
1408
- /**
1409
- * Planning Approval
1410
- * @description When planning is enabled, pause after generating the plan to allow the user to approve, skip, or comment before proceeding. Requires PLANNING_ENABLED=True to have any effect.
1411
- * @default false
1412
- */
1413
- PLANNING_APPROVAL: boolean;
1414
1421
  /**
1415
1422
  * Suggested Queries
1416
1423
  * @description Generate a single suggested follow-up query after each response.
1417
1424
  * @default false
1418
1425
  */
1419
1426
  SUGGESTED_QUERIES: boolean;
1427
+ /**
1428
+ * Artifacts Enabled
1429
+ * @description Enable the create_artifact tool for generating standalone documents (summaries, reports, drafts) displayed in a side panel.
1430
+ * @default false
1431
+ */
1432
+ ARTIFACTS_ENABLED: boolean;
1433
+ /**
1434
+ * Vision Enabled
1435
+ * @description Enable the view_document_pages tool for visual inspection of document pages (figures, charts, scanned pages) using a vision-capable model.
1436
+ * @default false
1437
+ */
1438
+ VISION_ENABLED: boolean;
1439
+ /**
1440
+ * Review Enabled
1441
+ * @description Enable ReviewLLM to review agent draft answers against source material before sending to the user. Uses the reasoning model for higher quality.
1442
+ * @default false
1443
+ */
1444
+ REVIEW_ENABLED: boolean;
1420
1445
  /**
1421
1446
  * Persona
1422
1447
  * @description Agent persona prepended to the system prompt. Customise to change the agent's identity and tone.
@@ -1456,6 +1481,12 @@ interface components {
1456
1481
  * @default 10
1457
1482
  */
1458
1483
  AGENT_MAX_ITERATIONS: number;
1484
+ /**
1485
+ * Max Passage Pages
1486
+ * @description Maximum number of pages allowed per get_document_passages call. Calls requesting a wider range are rejected, forcing the agent to use get_table_of_contents and target specific sections.
1487
+ * @default 10
1488
+ */
1489
+ MAX_PASSAGE_PAGES: number;
1459
1490
  /**
1460
1491
  * Agent History Char Threshold
1461
1492
  * @description Char threshold for conversation history injected into the agent context. Below: verbatim user/assistant pairs. Above: summarized by SummariseLLM.
@@ -1465,40 +1496,37 @@ interface components {
1465
1496
  /**
1466
1497
  * Agent System Prompt
1467
1498
  * @description System prompt for the agent. Dynamic context is appended automatically.
1468
- * @default YOUR TASK:
1469
- * Determine which, if any, of the available documents contain information relevant to the user's query. Use your tools to retrieve and examine document content.
1499
+ * @default YOU ARE:
1500
+ * ARBI an AI assistant made by Arbitration City Ltd (https://arbicity.com), capable of helping with a wide variety of tasks using workspace documents, web search, and general knowledge.
1501
+ * Always respond in first person. Be helpful and natural, not overly formal.
1502
+ *
1503
+ * YOUR TASK:
1504
+ * Answer the user's query. Use workspace documents when relevant. If appropriate, you may use available tools to perform external research.
1470
1505
  *
1471
- * TOOLS AVAILABLE:
1472
- * - search_documents: Search for relevant passages across documents
1473
- * - get_document_passages: Read specific pages from a document
1474
- * - get_table_of_contents: Get headings and page references for documents
1475
- * - web_search: Search the web for current/external information
1476
- * - read_urls: Fetch and index webpages. Always call immediately after web_search
1477
- * with the 2-3 most relevant URLs. Then use search_documents to query them.
1478
- * - ask_user: Ask the user a clarifying question with structured options.
1479
- * You MUST call this tool to ask the user anything — do NOT write questions
1480
- * or options as plain text. Only the tool delivers the question to the user.
1481
- * IMPORTANT: Once the user responds, do NOT ask the same question again.
1482
- * Use their answer and proceed with research. If the user's answer is
1483
- * unexpected (e.g. refers to something not in the documents), explain what
1484
- * you found and answer based on available information.
1506
+ * ANSWERING GUIDELINES:
1507
+ * - If the answer comes from workspace documents, cite them normally.
1508
+ * - If the answer comes from external research, say so.
1509
+ * - If the answer is based on general knowledge (no tool results), state that
1510
+ * it is based on general knowledge and may not be fully accurate.
1511
+ * - You are allowed to answer questions that are not about the documents.
1485
1512
  *
1486
1513
  * EFFICIENCY GUIDELINES:
1487
- * 1. PARALLEL CALLS: When a query has multiple aspects, call multiple tools in parallel (e.g., search different keywords simultaneously)
1488
- * 2. AVOID DUPLICATION: Before searching, review learnings from previous results. Do not repeat searches for information you already have
1514
+ * 1. PARALLEL CALLS: When a query has multiple aspects, call multiple tools in parallel (e.g., search different keywords simultaneously)
1515
+ * 2. AVOID DUPLICATION: Before searching, review learnings from previous results. Do not repeat searches for information you already have
1489
1516
  * 3. TARGETED SEARCHES: Use specific, focused queries rather than broad ones
1490
- * 4. BROAD PASSAGE RANGES: When using get_document_passages, request broad ranges (5-10+ pages per call). A few large fetches are far better than many small ones
1491
- * 5. STOP WHEN READY: Provide your answer when you have sufficient evidence. Signs you have enough:
1517
+ * 4. NARROW RETRIEVALS: When retrieving document passages, use sub-page references (e.g. 5.3-8.1) where available from search results or table of contents. Request narrow ranges per call and fetch additional ranges in subsequent iterations if needed; avoid fetching entire documents at once
1518
+ * 5. STOP WHEN READY: Provide your answer when you have sufficient evidence. Signs you have enough:
1492
1519
  * - Multiple sources confirm the same facts
1493
1520
  * - Key questions from the query are addressed
1494
1521
  * - Additional searches return redundant information
1495
1522
  *
1496
1523
  * WORKFLOW:
1497
- * 1. Analyze the query to identify distinct information needs
1498
- * 2. Execute parallel searches for different aspects when appropriate
1499
- * 3. Evaluate results - extract learnings, note gaps
1500
- * 4. Only search again if specific gaps remain unfilled
1501
- * 5. Answer when confident (no tool call) - do not over-research
1524
+ * 1. For complex multi-document queries, consider calling create_plan first to structure your approach
1525
+ * 2. Choose the right tool for each need — documents for uploaded content, web for external/current info
1526
+ * 3. Execute parallel searches for different aspects when appropriate
1527
+ * 4. Evaluate results - extract learnings, note gaps
1528
+ * 5. Only search again if specific gaps remain unfilled
1529
+ * 6. Answer when confident (no tool call) - do not over-research
1502
1530
  *
1503
1531
  * IMPORTANT:
1504
1532
  * - The document index contains metadata only - you must retrieve actual content
@@ -1512,19 +1540,25 @@ interface components {
1512
1540
  * @default {
1513
1541
  * "ENABLED": true,
1514
1542
  * "HUMAN_IN_THE_LOOP": true,
1515
- * "WEB_SEARCH": false,
1543
+ * "WEB_SEARCH": {
1544
+ * "ENABLED": true,
1545
+ * "SAVE_SOURCES": true
1546
+ * },
1516
1547
  * "MCP_TOOLS": [],
1517
1548
  * "PLANNING_ENABLED": false,
1518
- * "PLANNING_APPROVAL": false,
1519
1549
  * "SUGGESTED_QUERIES": false,
1550
+ * "ARTIFACTS_ENABLED": false,
1551
+ * "VISION_ENABLED": false,
1552
+ * "REVIEW_ENABLED": false,
1520
1553
  * "PERSONA": "You are ARBI, an AI assistant created by ARBI CITY.\n\nYou maintain formal, objective tone appropriate for professional/legal contexts. If any part of the answer is based on general knowledge instead of the supplied sources, you must state so clearly and recognise that it may not be accurate.",
1521
1554
  * "AGENT_MODEL_NAME": "Q3VL@ARBICITY",
1522
1555
  * "AGENT_API_TYPE": "remote",
1523
1556
  * "LLM_AGENT_TEMPERATURE": 0.7,
1524
1557
  * "AGENT_MAX_TOKENS": 20000,
1525
1558
  * "AGENT_MAX_ITERATIONS": 10,
1559
+ * "MAX_PASSAGE_PAGES": 10,
1526
1560
  * "AGENT_HISTORY_CHAR_THRESHOLD": 8000,
1527
- * "AGENT_SYSTEM_PROMPT": "YOUR TASK:\nDetermine which, if any, of the available documents contain information relevant to the user's query. Use your tools to retrieve and examine document content.\n\nTOOLS AVAILABLE:\n- search_documents: Search for relevant passages across documents\n- get_document_passages: Read specific pages from a document\n- get_table_of_contents: Get headings and page references for documents\n- web_search: Search the web for current/external information\n- read_urls: Fetch and index webpages. Always call immediately after web_search\n with the 2-3 most relevant URLs. Then use search_documents to query them.\n- ask_user: Ask the user a clarifying question with structured options.\n You MUST call this tool to ask the user anything do NOT write questions\n or options as plain text. Only the tool delivers the question to the user.\n IMPORTANT: Once the user responds, do NOT ask the same question again.\n Use their answer and proceed with research. If the user's answer is\n unexpected (e.g. refers to something not in the documents), explain what\n you found and answer based on available information.\n\nEFFICIENCY GUIDELINES:\n1. PARALLEL CALLS: When a query has multiple aspects, call multiple tools in parallel (e.g., search different keywords simultaneously)\n2. AVOID DUPLICATION: Before searching, review learnings from previous results. Do not repeat searches for information you already have\n3. TARGETED SEARCHES: Use specific, focused queries rather than broad ones\n4. BROAD PASSAGE RANGES: When using get_document_passages, request broad ranges (5-10+ pages per call). A few large fetches are far better than many small ones\n5. STOP WHEN READY: Provide your answer when you have sufficient evidence. Signs you have enough:\n - Multiple sources confirm the same facts\n - Key questions from the query are addressed\n - Additional searches return redundant information\n\nWORKFLOW:\n1. Analyze the query to identify distinct information needs\n2. 
Execute parallel searches for different aspects when appropriate\n3. Evaluate results - extract learnings, note gaps\n4. Only search again if specific gaps remain unfilled\n5. Answer when confident (no tool call) - do not over-research\n\nIMPORTANT:\n- The document index contains metadata only - you must retrieve actual content\n- Do not add inline citation markers - the system handles citations automatically"
1561
+ * "AGENT_SYSTEM_PROMPT": "YOU ARE:\nARBI an AI assistant made by Arbitration City Ltd (https://arbicity.com), capable of helping with a wide variety of tasks using workspace documents, web search, and general knowledge.\nAlways respond in first person. Be helpful and natural, not overly formal.\n\nYOUR TASK:\nAnswer the user's query. Use workspace documents when relevant. If appropriate, you may use available tools to perform external research.\n\nANSWERING GUIDELINES:\n- If the answer comes from workspace documents, cite them normally.\n- If the answer comes from external research, say so.\n- If the answer is based on general knowledge (no tool results), state that\n it is based on general knowledge and may not be fully accurate.\n- You are allowed to answer questions that are not about the documents.\n\nEFFICIENCY GUIDELINES:\n1. PARALLEL CALLS: When a query has multiple aspects, call multiple tools in parallel (e.g., search different keywords simultaneously)\n2. AVOID DUPLICATION: Before searching, review learnings from previous results. Do not repeat searches for information you already have\n3. TARGETED SEARCHES: Use specific, focused queries rather than broad ones\n4. NARROW RETRIEVALS: When retrieving document passages, use sub-page references (e.g. 5.3-8.1) where available from search results or table of contents. Request narrow ranges per call and fetch additional ranges in subsequent iterations if needed avoid fetching entire documents at once\n5. STOP WHEN READY: Provide your answer when you have sufficient evidence. Signs you have enough:\n - Multiple sources confirm the same facts\n - Key questions from the query are addressed\n - Additional searches return redundant information\n\nWORKFLOW:\n1. For complex multi-document queries, consider calling create_plan first to structure your approach\n2. Choose the right tool for each need — documents for uploaded content, web for external/current info\n3. 
Execute parallel searches for different aspects when appropriate\n4. Evaluate results - extract learnings, note gaps\n5. Only search again if specific gaps remain unfilled\n6. Answer when confident (no tool call) - do not over-research\n\nIMPORTANT:\n- The document index contains metadata only - you must retrieve actual content\n- Do not add inline citation markers - the system handles citations automatically"
1528
1562
  * }
1529
1563
  */
1530
1564
  Agents: components['schemas']['AgentsConfig'];
@@ -1532,15 +1566,24 @@ interface components {
1532
1566
  * @default {
1533
1567
  * "API_TYPE": "remote",
1534
1568
  * "MODEL_NAME": "Q3VL@ARBICITY",
1535
- * "SYSTEM_INSTRUCTION": "You are ARBI, an AI assistant created by ARBI CITY.\n\nYou maintain formal, objective tone appropriate for professional/legal contexts. If any part of the answer is based on general knowledge instead of the supplied sources, you must state so clearly and recognise that it may not be accurate.You can ask a follow-up question instead of answering immediately if you are confused by the question and unable to intuit the user's intent. \n\nStructure responses with clear headings and logical organization:\n- Use hierarchical organization: main conclusion first, followed by supporting points\n- Bullet points for lists of discrete items\n- Numbering for sequential steps or prioritized items\n- Bold text for critical conclusions or decision points\n\nYou should recognise that not all assertions in the sources are factual. Some are mere assertions/arguments by the author, particularly in a legal context where different parties may express different opinions or even contradict each other's factual narrative. The best way to handle this is usually to attribute where the information comes from. I.e. according to xx document, or xx party.\n\nYou should avoid generalisations and provide exact quotations where possible. Never make up citations, facts or other information. Acknowledge uncertainty where source materials are insufficient or ambiguous.",
1536
- * "AGENT_REVIEW_INSTRUCTION": "You are reviewing a draft answer prepared by a research agent. The draft was written based on summaries, but you now have access to the full source material.\n\nYour task:\n1. Review the draft answer against the source material provided\n2. Verify claims are supported by the sources\n3. Correct any inaccuracies or unsupported statements\n4. Add any important details from the sources that were missed\n5. Maintain formal, objective tone appropriate for professional contexts\n\nIf information is not found in the sources, clearly state this. Do not add inline citation markers - the system handles citations automatically.",
1537
- * "DISABLED": true,
1569
+ * "SYSTEM_INSTRUCTION": "You are ARBI, an AI assistant created by ARBI CITY.\n\nYou maintain formal, objective tone appropriate for professional/legal contexts. If any part of the answer is based on general knowledge instead of the supplied sources, you must state so clearly and recognise that it may not be accurate.You can ask a follow-up question instead of answering immediately if you are confused by the question and unable to intuit the user's intent.\n\nStructure responses with clear headings and logical organization:\n- Use hierarchical organization: main conclusion first, followed by supporting points\n- Bullet points for lists of discrete items\n- Numbering for sequential steps or prioritized items\n- Bold text for critical conclusions or decision points\n\nYou should recognise that not all assertions in the sources are factual. Some are mere assertions/arguments by the author, particularly in a legal context where different parties may express different opinions or even contradict each other's factual narrative. The best way to handle this is usually to attribute where the information comes from. I.e. according to xx document, or xx party.\n\nYou should avoid generalisations and provide exact quotations where possible. Never make up citations, facts or other information. Acknowledge uncertainty where source materials are insufficient or ambiguous.",
1538
1570
  * "MAX_CHAR_SIZE_TO_ANSWER": 200000,
1539
1571
  * "TEMPERATURE": 0.1,
1540
1572
  * "MAX_TOKENS": 5000
1541
1573
  * }
1542
1574
  */
1543
1575
  QueryLLM: components['schemas']['QueryLLMConfig'];
1576
+ /**
1577
+ * @default {
1578
+ * "API_TYPE": "remote",
1579
+ * "MODEL_NAME": "GPTOSS120@ARBICITY",
1580
+ * "SYSTEM_INSTRUCTION": "You are reviewing a draft answer prepared by a research agent. The draft was written based on summaries, but you now have access to the full source material.\n\nYour task:\n1. Review the draft answer against the source material provided\n2. Verify claims are supported by the sources\n3. Correct any inaccuracies or unsupported statements\n4. Add any important details from the sources that were missed\n5. Maintain formal, objective tone appropriate for professional contexts\n\nIf information is not found in the sources but the draft answer is based on web search results or general knowledge, preserve it and ensure it is clearly labelled as such. Only flag missing source support for claims that purport to come from the documents.\nDo not add inline citation markers - the system handles citations automatically.",
1581
+ * "TEMPERATURE": 0.1,
1582
+ * "MAX_TOKENS": 5000,
1583
+ * "MAX_CHAR_SIZE_TO_ANSWER": 200000
1584
+ * }
1585
+ */
1586
+ ReviewLLM: components['schemas']['ReviewLLMConfig'];
1544
1587
  /**
1545
1588
  * @default {
1546
1589
  * "API_TYPE": "remote",
@@ -1566,7 +1609,7 @@ interface components {
1566
1609
  /**
1567
1610
  * @default {
1568
1611
  * "API_TYPE": "remote",
1569
- * "MODEL_NAME": "Q3VL@ARBICITY",
1612
+ * "MODEL_NAME": "GPTOSS120@ARBICITY",
1570
1613
  * "SYSTEM_INSTRUCTION": "You are a conversation summariser. Condense the conversation history into a concise summary that preserves:\n- Key decisions and conclusions reached\n- Specific names, dates, numbers, and document references\n- Open questions and unresolved items\n- The user's goals and constraints\n\nWrite in past tense, third person. Be specific, not vague. Do not omit important details in favour of brevity.",
1571
1614
  * "TEMPERATURE": 0.1,
1572
1615
  * "MAX_TOKENS": 2000,
@@ -1637,16 +1680,39 @@ interface components {
1637
1680
  /**
1638
1681
  * @default {
1639
1682
  * "API_TYPE": "remote",
1640
- * "MODEL_NAME": "Q3VL@ARBICITY",
1683
+ * "MODEL_NAME": "GPTOSS120@ARBICITY",
1641
1684
  * "ENABLED": true,
1642
- * "SYSTEM_INSTRUCTION": "You are a knowledge synthesizer. You receive the full agent scratchpad from a completed conversation and decide what, if anything, is worth saving.\n\n## Artifact Types\n- **\"memory\"**: Facts, findings, reference data — for *looking up* information.\n- **\"skill\"**: Procedures, workflows, step-by-step instructions — for *doing* something.\n\nWhen ambiguous, default to memory.\n\n## Rules\n- Return an empty artifacts array if the conversation is trivial or produced nothing substantive.\n- Each artifact must cover ONE coherent topic. Never combine disparate subjects into a single artifact — create separate artifacts for each distinct topic.\n- Ground everything in the provided context. Do not invent information.\n- Be concise. These are reference documents, not essays.\n\n## Output Format\nJSON with an `artifacts` array. Each item has `wp_type`, `title`, and `content` (markdown).\n\nMemory content: `# Title`, date, source, key findings as bullets, source documents.\nSkill content: `# Title`, prerequisites, numbered steps, when to apply.",
1685
+ * "SYSTEM_INSTRUCTION": "You are a knowledge synthesizer. You receive the full agent scratchpad from a completed conversation and decide what, if anything, is worth saving.\n\n## Work Product Types\n- **\"memory\"**: Facts, findings, reference data — for *looking up* information.\n- **\"skill\"**: Procedures, workflows, step-by-step instructions — for *doing* something.\n\nWhen ambiguous, default to memory.\n\n## Rules\n- Return an empty work_products array if the conversation is trivial or produced nothing substantive.\n- Each work product must cover ONE coherent topic. Never combine disparate subjects into a single document — create separate documents for each distinct topic.\n- Ground everything in the provided context. Do not invent information.\n- Be concise. These are reference documents, not essays.\n\n## Output Format\nJSON with a `work_products` array. Each item has `wp_type`, `title`, and `content` (markdown).\n\nMemory content: `# Title`, date, source, key findings as bullets, source documents.\nSkill content: `# Title`, prerequisites, numbered steps, when to apply.",
1643
1686
  * "TEMPERATURE": 0.3,
1644
1687
  * "MAX_TOKENS": 8000,
1645
1688
  * "MAX_CHAR_CONTEXT": 100000,
1646
1689
  * "MAX_CONCURRENT": 5
1647
1690
  * }
1648
1691
  */
1649
- ArtifactLLM: components['schemas']['ArtifactLLMConfig'];
1692
+ MemoryLLM: components['schemas']['MemoryLLMConfig'];
1693
+ /**
1694
+ * @default {
1695
+ * "API_TYPE": "remote",
1696
+ * "MODEL_NAME": "GPTOSS120@ARBICITY",
1697
+ * "SYSTEM_INSTRUCTION": "You are a research planning assistant. Your job is to analyze a user query and produce a concise, numbered research plan.\n\nYou have access to a workspace of documents. The available tools for executing the plan are:\n- search_documents: Search workspace documents for relevant passages (semantic, keyword, or hybrid)\n- get_document_passages: Read a specific page range from a document\n- get_table_of_contents: Get document headings with page references\n\nConsider:\n- What information is needed to answer the query\n- Which documents are likely relevant based on the document index\n- What search queries and document passages to examine\n- What order of operations will be most efficient\n- Whether parallel searches can be used for different aspects\n\nRespond with ONLY the plan (numbered steps). Do not execute any steps.",
1698
+ * "TEMPERATURE": 0.3,
1699
+ * "MAX_TOKENS": 4000,
1700
+ * "MAX_CHAR_SIZE_TO_ANSWER": 200000,
1701
+ * "APPROVAL_TIMEOUT": 0
1702
+ * }
1703
+ */
1704
+ PlanningLLM: components['schemas']['PlanningLLMConfig'];
1705
+ /**
1706
+ * @default {
1707
+ * "API_TYPE": "remote",
1708
+ * "MODEL_NAME": "Q3VL@ARBICITY",
1709
+ * "TEMPERATURE": 0.1,
1710
+ * "MAX_TOKENS": 4000,
1711
+ * "MAX_PAGES_PER_CALL": 5,
1712
+ * "IMAGE_MAX_DIMENSION": 1280
1713
+ * }
1714
+ */
1715
+ VisionLLM: components['schemas']['VisionLLMConfig'];
1650
1716
  /**
1651
1717
  * @default {
1652
1718
  * "SIM_THREASHOLD": 0.1,
@@ -1709,84 +1775,75 @@ interface components {
1709
1775
  KeywordEmbedder: components['schemas']['KeywordEmbedderConfig'];
1710
1776
  };
1711
1777
  /**
1712
- * ArtifactLLMConfig
1713
- * @description Configuration for ArtifactLLM - synthesizes document artifacts (skills, memories, reports).
1778
+ * ArtifactEvent
1779
+ * @description A generated artifact (document/draft) for the artifact panel.
1714
1780
  *
1715
- * Produces markdown documents from gathered context (learnings, retrieval results,
1716
- * conversation history). Output is saved as a .txt document through the standard
1717
- * upload pipeline and linked to an assistant tag via DocTags.
1781
+ * Artifacts ARE documents (wp_type='artifact') - the doc_ext_id is the single
1782
+ * identifier used throughout. They support versioning for redline/track-changes
1783
+ * functionality (useful for legal document review).
1718
1784
  *
1719
- * Note: The assistant tag relationship is stored on Configurations.tag_id (FK),
1720
- * not in this config. This config just controls synthesis behavior.
1785
+ * The frontend should:
1786
+ * - Open the artifact panel when receiving an artifact event
1787
+ * - Stream content updates while is_complete=False
1788
+ * - Enable redline view when multiple versions exist
1789
+ *
1790
+ * Examples:
1791
+ * {"type": "artifact", "doc_ext_id": "doc-abc12345", "title": "Contract Summary",
1792
+ * "mode": "markdown", "content": "# Summary\n...", "is_complete": false}
1793
+ * {"type": "artifact", "doc_ext_id": "doc-abc12345", "title": "Contract Summary",
1794
+ * "mode": "markdown", "content": "# Summary\n...", "is_complete": true, "version": 1}
1721
1795
  */
1722
- ArtifactLLMConfig: {
1796
+ ArtifactEvent: {
1723
1797
  /**
1724
- * Api Type
1725
- * @description The inference type (local or remote).
1726
- * @default remote
1727
- * @enum {string}
1798
+ * T
1799
+ * @description Seconds elapsed since stream start.
1728
1800
  */
1729
- API_TYPE: 'local' | 'remote';
1801
+ t?: number | null;
1730
1802
  /**
1731
- * Model Name
1732
- * @description The name of the non-reasoning model to be used.
1733
- * @default Q3VL@ARBICITY
1803
+ * Type
1804
+ * @default artifact
1805
+ * @constant
1734
1806
  */
1735
- MODEL_NAME: string;
1807
+ type: 'artifact';
1736
1808
  /**
1737
- * Enabled
1738
- * @description Whether to run artifact synthesis (skills, memories) after agent queries.
1739
- * @default true
1809
+ * Doc Ext Id
1810
+ * @description Document external ID (artifacts are documents)
1740
1811
  */
1741
- ENABLED: boolean;
1812
+ doc_ext_id: string;
1742
1813
  /**
1743
- * System Instruction
1744
- * @description System instruction for artifact classification and synthesis.
1745
- * @default You are a knowledge synthesizer. You receive the full agent scratchpad from a completed conversation and decide what, if anything, is worth saving.
1746
- *
1747
- * ## Artifact Types
1748
- * - **"memory"**: Facts, findings, reference data — for *looking up* information.
1749
- * - **"skill"**: Procedures, workflows, step-by-step instructions — for *doing* something.
1750
- *
1751
- * When ambiguous, default to memory.
1752
- *
1753
- * ## Rules
1754
- * - Return an empty artifacts array if the conversation is trivial or produced nothing substantive.
1755
- * - Each artifact must cover ONE coherent topic. Never combine disparate subjects into a single artifact — create separate artifacts for each distinct topic.
1756
- * - Ground everything in the provided context. Do not invent information.
1757
- * - Be concise. These are reference documents, not essays.
1758
- *
1759
- * ## Output Format
1760
- * JSON with an `artifacts` array. Each item has `wp_type`, `title`, and `content` (markdown).
1761
- *
1762
- * Memory content: `# Title`, date, source, key findings as bullets, source documents.
1763
- * Skill content: `# Title`, prerequisites, numbered steps, when to apply.
1814
+ * Title
1815
+ * @description Display title for the artifact
1764
1816
  */
1765
- SYSTEM_INSTRUCTION: string;
1817
+ title: string;
1766
1818
  /**
1767
- * Temperature
1768
- * @description Temperature for synthesis.
1769
- * @default 0.3
1819
+ * Mode
1820
+ * @description Rendering mode: markdown (default, supports redlines), html, or plain text
1821
+ * @default markdown
1822
+ * @enum {string}
1770
1823
  */
1771
- TEMPERATURE: number;
1824
+ mode: 'markdown' | 'html' | 'text';
1772
1825
  /**
1773
- * Max Tokens
1774
- * @description Maximum tokens for artifact content.
1775
- * @default 8000
1826
+ * Content
1827
+ * @description The artifact content (markdown, html, or plain text)
1776
1828
  */
1777
- MAX_TOKENS: number;
1829
+ content: string;
1778
1830
  /**
1779
- * Max Char Context
1780
- * @description Maximum characters of input context.
1781
- * @default 100000
1831
+ * Is Complete
1832
+ * @description Whether the artifact is fully generated (False during streaming)
1833
+ * @default false
1782
1834
  */
1783
- MAX_CHAR_CONTEXT: number;
1835
+ is_complete: boolean;
1784
1836
  /**
1785
- * Max Concurrent
1786
- * @description Maximum concurrent synthesis operations.
1787
- * @default 5
1837
+ * Version
1838
+ * @description Version number for tracking changes/redlines
1839
+ * @default 1
1788
1840
  */
1789
- MAX_CONCURRENT: number;
1841
+ version: number;
1842
+ /**
1843
+ * Message Ext Id
1844
+ * @description External ID of the assistant message that generated this artifact
1845
+ */
1846
+ message_ext_id?: string | null;
1790
1847
  };
1791
1848
  /**
1792
1849
  * AuthMessage
@@ -1900,6 +1957,12 @@ interface components {
1900
1957
  * @default false
1901
1958
  */
1902
1959
  heading: boolean;
1960
+ /** Bbox */
1961
+ bbox?: number[] | null;
1962
+ /** Element Type */
1963
+ element_type?: string | null;
1964
+ /** Heading Level */
1965
+ heading_level?: number | null;
1903
1966
  };
1904
1967
  /** ChunkerConfig */
1905
1968
  ChunkerConfig: {
@@ -1981,12 +2044,15 @@ interface components {
1981
2044
  */
1982
2045
  ConfigUpdateData: {
1983
2046
  QueryLLM?: components['schemas']['QueryLLMConfig'] | null;
2047
+ ReviewLLM?: components['schemas']['ReviewLLMConfig'] | null;
1984
2048
  Agents?: components['schemas']['AgentsConfig'] | null;
1985
2049
  EvaluatorLLM?: components['schemas']['EvaluatorLLMConfig'] | null;
1986
2050
  TitleLLM?: components['schemas']['TitleLLMConfig'] | null;
1987
2051
  SummariseLLM?: components['schemas']['SummariseLLMConfig'] | null;
1988
2052
  DoctagLLM?: components['schemas']['DoctagLLMConfig'] | null;
1989
- ArtifactLLM?: components['schemas']['ArtifactLLMConfig'] | null;
2053
+ MemoryLLM?: components['schemas']['MemoryLLMConfig'] | null;
2054
+ PlanningLLM?: components['schemas']['PlanningLLMConfig'] | null;
2055
+ VisionLLM?: components['schemas']['VisionLLMConfig'] | null;
1990
2056
  ModelCitation?: components['schemas']['ModelCitationConfig'] | null;
1991
2057
  Parser?: components['schemas']['ParserConfig'] | null;
1992
2058
  Chunker?: components['schemas']['ChunkerConfig'] | null;
@@ -2304,6 +2370,8 @@ interface components {
2304
2370
  re_ocred?: boolean | null;
2305
2371
  /** Config Ext Id */
2306
2372
  config_ext_id?: string | null;
2373
+ /** Parent Ext Id */
2374
+ parent_ext_id?: string | null;
2307
2375
  /** Created By Ext Id */
2308
2376
  created_by_ext_id: string;
2309
2377
  /** Updated By Ext Id */
@@ -2524,6 +2592,11 @@ interface components {
2524
2592
  * @description An error occurred during streaming. Terminal event — close EventSource.
2525
2593
  */
2526
2594
  ErrorEvent: {
2595
+ /**
2596
+ * T
2597
+ * @description Seconds elapsed since stream start.
2598
+ */
2599
+ t?: number | null;
2527
2600
  /**
2528
2601
  * Type
2529
2602
  * @default error
@@ -2717,6 +2790,86 @@ interface components {
2717
2790
  /** Tools */
2718
2791
  tools: components['schemas']['McpToolInfo'][];
2719
2792
  };
2793
+ /**
2794
+ * MemoryLLMConfig
2795
+ * @description Configuration for MemoryLLM - synthesizes memory documents (skills, memories, reports).
2796
+ *
2797
+ * Produces markdown documents from gathered context (learnings, retrieval results,
2798
+ * conversation history). Output is saved as a .txt document through the standard
2799
+ * upload pipeline and linked to an assistant tag via DocTags.
2800
+ *
2801
+ * Note: The assistant tag relationship is stored on Configurations.tag_id (FK),
2802
+ * not in this config. This config just controls synthesis behavior.
2803
+ */
2804
+ MemoryLLMConfig: {
2805
+ /**
2806
+ * Api Type
2807
+ * @description The inference type (local or remote).
2808
+ * @default remote
2809
+ * @enum {string}
2810
+ */
2811
+ API_TYPE: 'local' | 'remote';
2812
+ /**
2813
+ * Model Name
2814
+ * @description The model for memory synthesis. Defaults to reasoning model.
2815
+ * @default GPTOSS120@ARBICITY
2816
+ */
2817
+ MODEL_NAME: string;
2818
+ /**
2819
+ * Enabled
2820
+ * @description Whether to run memory synthesis (skills, memories) after agent queries.
2821
+ * @default true
2822
+ */
2823
+ ENABLED: boolean;
2824
+ /**
2825
+ * System Instruction
2826
+ * @description System instruction for memory classification and synthesis.
2827
+ * @default You are a knowledge synthesizer. You receive the full agent scratchpad from a completed conversation and decide what, if anything, is worth saving.
2828
+ *
2829
+ * ## Work Product Types
2830
+ * - **"memory"**: Facts, findings, reference data — for *looking up* information.
2831
+ * - **"skill"**: Procedures, workflows, step-by-step instructions — for *doing* something.
2832
+ *
2833
+ * When ambiguous, default to memory.
2834
+ *
2835
+ * ## Rules
2836
+ * - Return an empty work_products array if the conversation is trivial or produced nothing substantive.
2837
+ * - Each work product must cover ONE coherent topic. Never combine disparate subjects into a single document — create separate documents for each distinct topic.
2838
+ * - Ground everything in the provided context. Do not invent information.
2839
+ * - Be concise. These are reference documents, not essays.
2840
+ *
2841
+ * ## Output Format
2842
+ * JSON with a `work_products` array. Each item has `wp_type`, `title`, and `content` (markdown).
2843
+ *
2844
+ * Memory content: `# Title`, date, source, key findings as bullets, source documents.
2845
+ * Skill content: `# Title`, prerequisites, numbered steps, when to apply.
2846
+ */
2847
+ SYSTEM_INSTRUCTION: string;
2848
+ /**
2849
+ * Temperature
2850
+ * @description Temperature for synthesis.
2851
+ * @default 0.3
2852
+ */
2853
+ TEMPERATURE: number;
2854
+ /**
2855
+ * Max Tokens
2856
+ * @description Maximum tokens for memory content.
2857
+ * @default 8000
2858
+ */
2859
+ MAX_TOKENS: number;
2860
+ /**
2861
+ * Max Char Context
2862
+ * @description Maximum characters of input context.
2863
+ * @default 100000
2864
+ */
2865
+ MAX_CHAR_CONTEXT: number;
2866
+ /**
2867
+ * Max Concurrent
2868
+ * @description Maximum concurrent synthesis operations.
2869
+ * @default 5
2870
+ */
2871
+ MAX_CONCURRENT: number;
2872
+ };
2720
2873
  /** MessageDeleteResponse */
2721
2874
  MessageDeleteResponse: {
2722
2875
  /**
@@ -2890,8 +3043,8 @@ interface components {
2890
3043
  Agents: {
2891
3044
  [key: string]: string | boolean;
2892
3045
  };
2893
- /** Artifactllm */
2894
- ArtifactLLM: {
3046
+ /** Memoryllm */
3047
+ MemoryLLM: {
2895
3048
  [key: string]: boolean;
2896
3049
  };
2897
3050
  };
@@ -2935,7 +3088,7 @@ interface components {
2935
3088
  * Type is self-descriptive, no need to parse content field.
2936
3089
  * @enum {string}
2937
3090
  */
2938
- NotificationType: 'user_message' | 'workspaceuser_added_owner' | 'workspaceuser_added_collaborator' | 'workspaceuser_added_guest' | 'workspaceuser_removed' | 'workspaceuser_updated_owner' | 'workspaceuser_updated_collaborator' | 'workspaceuser_updated_guest' | 'contact_accepted';
3091
+ NotificationType: 'user_message' | 'workspaceuser_added_owner' | 'workspaceuser_added_collaborator' | 'workspaceuser_added_guest' | 'workspaceuser_removed' | 'workspaceuser_updated_owner' | 'workspaceuser_updated_collaborator' | 'workspaceuser_updated_guest' | 'contact_accepted' | 'email_reply';
2939
3092
  /**
2940
3093
  * NotificationUpdate
2941
3094
  * @description Single notification update for bulk PATCH.
@@ -2989,6 +3142,72 @@ interface components {
2989
3142
  ParsedStage: 'marker' | 'subchunk' | 'final';
2990
3143
  /** ParserConfig */
2991
3144
  ParserConfig: Record<string, never>;
3145
+ /**
3146
+ * PlanningLLMConfig
3147
+ * @description Configuration for PlanningLLM — generates research plans on demand.
3148
+ *
3149
+ * Backed by a reasoning model. The agent calls the create_plan tool when it
3150
+ * deems a task complex enough; the plan text is returned as a tool result.
3151
+ */
3152
+ PlanningLLMConfig: {
3153
+ /**
3154
+ * Api Type
3155
+ * @description The inference type (local or remote).
3156
+ * @default remote
3157
+ * @enum {string}
3158
+ */
3159
+ API_TYPE: 'local' | 'remote';
3160
+ /**
3161
+ * Model Name
3162
+ * @description The model for plan generation. Defaults to reasoning model.
3163
+ * @default GPTOSS120@ARBICITY
3164
+ */
3165
+ MODEL_NAME: string;
3166
+ /**
3167
+ * System Instruction
3168
+ * @description System instruction for the planning LLM.
3169
+ * @default You are a research planning assistant. Your job is to analyze a user query and produce a concise, numbered research plan.
3170
+ *
3171
+ * You have access to a workspace of documents. The available tools for executing the plan are:
3172
+ * - search_documents: Search workspace documents for relevant passages (semantic, keyword, or hybrid)
3173
+ * - get_document_passages: Read a specific page range from a document
3174
+ * - get_table_of_contents: Get document headings with page references
3175
+ *
3176
+ * Consider:
3177
+ * - What information is needed to answer the query
3178
+ * - Which documents are likely relevant based on the document index
3179
+ * - What search queries and document passages to examine
3180
+ * - What order of operations will be most efficient
3181
+ * - Whether parallel searches can be used for different aspects
3182
+ *
3183
+ * Respond with ONLY the plan (numbered steps). Do not execute any steps.
3184
+ */
3185
+ SYSTEM_INSTRUCTION: string;
3186
+ /**
3187
+ * Temperature
3188
+ * @description Temperature for planning.
3189
+ * @default 0.3
3190
+ */
3191
+ TEMPERATURE: number;
3192
+ /**
3193
+ * Max Tokens
3194
+ * @description Maximum tokens for the plan output.
3195
+ * @default 4000
3196
+ */
3197
+ MAX_TOKENS: number;
3198
+ /**
3199
+ * Max Char Size To Answer
3200
+ * @description Maximum character size for planning context.
3201
+ * @default 200000
3202
+ */
3203
+ MAX_CHAR_SIZE_TO_ANSWER: number;
3204
+ /**
3205
+ * Approval Timeout
3206
+ * @description Seconds to wait for user approval after generating the plan. 0 = no approval (auto-proceed). Safe for non-interactive contexts.
3207
+ * @default 0
3208
+ */
3209
+ APPROVAL_TIMEOUT: number;
3210
+ };
2992
3211
  /**
2993
3212
  * PresenceUpdateMessage
2994
3213
  * @description Sent when a contact's online status changes or is no longer tracked.
@@ -3053,7 +3272,7 @@ interface components {
3053
3272
  API_TYPE: 'local' | 'remote';
3054
3273
  /**
3055
3274
  * Model Name
3056
- * @description The model for query answering and review.
3275
+ * @description The model for query answering.
3057
3276
  * @default Q3VL@ARBICITY
3058
3277
  */
3059
3278
  MODEL_NAME: string;
@@ -3075,27 +3294,6 @@ interface components {
3075
3294
  * You should avoid generalisations and provide exact quotations where possible. Never make up citations, facts or other information. Acknowledge uncertainty where source materials are insufficient or ambiguous.
3076
3295
  */
3077
3296
  SYSTEM_INSTRUCTION: string;
3078
- /**
3079
- * Agent Review Instruction
3080
- * @description System instruction for reviewing agent draft answers against source material.
3081
- * @default You are reviewing a draft answer prepared by a research agent. The draft was written based on summaries, but you now have access to the full source material.
3082
- *
3083
- * Your task:
3084
- * 1. Review the draft answer against the source material provided
3085
- * 2. Verify claims are supported by the sources
3086
- * 3. Correct any inaccuracies or unsupported statements
3087
- * 4. Add any important details from the sources that were missed
3088
- * 5. Maintain formal, objective tone appropriate for professional contexts
3089
- *
3090
- * If information is not found in the sources, clearly state this. Do not add inline citation markers - the system handles citations automatically.
3091
- */
3092
- AGENT_REVIEW_INSTRUCTION: string;
3093
- /**
3094
- * Disabled
3095
- * @description If True, skip agent draft review. The agent's answer is used directly. Does not affect direct LLM mode (non-agent) where QueryLLM is the primary answerer.
3096
- * @default true
3097
- */
3098
- DISABLED: boolean;
3099
3297
  /**
3100
3298
  * Max Char Size To Answer
3101
3299
  * @description Maximum character size to answer.
@@ -3382,6 +3580,59 @@ interface components {
3382
3580
  */
3383
3581
  HYBRID_RERANKER_WEIGHT: number;
3384
3582
  };
3583
+ /**
3584
+ * ReviewLLMConfig
3585
+ * @description Configuration for ReviewLLM - reviews agent draft answers against source material.
3586
+ */
3587
+ ReviewLLMConfig: {
3588
+ /**
3589
+ * Api Type
3590
+ * @description The inference type (local or remote).
3591
+ * @default remote
3592
+ * @enum {string}
3593
+ */
3594
+ API_TYPE: 'local' | 'remote';
3595
+ /**
3596
+ * Model Name
3597
+ * @description The model for reviewing agent draft answers. Defaults to reasoning model.
3598
+ * @default GPTOSS120@ARBICITY
3599
+ */
3600
+ MODEL_NAME: string;
3601
+ /**
3602
+ * System Instruction
3603
+ * @description System instruction for reviewing agent draft answers against source material.
3604
+ * @default You are reviewing a draft answer prepared by a research agent. The draft was written based on summaries, but you now have access to the full source material.
3605
+ *
3606
+ * Your task:
3607
+ * 1. Review the draft answer against the source material provided
3608
+ * 2. Verify claims are supported by the sources
3609
+ * 3. Correct any inaccuracies or unsupported statements
3610
+ * 4. Add any important details from the sources that were missed
3611
+ * 5. Maintain formal, objective tone appropriate for professional contexts
3612
+ *
3613
+ * If information is not found in the sources but the draft answer is based on web search results or general knowledge, preserve it and ensure it is clearly labelled as such. Only flag missing source support for claims that purport to come from the documents.
3614
+ * Do not add inline citation markers - the system handles citations automatically.
3615
+ */
3616
+ SYSTEM_INSTRUCTION: string;
3617
+ /**
3618
+ * Temperature
3619
+ * @description Temperature for review.
3620
+ * @default 0.1
3621
+ */
3622
+ TEMPERATURE: number;
3623
+ /**
3624
+ * Max Tokens
3625
+ * @description Maximum tokens for review response.
3626
+ * @default 5000
3627
+ */
3628
+ MAX_TOKENS: number;
3629
+ /**
3630
+ * Max Char Size To Answer
3631
+ * @description Maximum character size for review context.
3632
+ * @default 200000
3633
+ */
3634
+ MAX_CHAR_SIZE_TO_ANSWER: number;
3635
+ };
3385
3636
  /**
3386
3637
  * SSESchemas
3387
3638
  * @description Container for all SSE event schemas — drives OpenAPI component generation.
@@ -3395,8 +3646,19 @@ interface components {
3395
3646
  agent_step?: components['schemas']['AgentStepEvent'] | null;
3396
3647
  user_input_request?: components['schemas']['UserInputRequestEvent'] | null;
3397
3648
  token?: components['schemas']['TokenEvent'] | null;
3649
+ artifact?: components['schemas']['ArtifactEvent'] | null;
3398
3650
  error?: components['schemas']['ErrorEvent'] | null;
3399
3651
  };
3652
+ /** SSOSendVerificationEmailRequest */
3653
+ SSOSendVerificationEmailRequest: {
3654
+ /** Auth0 Id */
3655
+ auth0_id: string;
3656
+ };
3657
+ /** SSOSendVerificationEmailResponse */
3658
+ SSOSendVerificationEmailResponse: {
3659
+ /** Detail */
3660
+ detail: string;
3661
+ };
3400
3662
  /**
3401
3663
  * SSOStatusRequest
3402
3664
  * @description SSO status request - checks user registration state and syncs profile.
@@ -3484,6 +3746,11 @@ interface components {
3484
3746
  * @description Provides the assistant message ID so frontend can set up the response container.
3485
3747
  */
3486
3748
  StreamStartEvent: {
3749
+ /**
3750
+ * T
3751
+ * @description Seconds elapsed since stream start.
3752
+ */
3753
+ t?: number | null;
3487
3754
  /**
3488
3755
  * Type
3489
3756
  * @default stream_start
@@ -3566,8 +3833,8 @@ interface components {
3566
3833
  API_TYPE: 'local' | 'remote';
3567
3834
  /**
3568
3835
  * Model Name
3569
- * @description The name of the non-reasoning model to be used.
3570
- * @default Q3VL@ARBICITY
3836
+ * @description The model for conversation summarisation. Defaults to reasoning model.
3837
+ * @default GPTOSS120@ARBICITY
3571
3838
  */
3572
3839
  MODEL_NAME: string;
3573
3840
  /**
@@ -3777,6 +4044,11 @@ interface components {
3777
4044
  * @description A chunk of the assistant's answer text.
3778
4045
  */
3779
4046
  TokenEvent: {
4047
+ /**
4048
+ * T
4049
+ * @description Seconds elapsed since stream start.
4050
+ */
4051
+ t?: number | null;
3780
4052
  /**
3781
4053
  * Type
3782
4054
  * @default token
@@ -3919,6 +4191,11 @@ interface components {
3919
4191
  * "timeout_seconds": 300}
3920
4192
  */
3921
4193
  UserInputRequestEvent: {
4194
+ /**
4195
+ * T
4196
+ * @description Seconds elapsed since stream start.
4197
+ */
4198
+ t?: number | null;
3922
4199
  /**
3923
4200
  * Type
3924
4201
  * @default user_input_request
@@ -4087,6 +4364,71 @@ interface components {
4087
4364
  /** Detail */
4088
4365
  detail: string;
4089
4366
  };
4367
+ /**
4368
+ * VisionLLMConfig
4369
+ * @description Configuration for VisionLLM — visually inspects document pages.
4370
+ *
4371
+ * Used as a targeted fallback when text extraction (get_document_passages)
4372
+ * is insufficient, e.g. for figures, charts, tables, scanned pages.
4373
+ * The prompt is constructed dynamically per call (no system instruction).
4374
+ */
4375
+ VisionLLMConfig: {
4376
+ /**
4377
+ * Api Type
4378
+ * @description The inference type (local or remote).
4379
+ * @default remote
4380
+ * @enum {string}
4381
+ */
4382
+ API_TYPE: 'local' | 'remote';
4383
+ /**
4384
+ * Model Name
4385
+ * @description The vision-capable model name (must support image inputs).
4386
+ * @default Q3VL@ARBICITY
4387
+ */
4388
+ MODEL_NAME: string;
4389
+ /**
4390
+ * Temperature
4391
+ * @description Temperature for vision analysis.
4392
+ * @default 0.1
4393
+ */
4394
+ TEMPERATURE: number;
4395
+ /**
4396
+ * Max Tokens
4397
+ * @description Maximum tokens for the vision response.
4398
+ * @default 4000
4399
+ */
4400
+ MAX_TOKENS: number;
4401
+ /**
4402
+ * Max Pages Per Call
4403
+ * @description Maximum number of pages that can be inspected in a single call.
4404
+ * @default 5
4405
+ */
4406
+ MAX_PAGES_PER_CALL: number;
4407
+ /**
4408
+ * Image Max Dimension
4409
+ * @description Resize images so the longest side does not exceed this value (pixels).
4410
+ * @default 1280
4411
+ */
4412
+ IMAGE_MAX_DIMENSION: number;
4413
+ };
4414
+ /**
4415
+ * WebSearchConfig
4416
+ * @description Configuration for web search tools.
4417
+ */
4418
+ WebSearchConfig: {
4419
+ /**
4420
+ * Enabled
4421
+ * @description Enable web_search and read_urls tools.
4422
+ * @default true
4423
+ */
4424
+ ENABLED: boolean;
4425
+ /**
4426
+ * Save Sources
4427
+ * @description Save fetched webpages as workspace documents for citations.
4428
+ * @default true
4429
+ */
4430
+ SAVE_SOURCES: boolean;
4431
+ };
4090
4432
  /**
4091
4433
  * WebSocketSchemas
4092
4434
  * @description Container for all WebSocket message schemas.
@@ -4511,6 +4853,39 @@ interface operations {
4511
4853
  };
4512
4854
  };
4513
4855
  };
4856
+ sso_send_verification_email_api_user_sso_send_verification_email_post: {
4857
+ parameters: {
4858
+ query?: never;
4859
+ header?: never;
4860
+ path?: never;
4861
+ cookie?: never;
4862
+ };
4863
+ requestBody: {
4864
+ content: {
4865
+ 'application/json': components['schemas']['SSOSendVerificationEmailRequest'];
4866
+ };
4867
+ };
4868
+ responses: {
4869
+ /** @description Successful Response */
4870
+ 200: {
4871
+ headers: {
4872
+ [name: string]: unknown;
4873
+ };
4874
+ content: {
4875
+ 'application/json': components['schemas']['SSOSendVerificationEmailResponse'];
4876
+ };
4877
+ };
4878
+ /** @description Validation Error */
4879
+ 422: {
4880
+ headers: {
4881
+ [name: string]: unknown;
4882
+ };
4883
+ content: {
4884
+ 'application/json': components['schemas']['HTTPValidationError'];
4885
+ };
4886
+ };
4887
+ };
4888
+ };
4514
4889
  get_user_workspaces: {
4515
4890
  parameters: {
4516
4891
  query?: never;
@@ -5239,6 +5614,12 @@ interface operations {
5239
5614
  shared?: boolean;
5240
5615
  /** @description Configuration to use for processing */
5241
5616
  config_ext_id?: string | null;
5617
+ /** @description Parent document ID for version tracking */
5618
+ parent_ext_id?: string | null;
5619
+ /** @description Work product type: source, skill, memory, artifact */
5620
+ wp_type?: string | null;
5621
+ /** @description Tag to link the document to */
5622
+ tag_ext_id?: string | null;
5242
5623
  };
5243
5624
  header?: {
5244
5625
  'workspace-key'?: string;
@@ -5452,44 +5833,6 @@ interface operations {
5452
5833
  };
5453
5834
  };
5454
5835
  };
5455
- upload_email: {
5456
- parameters: {
5457
- query: {
5458
- user_email: string;
5459
- };
5460
- header?: never;
5461
- path?: never;
5462
- cookie?: never;
5463
- };
5464
- requestBody?: {
5465
- content: {
5466
- 'multipart/form-data': {
5467
- /** @description Multiple files to upload */
5468
- files: string[];
5469
- };
5470
- };
5471
- };
5472
- responses: {
5473
- /** @description Successful Response */
5474
- 200: {
5475
- headers: {
5476
- [name: string]: unknown;
5477
- };
5478
- content: {
5479
- 'application/json': unknown;
5480
- };
5481
- };
5482
- /** @description Validation Error */
5483
- 422: {
5484
- headers: {
5485
- [name: string]: unknown;
5486
- };
5487
- content: {
5488
- 'application/json': components['schemas']['HTTPValidationError'];
5489
- };
5490
- };
5491
- };
5492
- };
5493
5836
  get_message_details: {
5494
5837
  parameters: {
5495
5838
  query?: never;