@workglow/ai 0.2.32 → 0.2.34
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/browser.js +849 -275
- package/dist/browser.js.map +9 -6
- package/dist/bun.js +849 -275
- package/dist/bun.js.map +9 -6
- package/dist/node.js +849 -275
- package/dist/node.js.map +9 -6
- package/dist/provider-utils/CloudProviderClient.d.ts +3 -9
- package/dist/provider-utils/CloudProviderClient.d.ts.map +1 -1
- package/dist/provider-utils.js +1 -10
- package/dist/provider-utils.js.map +3 -3
- package/dist/task/AiChatTask.d.ts +9 -2
- package/dist/task/AiChatTask.d.ts.map +1 -1
- package/dist/task/AiChatWithKbTask.d.ts +710 -0
- package/dist/task/AiChatWithKbTask.d.ts.map +1 -0
- package/dist/task/HierarchicalChunkerTask.d.ts.map +1 -1
- package/dist/task/KbSearchTask.d.ts +79 -0
- package/dist/task/KbSearchTask.d.ts.map +1 -0
- package/dist/task/base/responseFormat.d.ts +23 -0
- package/dist/task/base/responseFormat.d.ts.map +1 -0
- package/dist/task/index.d.ts +6 -1
- package/dist/task/index.d.ts.map +1 -1
- package/package.json +12 -12
package/dist/bun.js
CHANGED
|
@@ -1014,6 +1014,28 @@ var TypeCategory = {
|
|
|
1014
1014
|
description: "Classification category with label and score"
|
|
1015
1015
|
};
|
|
1016
1016
|
|
|
1017
|
+
// src/task/base/responseFormat.ts
|
|
1018
|
+
function buildResponseFormatAddendum(format) {
|
|
1019
|
+
if (format === "markdown") {
|
|
1020
|
+
return [
|
|
1021
|
+
"Format your reply as GitHub-flavored Markdown:",
|
|
1022
|
+
"- Use headings, bullet lists, numbered lists, tables, and fenced code blocks where they help readability.",
|
|
1023
|
+
"- Use **bold** and *italic* sparingly for emphasis.",
|
|
1024
|
+
"- Keep paragraphs short."
|
|
1025
|
+
].join(`
|
|
1026
|
+
`);
|
|
1027
|
+
}
|
|
1028
|
+
return "";
|
|
1029
|
+
}
|
|
1030
|
+
var KB_INLINE_CITATION_DIRECTIVE = [
|
|
1031
|
+
"When you reference information from the context above, cite it inline as a",
|
|
1032
|
+
"Markdown link: write the natural anchor text in brackets followed by the",
|
|
1033
|
+
"URL in parentheses, e.g. `[the dashboard](https://workglow.com/help/dashboard)`.",
|
|
1034
|
+
"Do not use numeric `[1]`-style citations. If a context entry has no URL,",
|
|
1035
|
+
"describe it in prose without a link."
|
|
1036
|
+
].join(`
|
|
1037
|
+
`);
|
|
1038
|
+
|
|
1017
1039
|
// src/task/base/StreamingAiTask.ts
|
|
1018
1040
|
import { getStreamingPorts, TaskConfigurationError as TaskConfigurationError3 } from "@workglow/task-graph";
|
|
1019
1041
|
|
|
@@ -1430,6 +1452,14 @@ var AiChatInputSchema = {
|
|
|
1430
1452
|
minimum: 1,
|
|
1431
1453
|
default: 100,
|
|
1432
1454
|
"x-ui-group": "Configuration"
|
|
1455
|
+
},
|
|
1456
|
+
responseFormat: {
|
|
1457
|
+
type: "string",
|
|
1458
|
+
enum: ["text", "markdown"],
|
|
1459
|
+
default: "text",
|
|
1460
|
+
title: "Response format",
|
|
1461
|
+
description: "How the model is instructed to format replies. 'text' = plain text. " + "'markdown' = GitHub-flavored Markdown.",
|
|
1462
|
+
"x-ui-group": "Configuration"
|
|
1433
1463
|
}
|
|
1434
1464
|
},
|
|
1435
1465
|
required: ["model", "prompt"],
|
|
@@ -1441,7 +1471,7 @@ var AiChatOutputSchema = {
|
|
|
1441
1471
|
text: {
|
|
1442
1472
|
type: "string",
|
|
1443
1473
|
title: "Text",
|
|
1444
|
-
description: "
|
|
1474
|
+
description: "Full streamed transcript across all assistant turns",
|
|
1445
1475
|
"x-stream": "append"
|
|
1446
1476
|
},
|
|
1447
1477
|
messages: {
|
|
@@ -1504,8 +1534,15 @@ class AiChatTask extends StreamingAiTask {
|
|
|
1504
1534
|
}
|
|
1505
1535
|
const connector = resolveHumanConnector(context);
|
|
1506
1536
|
const history = [];
|
|
1507
|
-
|
|
1508
|
-
|
|
1537
|
+
const addendum = buildResponseFormatAddendum(input.responseFormat);
|
|
1538
|
+
const composedSystemPrompt = [input.systemPrompt ?? "", addendum].filter((s) => s.length > 0).join(`
|
|
1539
|
+
|
|
1540
|
+
`);
|
|
1541
|
+
if (composedSystemPrompt.length > 0) {
|
|
1542
|
+
history.push({
|
|
1543
|
+
role: "system",
|
|
1544
|
+
content: [{ type: "text", text: composedSystemPrompt }]
|
|
1545
|
+
});
|
|
1509
1546
|
}
|
|
1510
1547
|
const firstUserBlocks = typeof input.prompt === "string" ? [{ type: "text", text: input.prompt }] : input.prompt;
|
|
1511
1548
|
history.push({ role: "user", content: firstUserBlocks });
|
|
@@ -1519,13 +1556,12 @@ class AiChatTask extends StreamingAiTask {
|
|
|
1519
1556
|
await getAiProviderRegistry().disposeSession(model.provider, sessionId);
|
|
1520
1557
|
});
|
|
1521
1558
|
}
|
|
1559
|
+
let completedTurns = 0;
|
|
1522
1560
|
yield {
|
|
1523
1561
|
type: "object-delta",
|
|
1524
1562
|
port: "messages",
|
|
1525
1563
|
objectDelta: [...history]
|
|
1526
1564
|
};
|
|
1527
|
-
let iterations = 0;
|
|
1528
|
-
let lastAssistantText = "";
|
|
1529
1565
|
for (let turn = 0;turn < maxIterations; turn++) {
|
|
1530
1566
|
const perTurnInput = { ...input, messages: [...history] };
|
|
1531
1567
|
const turnJobInput = await this.getJobInput(perTurnInput);
|
|
@@ -1537,17 +1573,487 @@ class AiChatTask extends StreamingAiTask {
|
|
|
1537
1573
|
...event,
|
|
1538
1574
|
port: event.port ?? "text"
|
|
1539
1575
|
};
|
|
1540
|
-
} else if (event.type === "finish") {} else {
|
|
1541
|
-
yield event;
|
|
1576
|
+
} else if (event.type === "finish") {} else {
|
|
1577
|
+
yield event;
|
|
1578
|
+
}
|
|
1579
|
+
}
|
|
1580
|
+
const assistantMsg = {
|
|
1581
|
+
role: "assistant",
|
|
1582
|
+
content: [{ type: "text", text: assistantText }]
|
|
1583
|
+
};
|
|
1584
|
+
history.push(assistantMsg);
|
|
1585
|
+
completedTurns = turn + 1;
|
|
1586
|
+
yield {
|
|
1587
|
+
type: "object-delta",
|
|
1588
|
+
port: "messages",
|
|
1589
|
+
objectDelta: [assistantMsg]
|
|
1590
|
+
};
|
|
1591
|
+
const request = {
|
|
1592
|
+
requestId: crypto.randomUUID(),
|
|
1593
|
+
targetHumanId: "default",
|
|
1594
|
+
kind: "elicit",
|
|
1595
|
+
message: "",
|
|
1596
|
+
contentSchema: chatConnectorContentSchema,
|
|
1597
|
+
contentData: undefined,
|
|
1598
|
+
expectsResponse: true,
|
|
1599
|
+
mode: "multi-turn",
|
|
1600
|
+
metadata: { iteration: turn, taskId: this.id }
|
|
1601
|
+
};
|
|
1602
|
+
const response = await connector.send(request, context.signal);
|
|
1603
|
+
if (response.action === "cancel" || response.action === "decline")
|
|
1604
|
+
break;
|
|
1605
|
+
const raw = response.content?.content;
|
|
1606
|
+
let userContent;
|
|
1607
|
+
if (typeof raw === "string") {
|
|
1608
|
+
const text = raw.trim();
|
|
1609
|
+
userContent = text.length > 0 ? [{ type: "text", text: raw }] : [];
|
|
1610
|
+
} else if (Array.isArray(raw)) {
|
|
1611
|
+
userContent = raw;
|
|
1612
|
+
} else {
|
|
1613
|
+
userContent = [];
|
|
1614
|
+
}
|
|
1615
|
+
if (userContent.length === 0)
|
|
1616
|
+
break;
|
|
1617
|
+
const userMsg = { role: "user", content: userContent };
|
|
1618
|
+
history.push(userMsg);
|
|
1619
|
+
yield {
|
|
1620
|
+
type: "object-delta",
|
|
1621
|
+
port: "messages",
|
|
1622
|
+
objectDelta: [userMsg]
|
|
1623
|
+
};
|
|
1624
|
+
}
|
|
1625
|
+
yield {
|
|
1626
|
+
type: "finish",
|
|
1627
|
+
data: { iterations: completedTurns }
|
|
1628
|
+
};
|
|
1629
|
+
}
|
|
1630
|
+
}
|
|
1631
|
+
|
|
1632
|
+
// src/task/AiChatWithKbTask.ts
|
|
1633
|
+
import { getKnowledgeBase, slugifyHeading } from "@workglow/knowledge-base";
|
|
1634
|
+
import { TaskConfigSchema as TaskConfigSchema3 } from "@workglow/task-graph";
|
|
1635
|
+
import { resolveHumanConnector as resolveHumanConnector2 } from "@workglow/util";
|
|
1636
|
+
|
|
1637
|
+
// src/task/KbSearchTask.ts
|
|
1638
|
+
import { TypeKnowledgeBase } from "@workglow/knowledge-base";
|
|
1639
|
+
import { CreateWorkflow, Task as Task2, Workflow } from "@workglow/task-graph";
|
|
1640
|
+
var inputSchema = {
|
|
1641
|
+
type: "object",
|
|
1642
|
+
properties: {
|
|
1643
|
+
knowledgeBase: TypeKnowledgeBase({
|
|
1644
|
+
title: "Knowledge Base",
|
|
1645
|
+
description: "The knowledge base instance to search in"
|
|
1646
|
+
}),
|
|
1647
|
+
query: {
|
|
1648
|
+
type: "string",
|
|
1649
|
+
title: "Query",
|
|
1650
|
+
description: "Search query (the KB's onSearch handles embedding internally)"
|
|
1651
|
+
},
|
|
1652
|
+
topK: {
|
|
1653
|
+
type: "number",
|
|
1654
|
+
title: "Top K",
|
|
1655
|
+
description: "Number of top results to return",
|
|
1656
|
+
minimum: 1,
|
|
1657
|
+
default: 5
|
|
1658
|
+
},
|
|
1659
|
+
filter: {
|
|
1660
|
+
type: "object",
|
|
1661
|
+
title: "Metadata Filter",
|
|
1662
|
+
description: "Filter results by metadata fields"
|
|
1663
|
+
}
|
|
1664
|
+
},
|
|
1665
|
+
required: ["knowledgeBase", "query"],
|
|
1666
|
+
additionalProperties: false
|
|
1667
|
+
};
|
|
1668
|
+
var outputSchema = {
|
|
1669
|
+
type: "object",
|
|
1670
|
+
properties: {
|
|
1671
|
+
results: {
|
|
1672
|
+
type: "array",
|
|
1673
|
+
items: {
|
|
1674
|
+
type: "object",
|
|
1675
|
+
title: "Chunk Search Result",
|
|
1676
|
+
description: "A single chunk match with score and metadata"
|
|
1677
|
+
},
|
|
1678
|
+
title: "Results",
|
|
1679
|
+
description: "Matching chunks in score-desc order"
|
|
1680
|
+
},
|
|
1681
|
+
count: {
|
|
1682
|
+
type: "number",
|
|
1683
|
+
title: "Count",
|
|
1684
|
+
description: "Number of results returned"
|
|
1685
|
+
}
|
|
1686
|
+
},
|
|
1687
|
+
required: ["results", "count"],
|
|
1688
|
+
additionalProperties: false
|
|
1689
|
+
};
|
|
1690
|
+
|
|
1691
|
+
class KbSearchTask extends Task2 {
|
|
1692
|
+
static type = "KbSearchTask";
|
|
1693
|
+
static category = "RAG";
|
|
1694
|
+
static title = "KB Search";
|
|
1695
|
+
static description = "Search a knowledge base for chunks matching a text query. Wraps the KB's `search` method (which embeds and retrieves via the KB's onSearch callback).";
|
|
1696
|
+
static cacheable = true;
|
|
1697
|
+
static inputSchema() {
|
|
1698
|
+
return inputSchema;
|
|
1699
|
+
}
|
|
1700
|
+
static outputSchema() {
|
|
1701
|
+
return outputSchema;
|
|
1702
|
+
}
|
|
1703
|
+
async execute(input, _context) {
|
|
1704
|
+
const { knowledgeBase, query, topK = 5, filter } = input;
|
|
1705
|
+
const kb = knowledgeBase;
|
|
1706
|
+
const results = await kb.search(query, { topK, filter });
|
|
1707
|
+
return { results, count: results.length };
|
|
1708
|
+
}
|
|
1709
|
+
}
|
|
1710
|
+
var kbSearch = (input, config) => {
|
|
1711
|
+
return new KbSearchTask(config).run(input);
|
|
1712
|
+
};
|
|
1713
|
+
Workflow.prototype.kbSearch = CreateWorkflow(KbSearchTask);
|
|
1714
|
+
|
|
1715
|
+
// src/task/AiChatWithKbTask.ts
|
|
1716
|
+
var modelSchema2 = TypeModel("model:AiChatWithKbTask");
|
|
1717
|
+
var chatChunkReferenceSchema = {
|
|
1718
|
+
type: "object",
|
|
1719
|
+
properties: {
|
|
1720
|
+
kbId: { type: "string" },
|
|
1721
|
+
kbLabel: { type: "string" },
|
|
1722
|
+
title: { type: "string" },
|
|
1723
|
+
url: { type: "string" },
|
|
1724
|
+
snippet: { type: "string" },
|
|
1725
|
+
score: { type: "number" },
|
|
1726
|
+
index: { type: "number" }
|
|
1727
|
+
},
|
|
1728
|
+
required: ["kbId", "kbLabel", "title", "snippet", "score", "index"]
|
|
1729
|
+
};
|
|
1730
|
+
var chatConnectorContentSchema2 = {
|
|
1731
|
+
type: "object",
|
|
1732
|
+
properties: {
|
|
1733
|
+
content: {
|
|
1734
|
+
type: "string",
|
|
1735
|
+
title: "Message",
|
|
1736
|
+
description: "Your reply (leave blank to end the conversation)"
|
|
1737
|
+
}
|
|
1738
|
+
},
|
|
1739
|
+
additionalProperties: false
|
|
1740
|
+
};
|
|
1741
|
+
var AiChatWithKbInputSchema = {
|
|
1742
|
+
type: "object",
|
|
1743
|
+
properties: {
|
|
1744
|
+
model: modelSchema2,
|
|
1745
|
+
prompt: {
|
|
1746
|
+
oneOf: [
|
|
1747
|
+
{ type: "string", title: "Prompt", description: "The initial user message" },
|
|
1748
|
+
{
|
|
1749
|
+
type: "array",
|
|
1750
|
+
title: "Prompt",
|
|
1751
|
+
description: "The initial user message as structured content blocks",
|
|
1752
|
+
items: ContentBlockSchema
|
|
1753
|
+
}
|
|
1754
|
+
],
|
|
1755
|
+
title: "Prompt",
|
|
1756
|
+
description: "The first user message to start the conversation"
|
|
1757
|
+
},
|
|
1758
|
+
messages: {
|
|
1759
|
+
type: "array",
|
|
1760
|
+
title: "Messages",
|
|
1761
|
+
description: "Conversation history (managed internally by the chat loop; not a user-facing input)",
|
|
1762
|
+
items: ChatMessageSchema,
|
|
1763
|
+
"x-ui-hidden": true
|
|
1764
|
+
},
|
|
1765
|
+
systemPrompt: {
|
|
1766
|
+
type: "string",
|
|
1767
|
+
title: "System Prompt",
|
|
1768
|
+
description: "Optional system instructions for the model"
|
|
1769
|
+
},
|
|
1770
|
+
maxTokens: {
|
|
1771
|
+
type: "number",
|
|
1772
|
+
title: "Max Tokens",
|
|
1773
|
+
description: "Per-turn token limit",
|
|
1774
|
+
minimum: 1,
|
|
1775
|
+
"x-ui-group": "Configuration"
|
|
1776
|
+
},
|
|
1777
|
+
temperature: {
|
|
1778
|
+
type: "number",
|
|
1779
|
+
title: "Temperature",
|
|
1780
|
+
description: "Sampling temperature",
|
|
1781
|
+
minimum: 0,
|
|
1782
|
+
maximum: 2,
|
|
1783
|
+
"x-ui-group": "Configuration"
|
|
1784
|
+
},
|
|
1785
|
+
maxIterations: {
|
|
1786
|
+
type: "number",
|
|
1787
|
+
title: "Max Iterations",
|
|
1788
|
+
description: "Safety cap on conversation turns",
|
|
1789
|
+
minimum: 1,
|
|
1790
|
+
default: 100,
|
|
1791
|
+
"x-ui-group": "Configuration"
|
|
1792
|
+
},
|
|
1793
|
+
knowledgeBaseIds: {
|
|
1794
|
+
type: "array",
|
|
1795
|
+
title: "Knowledge Base IDs",
|
|
1796
|
+
description: "Knowledge bases to retrieve from on each turn",
|
|
1797
|
+
items: { type: "string" }
|
|
1798
|
+
},
|
|
1799
|
+
topKPerKb: {
|
|
1800
|
+
type: "number",
|
|
1801
|
+
title: "Top K per KB",
|
|
1802
|
+
description: "Top results per KB before threshold filtering",
|
|
1803
|
+
minimum: 1,
|
|
1804
|
+
default: 4,
|
|
1805
|
+
"x-ui-group": "Configuration"
|
|
1806
|
+
},
|
|
1807
|
+
minScore: {
|
|
1808
|
+
type: "number",
|
|
1809
|
+
title: "Min score",
|
|
1810
|
+
description: "Score floor for a chunk to count as a useful match",
|
|
1811
|
+
minimum: 0,
|
|
1812
|
+
maximum: 1,
|
|
1813
|
+
default: 0.3,
|
|
1814
|
+
"x-ui-group": "Configuration"
|
|
1815
|
+
},
|
|
1816
|
+
maxReferences: {
|
|
1817
|
+
type: "number",
|
|
1818
|
+
title: "Max references",
|
|
1819
|
+
description: "Cap on the chunk references emitted per turn",
|
|
1820
|
+
minimum: 1,
|
|
1821
|
+
default: 6,
|
|
1822
|
+
"x-ui-group": "Configuration"
|
|
1823
|
+
},
|
|
1824
|
+
noMatchReply: {
|
|
1825
|
+
type: "string",
|
|
1826
|
+
title: "No-match reply",
|
|
1827
|
+
description: "When set and zero chunks match: emit this verbatim and skip the provider",
|
|
1828
|
+
"x-ui-group": "Configuration"
|
|
1829
|
+
},
|
|
1830
|
+
noMatchReferences: {
|
|
1831
|
+
type: "array",
|
|
1832
|
+
title: "No-match references",
|
|
1833
|
+
description: "When set and zero chunks match: emit these verbatim on the references port",
|
|
1834
|
+
items: chatChunkReferenceSchema,
|
|
1835
|
+
"x-ui-group": "Configuration"
|
|
1836
|
+
},
|
|
1837
|
+
responseFormat: {
|
|
1838
|
+
type: "string",
|
|
1839
|
+
enum: ["text", "markdown"],
|
|
1840
|
+
default: "text",
|
|
1841
|
+
title: "Response format",
|
|
1842
|
+
description: "How the model is instructed to format replies. 'text' = plain text. " + "'markdown' = GitHub-flavored Markdown; citations are emitted as inline " + "[anchor](url) links instead of [N] numbers.",
|
|
1843
|
+
"x-ui-group": "Configuration"
|
|
1844
|
+
}
|
|
1845
|
+
},
|
|
1846
|
+
required: ["model", "prompt", "knowledgeBaseIds"],
|
|
1847
|
+
additionalProperties: false
|
|
1848
|
+
};
|
|
1849
|
+
var AiChatWithKbOutputSchema = {
|
|
1850
|
+
type: "object",
|
|
1851
|
+
properties: {
|
|
1852
|
+
text: {
|
|
1853
|
+
type: "string",
|
|
1854
|
+
title: "Text",
|
|
1855
|
+
description: "Full streamed transcript across all assistant turns",
|
|
1856
|
+
"x-stream": "append"
|
|
1857
|
+
},
|
|
1858
|
+
messages: {
|
|
1859
|
+
type: "array",
|
|
1860
|
+
title: "Messages",
|
|
1861
|
+
description: "Full conversation history",
|
|
1862
|
+
items: ChatMessageSchema,
|
|
1863
|
+
"x-stream": "object"
|
|
1864
|
+
},
|
|
1865
|
+
iterations: {
|
|
1866
|
+
type: "number",
|
|
1867
|
+
title: "Iterations",
|
|
1868
|
+
description: "Number of completed turns"
|
|
1869
|
+
},
|
|
1870
|
+
references: {
|
|
1871
|
+
type: "array",
|
|
1872
|
+
title: "References",
|
|
1873
|
+
description: "Per-chunk citation references emitted each turn (one entry per surviving chunk; not deduped)",
|
|
1874
|
+
items: chatChunkReferenceSchema,
|
|
1875
|
+
"x-stream": "object"
|
|
1876
|
+
}
|
|
1877
|
+
},
|
|
1878
|
+
required: ["text", "messages", "iterations", "references"],
|
|
1879
|
+
additionalProperties: false
|
|
1880
|
+
};
|
|
1881
|
+
|
|
1882
|
+
class AiChatWithKbTask extends StreamingAiTask {
|
|
1883
|
+
static type = "AiChatWithKbTask";
|
|
1884
|
+
static streamingPhaseLabel = "Replying";
|
|
1885
|
+
static category = "AI Chat";
|
|
1886
|
+
static title = "AI Chat (Knowledge Base)";
|
|
1887
|
+
static description = "Multi-turn chat grounded in one or more knowledge bases. Retrieves on every user turn, injects numbered context, and emits structured per-chunk citation references.";
|
|
1888
|
+
static cacheable = false;
|
|
1889
|
+
static configSchema() {
|
|
1890
|
+
return {
|
|
1891
|
+
type: "object",
|
|
1892
|
+
properties: {
|
|
1893
|
+
...TaskConfigSchema3["properties"]
|
|
1894
|
+
},
|
|
1895
|
+
additionalProperties: false
|
|
1896
|
+
};
|
|
1897
|
+
}
|
|
1898
|
+
static inputSchema() {
|
|
1899
|
+
return AiChatWithKbInputSchema;
|
|
1900
|
+
}
|
|
1901
|
+
static outputSchema() {
|
|
1902
|
+
return AiChatWithKbOutputSchema;
|
|
1903
|
+
}
|
|
1904
|
+
_sessionId;
|
|
1905
|
+
async getJobInput(input) {
|
|
1906
|
+
const model = input.model;
|
|
1907
|
+
if (!this._sessionId) {
|
|
1908
|
+
this._sessionId = getAiProviderRegistry().createSession(model.provider, model);
|
|
1909
|
+
}
|
|
1910
|
+
return {
|
|
1911
|
+
taskType: "AiChatWithKbTask",
|
|
1912
|
+
aiProvider: model.provider,
|
|
1913
|
+
taskInput: input,
|
|
1914
|
+
sessionId: this._sessionId
|
|
1915
|
+
};
|
|
1916
|
+
}
|
|
1917
|
+
async* executeStream(input, context) {
|
|
1918
|
+
this._sessionId = undefined;
|
|
1919
|
+
const model = input.model;
|
|
1920
|
+
if (!model || typeof model !== "object") {
|
|
1921
|
+
throw new Error("AiChatWithKbTask: model was not resolved to ModelConfig");
|
|
1922
|
+
}
|
|
1923
|
+
const connector = resolveHumanConnector2(context);
|
|
1924
|
+
const history = [];
|
|
1925
|
+
if (input.systemPrompt) {
|
|
1926
|
+
history.push({ role: "system", content: [{ type: "text", text: input.systemPrompt }] });
|
|
1927
|
+
}
|
|
1928
|
+
const firstUserBlocks = typeof input.prompt === "string" ? [{ type: "text", text: input.prompt }] : input.prompt;
|
|
1929
|
+
history.push({ role: "user", content: firstUserBlocks });
|
|
1930
|
+
const workingInput = { ...input, messages: history };
|
|
1931
|
+
await this.getJobInput(workingInput);
|
|
1932
|
+
const maxIterations = input.maxIterations ?? 100;
|
|
1933
|
+
if (context.resourceScope && this._sessionId) {
|
|
1934
|
+
const sessionId = this._sessionId;
|
|
1935
|
+
context.resourceScope.register(`ai:session:${sessionId}`, async () => {
|
|
1936
|
+
await getAiProviderRegistry().disposeSession(model.provider, sessionId);
|
|
1937
|
+
});
|
|
1938
|
+
}
|
|
1939
|
+
yield {
|
|
1940
|
+
type: "object-delta",
|
|
1941
|
+
port: "messages",
|
|
1942
|
+
objectDelta: [...history]
|
|
1943
|
+
};
|
|
1944
|
+
const topK = input.topKPerKb ?? 4;
|
|
1945
|
+
const minScore = input.minScore ?? 0.3;
|
|
1946
|
+
const maxRefs = input.maxReferences ?? 6;
|
|
1947
|
+
let completedTurns = 0;
|
|
1948
|
+
let lastNonEmptyRefs = [];
|
|
1949
|
+
for (let turn = 0;turn < maxIterations; turn++) {
|
|
1950
|
+
const lastUserText = extractLastUserText(history);
|
|
1951
|
+
let perKbResults = [];
|
|
1952
|
+
if (lastUserText.length > 0) {
|
|
1953
|
+
perKbResults = await Promise.all((input.knowledgeBaseIds ?? []).map(async (kbId) => {
|
|
1954
|
+
const kb = getKnowledgeBase(kbId);
|
|
1955
|
+
if (!kb) {
|
|
1956
|
+
console.warn(`[AiChatWithKbTask] knowledge base "${kbId}" not registered`);
|
|
1957
|
+
return { kbId, kbLabel: kbId, kb: undefined, results: [] };
|
|
1958
|
+
}
|
|
1959
|
+
const search = context.own(new KbSearchTask);
|
|
1960
|
+
const out = await search.run({
|
|
1961
|
+
knowledgeBase: kb,
|
|
1962
|
+
query: lastUserText,
|
|
1963
|
+
topK
|
|
1964
|
+
});
|
|
1965
|
+
return {
|
|
1966
|
+
kbId,
|
|
1967
|
+
kbLabel: kb.title || kbId,
|
|
1968
|
+
kb,
|
|
1969
|
+
results: out.results
|
|
1970
|
+
};
|
|
1971
|
+
}));
|
|
1972
|
+
}
|
|
1973
|
+
const allChunks = perKbResults.flatMap(({ kbId, kbLabel, kb, results }) => results.filter((r) => r.score >= minScore).map((r) => ({ kbId, kbLabel, kb, r }))).sort((a, b) => b.r.score - a.r.score).slice(0, maxRefs);
|
|
1974
|
+
const docUrlKey = (kbId, docId) => `${kbId}:${docId}`;
|
|
1975
|
+
const docUrls = new Map;
|
|
1976
|
+
const docFetches = [];
|
|
1977
|
+
for (const { kbId, kb, r } of allChunks) {
|
|
1978
|
+
if (!kb)
|
|
1979
|
+
continue;
|
|
1980
|
+
const key = docUrlKey(kbId, r.doc_id);
|
|
1981
|
+
if (docUrls.has(key))
|
|
1982
|
+
continue;
|
|
1983
|
+
docUrls.set(key, undefined);
|
|
1984
|
+
docFetches.push(kb.getDocument(r.doc_id).then((doc) => {
|
|
1985
|
+
const md = doc?.metadata ?? {};
|
|
1986
|
+
const url = typeof md.url === "string" ? md.url : undefined;
|
|
1987
|
+
docUrls.set(key, url);
|
|
1988
|
+
}).catch(() => {}));
|
|
1989
|
+
}
|
|
1990
|
+
await Promise.all(docFetches);
|
|
1991
|
+
const refs = allChunks.map((entry, i) => buildChunkReference({
|
|
1992
|
+
index: i + 1,
|
|
1993
|
+
kbId: entry.kbId,
|
|
1994
|
+
kbLabel: entry.kbLabel,
|
|
1995
|
+
result: entry.r,
|
|
1996
|
+
url: docUrls.get(docUrlKey(entry.kbId, entry.r.doc_id))
|
|
1997
|
+
}));
|
|
1998
|
+
const effectiveRefs = refs.length > 0 ? refs : lastNonEmptyRefs;
|
|
1999
|
+
if (refs.length > 0) {
|
|
2000
|
+
lastNonEmptyRefs = refs;
|
|
2001
|
+
}
|
|
2002
|
+
const emitted = effectiveRefs.length > 0 ? [...effectiveRefs] : input.noMatchReferences ?? [];
|
|
2003
|
+
yield {
|
|
2004
|
+
type: "object-delta",
|
|
2005
|
+
port: "references",
|
|
2006
|
+
objectDelta: emitted
|
|
2007
|
+
};
|
|
2008
|
+
let assistantText = "";
|
|
2009
|
+
if (effectiveRefs.length === 0 && input.noMatchReply) {
|
|
2010
|
+
yield {
|
|
2011
|
+
type: "text-delta",
|
|
2012
|
+
port: "text",
|
|
2013
|
+
textDelta: input.noMatchReply
|
|
2014
|
+
};
|
|
2015
|
+
assistantText = input.noMatchReply;
|
|
2016
|
+
} else {
|
|
2017
|
+
const addendum = buildResponseFormatAddendum(input.responseFormat);
|
|
2018
|
+
const directive = input.responseFormat === "markdown" ? KB_INLINE_CITATION_DIRECTIVE : "";
|
|
2019
|
+
const userSystemPrompt = input.systemPrompt ?? "";
|
|
2020
|
+
const turnSystemPrompt = [
|
|
2021
|
+
userSystemPrompt,
|
|
2022
|
+
addendum,
|
|
2023
|
+
directive,
|
|
2024
|
+
"--- Context ---",
|
|
2025
|
+
formatChunksForPrompt(effectiveRefs, input.responseFormat)
|
|
2026
|
+
].filter((s) => s.length > 0).join(`
|
|
2027
|
+
|
|
2028
|
+
`);
|
|
2029
|
+
const perTurnInput = {
|
|
2030
|
+
...input,
|
|
2031
|
+
messages: [
|
|
2032
|
+
{ role: "system", content: [{ type: "text", text: turnSystemPrompt }] },
|
|
2033
|
+
...history.filter((m) => m.role !== "system")
|
|
2034
|
+
],
|
|
2035
|
+
systemPrompt: turnSystemPrompt
|
|
2036
|
+
};
|
|
2037
|
+
const turnJobInput = await this.getJobInput(perTurnInput);
|
|
2038
|
+
const strategy = getAiProviderRegistry().getStrategy(model);
|
|
2039
|
+
for await (const event of strategy.executeStream(turnJobInput, context, this.runConfig.runnerId)) {
|
|
2040
|
+
if (event.type === "text-delta") {
|
|
2041
|
+
assistantText += event.textDelta;
|
|
2042
|
+
yield {
|
|
2043
|
+
...event,
|
|
2044
|
+
port: event.port ?? "text"
|
|
2045
|
+
};
|
|
2046
|
+
} else if (event.type === "finish") {} else {
|
|
2047
|
+
yield event;
|
|
2048
|
+
}
|
|
1542
2049
|
}
|
|
1543
2050
|
}
|
|
1544
|
-
iterations++;
|
|
1545
|
-
lastAssistantText = assistantText;
|
|
1546
2051
|
const assistantMsg = {
|
|
1547
2052
|
role: "assistant",
|
|
1548
2053
|
content: [{ type: "text", text: assistantText }]
|
|
1549
2054
|
};
|
|
1550
2055
|
history.push(assistantMsg);
|
|
2056
|
+
completedTurns = turn + 1;
|
|
1551
2057
|
yield {
|
|
1552
2058
|
type: "object-delta",
|
|
1553
2059
|
port: "messages",
|
|
@@ -1558,7 +2064,7 @@ class AiChatTask extends StreamingAiTask {
|
|
|
1558
2064
|
targetHumanId: "default",
|
|
1559
2065
|
kind: "elicit",
|
|
1560
2066
|
message: "",
|
|
1561
|
-
contentSchema:
|
|
2067
|
+
contentSchema: chatConnectorContentSchema2,
|
|
1562
2068
|
contentData: undefined,
|
|
1563
2069
|
expectsResponse: true,
|
|
1564
2070
|
mode: "multi-turn",
|
|
@@ -1589,26 +2095,68 @@ class AiChatTask extends StreamingAiTask {
|
|
|
1589
2095
|
}
|
|
1590
2096
|
yield {
|
|
1591
2097
|
type: "finish",
|
|
1592
|
-
data: {
|
|
1593
|
-
text: lastAssistantText,
|
|
1594
|
-
messages: [...history],
|
|
1595
|
-
iterations
|
|
1596
|
-
}
|
|
2098
|
+
data: { iterations: completedTurns }
|
|
1597
2099
|
};
|
|
1598
2100
|
}
|
|
1599
|
-
|
|
1600
|
-
|
|
1601
|
-
|
|
1602
|
-
|
|
1603
|
-
|
|
1604
|
-
|
|
2101
|
+
}
|
|
2102
|
+
function extractLastUserText(messages) {
|
|
2103
|
+
for (let i = messages.length - 1;i >= 0; i--) {
|
|
2104
|
+
const m = messages[i];
|
|
2105
|
+
if (!m || m.role !== "user")
|
|
2106
|
+
continue;
|
|
2107
|
+
const text = m.content.map((b) => b.type === "text" ? b.text : "").join(" ").trim();
|
|
2108
|
+
if (text.length > 0)
|
|
2109
|
+
return text;
|
|
2110
|
+
}
|
|
2111
|
+
return "";
|
|
2112
|
+
}
|
|
2113
|
+
function buildChunkReference(args) {
|
|
2114
|
+
const md = args.result.metadata ?? {};
|
|
2115
|
+
const title = md.doc_title ?? md.title ?? args.result.doc_id ?? "Untitled";
|
|
2116
|
+
const baseUrl = args.url ?? md.url ?? undefined;
|
|
2117
|
+
const url = withSectionAnchor(baseUrl, md.sectionTitles);
|
|
2118
|
+
const text = md.text ?? "";
|
|
2119
|
+
const snippet = text.length > 150 ? text.slice(0, 150).trim() + "\u2026" : text;
|
|
2120
|
+
return {
|
|
2121
|
+
index: args.index,
|
|
2122
|
+
kbId: args.kbId,
|
|
2123
|
+
kbLabel: args.kbLabel,
|
|
2124
|
+
title,
|
|
2125
|
+
url,
|
|
2126
|
+
snippet,
|
|
2127
|
+
score: args.result.score
|
|
2128
|
+
};
|
|
2129
|
+
}
|
|
2130
|
+
function withSectionAnchor(url, sectionTitles) {
|
|
2131
|
+
if (!url)
|
|
2132
|
+
return url;
|
|
2133
|
+
if (url.includes("#"))
|
|
2134
|
+
return url;
|
|
2135
|
+
if (!sectionTitles || sectionTitles.length === 0)
|
|
2136
|
+
return url;
|
|
2137
|
+
const deepest = sectionTitles[sectionTitles.length - 1];
|
|
2138
|
+
if (typeof deepest !== "string")
|
|
2139
|
+
return url;
|
|
2140
|
+
const slug = slugifyHeading(deepest);
|
|
2141
|
+
if (slug.length === 0)
|
|
2142
|
+
return url;
|
|
2143
|
+
return `${url}#${slug}`;
|
|
2144
|
+
}
|
|
2145
|
+
function formatChunksForPrompt(refs, responseFormat) {
|
|
2146
|
+
return refs.map((r) => {
|
|
2147
|
+
const head = `[${r.index}] [${r.kbLabel}] (${r.title})`;
|
|
2148
|
+
if (responseFormat === "markdown" && r.url) {
|
|
2149
|
+
return `${head} <${r.url}>
|
|
2150
|
+
${r.snippet}`;
|
|
1605
2151
|
}
|
|
1606
|
-
return
|
|
1607
|
-
}
|
|
2152
|
+
return `${head} ${r.snippet}`;
|
|
2153
|
+
}).join(`
|
|
2154
|
+
|
|
2155
|
+
`);
|
|
1608
2156
|
}
|
|
1609
2157
|
|
|
1610
2158
|
// src/task/BackgroundRemovalTask.ts
|
|
1611
|
-
import { CreateWorkflow, Workflow } from "@workglow/task-graph";
|
|
2159
|
+
import { CreateWorkflow as CreateWorkflow2, Workflow as Workflow2 } from "@workglow/task-graph";
|
|
1612
2160
|
import { ImageValueSchema } from "@workglow/util/media";
|
|
1613
2161
|
|
|
1614
2162
|
// src/task/base/AiVisionTask.ts
|
|
@@ -1617,12 +2165,12 @@ class AiVisionTask extends AiTask {
|
|
|
1617
2165
|
}
|
|
1618
2166
|
|
|
1619
2167
|
// src/task/BackgroundRemovalTask.ts
|
|
1620
|
-
var
|
|
2168
|
+
var modelSchema3 = TypeModel("model:BackgroundRemovalTask");
|
|
1621
2169
|
var BackgroundRemovalInputSchema = {
|
|
1622
2170
|
type: "object",
|
|
1623
2171
|
properties: {
|
|
1624
2172
|
image: TypeImageInput,
|
|
1625
|
-
model:
|
|
2173
|
+
model: modelSchema3
|
|
1626
2174
|
},
|
|
1627
2175
|
required: ["image", "model"],
|
|
1628
2176
|
additionalProperties: false
|
|
@@ -1654,22 +2202,22 @@ class BackgroundRemovalTask extends AiVisionTask {
|
|
|
1654
2202
|
var backgroundRemoval = (input, config) => {
|
|
1655
2203
|
return new BackgroundRemovalTask(config).run(input);
|
|
1656
2204
|
};
|
|
1657
|
-
|
|
2205
|
+
Workflow2.prototype.backgroundRemoval = CreateWorkflow2(BackgroundRemovalTask);
|
|
1658
2206
|
|
|
1659
2207
|
// src/task/ChunkRetrievalTask.ts
|
|
1660
|
-
import { TypeKnowledgeBase } from "@workglow/knowledge-base";
|
|
1661
|
-
import { CreateWorkflow as
|
|
2208
|
+
import { TypeKnowledgeBase as TypeKnowledgeBase2 } from "@workglow/knowledge-base";
|
|
2209
|
+
import { CreateWorkflow as CreateWorkflow4, Task as Task3, Workflow as Workflow4 } from "@workglow/task-graph";
|
|
1662
2210
|
import {
|
|
1663
2211
|
isTypedArray,
|
|
1664
2212
|
TypedArraySchema as TypedArraySchema2
|
|
1665
2213
|
} from "@workglow/util/schema";
|
|
1666
2214
|
|
|
1667
2215
|
// src/task/TextEmbeddingTask.ts
|
|
1668
|
-
import { CreateWorkflow as
|
|
2216
|
+
import { CreateWorkflow as CreateWorkflow3, Workflow as Workflow3 } from "@workglow/task-graph";
|
|
1669
2217
|
import {
|
|
1670
2218
|
TypedArraySchema
|
|
1671
2219
|
} from "@workglow/util/schema";
|
|
1672
|
-
var
|
|
2220
|
+
var modelSchema4 = TypeModel("model:TextEmbeddingTask");
|
|
1673
2221
|
var TextEmbeddingInputSchema = {
|
|
1674
2222
|
type: "object",
|
|
1675
2223
|
properties: {
|
|
@@ -1678,7 +2226,7 @@ var TextEmbeddingInputSchema = {
|
|
|
1678
2226
|
title: "Text",
|
|
1679
2227
|
description: "The text to embed"
|
|
1680
2228
|
}),
|
|
1681
|
-
model:
|
|
2229
|
+
model: modelSchema4
|
|
1682
2230
|
},
|
|
1683
2231
|
required: ["text", "model"],
|
|
1684
2232
|
additionalProperties: false
|
|
@@ -1710,13 +2258,13 @@ class TextEmbeddingTask extends AiTask {
|
|
|
1710
2258
|
var textEmbedding = async (input, config) => {
|
|
1711
2259
|
return new TextEmbeddingTask(config).run(input);
|
|
1712
2260
|
};
|
|
1713
|
-
|
|
2261
|
+
Workflow3.prototype.textEmbedding = CreateWorkflow3(TextEmbeddingTask);
|
|
1714
2262
|
|
|
1715
2263
|
// src/task/ChunkRetrievalTask.ts
|
|
1716
|
-
var
|
|
2264
|
+
var inputSchema2 = {
|
|
1717
2265
|
type: "object",
|
|
1718
2266
|
properties: {
|
|
1719
|
-
knowledgeBase:
|
|
2267
|
+
knowledgeBase: TypeKnowledgeBase2({
|
|
1720
2268
|
title: "Knowledge Base",
|
|
1721
2269
|
description: "The knowledge base instance to search in"
|
|
1722
2270
|
}),
|
|
@@ -1789,7 +2337,7 @@ var inputSchema = {
|
|
|
1789
2337
|
else: {},
|
|
1790
2338
|
additionalProperties: false
|
|
1791
2339
|
};
|
|
1792
|
-
var
|
|
2340
|
+
var outputSchema2 = {
|
|
1793
2341
|
type: "object",
|
|
1794
2342
|
properties: {
|
|
1795
2343
|
chunks: {
|
|
@@ -1850,17 +2398,17 @@ var outputSchema = {
|
|
|
1850
2398
|
additionalProperties: false
|
|
1851
2399
|
};
|
|
1852
2400
|
|
|
1853
|
-
class ChunkRetrievalTask extends
|
|
2401
|
+
class ChunkRetrievalTask extends Task3 {
|
|
1854
2402
|
static type = "ChunkRetrievalTask";
|
|
1855
2403
|
static category = "RAG";
|
|
1856
2404
|
static title = "Chunk Retrieval";
|
|
1857
2405
|
static description = "End-to-end retrieval: embed query (if string) and search the knowledge base. Supports similarity and hybrid methods.";
|
|
1858
2406
|
static cacheable = true;
|
|
1859
2407
|
static inputSchema() {
|
|
1860
|
-
return
|
|
2408
|
+
return inputSchema2;
|
|
1861
2409
|
}
|
|
1862
2410
|
static outputSchema() {
|
|
1863
|
-
return
|
|
2411
|
+
return outputSchema2;
|
|
1864
2412
|
}
|
|
1865
2413
|
async execute(input, context) {
|
|
1866
2414
|
const {
|
|
@@ -1931,18 +2479,18 @@ class ChunkRetrievalTask extends Task2 {
|
|
|
1931
2479
|
var chunkRetrieval = (input, config) => {
|
|
1932
2480
|
return new ChunkRetrievalTask(config).run(input);
|
|
1933
2481
|
};
|
|
1934
|
-
|
|
2482
|
+
Workflow4.prototype.chunkRetrieval = CreateWorkflow4(ChunkRetrievalTask);
|
|
1935
2483
|
|
|
1936
2484
|
// src/task/ChunkVectorUpsertTask.ts
|
|
1937
|
-
import { ChunkRecordArraySchema, TypeKnowledgeBase as
|
|
1938
|
-
import { CreateWorkflow as
|
|
2485
|
+
import { ChunkRecordArraySchema, TypeKnowledgeBase as TypeKnowledgeBase3 } from "@workglow/knowledge-base";
|
|
2486
|
+
import { CreateWorkflow as CreateWorkflow5, Task as Task4, Workflow as Workflow5 } from "@workglow/task-graph";
|
|
1939
2487
|
import {
|
|
1940
2488
|
TypedArraySchema as TypedArraySchema3
|
|
1941
2489
|
} from "@workglow/util/schema";
|
|
1942
|
-
var
|
|
2490
|
+
var inputSchema3 = {
|
|
1943
2491
|
type: "object",
|
|
1944
2492
|
properties: {
|
|
1945
|
-
knowledgeBase:
|
|
2493
|
+
knowledgeBase: TypeKnowledgeBase3({
|
|
1946
2494
|
title: "Knowledge Base",
|
|
1947
2495
|
description: "The knowledge base instance to store vectors in"
|
|
1948
2496
|
}),
|
|
@@ -1960,7 +2508,7 @@ var inputSchema2 = {
|
|
|
1960
2508
|
required: ["knowledgeBase", "chunks", "vector"],
|
|
1961
2509
|
additionalProperties: false
|
|
1962
2510
|
};
|
|
1963
|
-
var
|
|
2511
|
+
var outputSchema3 = {
|
|
1964
2512
|
type: "object",
|
|
1965
2513
|
properties: {
|
|
1966
2514
|
count: {
|
|
@@ -1984,17 +2532,17 @@ var outputSchema2 = {
|
|
|
1984
2532
|
additionalProperties: false
|
|
1985
2533
|
};
|
|
1986
2534
|
|
|
1987
|
-
class ChunkVectorUpsertTask extends
|
|
2535
|
+
class ChunkVectorUpsertTask extends Task4 {
|
|
1988
2536
|
static type = "ChunkVectorUpsertTask";
|
|
1989
2537
|
static category = "Document";
|
|
1990
2538
|
static title = "Add to Vector Store";
|
|
1991
2539
|
static description = "Store chunks + their embeddings in a knowledge base (1:1 aligned)";
|
|
1992
2540
|
static cacheable = false;
|
|
1993
2541
|
static inputSchema() {
|
|
1994
|
-
return
|
|
2542
|
+
return inputSchema3;
|
|
1995
2543
|
}
|
|
1996
2544
|
static outputSchema() {
|
|
1997
|
-
return
|
|
2545
|
+
return outputSchema3;
|
|
1998
2546
|
}
|
|
1999
2547
|
async execute(input, context) {
|
|
2000
2548
|
const { knowledgeBase, chunks, vector, doc_title } = input;
|
|
@@ -2039,19 +2587,19 @@ class ChunkVectorUpsertTask extends Task3 {
|
|
|
2039
2587
|
var chunkVectorUpsert = (input, config) => {
|
|
2040
2588
|
return new ChunkVectorUpsertTask(config).run(input);
|
|
2041
2589
|
};
|
|
2042
|
-
|
|
2590
|
+
Workflow5.prototype.chunkVectorUpsert = CreateWorkflow5(ChunkVectorUpsertTask);
|
|
2043
2591
|
|
|
2044
2592
|
// src/task/ContextBuilderTask.ts
|
|
2045
2593
|
import { estimateTokens } from "@workglow/knowledge-base";
|
|
2046
2594
|
import {
|
|
2047
|
-
CreateWorkflow as
|
|
2048
|
-
Task as
|
|
2049
|
-
Workflow as
|
|
2595
|
+
CreateWorkflow as CreateWorkflow7,
|
|
2596
|
+
Task as Task5,
|
|
2597
|
+
Workflow as Workflow7
|
|
2050
2598
|
} from "@workglow/task-graph";
|
|
2051
2599
|
|
|
2052
2600
|
// src/task/CountTokensTask.ts
|
|
2053
|
-
import { CreateWorkflow as
|
|
2054
|
-
var
|
|
2601
|
+
import { CreateWorkflow as CreateWorkflow6, Workflow as Workflow6 } from "@workglow/task-graph";
|
|
2602
|
+
var modelSchema5 = TypeModel("model");
|
|
2055
2603
|
var CountTokensInputSchema = {
|
|
2056
2604
|
type: "object",
|
|
2057
2605
|
properties: {
|
|
@@ -2060,7 +2608,7 @@ var CountTokensInputSchema = {
|
|
|
2060
2608
|
title: "Text",
|
|
2061
2609
|
description: "The text to count tokens for"
|
|
2062
2610
|
},
|
|
2063
|
-
model:
|
|
2611
|
+
model: modelSchema5
|
|
2064
2612
|
},
|
|
2065
2613
|
required: ["text", "model"],
|
|
2066
2614
|
additionalProperties: false
|
|
@@ -2094,7 +2642,7 @@ class CountTokensTask extends AiTask {
|
|
|
2094
2642
|
var countTokens = async (input, config) => {
|
|
2095
2643
|
return new CountTokensTask(config).run(input);
|
|
2096
2644
|
};
|
|
2097
|
-
|
|
2645
|
+
Workflow6.prototype.countTokens = CreateWorkflow6(CountTokensTask);
|
|
2098
2646
|
|
|
2099
2647
|
// src/task/ContextBuilderTask.ts
|
|
2100
2648
|
var ContextFormat = {
|
|
@@ -2104,11 +2652,11 @@ var ContextFormat = {
|
|
|
2104
2652
|
MARKDOWN: "markdown",
|
|
2105
2653
|
JSON: "json"
|
|
2106
2654
|
};
|
|
2107
|
-
var
|
|
2655
|
+
var modelSchema6 = TypeModel("model", {
|
|
2108
2656
|
title: "Model",
|
|
2109
2657
|
description: "Model to use for token counting (optional, falls back to estimation)"
|
|
2110
2658
|
});
|
|
2111
|
-
var
|
|
2659
|
+
var inputSchema4 = {
|
|
2112
2660
|
type: "object",
|
|
2113
2661
|
properties: {
|
|
2114
2662
|
chunks: {
|
|
@@ -2168,12 +2716,12 @@ var inputSchema3 = {
|
|
|
2168
2716
|
|
|
2169
2717
|
`
|
|
2170
2718
|
},
|
|
2171
|
-
model:
|
|
2719
|
+
model: modelSchema6
|
|
2172
2720
|
},
|
|
2173
2721
|
required: ["chunks"],
|
|
2174
2722
|
additionalProperties: false
|
|
2175
2723
|
};
|
|
2176
|
-
var
|
|
2724
|
+
var outputSchema4 = {
|
|
2177
2725
|
type: "object",
|
|
2178
2726
|
properties: {
|
|
2179
2727
|
context: {
|
|
@@ -2201,17 +2749,17 @@ var outputSchema3 = {
|
|
|
2201
2749
|
additionalProperties: false
|
|
2202
2750
|
};
|
|
2203
2751
|
|
|
2204
|
-
class ContextBuilderTask extends
|
|
2752
|
+
class ContextBuilderTask extends Task5 {
|
|
2205
2753
|
static type = "ContextBuilderTask";
|
|
2206
2754
|
static category = "RAG";
|
|
2207
2755
|
static title = "Context Builder";
|
|
2208
2756
|
static description = "Format retrieved chunks into context for LLM prompts";
|
|
2209
2757
|
static cacheable = true;
|
|
2210
2758
|
static inputSchema() {
|
|
2211
|
-
return
|
|
2759
|
+
return inputSchema4;
|
|
2212
2760
|
}
|
|
2213
2761
|
static outputSchema() {
|
|
2214
|
-
return
|
|
2762
|
+
return outputSchema4;
|
|
2215
2763
|
}
|
|
2216
2764
|
async execute(input, context) {
|
|
2217
2765
|
return this.executePreview(input, context);
|
|
@@ -2397,15 +2945,15 @@ class ContextBuilderTask extends Task4 {
|
|
|
2397
2945
|
var contextBuilder = (input, config) => {
|
|
2398
2946
|
return new ContextBuilderTask(config).run(input);
|
|
2399
2947
|
};
|
|
2400
|
-
|
|
2948
|
+
Workflow7.prototype.contextBuilder = CreateWorkflow7(ContextBuilderTask);
|
|
2401
2949
|
|
|
2402
2950
|
// src/task/DocumentEnricherTask.ts
|
|
2403
2951
|
import { getChildren, hasChildren } from "@workglow/knowledge-base";
|
|
2404
|
-
import { CreateWorkflow as
|
|
2952
|
+
import { CreateWorkflow as CreateWorkflow10, Task as Task6, Workflow as Workflow10 } from "@workglow/task-graph";
|
|
2405
2953
|
|
|
2406
2954
|
// src/task/TextNamedEntityRecognitionTask.ts
|
|
2407
|
-
import { CreateWorkflow as
|
|
2408
|
-
var
|
|
2955
|
+
import { CreateWorkflow as CreateWorkflow8, Workflow as Workflow8 } from "@workglow/task-graph";
|
|
2956
|
+
var modelSchema7 = TypeModel("model:TextNamedEntityRecognitionTask");
|
|
2409
2957
|
var TextNamedEntityRecognitionInputSchema = {
|
|
2410
2958
|
type: "object",
|
|
2411
2959
|
properties: {
|
|
@@ -2424,7 +2972,7 @@ var TextNamedEntityRecognitionInputSchema = {
|
|
|
2424
2972
|
"x-ui-group": "Configuration",
|
|
2425
2973
|
"x-ui-group-open": false
|
|
2426
2974
|
},
|
|
2427
|
-
model:
|
|
2975
|
+
model: modelSchema7
|
|
2428
2976
|
},
|
|
2429
2977
|
required: ["text", "model"],
|
|
2430
2978
|
additionalProperties: false
|
|
@@ -2479,11 +3027,11 @@ class TextNamedEntityRecognitionTask extends AiTask {
|
|
|
2479
3027
|
var textNamedEntityRecognition = (input, config) => {
|
|
2480
3028
|
return new TextNamedEntityRecognitionTask(config).run(input);
|
|
2481
3029
|
};
|
|
2482
|
-
|
|
3030
|
+
Workflow8.prototype.textNamedEntityRecognition = CreateWorkflow8(TextNamedEntityRecognitionTask);
|
|
2483
3031
|
|
|
2484
3032
|
// src/task/TextSummaryTask.ts
|
|
2485
|
-
import { CreateWorkflow as
|
|
2486
|
-
var
|
|
3033
|
+
import { CreateWorkflow as CreateWorkflow9, Workflow as Workflow9 } from "@workglow/task-graph";
|
|
3034
|
+
var modelSchema8 = TypeModel("model:TextSummaryTask");
|
|
2487
3035
|
var TextSummaryInputSchema = {
|
|
2488
3036
|
type: "object",
|
|
2489
3037
|
properties: {
|
|
@@ -2492,7 +3040,7 @@ var TextSummaryInputSchema = {
|
|
|
2492
3040
|
title: "Text",
|
|
2493
3041
|
description: "The text to summarize"
|
|
2494
3042
|
},
|
|
2495
|
-
model:
|
|
3043
|
+
model: modelSchema8
|
|
2496
3044
|
},
|
|
2497
3045
|
required: ["text", "model"],
|
|
2498
3046
|
additionalProperties: false
|
|
@@ -2527,10 +3075,10 @@ class TextSummaryTask extends StreamingAiTask {
|
|
|
2527
3075
|
var textSummary = async (input, config) => {
|
|
2528
3076
|
return new TextSummaryTask(config).run(input);
|
|
2529
3077
|
};
|
|
2530
|
-
|
|
3078
|
+
Workflow9.prototype.textSummary = CreateWorkflow9(TextSummaryTask);
|
|
2531
3079
|
|
|
2532
3080
|
// src/task/DocumentEnricherTask.ts
|
|
2533
|
-
var
|
|
3081
|
+
var inputSchema5 = {
|
|
2534
3082
|
type: "object",
|
|
2535
3083
|
properties: {
|
|
2536
3084
|
doc_id: {
|
|
@@ -2574,7 +3122,7 @@ var inputSchema4 = {
|
|
|
2574
3122
|
required: [],
|
|
2575
3123
|
additionalProperties: false
|
|
2576
3124
|
};
|
|
2577
|
-
var
|
|
3125
|
+
var outputSchema5 = {
|
|
2578
3126
|
type: "object",
|
|
2579
3127
|
properties: {
|
|
2580
3128
|
doc_id: {
|
|
@@ -2601,17 +3149,17 @@ var outputSchema4 = {
|
|
|
2601
3149
|
additionalProperties: false
|
|
2602
3150
|
};
|
|
2603
3151
|
|
|
2604
|
-
class DocumentEnricherTask extends
|
|
3152
|
+
class DocumentEnricherTask extends Task6 {
|
|
2605
3153
|
static type = "DocumentEnricherTask";
|
|
2606
3154
|
static category = "Document";
|
|
2607
3155
|
static title = "Document Enricher";
|
|
2608
3156
|
static description = "Enrich document nodes with summaries and entities";
|
|
2609
3157
|
static cacheable = true;
|
|
2610
3158
|
static inputSchema() {
|
|
2611
|
-
return
|
|
3159
|
+
return inputSchema5;
|
|
2612
3160
|
}
|
|
2613
3161
|
static outputSchema() {
|
|
2614
|
-
return
|
|
3162
|
+
return outputSchema5;
|
|
2615
3163
|
}
|
|
2616
3164
|
async execute(input, context) {
|
|
2617
3165
|
const {
|
|
@@ -2760,19 +3308,19 @@ class DocumentEnricherTask extends Task5 {
|
|
|
2760
3308
|
var documentEnricher = (input, config) => {
|
|
2761
3309
|
return new DocumentEnricherTask(config).run(input);
|
|
2762
3310
|
};
|
|
2763
|
-
|
|
3311
|
+
Workflow10.prototype.documentEnricher = CreateWorkflow10(DocumentEnricherTask);
|
|
2764
3312
|
|
|
2765
3313
|
// src/task/DocumentUpsertTask.ts
|
|
2766
3314
|
import {
|
|
2767
3315
|
Document,
|
|
2768
3316
|
DocumentMetadataSchema,
|
|
2769
|
-
TypeKnowledgeBase as
|
|
3317
|
+
TypeKnowledgeBase as TypeKnowledgeBase4
|
|
2770
3318
|
} from "@workglow/knowledge-base";
|
|
2771
|
-
import { CreateWorkflow as
|
|
2772
|
-
var
|
|
3319
|
+
import { CreateWorkflow as CreateWorkflow11, Task as Task7, Workflow as Workflow11 } from "@workglow/task-graph";
|
|
3320
|
+
var inputSchema6 = {
|
|
2773
3321
|
type: "object",
|
|
2774
3322
|
properties: {
|
|
2775
|
-
knowledgeBase:
|
|
3323
|
+
knowledgeBase: TypeKnowledgeBase4({
|
|
2776
3324
|
title: "Knowledge Base",
|
|
2777
3325
|
description: "The knowledge base instance to store the document in"
|
|
2778
3326
|
}),
|
|
@@ -2800,7 +3348,7 @@ var inputSchema5 = {
|
|
|
2800
3348
|
required: ["knowledgeBase", "doc_id", "documentTree"],
|
|
2801
3349
|
additionalProperties: false
|
|
2802
3350
|
};
|
|
2803
|
-
var
|
|
3351
|
+
var outputSchema6 = {
|
|
2804
3352
|
type: "object",
|
|
2805
3353
|
properties: {
|
|
2806
3354
|
doc_id: {
|
|
@@ -2813,17 +3361,17 @@ var outputSchema5 = {
|
|
|
2813
3361
|
additionalProperties: false
|
|
2814
3362
|
};
|
|
2815
3363
|
|
|
2816
|
-
class DocumentUpsertTask extends
|
|
3364
|
+
class DocumentUpsertTask extends Task7 {
|
|
2817
3365
|
static type = "DocumentUpsertTask";
|
|
2818
3366
|
static category = "Document";
|
|
2819
3367
|
static title = "Add Document";
|
|
2820
3368
|
static description = "Persist a parsed document tree to a knowledge base";
|
|
2821
3369
|
static cacheable = false;
|
|
2822
3370
|
static inputSchema() {
|
|
2823
|
-
return
|
|
3371
|
+
return inputSchema6;
|
|
2824
3372
|
}
|
|
2825
3373
|
static outputSchema() {
|
|
2826
|
-
return
|
|
3374
|
+
return outputSchema6;
|
|
2827
3375
|
}
|
|
2828
3376
|
async execute(input, context) {
|
|
2829
3377
|
const { knowledgeBase, doc_id, documentTree, title, metadata } = input;
|
|
@@ -2846,15 +3394,15 @@ class DocumentUpsertTask extends Task6 {
|
|
|
2846
3394
|
var documentUpsert = (input, config) => {
|
|
2847
3395
|
return new DocumentUpsertTask(config).run(input);
|
|
2848
3396
|
};
|
|
2849
|
-
|
|
3397
|
+
Workflow11.prototype.documentUpsert = CreateWorkflow11(DocumentUpsertTask);
|
|
2850
3398
|
|
|
2851
3399
|
// src/task/DownloadModelTask.ts
|
|
2852
|
-
import { CreateWorkflow as
|
|
2853
|
-
var
|
|
3400
|
+
import { CreateWorkflow as CreateWorkflow12, Workflow as Workflow12 } from "@workglow/task-graph";
|
|
3401
|
+
var modelSchema9 = TypeModel("model");
|
|
2854
3402
|
var DownloadModelInputSchema = {
|
|
2855
3403
|
type: "object",
|
|
2856
3404
|
properties: {
|
|
2857
|
-
model:
|
|
3405
|
+
model: modelSchema9
|
|
2858
3406
|
},
|
|
2859
3407
|
required: ["model"],
|
|
2860
3408
|
additionalProperties: false
|
|
@@ -2862,7 +3410,7 @@ var DownloadModelInputSchema = {
|
|
|
2862
3410
|
var DownloadModelOutputSchema = {
|
|
2863
3411
|
type: "object",
|
|
2864
3412
|
properties: {
|
|
2865
|
-
model:
|
|
3413
|
+
model: modelSchema9
|
|
2866
3414
|
},
|
|
2867
3415
|
required: ["model"],
|
|
2868
3416
|
additionalProperties: false
|
|
@@ -2916,10 +3464,10 @@ class DownloadModelTask extends AiTask {
|
|
|
2916
3464
|
var downloadModel = (input, config) => {
|
|
2917
3465
|
return new DownloadModelTask(config).run(input);
|
|
2918
3466
|
};
|
|
2919
|
-
|
|
3467
|
+
Workflow12.prototype.downloadModel = CreateWorkflow12(DownloadModelTask);
|
|
2920
3468
|
|
|
2921
3469
|
// src/task/generation/ImageEditTask.ts
|
|
2922
|
-
import { CreateWorkflow as
|
|
3470
|
+
import { CreateWorkflow as CreateWorkflow13, Workflow as Workflow13 } from "@workglow/task-graph";
|
|
2923
3471
|
import { ImageValueSchema as ImageValueSchema3 } from "@workglow/util/media";
|
|
2924
3472
|
|
|
2925
3473
|
// src/task/base/AiImageOutputTask.ts
|
|
@@ -3051,11 +3599,11 @@ var AiImageOutputSchema = {
|
|
|
3051
3599
|
};
|
|
3052
3600
|
|
|
3053
3601
|
// src/task/generation/ImageEditTask.ts
|
|
3054
|
-
var
|
|
3602
|
+
var modelSchema10 = TypeModel("model:ImageEditTask");
|
|
3055
3603
|
var ImageEditInputSchema = {
|
|
3056
3604
|
type: "object",
|
|
3057
3605
|
properties: {
|
|
3058
|
-
model:
|
|
3606
|
+
model: modelSchema10,
|
|
3059
3607
|
prompt: {
|
|
3060
3608
|
type: "string",
|
|
3061
3609
|
title: "Prompt",
|
|
@@ -3103,11 +3651,11 @@ class ImageEditTask extends AiImageOutputTask {
|
|
|
3103
3651
|
}
|
|
3104
3652
|
}
|
|
3105
3653
|
var imageEdit = (input, config) => new ImageEditTask(config).run(input);
|
|
3106
|
-
|
|
3654
|
+
Workflow13.prototype.imageEdit = CreateWorkflow13(ImageEditTask);
|
|
3107
3655
|
|
|
3108
3656
|
// src/task/FaceDetectorTask.ts
|
|
3109
|
-
import { CreateWorkflow as
|
|
3110
|
-
var
|
|
3657
|
+
import { CreateWorkflow as CreateWorkflow14, Workflow as Workflow14 } from "@workglow/task-graph";
|
|
3658
|
+
var modelSchema11 = TypeModel("model:FaceDetectorTask");
|
|
3111
3659
|
var TypeBoundingBox2 = {
|
|
3112
3660
|
type: "object",
|
|
3113
3661
|
properties: {
|
|
@@ -3180,7 +3728,7 @@ var FaceDetectorInputSchema = {
|
|
|
3180
3728
|
type: "object",
|
|
3181
3729
|
properties: {
|
|
3182
3730
|
image: TypeImageInput,
|
|
3183
|
-
model:
|
|
3731
|
+
model: modelSchema11,
|
|
3184
3732
|
minDetectionConfidence: {
|
|
3185
3733
|
type: "number",
|
|
3186
3734
|
minimum: 0,
|
|
@@ -3234,11 +3782,11 @@ class FaceDetectorTask extends AiVisionTask {
|
|
|
3234
3782
|
var faceDetector = (input, config) => {
|
|
3235
3783
|
return new FaceDetectorTask(config).run(input);
|
|
3236
3784
|
};
|
|
3237
|
-
|
|
3785
|
+
Workflow14.prototype.faceDetector = CreateWorkflow14(FaceDetectorTask);
|
|
3238
3786
|
|
|
3239
3787
|
// src/task/FaceLandmarkerTask.ts
|
|
3240
|
-
import { CreateWorkflow as
|
|
3241
|
-
var
|
|
3788
|
+
import { CreateWorkflow as CreateWorkflow15, Workflow as Workflow15 } from "@workglow/task-graph";
|
|
3789
|
+
var modelSchema12 = TypeModel("model:FaceLandmarkerTask");
|
|
3242
3790
|
var TypeBlendshape = {
|
|
3243
3791
|
type: "object",
|
|
3244
3792
|
properties: {
|
|
@@ -3289,7 +3837,7 @@ var FaceLandmarkerInputSchema = {
|
|
|
3289
3837
|
type: "object",
|
|
3290
3838
|
properties: {
|
|
3291
3839
|
image: TypeImageInput,
|
|
3292
|
-
model:
|
|
3840
|
+
model: modelSchema12,
|
|
3293
3841
|
numFaces: {
|
|
3294
3842
|
type: "number",
|
|
3295
3843
|
minimum: 1,
|
|
@@ -3375,15 +3923,15 @@ class FaceLandmarkerTask extends AiVisionTask {
|
|
|
3375
3923
|
var faceLandmarker = (input, config) => {
|
|
3376
3924
|
return new FaceLandmarkerTask(config).run(input);
|
|
3377
3925
|
};
|
|
3378
|
-
|
|
3926
|
+
Workflow15.prototype.faceLandmarker = CreateWorkflow15(FaceLandmarkerTask);
|
|
3379
3927
|
|
|
3380
3928
|
// src/task/generation/ImageGenerateTask.ts
|
|
3381
|
-
import { CreateWorkflow as
|
|
3382
|
-
var
|
|
3929
|
+
import { CreateWorkflow as CreateWorkflow16, Workflow as Workflow16 } from "@workglow/task-graph";
|
|
3930
|
+
var modelSchema13 = TypeModel("model:ImageGenerateTask");
|
|
3383
3931
|
var ImageGenerateInputSchema = {
|
|
3384
3932
|
type: "object",
|
|
3385
3933
|
properties: {
|
|
3386
|
-
model:
|
|
3934
|
+
model: modelSchema13,
|
|
3387
3935
|
prompt: {
|
|
3388
3936
|
type: "string",
|
|
3389
3937
|
title: "Prompt",
|
|
@@ -3417,11 +3965,11 @@ class ImageGenerateTask extends AiImageOutputTask {
|
|
|
3417
3965
|
}
|
|
3418
3966
|
}
|
|
3419
3967
|
var imageGenerate = (input, config) => new ImageGenerateTask(config).run(input);
|
|
3420
|
-
|
|
3968
|
+
Workflow16.prototype.imageGenerate = CreateWorkflow16(ImageGenerateTask);
|
|
3421
3969
|
|
|
3422
3970
|
// src/task/GestureRecognizerTask.ts
|
|
3423
|
-
import { CreateWorkflow as
|
|
3424
|
-
var
|
|
3971
|
+
import { CreateWorkflow as CreateWorkflow17, Workflow as Workflow17 } from "@workglow/task-graph";
|
|
3972
|
+
var modelSchema14 = TypeModel("model:GestureRecognizerTask");
|
|
3425
3973
|
var TypeGesture = {
|
|
3426
3974
|
type: "object",
|
|
3427
3975
|
properties: {
|
|
@@ -3493,7 +4041,7 @@ var GestureRecognizerInputSchema = {
|
|
|
3493
4041
|
type: "object",
|
|
3494
4042
|
properties: {
|
|
3495
4043
|
image: TypeImageInput,
|
|
3496
|
-
model:
|
|
4044
|
+
model: modelSchema14,
|
|
3497
4045
|
numHands: {
|
|
3498
4046
|
type: "number",
|
|
3499
4047
|
minimum: 1,
|
|
@@ -3565,11 +4113,11 @@ class GestureRecognizerTask extends AiVisionTask {
|
|
|
3565
4113
|
var gestureRecognizer = (input, config) => {
|
|
3566
4114
|
return new GestureRecognizerTask(config).run(input);
|
|
3567
4115
|
};
|
|
3568
|
-
|
|
4116
|
+
Workflow17.prototype.gestureRecognizer = CreateWorkflow17(GestureRecognizerTask);
|
|
3569
4117
|
|
|
3570
4118
|
// src/task/HandLandmarkerTask.ts
|
|
3571
|
-
import { CreateWorkflow as
|
|
3572
|
-
var
|
|
4119
|
+
import { CreateWorkflow as CreateWorkflow18, Workflow as Workflow18 } from "@workglow/task-graph";
|
|
4120
|
+
var modelSchema15 = TypeModel("model:HandLandmarkerTask");
|
|
3573
4121
|
var TypeHandedness2 = {
|
|
3574
4122
|
type: "object",
|
|
3575
4123
|
properties: {
|
|
@@ -3618,7 +4166,7 @@ var HandLandmarkerInputSchema = {
|
|
|
3618
4166
|
type: "object",
|
|
3619
4167
|
properties: {
|
|
3620
4168
|
image: TypeImageInput,
|
|
3621
|
-
model:
|
|
4169
|
+
model: modelSchema15,
|
|
3622
4170
|
numHands: {
|
|
3623
4171
|
type: "number",
|
|
3624
4172
|
minimum: 1,
|
|
@@ -3690,22 +4238,23 @@ class HandLandmarkerTask extends AiVisionTask {
|
|
|
3690
4238
|
var handLandmarker = (input, config) => {
|
|
3691
4239
|
return new HandLandmarkerTask(config).run(input);
|
|
3692
4240
|
};
|
|
3693
|
-
|
|
4241
|
+
Workflow18.prototype.handLandmarker = CreateWorkflow18(HandLandmarkerTask);
|
|
3694
4242
|
|
|
3695
4243
|
// src/task/HierarchicalChunkerTask.ts
|
|
3696
4244
|
import {
|
|
3697
4245
|
ChunkRecordSchema,
|
|
3698
4246
|
estimateTokens as estimateTokens2,
|
|
3699
4247
|
getChildren as getChildren2,
|
|
3700
|
-
hasChildren as hasChildren2
|
|
4248
|
+
hasChildren as hasChildren2,
|
|
4249
|
+
NodeKind
|
|
3701
4250
|
} from "@workglow/knowledge-base";
|
|
3702
|
-
import { CreateWorkflow as
|
|
4251
|
+
import { CreateWorkflow as CreateWorkflow19, Task as Task8, Workflow as Workflow19 } from "@workglow/task-graph";
|
|
3703
4252
|
import { uuid4 } from "@workglow/util";
|
|
3704
|
-
var
|
|
4253
|
+
var modelSchema16 = TypeModel("model", {
|
|
3705
4254
|
title: "Model",
|
|
3706
4255
|
description: "Model to use for token counting"
|
|
3707
4256
|
});
|
|
3708
|
-
var
|
|
4257
|
+
var inputSchema7 = {
|
|
3709
4258
|
type: "object",
|
|
3710
4259
|
properties: {
|
|
3711
4260
|
doc_id: {
|
|
@@ -3747,12 +4296,12 @@ var inputSchema6 = {
|
|
|
3747
4296
|
description: "Strategy for chunking",
|
|
3748
4297
|
default: "hierarchical"
|
|
3749
4298
|
},
|
|
3750
|
-
model:
|
|
4299
|
+
model: modelSchema16
|
|
3751
4300
|
},
|
|
3752
4301
|
required: ["doc_id", "documentTree"],
|
|
3753
4302
|
additionalProperties: false
|
|
3754
4303
|
};
|
|
3755
|
-
var
|
|
4304
|
+
var outputSchema7 = {
|
|
3756
4305
|
type: "object",
|
|
3757
4306
|
properties: {
|
|
3758
4307
|
doc_id: {
|
|
@@ -3782,17 +4331,17 @@ var outputSchema6 = {
|
|
|
3782
4331
|
additionalProperties: false
|
|
3783
4332
|
};
|
|
3784
4333
|
|
|
3785
|
-
class HierarchicalChunkerTask extends
|
|
4334
|
+
class HierarchicalChunkerTask extends Task8 {
|
|
3786
4335
|
static type = "HierarchicalChunkerTask";
|
|
3787
4336
|
static category = "Document";
|
|
3788
4337
|
static title = "Hierarchical Chunker";
|
|
3789
4338
|
static description = "Chunk documents hierarchically respecting token budgets";
|
|
3790
4339
|
static cacheable = true;
|
|
3791
4340
|
static inputSchema() {
|
|
3792
|
-
return
|
|
4341
|
+
return inputSchema7;
|
|
3793
4342
|
}
|
|
3794
4343
|
static outputSchema() {
|
|
3795
|
-
return
|
|
4344
|
+
return outputSchema7;
|
|
3796
4345
|
}
|
|
3797
4346
|
async execute(input, context) {
|
|
3798
4347
|
const {
|
|
@@ -3829,7 +4378,7 @@ class HierarchicalChunkerTask extends Task7 {
|
|
|
3829
4378
|
}
|
|
3830
4379
|
const chunks = [];
|
|
3831
4380
|
if (strategy === "hierarchical") {
|
|
3832
|
-
await this.chunkHierarchically(root, [], doc_id, tokenBudget, chunks, countFn);
|
|
4381
|
+
await this.chunkHierarchically(root, [], [], doc_id, tokenBudget, chunks, countFn);
|
|
3833
4382
|
} else {
|
|
3834
4383
|
await this.chunkFlat(root, doc_id, tokenBudget, chunks, countFn);
|
|
3835
4384
|
}
|
|
@@ -3840,23 +4389,35 @@ class HierarchicalChunkerTask extends Task7 {
|
|
|
3840
4389
|
count: chunks.length
|
|
3841
4390
|
};
|
|
3842
4391
|
}
|
|
3843
|
-
async chunkHierarchically(node, nodePath, doc_id, tokenBudget, chunks, countFn) {
|
|
4392
|
+
async chunkHierarchically(node, nodePath, headingPath, doc_id, tokenBudget, chunks, countFn) {
|
|
3844
4393
|
const currentPath = [...nodePath, node.nodeId];
|
|
4394
|
+
const currentHeadings = node.kind === NodeKind.SECTION && typeof node.title === "string" && node.title.trim().length > 0 ? [...headingPath, node.title.trim()] : headingPath;
|
|
3845
4395
|
if (!hasChildren2(node)) {
|
|
3846
|
-
await this.chunkText(node.text, currentPath, doc_id, tokenBudget, chunks, node.nodeId, countFn);
|
|
4396
|
+
await this.chunkText(node.text, currentPath, currentHeadings, doc_id, tokenBudget, chunks, node.nodeId, countFn);
|
|
3847
4397
|
return;
|
|
3848
4398
|
}
|
|
3849
4399
|
const children = getChildren2(node);
|
|
3850
4400
|
for (const child of children) {
|
|
3851
|
-
await this.chunkHierarchically(child, currentPath, doc_id, tokenBudget, chunks, countFn);
|
|
4401
|
+
await this.chunkHierarchically(child, currentPath, currentHeadings, doc_id, tokenBudget, chunks, countFn);
|
|
3852
4402
|
}
|
|
3853
4403
|
}
|
|
3854
|
-
async chunkText(text, nodePath, doc_id, tokenBudget, chunks, leafNodeId, countFn) {
|
|
3855
|
-
|
|
4404
|
+
async chunkText(text, nodePath, headingPath, doc_id, tokenBudget, chunks, leafNodeId, countFn) {
|
|
4405
|
+
if (text.trim().length === 0)
|
|
4406
|
+
return;
|
|
4407
|
+
const budgetAfterReserved = tokenBudget.maxTokensPerChunk - tokenBudget.reservedTokens;
|
|
3856
4408
|
const overlapTokens = tokenBudget.overlapTokens;
|
|
3857
|
-
if (
|
|
4409
|
+
if (budgetAfterReserved <= 0) {
|
|
3858
4410
|
throw new Error(`Invalid token budget: reservedTokens (${tokenBudget.reservedTokens}) must be less than maxTokensPerChunk (${tokenBudget.maxTokensPerChunk})`);
|
|
3859
4411
|
}
|
|
4412
|
+
const breadcrumb = headingPath.join(" > ");
|
|
4413
|
+
const candidatePrefix = breadcrumb ? `${breadcrumb}
|
|
4414
|
+
|
|
4415
|
+
` : "";
|
|
4416
|
+
const prefixTokens = candidatePrefix ? await countFn(candidatePrefix) : 0;
|
|
4417
|
+
const usePrefix = prefixTokens > 0 && prefixTokens < budgetAfterReserved;
|
|
4418
|
+
const prefix = usePrefix ? candidatePrefix : "";
|
|
4419
|
+
const effectivePrefixTokens = usePrefix ? prefixTokens : 0;
|
|
4420
|
+
const maxTokens = budgetAfterReserved - effectivePrefixTokens;
|
|
3860
4421
|
if (overlapTokens >= maxTokens) {
|
|
3861
4422
|
throw new Error(`Invalid token budget: overlapTokens (${overlapTokens}) must be less than effective maxTokens (${maxTokens})`);
|
|
3862
4423
|
}
|
|
@@ -3865,9 +4426,11 @@ class HierarchicalChunkerTask extends Task7 {
|
|
|
3865
4426
|
chunks.push({
|
|
3866
4427
|
chunkId: uuid4(),
|
|
3867
4428
|
doc_id,
|
|
3868
|
-
text,
|
|
4429
|
+
text: prefix + text,
|
|
3869
4430
|
nodePath,
|
|
3870
|
-
depth: nodePath.length
|
|
4431
|
+
depth: nodePath.length,
|
|
4432
|
+
leafNodeId,
|
|
4433
|
+
sectionTitles: [...headingPath]
|
|
3871
4434
|
});
|
|
3872
4435
|
return;
|
|
3873
4436
|
}
|
|
@@ -3892,9 +4455,11 @@ class HierarchicalChunkerTask extends Task7 {
|
|
|
3892
4455
|
chunks.push({
|
|
3893
4456
|
chunkId: uuid4(),
|
|
3894
4457
|
doc_id,
|
|
3895
|
-
text: text.substring(startOffset, endOffset),
|
|
4458
|
+
text: prefix + text.substring(startOffset, endOffset),
|
|
3896
4459
|
nodePath,
|
|
3897
|
-
depth: nodePath.length
|
|
4460
|
+
depth: nodePath.length,
|
|
4461
|
+
leafNodeId,
|
|
4462
|
+
sectionTitles: [...headingPath]
|
|
3898
4463
|
});
|
|
3899
4464
|
if (endOffset >= text.length)
|
|
3900
4465
|
break;
|
|
@@ -3904,7 +4469,7 @@ class HierarchicalChunkerTask extends Task7 {
|
|
|
3904
4469
|
}
|
|
3905
4470
|
async chunkFlat(root, doc_id, tokenBudget, chunks, countFn) {
|
|
3906
4471
|
const allText = this.collectAllText(root);
|
|
3907
|
-
await this.chunkText(allText, [root.nodeId], doc_id, tokenBudget, chunks, root.nodeId, countFn);
|
|
4472
|
+
await this.chunkText(allText, [root.nodeId], [], doc_id, tokenBudget, chunks, root.nodeId, countFn);
|
|
3908
4473
|
}
|
|
3909
4474
|
collectAllText(node) {
|
|
3910
4475
|
const texts = [];
|
|
@@ -3926,15 +4491,15 @@ class HierarchicalChunkerTask extends Task7 {
|
|
|
3926
4491
|
var hierarchicalChunker = (input, config) => {
|
|
3927
4492
|
return new HierarchicalChunkerTask(config).run(input);
|
|
3928
4493
|
};
|
|
3929
|
-
|
|
4494
|
+
Workflow19.prototype.hierarchicalChunker = CreateWorkflow19(HierarchicalChunkerTask);
|
|
3930
4495
|
|
|
3931
4496
|
// src/task/HierarchyJoinTask.ts
|
|
3932
|
-
import { ChunkRecordArraySchema as ChunkRecordArraySchema2, TypeKnowledgeBase as
|
|
3933
|
-
import { CreateWorkflow as
|
|
3934
|
-
var
|
|
4497
|
+
import { ChunkRecordArraySchema as ChunkRecordArraySchema2, TypeKnowledgeBase as TypeKnowledgeBase5 } from "@workglow/knowledge-base";
|
|
4498
|
+
import { CreateWorkflow as CreateWorkflow20, Task as Task9, Workflow as Workflow20 } from "@workglow/task-graph";
|
|
4499
|
+
var inputSchema8 = {
|
|
3935
4500
|
type: "object",
|
|
3936
4501
|
properties: {
|
|
3937
|
-
knowledgeBase:
|
|
4502
|
+
knowledgeBase: TypeKnowledgeBase5({
|
|
3938
4503
|
title: "Knowledge Base",
|
|
3939
4504
|
description: "The knowledge base to query for hierarchy"
|
|
3940
4505
|
}),
|
|
@@ -3973,7 +4538,7 @@ var inputSchema7 = {
|
|
|
3973
4538
|
required: ["knowledgeBase", "metadata"],
|
|
3974
4539
|
additionalProperties: false
|
|
3975
4540
|
};
|
|
3976
|
-
var
|
|
4541
|
+
var outputSchema8 = {
|
|
3977
4542
|
type: "object",
|
|
3978
4543
|
properties: {
|
|
3979
4544
|
metadata: ChunkRecordArraySchema2,
|
|
@@ -4005,17 +4570,17 @@ var outputSchema7 = {
|
|
|
4005
4570
|
additionalProperties: false
|
|
4006
4571
|
};
|
|
4007
4572
|
|
|
4008
|
-
class HierarchyJoinTask extends
|
|
4573
|
+
class HierarchyJoinTask extends Task9 {
|
|
4009
4574
|
static type = "HierarchyJoinTask";
|
|
4010
4575
|
static category = "RAG";
|
|
4011
4576
|
static title = "Hierarchy Join";
|
|
4012
4577
|
static description = "Enrich retrieval metadata with document hierarchy context";
|
|
4013
4578
|
static cacheable = false;
|
|
4014
4579
|
static inputSchema() {
|
|
4015
|
-
return
|
|
4580
|
+
return inputSchema8;
|
|
4016
4581
|
}
|
|
4017
4582
|
static outputSchema() {
|
|
4018
|
-
return
|
|
4583
|
+
return outputSchema8;
|
|
4019
4584
|
}
|
|
4020
4585
|
async execute(input, context) {
|
|
4021
4586
|
const {
|
|
@@ -4101,15 +4666,15 @@ class HierarchyJoinTask extends Task8 {
|
|
|
4101
4666
|
var hierarchyJoin = (input, config) => {
|
|
4102
4667
|
return new HierarchyJoinTask(config).run(input);
|
|
4103
4668
|
};
|
|
4104
|
-
|
|
4669
|
+
Workflow20.prototype.hierarchyJoin = CreateWorkflow20(HierarchyJoinTask);
|
|
4105
4670
|
|
|
4106
4671
|
// src/task/KbToDocumentsTask.ts
|
|
4107
|
-
import { TypeKnowledgeBase as
|
|
4108
|
-
import { CreateWorkflow as
|
|
4109
|
-
var
|
|
4672
|
+
import { TypeKnowledgeBase as TypeKnowledgeBase6 } from "@workglow/knowledge-base";
|
|
4673
|
+
import { CreateWorkflow as CreateWorkflow21, Task as Task10, Workflow as Workflow21 } from "@workglow/task-graph";
|
|
4674
|
+
var inputSchema9 = {
|
|
4110
4675
|
type: "object",
|
|
4111
4676
|
properties: {
|
|
4112
|
-
knowledgeBase:
|
|
4677
|
+
knowledgeBase: TypeKnowledgeBase6({
|
|
4113
4678
|
title: "Knowledge Base",
|
|
4114
4679
|
description: "The knowledge base instance to list documents from"
|
|
4115
4680
|
}),
|
|
@@ -4123,7 +4688,7 @@ var inputSchema8 = {
|
|
|
4123
4688
|
required: ["knowledgeBase"],
|
|
4124
4689
|
additionalProperties: false
|
|
4125
4690
|
};
|
|
4126
|
-
var
|
|
4691
|
+
var outputSchema9 = {
|
|
4127
4692
|
type: "object",
|
|
4128
4693
|
properties: {
|
|
4129
4694
|
doc_id: {
|
|
@@ -4149,17 +4714,17 @@ var outputSchema8 = {
|
|
|
4149
4714
|
additionalProperties: false
|
|
4150
4715
|
};
|
|
4151
4716
|
|
|
4152
|
-
class KbToDocumentsTask extends
|
|
4717
|
+
class KbToDocumentsTask extends Task10 {
|
|
4153
4718
|
static type = "KbToDocumentsTask";
|
|
4154
4719
|
static category = "Document";
|
|
4155
4720
|
static title = "Knowledge Base to Documents";
|
|
4156
4721
|
static description = "List documents from a knowledge base, optionally filtering to only those that need embedding";
|
|
4157
4722
|
static cacheable = false;
|
|
4158
4723
|
static inputSchema() {
|
|
4159
|
-
return
|
|
4724
|
+
return inputSchema9;
|
|
4160
4725
|
}
|
|
4161
4726
|
static outputSchema() {
|
|
4162
|
-
return
|
|
4727
|
+
return outputSchema9;
|
|
4163
4728
|
}
|
|
4164
4729
|
async execute(input, context) {
|
|
4165
4730
|
const { knowledgeBase, onlyStale = true } = input;
|
|
@@ -4190,16 +4755,16 @@ class KbToDocumentsTask extends Task9 {
|
|
|
4190
4755
|
var kbToDocuments = (input, config) => {
|
|
4191
4756
|
return new KbToDocumentsTask(config).run(input);
|
|
4192
4757
|
};
|
|
4193
|
-
|
|
4758
|
+
Workflow21.prototype.kbToDocuments = CreateWorkflow21(KbToDocumentsTask);
|
|
4194
4759
|
|
|
4195
4760
|
// src/task/ImageClassificationTask.ts
|
|
4196
|
-
import { CreateWorkflow as
|
|
4197
|
-
var
|
|
4761
|
+
import { CreateWorkflow as CreateWorkflow22, Workflow as Workflow22 } from "@workglow/task-graph";
|
|
4762
|
+
var modelSchema17 = TypeModel("model:ImageClassificationTask");
|
|
4198
4763
|
var ImageClassificationInputSchema = {
|
|
4199
4764
|
type: "object",
|
|
4200
4765
|
properties: {
|
|
4201
4766
|
image: TypeImageInput,
|
|
4202
|
-
model:
|
|
4767
|
+
model: modelSchema17,
|
|
4203
4768
|
categories: {
|
|
4204
4769
|
type: "array",
|
|
4205
4770
|
items: {
|
|
@@ -4253,19 +4818,19 @@ class ImageClassificationTask extends AiVisionTask {
|
|
|
4253
4818
|
var imageClassification = (input, config) => {
|
|
4254
4819
|
return new ImageClassificationTask(config).run(input);
|
|
4255
4820
|
};
|
|
4256
|
-
|
|
4821
|
+
Workflow22.prototype.imageClassification = CreateWorkflow22(ImageClassificationTask);
|
|
4257
4822
|
|
|
4258
4823
|
// src/task/ImageEmbeddingTask.ts
|
|
4259
|
-
import { CreateWorkflow as
|
|
4824
|
+
import { CreateWorkflow as CreateWorkflow23, Workflow as Workflow23 } from "@workglow/task-graph";
|
|
4260
4825
|
import {
|
|
4261
4826
|
TypedArraySchema as TypedArraySchema4
|
|
4262
4827
|
} from "@workglow/util/schema";
|
|
4263
|
-
var
|
|
4828
|
+
var modelSchema18 = TypeModel("model:ImageEmbeddingTask");
|
|
4264
4829
|
var ImageEmbeddingInputSchema = {
|
|
4265
4830
|
type: "object",
|
|
4266
4831
|
properties: {
|
|
4267
4832
|
image: TypeSingleOrArray(TypeImageInput),
|
|
4268
|
-
model:
|
|
4833
|
+
model: modelSchema18
|
|
4269
4834
|
},
|
|
4270
4835
|
required: ["image", "model"],
|
|
4271
4836
|
additionalProperties: false
|
|
@@ -4297,16 +4862,16 @@ class ImageEmbeddingTask extends AiVisionTask {
|
|
|
4297
4862
|
var imageEmbedding = (input, config) => {
|
|
4298
4863
|
return new ImageEmbeddingTask(config).run(input);
|
|
4299
4864
|
};
|
|
4300
|
-
|
|
4865
|
+
Workflow23.prototype.imageEmbedding = CreateWorkflow23(ImageEmbeddingTask);
|
|
4301
4866
|
|
|
4302
4867
|
// src/task/ImageSegmentationTask.ts
|
|
4303
|
-
import { CreateWorkflow as
|
|
4304
|
-
var
|
|
4868
|
+
import { CreateWorkflow as CreateWorkflow24, Workflow as Workflow24 } from "@workglow/task-graph";
|
|
4869
|
+
var modelSchema19 = TypeModel("model:ImageSegmentationTask");
|
|
4305
4870
|
var ImageSegmentationInputSchema = {
|
|
4306
4871
|
type: "object",
|
|
4307
4872
|
properties: {
|
|
4308
4873
|
image: TypeImageInput,
|
|
4309
|
-
model:
|
|
4874
|
+
model: modelSchema19,
|
|
4310
4875
|
threshold: {
|
|
4311
4876
|
type: "number",
|
|
4312
4877
|
title: "Threshold",
|
|
@@ -4385,11 +4950,11 @@ class ImageSegmentationTask extends AiVisionTask {
|
|
|
4385
4950
|
var imageSegmentation = (input, config) => {
|
|
4386
4951
|
return new ImageSegmentationTask(config).run(input);
|
|
4387
4952
|
};
|
|
4388
|
-
|
|
4953
|
+
Workflow24.prototype.imageSegmentation = CreateWorkflow24(ImageSegmentationTask);
|
|
4389
4954
|
|
|
4390
4955
|
// src/task/ImageToTextTask.ts
|
|
4391
|
-
import { CreateWorkflow as
|
|
4392
|
-
var
|
|
4956
|
+
import { CreateWorkflow as CreateWorkflow25, Workflow as Workflow25 } from "@workglow/task-graph";
|
|
4957
|
+
var modelSchema20 = TypeModel("model:ImageToTextTask");
|
|
4393
4958
|
var generatedTextSchema = {
|
|
4394
4959
|
type: "string",
|
|
4395
4960
|
title: "Text",
|
|
@@ -4399,7 +4964,7 @@ var ImageToTextInputSchema = {
|
|
|
4399
4964
|
type: "object",
|
|
4400
4965
|
properties: {
|
|
4401
4966
|
image: TypeImageInput,
|
|
4402
|
-
model:
|
|
4967
|
+
model: modelSchema20,
|
|
4403
4968
|
maxTokens: {
|
|
4404
4969
|
type: "number",
|
|
4405
4970
|
title: "Max Tokens",
|
|
@@ -4440,15 +5005,15 @@ class ImageToTextTask extends AiVisionTask {
|
|
|
4440
5005
|
var imageToText = (input, config) => {
|
|
4441
5006
|
return new ImageToTextTask(config).run(input);
|
|
4442
5007
|
};
|
|
4443
|
-
|
|
5008
|
+
Workflow25.prototype.imageToText = CreateWorkflow25(ImageToTextTask);
|
|
4444
5009
|
|
|
4445
5010
|
// src/task/ModelInfoTask.ts
|
|
4446
|
-
import { CreateWorkflow as
|
|
4447
|
-
var
|
|
5011
|
+
import { CreateWorkflow as CreateWorkflow26, Workflow as Workflow26 } from "@workglow/task-graph";
|
|
5012
|
+
var modelSchema21 = TypeModel("model");
|
|
4448
5013
|
var ModelInfoInputSchema = {
|
|
4449
5014
|
type: "object",
|
|
4450
5015
|
properties: {
|
|
4451
|
-
model:
|
|
5016
|
+
model: modelSchema21,
|
|
4452
5017
|
detail: {
|
|
4453
5018
|
type: "string",
|
|
4454
5019
|
enum: ["cached_status", "files", "files_with_metadata", "dimensions"],
|
|
@@ -4461,7 +5026,7 @@ var ModelInfoInputSchema = {
|
|
|
4461
5026
|
var ModelInfoOutputSchema = {
|
|
4462
5027
|
type: "object",
|
|
4463
5028
|
properties: {
|
|
4464
|
-
model:
|
|
5029
|
+
model: modelSchema21,
|
|
4465
5030
|
is_local: { type: "boolean" },
|
|
4466
5031
|
is_remote: { type: "boolean" },
|
|
4467
5032
|
supports_browser: { type: "boolean" },
|
|
@@ -4519,10 +5084,10 @@ class ModelInfoTask extends AiTask {
|
|
|
4519
5084
|
var modelInfo = (input, config) => {
|
|
4520
5085
|
return new ModelInfoTask(config).run(input);
|
|
4521
5086
|
};
|
|
4522
|
-
|
|
5087
|
+
Workflow26.prototype.modelInfo = CreateWorkflow26(ModelInfoTask);
|
|
4523
5088
|
|
|
4524
5089
|
// src/task/ModelSearchTask.ts
|
|
4525
|
-
import { CreateWorkflow as
|
|
5090
|
+
import { CreateWorkflow as CreateWorkflow27, Task as Task11, Workflow as Workflow27 } from "@workglow/task-graph";
|
|
4526
5091
|
var ModelSearchInputSchema = {
|
|
4527
5092
|
type: "object",
|
|
4528
5093
|
properties: {
|
|
@@ -4595,7 +5160,7 @@ var ModelSearchOutputSchema = {
|
|
|
4595
5160
|
additionalProperties: false
|
|
4596
5161
|
};
|
|
4597
5162
|
|
|
4598
|
-
class ModelSearchTask extends
|
|
5163
|
+
class ModelSearchTask extends Task11 {
|
|
4599
5164
|
static type = "ModelSearchTask";
|
|
4600
5165
|
static category = "AI Model";
|
|
4601
5166
|
static title = "Model Search";
|
|
@@ -4621,11 +5186,11 @@ class ModelSearchTask extends Task10 {
|
|
|
4621
5186
|
var modelSearch = (input, config) => {
|
|
4622
5187
|
return new ModelSearchTask(config).run(input);
|
|
4623
5188
|
};
|
|
4624
|
-
|
|
5189
|
+
Workflow27.prototype.modelSearch = CreateWorkflow27(ModelSearchTask);
|
|
4625
5190
|
|
|
4626
5191
|
// src/task/ObjectDetectionTask.ts
|
|
4627
|
-
import { CreateWorkflow as
|
|
4628
|
-
var
|
|
5192
|
+
import { CreateWorkflow as CreateWorkflow28, Workflow as Workflow28 } from "@workglow/task-graph";
|
|
5193
|
+
var modelSchema22 = TypeModel("model:ObjectDetectionTask");
|
|
4629
5194
|
var detectionSchema = {
|
|
4630
5195
|
type: "object",
|
|
4631
5196
|
properties: {
|
|
@@ -4650,7 +5215,7 @@ var ObjectDetectionInputSchema = {
|
|
|
4650
5215
|
type: "object",
|
|
4651
5216
|
properties: {
|
|
4652
5217
|
image: TypeImageInput,
|
|
4653
|
-
model:
|
|
5218
|
+
model: modelSchema22,
|
|
4654
5219
|
labels: {
|
|
4655
5220
|
type: "array",
|
|
4656
5221
|
items: {
|
|
@@ -4704,11 +5269,11 @@ class ObjectDetectionTask extends AiVisionTask {
|
|
|
4704
5269
|
var objectDetection = (input, config) => {
|
|
4705
5270
|
return new ObjectDetectionTask(config).run(input);
|
|
4706
5271
|
};
|
|
4707
|
-
|
|
5272
|
+
Workflow28.prototype.objectDetection = CreateWorkflow28(ObjectDetectionTask);
|
|
4708
5273
|
|
|
4709
5274
|
// src/task/PoseLandmarkerTask.ts
|
|
4710
|
-
import { CreateWorkflow as
|
|
4711
|
-
var
|
|
5275
|
+
import { CreateWorkflow as CreateWorkflow29, Workflow as Workflow29 } from "@workglow/task-graph";
|
|
5276
|
+
var modelSchema23 = TypeModel("model:PoseLandmarkerTask");
|
|
4712
5277
|
var TypeSegmentationMask = {
|
|
4713
5278
|
type: "object",
|
|
4714
5279
|
properties: {
|
|
@@ -4755,7 +5320,7 @@ var PoseLandmarkerInputSchema = {
|
|
|
4755
5320
|
type: "object",
|
|
4756
5321
|
properties: {
|
|
4757
5322
|
image: TypeImageInput,
|
|
4758
|
-
model:
|
|
5323
|
+
model: modelSchema23,
|
|
4759
5324
|
numPoses: {
|
|
4760
5325
|
type: "number",
|
|
4761
5326
|
minimum: 1,
|
|
@@ -4834,15 +5399,15 @@ class PoseLandmarkerTask extends AiVisionTask {
|
|
|
4834
5399
|
var poseLandmarker = (input, config) => {
|
|
4835
5400
|
return new PoseLandmarkerTask(config).run(input);
|
|
4836
5401
|
};
|
|
4837
|
-
|
|
5402
|
+
Workflow29.prototype.poseLandmarker = CreateWorkflow29(PoseLandmarkerTask);
|
|
4838
5403
|
|
|
4839
5404
|
// src/task/QueryExpanderTask.ts
|
|
4840
|
-
import { CreateWorkflow as
|
|
5405
|
+
import { CreateWorkflow as CreateWorkflow30, Task as Task12, Workflow as Workflow30 } from "@workglow/task-graph";
|
|
4841
5406
|
var QueryExpansionMethod = {
|
|
4842
5407
|
MULTI_QUERY: "multi-query",
|
|
4843
5408
|
SYNONYMS: "synonyms"
|
|
4844
5409
|
};
|
|
4845
|
-
var
|
|
5410
|
+
var inputSchema10 = {
|
|
4846
5411
|
type: "object",
|
|
4847
5412
|
properties: {
|
|
4848
5413
|
query: {
|
|
@@ -4869,7 +5434,7 @@ var inputSchema9 = {
|
|
|
4869
5434
|
required: ["query"],
|
|
4870
5435
|
additionalProperties: false
|
|
4871
5436
|
};
|
|
4872
|
-
var
|
|
5437
|
+
var outputSchema10 = {
|
|
4873
5438
|
type: "object",
|
|
4874
5439
|
properties: {
|
|
4875
5440
|
query: {
|
|
@@ -4898,17 +5463,17 @@ var outputSchema9 = {
|
|
|
4898
5463
|
additionalProperties: false
|
|
4899
5464
|
};
|
|
4900
5465
|
|
|
4901
|
-
class QueryExpanderTask extends
|
|
5466
|
+
class QueryExpanderTask extends Task12 {
|
|
4902
5467
|
static type = "QueryExpanderTask";
|
|
4903
5468
|
static category = "RAG";
|
|
4904
5469
|
static title = "Query Expander";
|
|
4905
5470
|
static description = "Expand queries to improve retrieval coverage";
|
|
4906
5471
|
static cacheable = true;
|
|
4907
5472
|
static inputSchema() {
|
|
4908
|
-
return
|
|
5473
|
+
return inputSchema10;
|
|
4909
5474
|
}
|
|
4910
5475
|
static outputSchema() {
|
|
4911
|
-
return
|
|
5476
|
+
return outputSchema10;
|
|
4912
5477
|
}
|
|
4913
5478
|
async execute(input, context) {
|
|
4914
5479
|
const { query, method = QueryExpansionMethod.MULTI_QUERY, numVariations = 3 } = input;
|
|
@@ -5007,11 +5572,11 @@ class QueryExpanderTask extends Task11 {
|
|
|
5007
5572
|
var queryExpander = (input, config) => {
|
|
5008
5573
|
return new QueryExpanderTask(config).run(input);
|
|
5009
5574
|
};
|
|
5010
|
-
|
|
5575
|
+
Workflow30.prototype.queryExpander = CreateWorkflow30(QueryExpanderTask);
|
|
5011
5576
|
|
|
5012
5577
|
// src/task/RerankerTask.ts
|
|
5013
|
-
import { CreateWorkflow as
|
|
5014
|
-
var
|
|
5578
|
+
import { CreateWorkflow as CreateWorkflow31, Task as Task13, Workflow as Workflow31 } from "@workglow/task-graph";
|
|
5579
|
+
var inputSchema11 = {
|
|
5015
5580
|
type: "object",
|
|
5016
5581
|
properties: {
|
|
5017
5582
|
query: {
|
|
@@ -5058,7 +5623,7 @@ var inputSchema10 = {
|
|
|
5058
5623
|
required: ["query", "chunks"],
|
|
5059
5624
|
additionalProperties: false
|
|
5060
5625
|
};
|
|
5061
|
-
var
|
|
5626
|
+
var outputSchema11 = {
|
|
5062
5627
|
type: "object",
|
|
5063
5628
|
properties: {
|
|
5064
5629
|
chunks: {
|
|
@@ -5099,17 +5664,17 @@ var outputSchema10 = {
|
|
|
5099
5664
|
additionalProperties: false
|
|
5100
5665
|
};
|
|
5101
5666
|
|
|
5102
|
-
class RerankerTask extends
|
|
5667
|
+
class RerankerTask extends Task13 {
|
|
5103
5668
|
static type = "RerankerTask";
|
|
5104
5669
|
static category = "RAG";
|
|
5105
5670
|
static title = "Reranker";
|
|
5106
5671
|
static description = "Rerank retrieved chunks to improve relevance";
|
|
5107
5672
|
static cacheable = true;
|
|
5108
5673
|
static inputSchema() {
|
|
5109
|
-
return
|
|
5674
|
+
return inputSchema11;
|
|
5110
5675
|
}
|
|
5111
5676
|
static outputSchema() {
|
|
5112
|
-
return
|
|
5677
|
+
return outputSchema11;
|
|
5113
5678
|
}
|
|
5114
5679
|
async execute(input, context) {
|
|
5115
5680
|
const { query, chunks, scores = [], metadata = [], topK, method = "simple" } = input;
|
|
@@ -5172,13 +5737,13 @@ class RerankerTask extends Task12 {
|
|
|
5172
5737
|
var reranker = (input, config) => {
|
|
5173
5738
|
return new RerankerTask(config).run(input);
|
|
5174
5739
|
};
|
|
5175
|
-
|
|
5740
|
+
Workflow31.prototype.reranker = CreateWorkflow31(RerankerTask);
|
|
5176
5741
|
|
|
5177
5742
|
// src/task/StructuralParserTask.ts
|
|
5178
5743
|
import { StructuralParser } from "@workglow/knowledge-base";
|
|
5179
|
-
import { CreateWorkflow as
|
|
5744
|
+
import { CreateWorkflow as CreateWorkflow32, Task as Task14, Workflow as Workflow32 } from "@workglow/task-graph";
|
|
5180
5745
|
import { uuid4 as uuid42 } from "@workglow/util";
|
|
5181
|
-
var
|
|
5746
|
+
var inputSchema12 = {
|
|
5182
5747
|
type: "object",
|
|
5183
5748
|
properties: {
|
|
5184
5749
|
text: {
|
|
@@ -5212,7 +5777,7 @@ var inputSchema11 = {
|
|
|
5212
5777
|
required: ["text", "title"],
|
|
5213
5778
|
additionalProperties: false
|
|
5214
5779
|
};
|
|
5215
|
-
var
|
|
5780
|
+
var outputSchema12 = {
|
|
5216
5781
|
type: "object",
|
|
5217
5782
|
properties: {
|
|
5218
5783
|
doc_id: {
|
|
@@ -5236,17 +5801,17 @@ var outputSchema11 = {
|
|
|
5236
5801
|
additionalProperties: false
|
|
5237
5802
|
};
|
|
5238
5803
|
|
|
5239
|
-
class StructuralParserTask extends
|
|
5804
|
+
class StructuralParserTask extends Task14 {
|
|
5240
5805
|
static type = "StructuralParserTask";
|
|
5241
5806
|
static category = "Document";
|
|
5242
5807
|
static title = "Structural Parser";
|
|
5243
5808
|
static description = "Parse documents into hierarchical tree structure";
|
|
5244
5809
|
static cacheable = true;
|
|
5245
5810
|
static inputSchema() {
|
|
5246
|
-
return
|
|
5811
|
+
return inputSchema12;
|
|
5247
5812
|
}
|
|
5248
5813
|
static outputSchema() {
|
|
5249
|
-
return
|
|
5814
|
+
return outputSchema12;
|
|
5250
5815
|
}
|
|
5251
5816
|
async execute(input, context) {
|
|
5252
5817
|
const { text, title, format = "auto", sourceUri, doc_id: providedDocId } = input;
|
|
@@ -5279,16 +5844,16 @@ class StructuralParserTask extends Task13 {
|
|
|
5279
5844
|
var structuralParser = (input, config) => {
|
|
5280
5845
|
return new StructuralParserTask(config).run(input);
|
|
5281
5846
|
};
|
|
5282
|
-
|
|
5847
|
+
Workflow32.prototype.structuralParser = CreateWorkflow32(StructuralParserTask);
|
|
5283
5848
|
|
|
5284
5849
|
// src/task/StructuredGenerationTask.ts
|
|
5285
|
-
import { CreateWorkflow as
|
|
5850
|
+
import { CreateWorkflow as CreateWorkflow33, TaskConfigurationError as TaskConfigurationError4, TaskError, Workflow as Workflow33 } from "@workglow/task-graph";
|
|
5286
5851
|
import { compileSchema as compileSchema2 } from "@workglow/util/schema";
|
|
5287
|
-
var
|
|
5852
|
+
var modelSchema24 = TypeModel("model:StructuredGenerationTask");
|
|
5288
5853
|
var StructuredGenerationInputSchema = {
|
|
5289
5854
|
type: "object",
|
|
5290
5855
|
properties: {
|
|
5291
|
-
model:
|
|
5856
|
+
model: modelSchema24,
|
|
5292
5857
|
prompt: {
|
|
5293
5858
|
type: "string",
|
|
5294
5859
|
title: "Prompt",
|
|
@@ -5457,18 +6022,18 @@ class StructuredGenerationTask extends StreamingAiTask {
|
|
|
5457
6022
|
var structuredGeneration = (input, config) => {
|
|
5458
6023
|
return new StructuredGenerationTask(config).run(input);
|
|
5459
6024
|
};
|
|
5460
|
-
|
|
6025
|
+
Workflow33.prototype.structuredGeneration = CreateWorkflow33(StructuredGenerationTask);
|
|
5461
6026
|
|
|
5462
6027
|
// src/task/TextChunkerTask.ts
|
|
5463
6028
|
import { ChunkRecordArraySchema as ChunkRecordArraySchema3 } from "@workglow/knowledge-base";
|
|
5464
|
-
import { CreateWorkflow as
|
|
6029
|
+
import { CreateWorkflow as CreateWorkflow34, Task as Task15, Workflow as Workflow34 } from "@workglow/task-graph";
|
|
5465
6030
|
var ChunkingStrategy = {
|
|
5466
6031
|
FIXED: "fixed",
|
|
5467
6032
|
SENTENCE: "sentence",
|
|
5468
6033
|
PARAGRAPH: "paragraph",
|
|
5469
6034
|
SEMANTIC: "semantic"
|
|
5470
6035
|
};
|
|
5471
|
-
var
|
|
6036
|
+
var inputSchema13 = {
|
|
5472
6037
|
type: "object",
|
|
5473
6038
|
properties: {
|
|
5474
6039
|
text: {
|
|
@@ -5506,7 +6071,7 @@ var inputSchema12 = {
|
|
|
5506
6071
|
required: ["text"],
|
|
5507
6072
|
additionalProperties: false
|
|
5508
6073
|
};
|
|
5509
|
-
var
|
|
6074
|
+
var outputSchema13 = {
|
|
5510
6075
|
type: "object",
|
|
5511
6076
|
properties: {
|
|
5512
6077
|
doc_id: {
|
|
@@ -5531,17 +6096,17 @@ var outputSchema12 = {
|
|
|
5531
6096
|
additionalProperties: false
|
|
5532
6097
|
};
|
|
5533
6098
|
|
|
5534
|
-
class TextChunkerTask extends
|
|
6099
|
+
class TextChunkerTask extends Task15 {
|
|
5535
6100
|
static type = "TextChunkerTask";
|
|
5536
6101
|
static category = "Document";
|
|
5537
6102
|
static title = "Text Chunker";
|
|
5538
6103
|
static description = "Splits text into chunks using various strategies (fixed, sentence, paragraph)";
|
|
5539
6104
|
static cacheable = true;
|
|
5540
6105
|
static inputSchema() {
|
|
5541
|
-
return
|
|
6106
|
+
return inputSchema13;
|
|
5542
6107
|
}
|
|
5543
6108
|
static outputSchema() {
|
|
5544
|
-
return
|
|
6109
|
+
return outputSchema13;
|
|
5545
6110
|
}
|
|
5546
6111
|
async execute(input, context) {
|
|
5547
6112
|
const {
|
|
@@ -5711,11 +6276,11 @@ class TextChunkerTask extends Task14 {
|
|
|
5711
6276
|
var textChunker = (input, config) => {
|
|
5712
6277
|
return new TextChunkerTask(config).run(input);
|
|
5713
6278
|
};
|
|
5714
|
-
|
|
6279
|
+
Workflow34.prototype.textChunker = CreateWorkflow34(TextChunkerTask);
|
|
5715
6280
|
|
|
5716
6281
|
// src/task/TextClassificationTask.ts
|
|
5717
|
-
import { CreateWorkflow as
|
|
5718
|
-
var
|
|
6282
|
+
import { CreateWorkflow as CreateWorkflow35, Workflow as Workflow35 } from "@workglow/task-graph";
|
|
6283
|
+
var modelSchema25 = TypeModel("model:TextClassificationTask");
|
|
5719
6284
|
var TextClassificationInputSchema = {
|
|
5720
6285
|
type: "object",
|
|
5721
6286
|
properties: {
|
|
@@ -5742,7 +6307,7 @@ var TextClassificationInputSchema = {
|
|
|
5742
6307
|
description: "The maximum number of categories to return",
|
|
5743
6308
|
"x-ui-group": "Configuration"
|
|
5744
6309
|
},
|
|
5745
|
-
model:
|
|
6310
|
+
model: modelSchema25
|
|
5746
6311
|
},
|
|
5747
6312
|
required: ["text", "model"],
|
|
5748
6313
|
additionalProperties: false
|
|
@@ -5792,11 +6357,11 @@ class TextClassificationTask extends AiTask {
|
|
|
5792
6357
|
var textClassification = (input, config) => {
|
|
5793
6358
|
return new TextClassificationTask(config).run(input);
|
|
5794
6359
|
};
|
|
5795
|
-
|
|
6360
|
+
Workflow35.prototype.textClassification = CreateWorkflow35(TextClassificationTask);
|
|
5796
6361
|
|
|
5797
6362
|
// src/task/TextFillMaskTask.ts
|
|
5798
|
-
import { CreateWorkflow as
|
|
5799
|
-
var
|
|
6363
|
+
import { CreateWorkflow as CreateWorkflow36, Workflow as Workflow36 } from "@workglow/task-graph";
|
|
6364
|
+
var modelSchema26 = TypeModel("model:TextFillMaskTask");
|
|
5800
6365
|
var TextFillMaskInputSchema = {
|
|
5801
6366
|
type: "object",
|
|
5802
6367
|
properties: {
|
|
@@ -5805,7 +6370,7 @@ var TextFillMaskInputSchema = {
|
|
|
5805
6370
|
title: "Text",
|
|
5806
6371
|
description: "The text with a mask token to fill"
|
|
5807
6372
|
},
|
|
5808
|
-
model:
|
|
6373
|
+
model: modelSchema26
|
|
5809
6374
|
},
|
|
5810
6375
|
required: ["text", "model"],
|
|
5811
6376
|
additionalProperties: false
|
|
@@ -5860,21 +6425,21 @@ class TextFillMaskTask extends AiTask {
|
|
|
5860
6425
|
var textFillMask = (input, config) => {
|
|
5861
6426
|
return new TextFillMaskTask(config).run(input);
|
|
5862
6427
|
};
|
|
5863
|
-
|
|
6428
|
+
Workflow36.prototype.textFillMask = CreateWorkflow36(TextFillMaskTask);
|
|
5864
6429
|
|
|
5865
6430
|
// src/task/TextGenerationTask.ts
|
|
5866
|
-
import { CreateWorkflow as
|
|
6431
|
+
import { CreateWorkflow as CreateWorkflow37, Workflow as Workflow37 } from "@workglow/task-graph";
|
|
5867
6432
|
var generatedTextSchema2 = {
|
|
5868
6433
|
type: "string",
|
|
5869
6434
|
title: "Text",
|
|
5870
6435
|
description: "The generated text",
|
|
5871
6436
|
"x-stream": "append"
|
|
5872
6437
|
};
|
|
5873
|
-
var
|
|
6438
|
+
var modelSchema27 = TypeModel("model:TextGenerationTask");
|
|
5874
6439
|
var TextGenerationInputSchema = {
|
|
5875
6440
|
type: "object",
|
|
5876
6441
|
properties: {
|
|
5877
|
-
model:
|
|
6442
|
+
model: modelSchema27,
|
|
5878
6443
|
prompt: {
|
|
5879
6444
|
type: "string",
|
|
5880
6445
|
title: "Prompt",
|
|
@@ -5949,11 +6514,11 @@ class TextGenerationTask extends StreamingAiTask {
|
|
|
5949
6514
|
var textGeneration = (input, config) => {
|
|
5950
6515
|
return new TextGenerationTask(config).run(input);
|
|
5951
6516
|
};
|
|
5952
|
-
|
|
6517
|
+
Workflow37.prototype.textGeneration = CreateWorkflow37(TextGenerationTask);
|
|
5953
6518
|
|
|
5954
6519
|
// src/task/TextLanguageDetectionTask.ts
|
|
5955
|
-
import { CreateWorkflow as
|
|
5956
|
-
var
|
|
6520
|
+
import { CreateWorkflow as CreateWorkflow38, Workflow as Workflow38 } from "@workglow/task-graph";
|
|
6521
|
+
var modelSchema28 = TypeModel("model:TextLanguageDetectionTask");
|
|
5957
6522
|
var TextLanguageDetectionInputSchema = {
|
|
5958
6523
|
type: "object",
|
|
5959
6524
|
properties: {
|
|
@@ -5970,7 +6535,7 @@ var TextLanguageDetectionInputSchema = {
|
|
|
5970
6535
|
title: "Max Languages",
|
|
5971
6536
|
description: "The maximum number of languages to return"
|
|
5972
6537
|
},
|
|
5973
|
-
model:
|
|
6538
|
+
model: modelSchema28
|
|
5974
6539
|
},
|
|
5975
6540
|
required: ["text", "model"],
|
|
5976
6541
|
additionalProperties: false
|
|
@@ -6020,10 +6585,10 @@ class TextLanguageDetectionTask extends AiTask {
|
|
|
6020
6585
|
var textLanguageDetection = (input, config) => {
|
|
6021
6586
|
return new TextLanguageDetectionTask(config).run(input);
|
|
6022
6587
|
};
|
|
6023
|
-
|
|
6588
|
+
Workflow38.prototype.textLanguageDetection = CreateWorkflow38(TextLanguageDetectionTask);
|
|
6024
6589
|
|
|
6025
6590
|
// src/task/TextQuestionAnswerTask.ts
|
|
6026
|
-
import { CreateWorkflow as
|
|
6591
|
+
import { CreateWorkflow as CreateWorkflow39, Workflow as Workflow39 } from "@workglow/task-graph";
|
|
6027
6592
|
var contextSchema = {
|
|
6028
6593
|
type: "string",
|
|
6029
6594
|
title: "Context",
|
|
@@ -6040,13 +6605,13 @@ var textSchema = {
|
|
|
6040
6605
|
description: "The generated text",
|
|
6041
6606
|
"x-stream": "append"
|
|
6042
6607
|
};
|
|
6043
|
-
var
|
|
6608
|
+
var modelSchema29 = TypeModel("model:TextQuestionAnswerTask");
|
|
6044
6609
|
var TextQuestionAnswerInputSchema = {
|
|
6045
6610
|
type: "object",
|
|
6046
6611
|
properties: {
|
|
6047
6612
|
context: contextSchema,
|
|
6048
6613
|
question: questionSchema,
|
|
6049
|
-
model:
|
|
6614
|
+
model: modelSchema29
|
|
6050
6615
|
},
|
|
6051
6616
|
required: ["context", "question", "model"],
|
|
6052
6617
|
additionalProperties: false
|
|
@@ -6076,11 +6641,11 @@ class TextQuestionAnswerTask extends StreamingAiTask {
|
|
|
6076
6641
|
var textQuestionAnswer = (input, config) => {
|
|
6077
6642
|
return new TextQuestionAnswerTask(config).run(input);
|
|
6078
6643
|
};
|
|
6079
|
-
|
|
6644
|
+
Workflow39.prototype.textQuestionAnswer = CreateWorkflow39(TextQuestionAnswerTask);
|
|
6080
6645
|
|
|
6081
6646
|
// src/task/TextRewriterTask.ts
|
|
6082
|
-
import { CreateWorkflow as
|
|
6083
|
-
var
|
|
6647
|
+
import { CreateWorkflow as CreateWorkflow40, Workflow as Workflow40 } from "@workglow/task-graph";
|
|
6648
|
+
var modelSchema30 = TypeModel("model:TextRewriterTask");
|
|
6084
6649
|
var TextRewriterInputSchema = {
|
|
6085
6650
|
type: "object",
|
|
6086
6651
|
properties: {
|
|
@@ -6094,7 +6659,7 @@ var TextRewriterInputSchema = {
|
|
|
6094
6659
|
title: "Prompt",
|
|
6095
6660
|
description: "The prompt to direct the rewriting"
|
|
6096
6661
|
},
|
|
6097
|
-
model:
|
|
6662
|
+
model: modelSchema30
|
|
6098
6663
|
},
|
|
6099
6664
|
required: ["text", "prompt", "model"],
|
|
6100
6665
|
additionalProperties: false
|
|
@@ -6129,11 +6694,11 @@ class TextRewriterTask extends StreamingAiTask {
|
|
|
6129
6694
|
var textRewriter = (input, config) => {
|
|
6130
6695
|
return new TextRewriterTask(config).run(input);
|
|
6131
6696
|
};
|
|
6132
|
-
|
|
6697
|
+
Workflow40.prototype.textRewriter = CreateWorkflow40(TextRewriterTask);
|
|
6133
6698
|
|
|
6134
6699
|
// src/task/TextTranslationTask.ts
|
|
6135
|
-
import { CreateWorkflow as
|
|
6136
|
-
var
|
|
6700
|
+
import { CreateWorkflow as CreateWorkflow41, Workflow as Workflow41 } from "@workglow/task-graph";
|
|
6701
|
+
var modelSchema31 = TypeModel("model:TextTranslationTask");
|
|
6137
6702
|
var translationTextSchema = {
|
|
6138
6703
|
type: "string",
|
|
6139
6704
|
title: "Text",
|
|
@@ -6160,7 +6725,7 @@ var TextTranslationInputSchema = {
|
|
|
6160
6725
|
minLength: 2,
|
|
6161
6726
|
maxLength: 2
|
|
6162
6727
|
}),
|
|
6163
|
-
model:
|
|
6728
|
+
model: modelSchema31
|
|
6164
6729
|
},
|
|
6165
6730
|
required: ["text", "source_lang", "target_lang", "model"],
|
|
6166
6731
|
additionalProperties: false
|
|
@@ -6196,10 +6761,10 @@ class TextTranslationTask extends StreamingAiTask {
|
|
|
6196
6761
|
var textTranslation = (input, config) => {
|
|
6197
6762
|
return new TextTranslationTask(config).run(input);
|
|
6198
6763
|
};
|
|
6199
|
-
|
|
6764
|
+
Workflow41.prototype.textTranslation = CreateWorkflow41(TextTranslationTask);
|
|
6200
6765
|
|
|
6201
6766
|
// src/task/ToolCallingTask.ts
|
|
6202
|
-
import { CreateWorkflow as
|
|
6767
|
+
import { CreateWorkflow as CreateWorkflow42, getTaskConstructors, Workflow as Workflow42 } from "@workglow/task-graph";
|
|
6203
6768
|
import { makeFingerprint } from "@workglow/util";
|
|
6204
6769
|
function taskTypesToTools(taskNames, registry) {
|
|
6205
6770
|
const constructors = getTaskConstructors(registry);
|
|
@@ -6283,11 +6848,11 @@ var ToolCallSchema = {
|
|
|
6283
6848
|
required: ["id", "name", "input"],
|
|
6284
6849
|
additionalProperties: false
|
|
6285
6850
|
};
|
|
6286
|
-
var
|
|
6851
|
+
var modelSchema32 = TypeModel("model:ToolCallingTask");
|
|
6287
6852
|
var ToolCallingInputSchema = {
|
|
6288
6853
|
type: "object",
|
|
6289
6854
|
properties: {
|
|
6290
|
-
model:
|
|
6855
|
+
model: modelSchema32,
|
|
6291
6856
|
prompt: {
|
|
6292
6857
|
oneOf: [
|
|
6293
6858
|
{ type: "string", title: "Prompt", description: "The prompt to send to the model" },
|
|
@@ -6432,16 +6997,16 @@ class ToolCallingTask extends StreamingAiTask {
|
|
|
6432
6997
|
var toolCalling = (input, config) => {
|
|
6433
6998
|
return new ToolCallingTask(config).run(input);
|
|
6434
6999
|
};
|
|
6435
|
-
|
|
7000
|
+
Workflow42.prototype.toolCalling = CreateWorkflow42(ToolCallingTask);
|
|
6436
7001
|
|
|
6437
7002
|
// src/task/TopicSegmenterTask.ts
|
|
6438
|
-
import { CreateWorkflow as
|
|
7003
|
+
import { CreateWorkflow as CreateWorkflow43, Task as Task16, Workflow as Workflow43 } from "@workglow/task-graph";
|
|
6439
7004
|
var SegmentationMethod = {
|
|
6440
7005
|
HEURISTIC: "heuristic",
|
|
6441
7006
|
EMBEDDING_SIMILARITY: "embedding-similarity",
|
|
6442
7007
|
HYBRID: "hybrid"
|
|
6443
7008
|
};
|
|
6444
|
-
var
|
|
7009
|
+
var inputSchema14 = {
|
|
6445
7010
|
type: "object",
|
|
6446
7011
|
properties: {
|
|
6447
7012
|
text: {
|
|
@@ -6482,7 +7047,7 @@ var inputSchema13 = {
|
|
|
6482
7047
|
required: ["text"],
|
|
6483
7048
|
additionalProperties: false
|
|
6484
7049
|
};
|
|
6485
|
-
var
|
|
7050
|
+
var outputSchema14 = {
|
|
6486
7051
|
type: "object",
|
|
6487
7052
|
properties: {
|
|
6488
7053
|
segments: {
|
|
@@ -6510,7 +7075,7 @@ var outputSchema13 = {
|
|
|
6510
7075
|
additionalProperties: false
|
|
6511
7076
|
};
|
|
6512
7077
|
|
|
6513
|
-
class TopicSegmenterTask extends
|
|
7078
|
+
class TopicSegmenterTask extends Task16 {
|
|
6514
7079
|
static type = "TopicSegmenterTask";
|
|
6515
7080
|
static category = "Document";
|
|
6516
7081
|
static title = "Topic Segmenter";
|
|
@@ -6518,10 +7083,10 @@ class TopicSegmenterTask extends Task15 {
|
|
|
6518
7083
|
static cacheable = true;
|
|
6519
7084
|
static EMBEDDING_DIMENSIONS = 256;
|
|
6520
7085
|
static inputSchema() {
|
|
6521
|
-
return
|
|
7086
|
+
return inputSchema14;
|
|
6522
7087
|
}
|
|
6523
7088
|
static outputSchema() {
|
|
6524
|
-
return
|
|
7089
|
+
return outputSchema14;
|
|
6525
7090
|
}
|
|
6526
7091
|
async execute(input, context) {
|
|
6527
7092
|
const {
|
|
@@ -6715,15 +7280,15 @@ class TopicSegmenterTask extends Task15 {
|
|
|
6715
7280
|
var topicSegmenter = (input, config) => {
|
|
6716
7281
|
return new TopicSegmenterTask(config).run(input);
|
|
6717
7282
|
};
|
|
6718
|
-
|
|
7283
|
+
Workflow43.prototype.topicSegmenter = CreateWorkflow43(TopicSegmenterTask);
|
|
6719
7284
|
|
|
6720
7285
|
// src/task/UnloadModelTask.ts
|
|
6721
|
-
import { CreateWorkflow as
|
|
6722
|
-
var
|
|
7286
|
+
import { CreateWorkflow as CreateWorkflow44, Workflow as Workflow44 } from "@workglow/task-graph";
|
|
7287
|
+
var modelSchema33 = TypeModel("model");
|
|
6723
7288
|
var UnloadModelInputSchema = {
|
|
6724
7289
|
type: "object",
|
|
6725
7290
|
properties: {
|
|
6726
|
-
model:
|
|
7291
|
+
model: modelSchema33
|
|
6727
7292
|
},
|
|
6728
7293
|
required: ["model"],
|
|
6729
7294
|
additionalProperties: false
|
|
@@ -6731,7 +7296,7 @@ var UnloadModelInputSchema = {
|
|
|
6731
7296
|
var UnloadModelOutputSchema = {
|
|
6732
7297
|
type: "object",
|
|
6733
7298
|
properties: {
|
|
6734
|
-
model:
|
|
7299
|
+
model: modelSchema33
|
|
6735
7300
|
},
|
|
6736
7301
|
required: ["model"],
|
|
6737
7302
|
additionalProperties: false
|
|
@@ -6753,16 +7318,16 @@ class UnloadModelTask extends AiTask {
|
|
|
6753
7318
|
var unloadModel = (input, config) => {
|
|
6754
7319
|
return new UnloadModelTask(config).run(input);
|
|
6755
7320
|
};
|
|
6756
|
-
|
|
7321
|
+
Workflow44.prototype.unloadModel = CreateWorkflow44(UnloadModelTask);
|
|
6757
7322
|
|
|
6758
7323
|
// src/task/VectorQuantizeTask.ts
|
|
6759
|
-
import { CreateWorkflow as
|
|
7324
|
+
import { CreateWorkflow as CreateWorkflow45, Task as Task17, Workflow as Workflow45 } from "@workglow/task-graph";
|
|
6760
7325
|
import {
|
|
6761
7326
|
normalizeNumberArray,
|
|
6762
7327
|
TensorType,
|
|
6763
7328
|
TypedArraySchema as TypedArraySchema5
|
|
6764
7329
|
} from "@workglow/util/schema";
|
|
6765
|
-
var
|
|
7330
|
+
var inputSchema15 = {
|
|
6766
7331
|
type: "object",
|
|
6767
7332
|
properties: {
|
|
6768
7333
|
vector: {
|
|
@@ -6799,7 +7364,7 @@ var inputSchema14 = {
|
|
|
6799
7364
|
required: ["vector", "targetType"],
|
|
6800
7365
|
additionalProperties: false
|
|
6801
7366
|
};
|
|
6802
|
-
var
|
|
7367
|
+
var outputSchema15 = {
|
|
6803
7368
|
type: "object",
|
|
6804
7369
|
properties: {
|
|
6805
7370
|
vector: {
|
|
@@ -6836,17 +7401,17 @@ var outputSchema14 = {
|
|
|
6836
7401
|
additionalProperties: false
|
|
6837
7402
|
};
|
|
6838
7403
|
|
|
6839
|
-
class VectorQuantizeTask extends
|
|
7404
|
+
class VectorQuantizeTask extends Task17 {
|
|
6840
7405
|
static type = "VectorQuantizeTask";
|
|
6841
7406
|
static category = "Vector";
|
|
6842
7407
|
static title = "Quantize";
|
|
6843
7408
|
static description = "Quantize vectors to reduce storage and improve performance";
|
|
6844
7409
|
static cacheable = true;
|
|
6845
7410
|
static inputSchema() {
|
|
6846
|
-
return
|
|
7411
|
+
return inputSchema15;
|
|
6847
7412
|
}
|
|
6848
7413
|
static outputSchema() {
|
|
6849
|
-
return
|
|
7414
|
+
return outputSchema15;
|
|
6850
7415
|
}
|
|
6851
7416
|
async execute(input) {
|
|
6852
7417
|
return this.executePreview(input);
|
|
@@ -6939,10 +7504,10 @@ class VectorQuantizeTask extends Task16 {
|
|
|
6939
7504
|
// Convenience one-shot runner: builds a VectorQuantizeTask from `config`
// and executes it against `input`, returning the task's result promise.
var vectorQuantize = (input, config) => new VectorQuantizeTask(config).run(input);
// Expose the task as a chainable method on Workflow instances.
Workflow45.prototype.vectorQuantize = CreateWorkflow45(VectorQuantizeTask);
|
|
6943
7508
|
|
|
6944
7509
|
// src/task/VectorSimilarityTask.ts
|
|
6945
|
-
import { CreateWorkflow as
|
|
7510
|
+
import { CreateWorkflow as CreateWorkflow46, GraphAsTask, Workflow as Workflow46 } from "@workglow/task-graph";
|
|
6946
7511
|
import {
|
|
6947
7512
|
cosineSimilarity,
|
|
6948
7513
|
hammingSimilarity,
|
|
@@ -7056,7 +7621,7 @@ class VectorSimilarityTask extends GraphAsTask {
|
|
|
7056
7621
|
// Convenience one-shot runner: builds a VectorSimilarityTask from `config`
// and executes it against `input`, returning the task's result promise.
var similarity = (input, config) => new VectorSimilarityTask(config).run(input);
// Expose the task as a chainable method on Workflow instances.
Workflow46.prototype.similarity = CreateWorkflow46(VectorSimilarityTask);
|
|
7060
7625
|
// src/task/MessageConversion.ts
|
|
7061
7626
|
function getInputMessages(input) {
|
|
7062
7627
|
const messages = input.messages;
|
|
@@ -7209,6 +7774,7 @@ function toTextFlatMessages(input) {
|
|
|
7209
7774
|
var registerAiTasks = () => {
|
|
7210
7775
|
const tasks = [
|
|
7211
7776
|
AiChatTask,
|
|
7777
|
+
AiChatWithKbTask,
|
|
7212
7778
|
BackgroundRemovalTask,
|
|
7213
7779
|
CountTokensTask,
|
|
7214
7780
|
ContextBuilderTask,
|
|
@@ -7225,6 +7791,7 @@ var registerAiTasks = () => {
|
|
|
7225
7791
|
HandLandmarkerTask,
|
|
7226
7792
|
HierarchicalChunkerTask,
|
|
7227
7793
|
HierarchyJoinTask,
|
|
7794
|
+
KbSearchTask,
|
|
7228
7795
|
KbToDocumentsTask,
|
|
7229
7796
|
ImageClassificationTask,
|
|
7230
7797
|
ImageEmbeddingTask,
|
|
@@ -7294,6 +7861,7 @@ export {
|
|
|
7294
7861
|
modelSearch,
|
|
7295
7862
|
modelInfo,
|
|
7296
7863
|
kbToDocuments,
|
|
7864
|
+
kbSearch,
|
|
7297
7865
|
isContentBlockInToolResultBody,
|
|
7298
7866
|
isContentBlock,
|
|
7299
7867
|
isChatMessage,
|
|
@@ -7322,6 +7890,7 @@ export {
|
|
|
7322
7890
|
chunkVectorUpsert,
|
|
7323
7891
|
chunkRetrieval,
|
|
7324
7892
|
buildToolDescription,
|
|
7893
|
+
buildResponseFormatAddendum,
|
|
7325
7894
|
backgroundRemoval,
|
|
7326
7895
|
VectorSimilarityTask,
|
|
7327
7896
|
VectorQuantizeTask,
|
|
@@ -7401,6 +7970,8 @@ export {
|
|
|
7401
7970
|
ModelConfigSchema,
|
|
7402
7971
|
MODEL_REPOSITORY,
|
|
7403
7972
|
KbToDocumentsTask,
|
|
7973
|
+
KbSearchTask,
|
|
7974
|
+
KB_INLINE_CITATION_DIRECTIVE,
|
|
7404
7975
|
InMemoryModelRepository,
|
|
7405
7976
|
ImageToTextTask,
|
|
7406
7977
|
ImageToTextOutputSchema,
|
|
@@ -7459,10 +8030,13 @@ export {
|
|
|
7459
8030
|
AiProvider,
|
|
7460
8031
|
AiJob,
|
|
7461
8032
|
AiImageOutputTask,
|
|
8033
|
+
AiChatWithKbTask,
|
|
8034
|
+
AiChatWithKbOutputSchema,
|
|
8035
|
+
AiChatWithKbInputSchema,
|
|
7462
8036
|
AiChatTask,
|
|
7463
8037
|
AiChatOutputSchema,
|
|
7464
8038
|
AiChatInputSchema,
|
|
7465
8039
|
AI_PROVIDER_REGISTRY
|
|
7466
8040
|
};
|
|
7467
8041
|
|
|
7468
|
-
//# debugId=
|
|
8042
|
+
//# debugId=79B4A8A40215A1E864756E2164756E21
|