@upstash/workflow 0.2.5-agents → 0.2.5-agents-2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/astro.d.mts +1 -2
- package/astro.d.ts +1 -2
- package/astro.js +89 -36
- package/astro.mjs +1 -2
- package/{chunk-RFX5YRRT.mjs → chunk-VOM3CFYZ.mjs} +321 -27
- package/cloudflare.d.mts +1 -2
- package/cloudflare.d.ts +1 -2
- package/cloudflare.js +89 -36
- package/cloudflare.mjs +1 -2
- package/express.d.mts +1 -2
- package/express.d.ts +1 -2
- package/express.js +89 -36
- package/express.mjs +3 -5
- package/h3.d.mts +1 -2
- package/h3.d.ts +1 -2
- package/h3.js +89 -36
- package/h3.mjs +1 -2
- package/hono.d.mts +1 -2
- package/hono.d.ts +1 -2
- package/hono.js +89 -36
- package/hono.mjs +1 -2
- package/index.d.mts +2 -3
- package/index.d.ts +2 -3
- package/index.js +89 -36
- package/index.mjs +1 -2
- package/nextjs.d.mts +1 -2
- package/nextjs.d.ts +1 -2
- package/nextjs.js +89 -36
- package/nextjs.mjs +1 -2
- package/package.json +1 -1
- package/solidjs.d.mts +1 -2
- package/solidjs.d.ts +1 -2
- package/solidjs.js +89 -36
- package/solidjs.mjs +1 -2
- package/svelte.d.mts +1 -2
- package/svelte.d.ts +1 -2
- package/svelte.js +89 -36
- package/svelte.mjs +1 -2
- package/{types-BEyIoCRe.d.mts → types-D9gwTj2n.d.mts} +175 -13
- package/{types-BEyIoCRe.d.ts → types-D9gwTj2n.d.ts} +175 -13
- package/agents.d.mts +0 -5
- package/agents.d.ts +0 -5
- package/agents.js +0 -245
- package/agents.mjs +0 -12
- package/chunk-PU5J4TNC.mjs +0 -251
package/solidjs.js
CHANGED
@@ -826,29 +826,16 @@ var triggerWorkflowDelete = async (workflowContext, debug, cancel = false) => {
   await debug?.log("SUBMIT", "SUBMIT_CLEANUP", {
     deletedWorkflowRunId: workflowContext.workflowRunId
   });
-
-
-
-
-
-
-
-
-
-
-  );
-  return { deleted: true };
-} catch (error) {
-  if (error instanceof import_qstash3.QstashError && error.status === 404) {
-    await debug?.log("WARN", "SUBMIT_CLEANUP", {
-      message: `Failed to remove workflow run ${workflowContext.workflowRunId} as it doesn't exist.`,
-      name: error.name,
-      errorMessage: error.message
-    });
-    return { deleted: false };
-  }
-  throw error;
-}
+  await workflowContext.qstashClient.http.request({
+    path: ["v2", "workflows", "runs", `${workflowContext.workflowRunId}?cancel=${cancel}`],
+    method: "DELETE",
+    parseResponseAsJson: false
+  });
+  await debug?.log(
+    "SUBMIT",
+    "SUBMIT_CLEANUP",
+    `workflow run ${workflowContext.workflowRunId} deleted.`
+  );
 };
 var recreateUserHeaders = (headers) => {
   const filteredHeaders = new Headers();
@@ -1635,7 +1622,22 @@ var WorkflowApi = class extends BaseWorkflowApi {
 // src/agents/adapters.ts
 var import_openai2 = require("@ai-sdk/openai");
 var import_ai = require("ai");
+
+// src/agents/constants.ts
 var AGENT_NAME_HEADER = "upstash-agent-name";
+var MANAGER_AGENT_PROMPT = `You are an agent orchestrating other AI Agents.
+
+These other agents have tools available to them.
+
+Given a prompt, utilize these agents to address requests.
+
+Don't always call all the agents provided to you at the same time. You can call one and use it's response to call another.
+
+Avoid calling the same agent twice in one turn. Instead, prefer to call it once but provide everything
+you need from that agent.
+`;
+
+// src/agents/adapters.ts
 var createWorkflowOpenAI = (context) => {
   return (0, import_openai2.createOpenAI)({
     compatibility: "strict",
@@ -1702,8 +1704,7 @@ var convertLangchainTool = (langchainTool) => {
   return (0, import_ai.tool)({
     description: langchainTool.description,
     parameters: langchainTool.schema,
-
-    execute: async (param) => langchainTool.invoke(param)
+    execute: async (...param) => langchainTool.invoke(...param)
   });
 };
 
@@ -1716,16 +1717,24 @@ var Agent = class {
   maxSteps;
   background;
   model;
-
+  temparature;
+  constructor({ tools, maxSteps, background, name, model, temparature = 0.1 }) {
     this.name = name;
     this.tools = tools ?? {};
     this.maxSteps = maxSteps;
     this.background = background;
     this.model = model;
+    this.temparature = temparature;
   }
+  /**
+   * Trigger the agent by passing a prompt
+   *
+   * @param prompt task to assign to the agent
+   * @returns Response as `{ text: string }`
+   */
   async call({ prompt }) {
     try {
-
+      const result = await (0, import_ai2.generateText)({
         model: this.model,
         tools: this.tools,
         maxSteps: this.maxSteps,
@@ -1733,8 +1742,10 @@ var Agent = class {
         prompt,
         headers: {
           [AGENT_NAME_HEADER]: this.name
-        }
+        },
+        temperature: this.temparature
       });
+      return { text: result.text };
     } catch (error) {
       if (error instanceof import_ai2.ToolExecutionError) {
         if (error.cause instanceof Error && error.cause.name === "WorkflowAbort") {
@@ -1749,6 +1760,11 @@ var Agent = class {
       }
     }
   }
+  /**
+   * Convert the agent to a tool which can be used by other agents.
+   *
+   * @returns the agent as a tool
+   */
   asTool() {
     const toolDescriptions = Object.values(this.tools).map((tool3) => tool3.description).join("\n");
     return (0, import_ai2.tool)({
@@ -1760,18 +1776,25 @@ var Agent = class {
     });
   }
 };
-var MANAGER_AGENT_PROMPT = `You are an AI agent who orchestrates other AI Agents.
-These other agents have tools available to them.
-Given a prompt, utilize these agents to address requests.
-Don't always call all the agents provided to you at the same time. You can call one and use it's response to call another.
-`;
 var ManagerAgent = class extends Agent {
   agents;
+  /**
+   * A manager agent which coordinates agents available to it to achieve a
+   * given task
+   *
+   * @param name Name of the agent
+   * @param background Background of the agent. If not passed, default will be used.
+   * @param model LLM model to use
+   * @param agents: List of agents available to the agent
+   * @param maxSteps number of times the manager agent can call the LLM at most.
+   * If the agent abruptly stops execution after calling other agents, you may
+   * need to increase maxSteps
+   */
   constructor({
-    maxSteps,
-    background = MANAGER_AGENT_PROMPT,
     agents,
+    background = MANAGER_AGENT_PROMPT,
     model,
+    maxSteps,
     name = "manager llm"
   }) {
     super({
@@ -1796,6 +1819,11 @@ var Task = class {
     this.context = context;
     this.taskParameters = taskParameters;
   }
+  /**
+   * Run the agents to complete the task
+   *
+   * @returns Result of the task as { text: string }
+   */
   async run() {
     const { prompt, ...otherParams } = this.taskParameters;
     const safePrompt = await this.context.run("Get Prompt", () => prompt);
@@ -1826,6 +1854,29 @@ var WorkflowAgents = class {
   constructor({ context }) {
     this.context = context;
   }
+  /**
+   * Defines an agent
+   *
+   * ```ts
+   * const researcherAgent = context.agents.agent({
+   *   model,
+   *   name: 'academic',
+   *   maxSteps: 2,
+   *   tools: {
+   *     wikiTool: new WikipediaQueryRun({
+   *       topKResults: 1,
+   *       maxDocContentLength: 500,
+   *     })
+   *   },
+   *   background:
+   *     'You are researcher agent with access to Wikipedia. ' +
+   *     'Utilize Wikipedia as much as possible for correct information',
+   * });
+   * ```
+   *
+   * @param params agent parameters
+   * @returns
+   */
   agent(params) {
     const wrappedTools = wrapTools({ context: this.context, tools: params.tools });
     return new Agent({
@@ -1836,6 +1887,9 @@ var WorkflowAgents = class {
   task(taskParameters) {
     return new Task({ context: this.context, taskParameters });
   }
+  /**
+   * creates an openai model for agents
+   */
   openai(...params) {
     const openai2 = createWorkflowOpenAI(this.context);
     return openai2(...params);
@@ -2436,7 +2490,6 @@ var checkIfLastOneIsDuplicate = async (steps, debug) => {
     if (step.stepId === lastStepId && step.targetStep === lastTargetStepId) {
       const message = `Upstash Workflow: The step '${step.stepName}' with id '${step.stepId}' has run twice during workflow execution. Rest of the workflow will continue running as usual.`;
       await debug?.log("WARN", "RESPONSE_DEFAULT", message);
-      console.log(steps);
       console.warn(message);
       return true;
     }
package/solidjs.mjs
CHANGED
package/svelte.d.mts
CHANGED
@@ -1,9 +1,8 @@
 import { RequestHandler } from '@sveltejs/kit';
-import { R as RouteFunction, j as PublicServeOptions } from './types-BEyIoCRe.mjs';
+import { R as RouteFunction, j as PublicServeOptions } from './types-D9gwTj2n.mjs';
 import '@upstash/qstash';
 import 'ai';
 import '@ai-sdk/openai';
-import 'langchain/tools';
 
 /**
  * Serve method to serve a Upstash Workflow in a Nextjs project
package/svelte.d.ts
CHANGED
@@ -1,9 +1,8 @@
 import { RequestHandler } from '@sveltejs/kit';
-import { R as RouteFunction, j as PublicServeOptions } from './types-BEyIoCRe.js';
+import { R as RouteFunction, j as PublicServeOptions } from './types-D9gwTj2n.js';
 import '@upstash/qstash';
 import 'ai';
 import '@ai-sdk/openai';
-import 'langchain/tools';
 
 /**
  * Serve method to serve a Upstash Workflow in a Nextjs project
package/svelte.js
CHANGED
@@ -826,29 +826,16 @@ var triggerWorkflowDelete = async (workflowContext, debug, cancel = false) => {
   await debug?.log("SUBMIT", "SUBMIT_CLEANUP", {
     deletedWorkflowRunId: workflowContext.workflowRunId
   });
-
-
-
-
-
-
-
-
-
-
-  );
-  return { deleted: true };
-} catch (error) {
-  if (error instanceof import_qstash3.QstashError && error.status === 404) {
-    await debug?.log("WARN", "SUBMIT_CLEANUP", {
-      message: `Failed to remove workflow run ${workflowContext.workflowRunId} as it doesn't exist.`,
-      name: error.name,
-      errorMessage: error.message
-    });
-    return { deleted: false };
-  }
-  throw error;
-}
+  await workflowContext.qstashClient.http.request({
+    path: ["v2", "workflows", "runs", `${workflowContext.workflowRunId}?cancel=${cancel}`],
+    method: "DELETE",
+    parseResponseAsJson: false
+  });
+  await debug?.log(
+    "SUBMIT",
+    "SUBMIT_CLEANUP",
+    `workflow run ${workflowContext.workflowRunId} deleted.`
+  );
 };
 var recreateUserHeaders = (headers) => {
   const filteredHeaders = new Headers();
@@ -1635,7 +1622,22 @@ var WorkflowApi = class extends BaseWorkflowApi {
 // src/agents/adapters.ts
 var import_openai2 = require("@ai-sdk/openai");
 var import_ai = require("ai");
+
+// src/agents/constants.ts
 var AGENT_NAME_HEADER = "upstash-agent-name";
+var MANAGER_AGENT_PROMPT = `You are an agent orchestrating other AI Agents.
+
+These other agents have tools available to them.
+
+Given a prompt, utilize these agents to address requests.
+
+Don't always call all the agents provided to you at the same time. You can call one and use it's response to call another.
+
+Avoid calling the same agent twice in one turn. Instead, prefer to call it once but provide everything
+you need from that agent.
+`;
+
+// src/agents/adapters.ts
 var createWorkflowOpenAI = (context) => {
   return (0, import_openai2.createOpenAI)({
     compatibility: "strict",
@@ -1702,8 +1704,7 @@ var convertLangchainTool = (langchainTool) => {
   return (0, import_ai.tool)({
     description: langchainTool.description,
     parameters: langchainTool.schema,
-
-    execute: async (param) => langchainTool.invoke(param)
+    execute: async (...param) => langchainTool.invoke(...param)
   });
 };
 
@@ -1716,16 +1717,24 @@ var Agent = class {
   maxSteps;
   background;
   model;
-
+  temparature;
+  constructor({ tools, maxSteps, background, name, model, temparature = 0.1 }) {
     this.name = name;
     this.tools = tools ?? {};
     this.maxSteps = maxSteps;
     this.background = background;
     this.model = model;
+    this.temparature = temparature;
   }
+  /**
+   * Trigger the agent by passing a prompt
+   *
+   * @param prompt task to assign to the agent
+   * @returns Response as `{ text: string }`
+   */
   async call({ prompt }) {
     try {
-
+      const result = await (0, import_ai2.generateText)({
         model: this.model,
         tools: this.tools,
         maxSteps: this.maxSteps,
@@ -1733,8 +1742,10 @@ var Agent = class {
         prompt,
         headers: {
           [AGENT_NAME_HEADER]: this.name
-        }
+        },
+        temperature: this.temparature
      });
+      return { text: result.text };
     } catch (error) {
       if (error instanceof import_ai2.ToolExecutionError) {
         if (error.cause instanceof Error && error.cause.name === "WorkflowAbort") {
@@ -1749,6 +1760,11 @@ var Agent = class {
       }
     }
   }
+  /**
+   * Convert the agent to a tool which can be used by other agents.
+   *
+   * @returns the agent as a tool
+   */
   asTool() {
     const toolDescriptions = Object.values(this.tools).map((tool3) => tool3.description).join("\n");
     return (0, import_ai2.tool)({
@@ -1760,18 +1776,25 @@ var Agent = class {
     });
   }
 };
-var MANAGER_AGENT_PROMPT = `You are an AI agent who orchestrates other AI Agents.
-These other agents have tools available to them.
-Given a prompt, utilize these agents to address requests.
-Don't always call all the agents provided to you at the same time. You can call one and use it's response to call another.
-`;
 var ManagerAgent = class extends Agent {
   agents;
+  /**
+   * A manager agent which coordinates agents available to it to achieve a
+   * given task
+   *
+   * @param name Name of the agent
+   * @param background Background of the agent. If not passed, default will be used.
+   * @param model LLM model to use
+   * @param agents: List of agents available to the agent
+   * @param maxSteps number of times the manager agent can call the LLM at most.
+   * If the agent abruptly stops execution after calling other agents, you may
+   * need to increase maxSteps
+   */
   constructor({
-    maxSteps,
-    background = MANAGER_AGENT_PROMPT,
     agents,
+    background = MANAGER_AGENT_PROMPT,
     model,
+    maxSteps,
     name = "manager llm"
   }) {
     super({
@@ -1796,6 +1819,11 @@ var Task = class {
     this.context = context;
     this.taskParameters = taskParameters;
   }
+  /**
+   * Run the agents to complete the task
+   *
+   * @returns Result of the task as { text: string }
+   */
   async run() {
     const { prompt, ...otherParams } = this.taskParameters;
     const safePrompt = await this.context.run("Get Prompt", () => prompt);
@@ -1826,6 +1854,29 @@ var WorkflowAgents = class {
   constructor({ context }) {
     this.context = context;
   }
+  /**
+   * Defines an agent
+   *
+   * ```ts
+   * const researcherAgent = context.agents.agent({
+   *   model,
+   *   name: 'academic',
+   *   maxSteps: 2,
+   *   tools: {
+   *     wikiTool: new WikipediaQueryRun({
+   *       topKResults: 1,
+   *       maxDocContentLength: 500,
+   *     })
+   *   },
+   *   background:
+   *     'You are researcher agent with access to Wikipedia. ' +
+   *     'Utilize Wikipedia as much as possible for correct information',
+   * });
+   * ```
+   *
+   * @param params agent parameters
+   * @returns
+   */
   agent(params) {
     const wrappedTools = wrapTools({ context: this.context, tools: params.tools });
     return new Agent({
@@ -1836,6 +1887,9 @@ var WorkflowAgents = class {
   task(taskParameters) {
     return new Task({ context: this.context, taskParameters });
   }
+  /**
+   * creates an openai model for agents
+   */
   openai(...params) {
     const openai2 = createWorkflowOpenAI(this.context);
     return openai2(...params);
@@ -2436,7 +2490,6 @@ var checkIfLastOneIsDuplicate = async (steps, debug) => {
     if (step.stepId === lastStepId && step.targetStep === lastTargetStepId) {
       const message = `Upstash Workflow: The step '${step.stepName}' with id '${step.stepId}' has run twice during workflow execution. Rest of the workflow will continue running as usual.`;
       await debug?.log("WARN", "RESPONSE_DEFAULT", message);
-      console.log(steps);
       console.warn(message);
       return true;
     }