@mozaik-ai/core 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +275 -0
- package/dist/index.d.mts +110 -0
- package/dist/index.d.ts +110 -0
- package/dist/index.js +1039 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +998 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +62 -0
package/README.md
ADDED
|
@@ -0,0 +1,275 @@
|
|
|
1
|
+
# Mozaik
|
|
2
|
+
|
|
3
|
+
Mozaik is a TypeScript library for orchestrating AI agents, supporting both manually defined and AI-generated workflows.
|
|
4
|
+
|
|
5
|
+

|
|
6
|
+
|
|
7
|
+
---
|
|
8
|
+
|
|
9
|
+
## 📦 Installation
|
|
10
|
+
|
|
11
|
+
```bash
|
|
12
|
+
yarn add @mozaik-ai/core
|
|
13
|
+
```
|
|
14
|
+
|
|
15
|
+
## API Key Configuration
|
|
16
|
+
|
|
17
|
+
Make sure to set your API keys in a `.env` file at the root of your project:
|
|
18
|
+
|
|
19
|
+
```env
|
|
20
|
+
# For OpenAI
|
|
21
|
+
OPENAI_API_KEY=your-openai-key-here
|
|
22
|
+
|
|
23
|
+
# For Anthropic Claude
|
|
24
|
+
ANTHROPIC_API_KEY=your-anthropic-key-here
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
## Supported Models
|
|
28
|
+
|
|
29
|
+
The system supports OpenAI models (gpt-5, gpt-5-mini, gpt-5-nano, gpt-5.1) and Anthropic Claude models (Claude Sonnet, Haiku, and Opus 4.5) out of the box.
|
|
30
|
+
|
|
31
|
+
---
|
|
32
|
+
|
|
33
|
+
## Features
|
|
34
|
+
|
|
35
|
+
### AI Agents
|
|
36
|
+
|
|
37
|
+
This feature lets developers create AI agents through a single unified request definition, making it easy to compose tasks and leverage multiple models. You can mix providers, choose the best model for each task, and build agents that work across different capabilities.
|
|
38
|
+
|
|
39
|
+
```typescript
|
|
40
|
+
import 'dotenv/config'
|
|
41
|
+
import { Agent, Command } from '@mozaik-ai/core'
|
|
42
|
+
|
|
43
|
+
const command: Command = {
|
|
44
|
+
model: 'claude-sonnet-4.5'
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
const agent = new Agent(command)
|
|
48
|
+
const codingResponse = await agent.act('Write a React component for a todo list')
|
|
49
|
+
```
|
|
50
|
+
|
|
51
|
+
### Structured Output
|
|
52
|
+
|
|
53
|
+
Structured output lets you enforce exact response formats—using schemas like Zod—so AI returns predictable, validated data every time.
|
|
54
|
+
|
|
55
|
+
```typescript
|
|
56
|
+
import { z } from 'zod'
|
|
57
|
+
import { Agent, Command } from '@mozaik-ai/core'
|
|
58
|
+
|
|
59
|
+
const mealPlanSchema = z.object({
|
|
60
|
+
calories: z.number(),
|
|
61
|
+
meals: z.array(
|
|
62
|
+
z.object({
|
|
63
|
+
name: z.string(),
|
|
64
|
+
description: z.string(),
|
|
65
|
+
ingredients: z.array(z.string()).min(3)
|
|
66
|
+
})
|
|
67
|
+
).length(3),
|
|
68
|
+
shoppingList: z.array(z.string())
|
|
69
|
+
})
|
|
70
|
+
|
|
71
|
+
const command: Command = {
|
|
72
|
+
model: 'gpt-5-mini',
|
|
73
|
+
task: 'Create a 1-day vegetarian meal plan with breakfast, lunch, and dinner.',
|
|
74
|
+
structuredOutput: mealPlanSchema
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
const agent = new Agent(command)
|
|
78
|
+
const response = await agent.act()
|
|
79
|
+
```
|
|
80
|
+
|
|
81
|
+
### Multi-turn Conversation
|
|
82
|
+
|
|
83
|
+
Multi-turn conversation allows developers to provide chat history so the AI agent can maintain context and generate more relevant, continuous responses.
|
|
84
|
+
|
|
85
|
+
```typescript
|
|
86
|
+
import { Agent, Command } from '@mozaik-ai/core'
|
|
87
|
+
|
|
88
|
+
const command: Command = {
|
|
89
|
+
messages: [
|
|
90
|
+
{ role: 'system', content: 'You are a coding assistant' },
|
|
91
|
+
{ role: 'user', content: 'How do I sort an array in TypeScript?' },
|
|
92
|
+
{ role: 'assistant', content: 'You can use the .sort() method...' }
|
|
93
|
+
],
|
|
94
|
+
model: 'claude-haiku-4.5'
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
const agent = new Agent(command)
|
|
98
|
+
const response = await agent.act('Can you show me an example?')
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
### Tool Calling
|
|
102
|
+
|
|
103
|
+
Tool calling allows the agent to invoke real functions in your environment—letting it perform actual actions (like writing files, calling APIs, or modifying state) instead of merely generating text.
|
|
104
|
+
|
|
105
|
+
```typescript
|
|
106
|
+
import { promises as fs } from 'fs'
|
|
107
|
+
import { Agent, Command, Tool } from '@mozaik-ai/core'
|
|
108
|
+
|
|
109
|
+
const tools: Tool[] = [
|
|
110
|
+
{
|
|
111
|
+
name: 'write_file',
|
|
112
|
+
description: 'Write text to a file.',
|
|
113
|
+
schema: {
|
|
114
|
+
type: 'object',
|
|
115
|
+
properties: {
|
|
116
|
+
filename: { type: 'string' },
|
|
117
|
+
content: { type: 'string' }
|
|
118
|
+
},
|
|
119
|
+
required: ['filename', 'content']
|
|
120
|
+
},
|
|
121
|
+
async invoke({ filename, content }) {
|
|
122
|
+
await fs.writeFile(filename, content, 'utf8')
|
|
123
|
+
return { ok: true }
|
|
124
|
+
}
|
|
125
|
+
}
|
|
126
|
+
]
|
|
127
|
+
|
|
128
|
+
const command: Command = {
|
|
129
|
+
model: 'gpt-5.1',
|
|
130
|
+
tools,
|
|
131
|
+
messages: [
|
|
132
|
+
{
|
|
133
|
+
role: 'system',
|
|
134
|
+
content: 'Save notes to disk using the tool, then confirm where the file was written.'
|
|
135
|
+
}
|
|
136
|
+
],
|
|
137
|
+
task: 'Create a two-bullet trip prep checklist for Belgrade and save it as trip-checklist.txt.'
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
const agent = new Agent(command)
|
|
141
|
+
await agent.act()
|
|
142
|
+
```
|
|
143
|
+
|
|
144
|
+
### Vision
|
|
145
|
+
|
|
146
|
+
Vision support allows AI agents to interpret images alongside text, enabling richer understanding and multimodal interactions.
|
|
147
|
+
|
|
148
|
+
```typescript
|
|
149
|
+
import { Agent, Command } from '@mozaik-ai/core'
|
|
150
|
+
|
|
151
|
+
const command: Command = {
|
|
152
|
+
messages: [{
|
|
153
|
+
role: 'user',
|
|
154
|
+
content: [
|
|
155
|
+
{
|
|
156
|
+
type: 'image_url',
|
|
157
|
+
url: 'data:image/jpeg;base64,/9j/4AAQSkZJRg...'
|
|
158
|
+
},
|
|
159
|
+
{
|
|
160
|
+
type: 'text',
|
|
161
|
+
text: 'What is in this image?'
|
|
162
|
+
}
|
|
163
|
+
]
|
|
164
|
+
}],
|
|
165
|
+
model: 'claude-opus-4.5'
|
|
166
|
+
}
|
|
167
|
+
|
|
168
|
+
const agent = new Agent(command)
|
|
169
|
+
const response = await agent.act()
|
|
170
|
+
```
|
|
171
|
+
|
|
172
|
+
### Parallel Task Execution
|
|
173
|
+
|
|
174
|
+
This example demonstrates how to use standard JavaScript/TypeScript concurrency (Promise.all) to run multiple AI agents in parallel and compare or combine their responses.
|
|
175
|
+
|
|
176
|
+
```typescript
|
|
177
|
+
import 'dotenv/config'
|
|
178
|
+
import { Agent, Command } from '@mozaik-ai/core'
|
|
179
|
+
|
|
180
|
+
const openaiCommand: Command = {
|
|
181
|
+
model: 'gpt-5'
|
|
182
|
+
}
|
|
183
|
+
|
|
184
|
+
const anthropicCommand: Command = {
|
|
185
|
+
model: 'claude-sonnet-4.5'
|
|
186
|
+
}
|
|
187
|
+
|
|
188
|
+
const openaiAgent = new Agent(openaiCommand)
|
|
189
|
+
const anthropicAgent = new Agent(anthropicCommand)
|
|
190
|
+
|
|
191
|
+
const task = 'What are the key differences between TypeScript and JavaScript?'
|
|
192
|
+
|
|
193
|
+
// Execute both agents in parallel using Promise.all()
|
|
194
|
+
const [openaiResponse, anthropicResponse] = await Promise.all([
|
|
195
|
+
openaiAgent.act(task),
|
|
196
|
+
anthropicAgent.act(task)
|
|
197
|
+
])
|
|
198
|
+
```
|
|
199
|
+
|
|
200
|
+
### Workflow
|
|
201
|
+
|
|
202
|
+
A workflow defines how tasks are executed together, either sequentially (one after another) or in parallel. Each task or workflow is a `WorkUnit`, which allows workflows to be composed and nested to build more complex execution pipelines.
|
|
203
|
+
|
|
204
|
+
```typescript
|
|
205
|
+
const workflow = new Workflow("sequential", [
|
|
206
|
+
new Task("Analyze requirements", "gpt-5"),
|
|
207
|
+
new Workflow("parallel", [
|
|
208
|
+
new Task("Generate API schema", "gpt-5-mini"),
|
|
209
|
+
new Task("Draft documentation", "gpt-5-nano")
|
|
210
|
+
]),
|
|
211
|
+
new Task("Review and finalize", "gpt-5")
|
|
212
|
+
])
|
|
213
|
+
|
|
214
|
+
await workflow.execute()
|
|
215
|
+
```
|
|
216
|
+
|
|
217
|
+
### AI Autonomy
|
|
218
|
+
|
|
219
|
+
Developers can create autonomous agents using an AI planner agent. The planner works as a meta-agent: it breaks a high-level goal into smaller tasks, assigns each task to a specialized agent, and coordinates their execution through a workflow.
|
|
220
|
+
|
|
221
|
+
For example, given the goal `"Implement login functionality"`, the planner can generate the following workflow:
|
|
222
|
+
|
|
223
|
+
```typescript
|
|
224
|
+
Workflow(sequential, [
|
|
225
|
+
Task("Design login form UI", "gpt-5"),
|
|
226
|
+
Task("Implement authentication logic", "claude-sonnet-4.5"),
|
|
227
|
+
Workflow(parallel, [
|
|
228
|
+
Task("Add input validation", "gpt-5-mini"),
|
|
229
|
+
Task("Style the login form", "gpt-5-nano")
|
|
230
|
+
]),
|
|
231
|
+
Task("Write unit tests", "gpt-5")
|
|
232
|
+
])
|
|
233
|
+
```
|
|
234
|
+
|
|
235
|
+
### Autonomy Slider
|
|
236
|
+
|
|
237
|
+
By combining manually created workflows with the AI Planner, you can build hybrid workflows and control the level of autonomy, deciding which steps are fixed and which are planned automatically.
|
|
238
|
+
|
|
239
|
+
---
|
|
240
|
+
|
|
241
|
+
Working examples are available on the [GitHub repo](https://github.com/jigjoy-ai/mosaic-examples).
|
|
242
|
+
|
|
243
|
+
---
|
|
244
|
+
|
|
245
|
+
### Execution Hooks
|
|
246
|
+
|
|
247
|
+
Execution hooks allow you to attach custom behavior to workflow and task execution without changing the workflow logic itself. Hooks are invoked at key lifecycle moments (before/after task or workflow execution) and are passed into `execute()`.
|
|
248
|
+
|
|
249
|
+
A default hook cluster is provided out of the box, but you can extend or replace it to add logging, metrics, tracing, or other instrumentation.
|
|
250
|
+
|
|
251
|
+
#### Extending the default hooks
|
|
252
|
+
|
|
253
|
+
You can add your own hooks by creating a new cluster or extending the default one:
|
|
254
|
+
|
|
255
|
+
```ts
|
|
256
|
+
import { ClusterHook } from "@core/workflow/hooks/cluster"
|
|
257
|
+
import { DEFAULT_CLUSTER_HOOK } from "@core/workflow/hooks"
|
|
258
|
+
import { MetricsHook } from "./metrics-hook"
|
|
259
|
+
|
|
260
|
+
const extendedHook = new ClusterHook([
|
|
261
|
+
DEFAULT_CLUSTER_HOOK,
|
|
262
|
+
new MetricsHook()
|
|
263
|
+
])
|
|
264
|
+
|
|
265
|
+
await workflow.execute(extendedHook)
|
|
266
|
+
```
|
|
267
|
+
|
|
268
|
+
If you’re building agentic systems and want to learn or connect with like-minded developers, join [our Discord](https://discord.gg/33uMhcerDU) where we share ideas and knowledge.
|
|
269
|
+
|
|
270
|
+
---
|
|
271
|
+
|
|
272
|
+
## Author & License
|
|
273
|
+
|
|
274
|
+
Created by the [JigJoy](https://jigjoy.io) team.
|
|
275
|
+
Licensed under the MIT License.
|
package/dist/index.d.mts
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
import { ZodObject } from 'zod';
|
|
2
|
+
|
|
3
|
+
declare class Workflow extends WorkUnit {
|
|
4
|
+
readonly mode: "parallel" | "sequential";
|
|
5
|
+
readonly units: WorkUnit[];
|
|
6
|
+
constructor(mode: "parallel" | "sequential", units: WorkUnit[]);
|
|
7
|
+
execute(hook?: ExecutionHook): Promise<any>;
|
|
8
|
+
}
|
|
9
|
+
|
|
10
|
+
declare class Task extends WorkUnit {
|
|
11
|
+
private task;
|
|
12
|
+
private model;
|
|
13
|
+
constructor(task: string, model: Model);
|
|
14
|
+
getTask(): string;
|
|
15
|
+
getModel(): Model;
|
|
16
|
+
execute(hook?: ExecutionHook): Promise<any>;
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
interface ExecutionHook {
|
|
20
|
+
beforeWorkflow(wf: Workflow): void;
|
|
21
|
+
afterWorkflow(wf: Workflow, result: any): void;
|
|
22
|
+
beforeTask(task: Task): void;
|
|
23
|
+
afterTask(task: Task, result: any): void;
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
declare abstract class WorkUnit {
|
|
27
|
+
constructor();
|
|
28
|
+
abstract execute(hook: ExecutionHook): Promise<any>;
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
interface TextPart {
|
|
32
|
+
type: "text";
|
|
33
|
+
text: string;
|
|
34
|
+
}
|
|
35
|
+
interface ImagePart {
|
|
36
|
+
type: "image_url";
|
|
37
|
+
url: string;
|
|
38
|
+
}
|
|
39
|
+
interface Message {
|
|
40
|
+
role: "system" | "user" | "assistant" | "tool";
|
|
41
|
+
content: string | Array<TextPart | ImagePart>;
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
declare const OPENAI_MODELS: readonly ["gpt-5", "gpt-5-mini", "gpt-5-nano", "gpt-5.1"];
|
|
45
|
+
type OpenAIModel = (typeof OPENAI_MODELS)[number];
|
|
46
|
+
declare const ANTHROPIC_MODELS: readonly ["claude-sonnet-4.5", "claude-haiku-4.5", "claude-opus-4.5"];
|
|
47
|
+
type AnthropicModel = (typeof ANTHROPIC_MODELS)[number];
|
|
48
|
+
type Model = OpenAIModel | AnthropicModel;
|
|
49
|
+
|
|
50
|
+
interface Tool {
|
|
51
|
+
name: string;
|
|
52
|
+
description: string;
|
|
53
|
+
schema: Record<string, any>;
|
|
54
|
+
invoke: (args: any) => Promise<any>;
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
type Command = {
|
|
58
|
+
model: Model;
|
|
59
|
+
messages?: Message[];
|
|
60
|
+
task?: string;
|
|
61
|
+
structuredOutput?: ZodObject<any>;
|
|
62
|
+
tools?: Tool[];
|
|
63
|
+
};
|
|
64
|
+
|
|
65
|
+
declare abstract class RequestBuilder {
|
|
66
|
+
request: any;
|
|
67
|
+
initialize(): void;
|
|
68
|
+
abstract addModel(model: string): RequestBuilder;
|
|
69
|
+
abstract addTask(task: string): RequestBuilder;
|
|
70
|
+
abstract addMessages(messages: Message[]): RequestBuilder;
|
|
71
|
+
abstract addStructuredOutput(schema: ZodObject<any>): RequestBuilder;
|
|
72
|
+
abstract addTools(tools: Tool[]): RequestBuilder;
|
|
73
|
+
build(): any;
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
declare abstract class Endpoint {
|
|
77
|
+
abstract requestBuilder: RequestBuilder;
|
|
78
|
+
command: Command | null;
|
|
79
|
+
buildRequest(command: Command): any;
|
|
80
|
+
abstract sendRequest(providerRequest: any): any;
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
declare abstract class EndpointResolver {
|
|
84
|
+
abstract resolve(model: string): Endpoint;
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
declare class RequestGateway {
|
|
88
|
+
readonly endpointResolver: EndpointResolver;
|
|
89
|
+
endpoint: Endpoint;
|
|
90
|
+
constructor(endpointResolver: EndpointResolver);
|
|
91
|
+
invoke(command: Command): Promise<any>;
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
declare class Agent {
|
|
95
|
+
private command;
|
|
96
|
+
gateway: RequestGateway;
|
|
97
|
+
constructor(command: Command);
|
|
98
|
+
setModel(model: Model): void;
|
|
99
|
+
setMessages(messages: Message[]): void;
|
|
100
|
+
setTask(task: string): void;
|
|
101
|
+
setStructuredOutput(schema: ZodObject<any>): void;
|
|
102
|
+
act(task?: string): Promise<any>;
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
declare class PlanningAgent extends Agent {
|
|
106
|
+
constructor(command: Command);
|
|
107
|
+
planFromGoal(goal: string): Promise<Workflow>;
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
export { Agent, type Command, type Message, type Model, PlanningAgent, Task, type Tool, WorkUnit, Workflow };
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
import { ZodObject } from 'zod';
|
|
2
|
+
|
|
3
|
+
declare class Workflow extends WorkUnit {
|
|
4
|
+
readonly mode: "parallel" | "sequential";
|
|
5
|
+
readonly units: WorkUnit[];
|
|
6
|
+
constructor(mode: "parallel" | "sequential", units: WorkUnit[]);
|
|
7
|
+
execute(hook?: ExecutionHook): Promise<any>;
|
|
8
|
+
}
|
|
9
|
+
|
|
10
|
+
declare class Task extends WorkUnit {
|
|
11
|
+
private task;
|
|
12
|
+
private model;
|
|
13
|
+
constructor(task: string, model: Model);
|
|
14
|
+
getTask(): string;
|
|
15
|
+
getModel(): Model;
|
|
16
|
+
execute(hook?: ExecutionHook): Promise<any>;
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
interface ExecutionHook {
|
|
20
|
+
beforeWorkflow(wf: Workflow): void;
|
|
21
|
+
afterWorkflow(wf: Workflow, result: any): void;
|
|
22
|
+
beforeTask(task: Task): void;
|
|
23
|
+
afterTask(task: Task, result: any): void;
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
declare abstract class WorkUnit {
|
|
27
|
+
constructor();
|
|
28
|
+
abstract execute(hook: ExecutionHook): Promise<any>;
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
interface TextPart {
|
|
32
|
+
type: "text";
|
|
33
|
+
text: string;
|
|
34
|
+
}
|
|
35
|
+
interface ImagePart {
|
|
36
|
+
type: "image_url";
|
|
37
|
+
url: string;
|
|
38
|
+
}
|
|
39
|
+
interface Message {
|
|
40
|
+
role: "system" | "user" | "assistant" | "tool";
|
|
41
|
+
content: string | Array<TextPart | ImagePart>;
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
declare const OPENAI_MODELS: readonly ["gpt-5", "gpt-5-mini", "gpt-5-nano", "gpt-5.1"];
|
|
45
|
+
type OpenAIModel = (typeof OPENAI_MODELS)[number];
|
|
46
|
+
declare const ANTHROPIC_MODELS: readonly ["claude-sonnet-4.5", "claude-haiku-4.5", "claude-opus-4.5"];
|
|
47
|
+
type AnthropicModel = (typeof ANTHROPIC_MODELS)[number];
|
|
48
|
+
type Model = OpenAIModel | AnthropicModel;
|
|
49
|
+
|
|
50
|
+
interface Tool {
|
|
51
|
+
name: string;
|
|
52
|
+
description: string;
|
|
53
|
+
schema: Record<string, any>;
|
|
54
|
+
invoke: (args: any) => Promise<any>;
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
type Command = {
|
|
58
|
+
model: Model;
|
|
59
|
+
messages?: Message[];
|
|
60
|
+
task?: string;
|
|
61
|
+
structuredOutput?: ZodObject<any>;
|
|
62
|
+
tools?: Tool[];
|
|
63
|
+
};
|
|
64
|
+
|
|
65
|
+
declare abstract class RequestBuilder {
|
|
66
|
+
request: any;
|
|
67
|
+
initialize(): void;
|
|
68
|
+
abstract addModel(model: string): RequestBuilder;
|
|
69
|
+
abstract addTask(task: string): RequestBuilder;
|
|
70
|
+
abstract addMessages(messages: Message[]): RequestBuilder;
|
|
71
|
+
abstract addStructuredOutput(schema: ZodObject<any>): RequestBuilder;
|
|
72
|
+
abstract addTools(tools: Tool[]): RequestBuilder;
|
|
73
|
+
build(): any;
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
declare abstract class Endpoint {
|
|
77
|
+
abstract requestBuilder: RequestBuilder;
|
|
78
|
+
command: Command | null;
|
|
79
|
+
buildRequest(command: Command): any;
|
|
80
|
+
abstract sendRequest(providerRequest: any): any;
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
declare abstract class EndpointResolver {
|
|
84
|
+
abstract resolve(model: string): Endpoint;
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
declare class RequestGateway {
|
|
88
|
+
readonly endpointResolver: EndpointResolver;
|
|
89
|
+
endpoint: Endpoint;
|
|
90
|
+
constructor(endpointResolver: EndpointResolver);
|
|
91
|
+
invoke(command: Command): Promise<any>;
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
declare class Agent {
|
|
95
|
+
private command;
|
|
96
|
+
gateway: RequestGateway;
|
|
97
|
+
constructor(command: Command);
|
|
98
|
+
setModel(model: Model): void;
|
|
99
|
+
setMessages(messages: Message[]): void;
|
|
100
|
+
setTask(task: string): void;
|
|
101
|
+
setStructuredOutput(schema: ZodObject<any>): void;
|
|
102
|
+
act(task?: string): Promise<any>;
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
declare class PlanningAgent extends Agent {
|
|
106
|
+
constructor(command: Command);
|
|
107
|
+
planFromGoal(goal: string): Promise<Workflow>;
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
export { Agent, type Command, type Message, type Model, PlanningAgent, Task, type Tool, WorkUnit, Workflow };
|