@mozaik-ai/core 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +275 -0
- package/dist/index.d.mts +110 -0
- package/dist/index.d.ts +110 -0
- package/dist/index.js +1039 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +998 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +62 -0
package/dist/index.mjs
ADDED
|
@@ -0,0 +1,998 @@
|
|
|
1
|
+
// src/core/workflow/work-unit.ts
// NOTE(review): appears to be the base class for executable workflow units
// (ParallelExecution/SequentalExecution call `unit.execute(hook)` on workflow
// units); concrete behavior is defined by subclasses — confirm against the
// workflow module.
var WorkUnit = class {
  constructor() {
  }
};
|
|
6
|
+
|
|
7
|
+
// src/core/endpoint/request-gateway.ts
/**
 * Routes a command to the provider endpoint resolved from its model.
 */
var RequestGateway = class {
  /**
   * @param endpointResolver - resolver mapping a model id to an Endpoint.
   */
  constructor(endpointResolver) {
    this.endpointResolver = endpointResolver;
  }
  /**
   * Resolve the endpoint for `command.model` and forward the command.
   *
   * Uses a local variable so concurrent invoke() calls cannot race on shared
   * instance state; `this.endpoint` is still assigned for backward
   * compatibility with any code that inspects it.
   *
   * @param command - request command carrying a `model` property.
   * @returns the endpoint's response.
   */
  async invoke(command) {
    const endpoint = this.endpointResolver.resolve(command.model);
    this.endpoint = endpoint;
    return await endpoint.sendRequest(command);
  }
};
|
|
17
|
+
|
|
18
|
+
// src/core/endpoint/endpoint-resolver.ts
// Abstract marker class; concrete resolvers (see DefaultEndpointResolver)
// implement `resolve(model)` returning a provider Endpoint.
var EndpointResolver = class {
};
|
|
21
|
+
|
|
22
|
+
// src/types/model.ts
// Model ids accepted for the OpenAI provider.
var OPENAI_MODELS = ["gpt-5", "gpt-5-mini", "gpt-5-nano", "gpt-5.1"];
// Pinned Anthropic API snapshot ids the public aliases map to.
var latest_sonnet = "claude-sonnet-4-5-20250929";
var latest_haiku = "claude-haiku-4-5-20251001";
var latest_opus = "claude-opus-4-5-20251101";
// Public model aliases accepted for the Anthropic provider.
var ANTHROPIC_MODELS = ["claude-sonnet-4.5", "claude-haiku-4.5", "claude-opus-4.5"];
// Alias -> pinned snapshot id actually sent to the Anthropic API.
var ANTHROPIC_MODEL_MAP = {
  "claude-sonnet-4.5": latest_sonnet,
  "claude-haiku-4.5": latest_haiku,
  "claude-opus-4.5": latest_opus
};
|
|
33
|
+
|
|
34
|
+
// src/core/command-handler/capability.ts
/**
 * Chain-of-responsibility base for request capabilities. Each concrete
 * handler applies its own fragment of the command to the builder, then
 * passes control to the next handler in the chain.
 */
var CapabilityHandler = class {
  /** Link `capability` after this handler; returns it so links can chain. */
  setNextHandler(capability) {
    this.nextHandler = capability;
    return capability;
  }
  /** Apply this capability, then delegate to the next handler, if any. */
  handle(command, builder) {
    this.apply(command, builder);
    this.nextHandler?.handle(command, builder);
  }
};
|
|
47
|
+
|
|
48
|
+
// src/core/command-handler/messages.ts
/** Copies the command's conversation messages into the request builder. */
var MessagesHandler = class extends CapabilityHandler {
  apply(command, builder) {
    const { messages } = command;
    if (messages) builder.addMessages(messages);
  }
};
|
|
56
|
+
|
|
57
|
+
// src/core/command-handler/task.ts
/** Copies the command's task prompt into the request builder. */
var TaskHandler = class extends CapabilityHandler {
  apply(command, builder) {
    const { task } = command;
    if (task) builder.addTask(task);
  }
};
|
|
65
|
+
|
|
66
|
+
// src/core/command-handler/model.ts
/** Copies the command's model id into the request builder. */
var ModelHandler = class extends CapabilityHandler {
  apply(command, builder) {
    const { model } = command;
    if (model) builder.addModel(model);
  }
};
|
|
74
|
+
|
|
75
|
+
// src/core/command-handler/structured-output.ts
/**
 * Copies the command's structured-output schema into the request builder.
 * NOTE(review): class name keeps the original "Outputl" spelling because
 * other modules construct it by this exact name.
 */
var StructuredOutputlHandler = class extends CapabilityHandler {
  apply(command, builder) {
    const { structuredOutput } = command;
    if (structuredOutput) builder.addStructuredOutput(structuredOutput);
  }
};
|
|
83
|
+
|
|
84
|
+
// src/core/command-handler/tools.ts
/** Copies the command's tools into the request builder when non-empty. */
var ToolsHandler = class extends CapabilityHandler {
  apply(command, requestBuilder) {
    const { tools } = command;
    if (tools?.length > 0) requestBuilder.addTools(tools);
  }
};
|
|
92
|
+
|
|
93
|
+
// src/core/endpoint/endpoint.ts
/**
 * Template base for provider endpoints. Subclasses supply a
 * `requestBuilder` and implement `sendRequest(command)`.
 */
var Endpoint = class {
  constructor() {
    this.command = null;
  }
  /**
   * Translate a command into a provider request by running it through the
   * capability chain (messages -> task -> model -> structured output ->
   * tools) against `this.requestBuilder`, then build the result.
   */
  buildRequest(command) {
    this.command = command;
    this.requestBuilder.initialize();
    const chain = [
      new MessagesHandler(),
      new TaskHandler(),
      new ModelHandler(),
      new StructuredOutputlHandler(),
      new ToolsHandler()
    ];
    chain.reduce((prev, next) => prev.setNextHandler(next));
    chain[0].handle(command, this.requestBuilder);
    return this.requestBuilder.build();
  }
};
|
|
111
|
+
|
|
112
|
+
// src/core/endpoint/request-builder.ts
// Base accumulator for provider request objects. Subclasses add
// provider-specific `addX` mutators that populate `this.request`.
var RequestBuilder = class {
  // Reset the accumulated request to an empty object.
  initialize() {
    this.request = {};
  }
  // Return the accumulated request as-is; subclasses may override to apply
  // defaults (see AnthropicRequestBuilder.build).
  build() {
    return this.request;
  }
};
|
|
121
|
+
|
|
122
|
+
// src/providers/openai/mapper.ts
/**
 * Translates domain-level messages and tools into the shapes expected by
 * the OpenAI Responses API.
 */
var OpenAIResponsesMapper = class {
  /**
   * Convert domain Messages to Responses API instructions format.
   *
   * The Responses API uses 'instructions' (system prompt) rather than a
   * messages array. For multi-turn conversations, use conversation.id or
   * previous_response_id parameters.
   */
  toInstructions(messages) {
    if (messages.length === 0) {
      throw new Error("[ResponsesMapper] Cannot create instructions from empty messages");
    }
    const systemMessage = messages.find((m) => m.role === "system");
    if (!systemMessage) {
      // No system message: fall back to a generic prompt.
      return "You are a helpful assistant.";
    }
    const { content } = systemMessage;
    return typeof content === "string" ? content : this.extractTextFromContent(content);
  }
  /**
   * Extract the user's input from messages: the content of the last user
   * message, or "" when none exists. Intended for prompt variables or
   * conversation continuation.
   */
  extractUserInput(messages) {
    const userMessages = messages.filter((m) => m.role === "user");
    const last = userMessages.at(-1);
    if (!last) {
      return "";
    }
    return typeof last.content === "string" ? last.content : this.extractTextFromContent(last.content);
  }
  /** Join the text parts of a structured content array with spaces. */
  extractTextFromContent(content) {
    const texts = [];
    for (const part of content) {
      if (part.type === "text") {
        texts.push(part.text);
      }
    }
    return texts.join(" ");
  }
  /** Map tools to Responses API format (strict mode enabled by default). */
  toTools(tools) {
    return tools.map(({ name, description, schema }) => ({
      type: "function",
      name,
      description,
      parameters: schema,
      strict: true
    }));
  }
};
|
|
171
|
+
|
|
172
|
+
// src/providers/openai/builder.ts
|
|
173
|
+
import { zodTextFormat } from "openai/helpers/zod";
|
|
174
|
+
var OpenAIResponsesBuilder = class extends RequestBuilder {
  /** @param mapper - translates domain messages into Responses API shapes. */
  constructor(mapper = new OpenAIResponsesMapper()) {
    super();
    this.mapper = mapper;
  }
  /** Set the target model id. */
  addModel(model) {
    this.request.model = model;
    return this;
  }
  /** Use the task text as the request input. */
  addTask(task) {
    this.request.input = task;
    return this;
  }
  /**
   * Derive `instructions` from the messages' system prompt, skipping the
   * mapper's generic fallback so the API default is not overridden.
   */
  addMessages(messages) {
    const instructions = this.mapper.toInstructions(messages);
    const isFallback = instructions === "You are a helpful assistant.";
    if (instructions && !isFallback) {
      this.request.instructions = instructions;
    }
    return this;
  }
  /** Request structured output validated against the given zod schema. */
  addStructuredOutput(schema) {
    this.request.text = { format: zodTextFormat(schema, "outputSchema") };
    return this;
  }
  /** Expose tools to the model in Responses API function format. */
  addTools(tools) {
    this.request.tools = tools.map(({ name, description, schema }) => ({
      type: "function",
      name,
      description,
      parameters: schema
    }));
    return this;
  }
};
|
|
210
|
+
|
|
211
|
+
// src/providers/openai/client/default.ts
|
|
212
|
+
import OpenAI from "openai";
|
|
213
|
+
var OpenAIDefaultClient = class {
  /** @param client - OpenAI SDK instance; defaults to an env-configured client. */
  constructor(client = new OpenAI()) {
    this.client = client;
  }
  /** Submit the request via the Responses `create` endpoint. */
  async send(request) {
    const response = await this.client.responses.create(request);
    return response;
  }
};
|
|
221
|
+
|
|
222
|
+
// src/providers/openai/client/parser.ts
|
|
223
|
+
import OpenAI2 from "openai";
|
|
224
|
+
var OpenAIParserClient = class {
  /** @param client - OpenAI SDK instance; defaults to an env-configured client. */
  constructor(client = new OpenAI2()) {
    this.client = client;
  }
  /** Submit via the Responses `parse` endpoint (structured output). */
  async send(request) {
    const response = await this.client.responses.parse(request);
    return response;
  }
};
|
|
232
|
+
|
|
233
|
+
// src/providers/openai/client/resolver.ts
/**
 * Picks the SDK client variant for a built request: requests carrying a
 * structured-output text format need the parsing client; everything else
 * uses the plain one.
 */
var OpenAIClientResolver = class {
  static resolve(request) {
    const wantsParsing = Boolean(request.text && request.text.format);
    return wantsParsing ? new OpenAIParserClient() : new OpenAIDefaultClient();
  }
};
|
|
243
|
+
|
|
244
|
+
// src/core/endpoint/response-handler.ts
/**
 * Chain-of-responsibility base for provider response processing. Concrete
 * handlers implement `handle(responseContext)` and either resolve the
 * context or delegate to `this.nextHandler`.
 */
var ResponseHandler = class {
  /** Link `responseHandler` after this one; returns it so links can chain. */
  setNextHandler(responseHandler) {
    this.nextHandler = responseHandler;
    return responseHandler;
  }
};
|
|
251
|
+
|
|
252
|
+
// src/providers/openai/response-handler/output-parsed.ts
/** Resolves the context with `output_parsed` (structured output), if present. */
var OutputParsedHandler = class extends ResponseHandler {
  async handle(responseContext) {
    const parsed = responseContext.providerResponse.output_parsed;
    if (!parsed) {
      return await this.nextHandler.handle(responseContext);
    }
    responseContext.setResponse(parsed);
    return responseContext;
  }
};
|
|
263
|
+
|
|
264
|
+
// src/providers/openai/response-handler/content.ts
/** Falls back to the first text block of the first output item, if any. */
var ContentHandler = class extends ResponseHandler {
  async handle(responseContext) {
    const { providerResponse } = responseContext;
    const firstOutput = providerResponse.output?.[0];
    const firstContent = firstOutput && "content" in firstOutput ? firstOutput.content?.[0] : void 0;
    if (firstContent && "text" in firstContent) {
      responseContext.setResponse(firstContent.text);
      return responseContext;
    }
    return await this.nextHandler.handle(responseContext);
  }
};
|
|
280
|
+
|
|
281
|
+
// src/providers/openai/response-handler/output-text.ts
/** Resolves the context with the convenience `output_text` field, if set. */
var OutputTextHandler = class extends ResponseHandler {
  async handle(responseContext) {
    const text = responseContext.providerResponse.output_text;
    if (!text) {
      return await this.nextHandler.handle(responseContext);
    }
    responseContext.setResponse(text);
    return responseContext;
  }
};
|
|
292
|
+
|
|
293
|
+
// src/core/endpoint/usage.ts
/**
 * Record of token usage for a single provider round-trip.
 * `totalTokens` is derived as input + output at construction time.
 */
var UsageEntry = class {
  constructor(inputTokens, outputTokens, model) {
    this.inputTokens = inputTokens;
    this.outputTokens = outputTokens;
    this.totalTokens = inputTokens + outputTokens;
    this.model = model;
  }
};
|
|
302
|
+
|
|
303
|
+
// src/providers/openai/response-handler/function-calls.ts
/**
 * Executes the model's requested function calls locally and loops until the
 * model stops asking for tools, continuing the conversation through
 * `previous_response_id`.
 */
var FunctionCallsHandler = class extends ResponseHandler {
  /**
   * @param request - the mutable request object; reused (and mutated) for
   *   follow-up turns.
   * @param tools - tool objects exposing `name` and `invoke(args)`.
   */
  constructor(request, tools) {
    super();
    this.request = request;
    this.tools = tools;
    // Follow-up turns use the plain client (no structured-output parsing).
    this.client = new OpenAIDefaultClient();
  }
  // True when any output item is a function call. NOTE: yields undefined
  // (falsy) when `output` is absent, which the caller treats as "none".
  hasToolCalls(response) {
    var _a;
    return (_a = response.output) == null ? void 0 : _a.some((item) => item.type === "function_call");
  }
  // All function_call items from the response output ([] when absent).
  extractToolCalls(response) {
    var _a;
    return ((_a = response.output) == null ? void 0 : _a.filter((item) => item.type === "function_call")) || [];
  }
  // Find the locally registered tool matching the call's name, or null.
  resolveTool(call, tools) {
    var _a;
    return (_a = tools.find((t) => t.name === call.name)) != null ? _a : null;
  }
  // Parse string arguments as JSON (may throw on malformed JSON; caught by
  // executeToolCalls), then run the tool.
  async executeTool(tool, rawArgs) {
    const args = typeof rawArgs === "string" ? JSON.parse(rawArgs) : rawArgs;
    return await tool.invoke(args);
  }
  // Wrap a tool result (or error payload) in Responses API output format.
  formatToolResult(callId, payload) {
    return {
      type: "function_call_output",
      call_id: callId,
      output: JSON.stringify(payload)
    };
  }
  // Execute calls sequentially; unknown tools and thrown errors become
  // error payloads rather than aborting the batch.
  async executeToolCalls(toolCalls, tools) {
    var _a;
    const results = [];
    for (const call of toolCalls) {
      const tool = this.resolveTool(call, tools);
      if (!tool) {
        results.push(
          this.formatToolResult(call.call_id, {
            error: `Unknown tool: ${call.name}`
          })
        );
        continue;
      }
      try {
        const result = await this.executeTool(tool, call.arguments);
        results.push(this.formatToolResult(call.call_id, result));
      } catch (err) {
        results.push(
          this.formatToolResult(call.call_id, {
            error: (_a = err.message) != null ? _a : "Tool execution failed"
          })
        );
      }
    }
    return results;
  }
  // Loop: while the model requests tools, run them, send the results back
  // as the next turn's input (continued via previous_response_id), and
  // record usage for each extra round-trip. When no calls remain, delegate
  // to the next handler for output extraction.
  async handle(responseContext) {
    let providerResponse = responseContext.providerResponse;
    let toolCallingResponse;
    while (this.hasToolCalls(providerResponse)) {
      const toolCalls = this.extractToolCalls(providerResponse);
      const toolResults = await this.executeToolCalls(toolCalls, this.tools);
      // Mutates the shared request: results become the next turn's input.
      this.request.input = toolResults;
      this.request.previous_response_id = providerResponse.id;
      toolCallingResponse = await this.client.send(this.request);
      responseContext.addUsageEntry(
        new UsageEntry(
          toolCallingResponse.usage.input_tokens,
          toolCallingResponse.usage.output_tokens,
          toolCallingResponse.model
        )
      );
      responseContext.setProviderResponse(toolCallingResponse);
      responseContext.setResponse(toolCallingResponse);
      providerResponse = toolCallingResponse;
    }
    return await this.nextHandler.handle(responseContext);
  }
};
|
|
383
|
+
|
|
384
|
+
// src/core/endpoint/response-context.ts
/**
 * Mutable carrier threaded through the response-handler chain. Tracks the
 * raw provider response, the extracted result, and accumulated usage.
 * All mutators return `this` for chaining.
 */
var ResponseContext = class {
  constructor() {
    // One entry per provider round-trip (initial call plus tool loops).
    this.usageEntries = [];
  }
  /** Record the latest raw provider response. */
  setProviderResponse(providerResponse) {
    this.providerResponse = providerResponse;
    return this;
  }
  /** Append a usage record. */
  addUsageEntry(usageEntry) {
    this.usageEntries.push(usageEntry);
    return this;
  }
  /** Set the extracted, consumer-facing response. */
  setResponse(response) {
    this.response = response;
    return this;
  }
};
|
|
402
|
+
|
|
403
|
+
// src/providers/openai/response-handler/usage.ts
/** Records token usage from the initial response, then always delegates. */
var UsageHandler = class extends ResponseHandler {
  async handle(responseContext) {
    const { providerResponse } = responseContext;
    const { usage } = providerResponse;
    if (usage) {
      const entry = new UsageEntry(usage.input_tokens, usage.output_tokens, providerResponse.model);
      responseContext.addUsageEntry(entry);
    }
    return await this.nextHandler.handle(responseContext);
  }
};
|
|
416
|
+
|
|
417
|
+
// src/providers/openai/endpoint.ts
/**
 * OpenAI Responses API endpoint: builds the request, sends it, then runs
 * the response through a handler chain (usage -> function calls ->
 * parsed output -> output text -> raw content).
 */
var OpenAIResponses = class extends Endpoint {
  constructor() {
    super();
    this.requestBuilder = new OpenAIResponsesBuilder();
  }
  /**
   * Send `command` to OpenAI and return the processed ResponseContext.
   * Failures are logged for visibility and rethrown to the caller.
   */
  async sendRequest(command) {
    try {
      const request = this.buildRequest(command);
      // Structured-output requests resolve to the parsing client variant.
      const client = OpenAIClientResolver.resolve(request);
      const response = await client.send(request);
      const responseContext = new ResponseContext();
      responseContext.setProviderResponse(response);
      const usageHandler = new UsageHandler();
      const functionCallsHandler = new FunctionCallsHandler(
        request,
        command.tools ? command.tools : []
      );
      const outputParsedHandler = new OutputParsedHandler();
      const outputTextHandler = new OutputTextHandler();
      const contentHandler = new ContentHandler();
      // Chain order matters: usage is recorded first and tool loops run
      // before any output extraction.
      usageHandler.setNextHandler(functionCallsHandler).setNextHandler(outputParsedHandler).setNextHandler(outputTextHandler).setNextHandler(contentHandler);
      const responseHandler = usageHandler;
      return await responseHandler.handle(responseContext);
    } catch (error) {
      console.warn("[OpenAIProvider] Responses API request failed:", error);
      throw error;
    }
  }
};
|
|
447
|
+
|
|
448
|
+
// src/providers/anthropic/mapper.ts
/**
 * Translates domain messages into the Anthropic Messages API shape.
 */
var AnthropicMapper = class {
  /**
   * Transforms domain messages to Anthropic API format. System messages
   * are pulled out into a single `system` prompt; `tool` messages are
   * dropped; user/assistant messages are converted to content blocks.
   */
  toMessages(messages) {
    const anthropicMessages = [];
    let systemPrompt;
    for (const msg of messages) {
      switch (msg.role) {
        case "system": {
          const text = typeof msg.content === "string" ? msg.content : msg.content.map((p) => p.text).join("\n");
          // Multiple system messages are concatenated with a blank line.
          systemPrompt = systemPrompt ? `${systemPrompt}\n\n${text}` : text;
          break;
        }
        case "tool":
          // Tool messages are handled elsewhere in the pipeline.
          break;
        case "user":
        case "assistant": {
          if (typeof msg.content === "string") {
            anthropicMessages.push({ role: msg.role, content: msg.content });
            break;
          }
          const contentBlocks = msg.content.map((part) => {
            if (part.type === "text") {
              return { type: "text", text: part.text };
            }
            if (part.type === "image_url") {
              const matches = part.url.startsWith("data:image") ? part.url.match(/data:image\/(\w+);base64,(.+)/) : null;
              if (matches) {
                return {
                  type: "image",
                  source: {
                    type: "base64",
                    media_type: `image/${matches[1]}`,
                    data: matches[2]
                  }
                };
              }
              throw new Error(
                "[AnthropicMapper] Image URLs must be base64-encoded. Convert to data:image/[type];base64,[data] format."
              );
            }
            // Unknown part types degrade to an empty text block.
            return { type: "text", text: "" };
          });
          anthropicMessages.push({ role: msg.role, content: contentBlocks });
          break;
        }
        default:
          break;
      }
    }
    return {
      messages: this.ensureAlternatingRoles(anthropicMessages),
      system: systemPrompt
    };
  }
  /**
   * Ensures messages alternate between user and assistant roles, merging
   * same-role neighbors; Anthropic API requires this constraint and that
   * the conversation starts with a user message.
   */
  ensureAlternatingRoles(messages) {
    if (messages.length === 0) return messages;
    const result = [];
    let lastRole;
    for (const msg of messages) {
      const previous = result.at(-1);
      if (previous && lastRole === msg.role) {
        if (typeof previous.content === "string" && typeof msg.content === "string") {
          previous.content = `${previous.content}\n${msg.content}`;
        } else {
          const asBlocks = (c) => Array.isArray(c) ? c : [{ type: "text", text: c }];
          previous.content = [...asBlocks(previous.content), ...asBlocks(msg.content)];
        }
        continue;
      }
      result.push(msg);
      lastRole = msg.role;
    }
    if (result.length > 0 && result[0].role !== "user") {
      throw new Error(
        "[AnthropicMapper] First message must be from user. Anthropic API requires conversations to start with a user message."
      );
    }
    return result;
  }
};
|
|
548
|
+
|
|
549
|
+
// src/providers/anthropic/builder.ts
|
|
550
|
+
import { betaZodOutputFormat } from "@anthropic-ai/sdk/helpers/beta/zod";
|
|
551
|
+
var AnthropicRequestBuilder = class extends RequestBuilder {
  constructor() {
    super(...arguments);
    // Translates domain messages into Anthropic message/system shapes.
    this.mapper = new AnthropicMapper();
  }
  /**
   * Set the target model. Known public aliases are mapped to their pinned
   * snapshot ids; anything else (e.g. an already-pinned snapshot id) is
   * passed through unchanged instead of becoming `undefined`.
   */
  addModel(model) {
    this.request.model = ANTHROPIC_MODEL_MAP[model] ?? model;
    return this;
  }
  /** Append the task as a user message, creating the array on first use. */
  addTask(task) {
    const message = {
      role: "user",
      content: task
    };
    if (!this.request.messages) {
      this.request.messages = [];
    }
    this.request.messages.push(message);
    return this;
  }
  /** Install mapped conversation messages and the optional system prompt. */
  addMessages(messages) {
    const { messages: anthropicMessages, system } = this.mapper.toMessages(messages);
    this.request.messages = anthropicMessages;
    if (system) {
      this.request.system = system;
    }
    return this;
  }
  /** Opt into the structured-outputs beta with the given zod schema. */
  addStructuredOutput(schema) {
    this.request.betas = ["structured-outputs-2025-11-13"];
    this.request.output_format = betaZodOutputFormat(schema);
    return this;
  }
  /** Expose tools in Anthropic tool format. */
  addTools(tools) {
    this.request.tools = tools.map((tool) => ({
      name: tool.name,
      description: tool.description,
      input_schema: tool.schema
    }));
    return this;
  }
  /** Finalize the request, defaulting `max_tokens` (required by the API). */
  build() {
    if (!this.request.max_tokens) {
      this.request.max_tokens = 1024;
    }
    return this.request;
  }
};
|
|
599
|
+
|
|
600
|
+
// src/providers/anthropic/client/default.ts
|
|
601
|
+
import Anthropic from "@anthropic-ai/sdk";
|
|
602
|
+
var AnthropicDefaultClient = class {
  /** @param client - Anthropic SDK instance; defaults to an env-configured client. */
  constructor(client = new Anthropic()) {
    this.client = client;
  }
  /** Submit the request via the Messages `create` endpoint. */
  async send(request) {
    const response = await this.client.messages.create(request);
    return response;
  }
};
|
|
610
|
+
|
|
611
|
+
// src/providers/anthropic/client/parser.ts
|
|
612
|
+
import Anthropic2 from "@anthropic-ai/sdk";
|
|
613
|
+
var AnthropicParserClient = class {
  /** @param client - Anthropic SDK instance; defaults to an env-configured client. */
  constructor(client = new Anthropic2()) {
    this.client = client;
  }
  /** Submit via the beta Messages `parse` endpoint (structured output). */
  async send(request) {
    const response = await this.client.beta.messages.parse(request);
    return response;
  }
};
|
|
621
|
+
|
|
622
|
+
// src/providers/anthropic/client/resolver.ts
/** Requests with a structured output format need the beta parsing client. */
var AnthropicClientResolver = class {
  static resolve(request) {
    if (request.output_format) {
      return new AnthropicParserClient();
    }
    return new AnthropicDefaultClient();
  }
};
|
|
628
|
+
|
|
629
|
+
// src/providers/anthropic/response-handler/parsed-output.ts
/** Resolves the context with `parsed_output` (structured output), if present. */
var ParsedOutputHandler = class extends ResponseHandler {
  async handle(responseContext) {
    const parsed = responseContext.providerResponse.parsed_output;
    if (!parsed) {
      return await this.nextHandler.handle(responseContext);
    }
    responseContext.setResponse(parsed);
    return responseContext;
  }
};
|
|
640
|
+
|
|
641
|
+
// src/providers/anthropic/response-handler/content.ts
/** Concatenates all text blocks of the response content, if any. */
var ContentHandler2 = class extends ResponseHandler {
  async handle(responseContext) {
    const { providerResponse } = responseContext;
    const textBlocks = providerResponse.content.filter((block) => block.type === "text");
    const content = textBlocks.map((block) => block.text).join("");
    if (!content) {
      return await this.nextHandler.handle(responseContext);
    }
    responseContext.setResponse(content);
    return responseContext;
  }
};
|
|
653
|
+
|
|
654
|
+
// src/providers/anthropic/response-handler/tool-use.ts
/**
 * Executes the model's tool_use requests and loops until the response no
 * longer stops for tools, appending each exchange to the request messages.
 */
var ToolUseHandler = class extends ResponseHandler {
  /**
   * @param request - the mutable request object; its `messages` array grows
   *   as tool exchanges are appended.
   * @param tools - tool objects exposing `name` and `invoke(input)`.
   */
  constructor(request, tools) {
    super();
    this.tools = tools;
    this.request = request;
    // Follow-up turns use the plain Messages client.
    this.client = new AnthropicDefaultClient();
  }
  // Run each tool_use block sequentially; unknown tools and thrown errors
  // are reported as tool_result error payloads instead of aborting.
  async executeToolCalls(tools, toolUseBlocks) {
    const results = [];
    for (const toolUse of toolUseBlocks) {
      const tool = tools == null ? void 0 : tools.find((t) => t.name === toolUse.name);
      if (!tool) {
        results.push({
          type: "tool_result",
          tool_use_id: toolUse.id,
          content: JSON.stringify({ error: `Unknown tool: ${toolUse.name}` })
        });
        continue;
      }
      try {
        const result = await tool.invoke(toolUse.input);
        results.push({
          type: "tool_result",
          tool_use_id: toolUse.id,
          content: JSON.stringify(result)
        });
      } catch (error) {
        results.push({
          type: "tool_result",
          tool_use_id: toolUse.id,
          content: JSON.stringify({ error: error.message })
        });
      }
    }
    return results;
  }
  // Loop while the model stopped for tools: echo the assistant turn into
  // the shared request, append tool results as a user turn, resend, and
  // record usage for each extra round-trip. Then delegate extraction to
  // the next handler.
  async handle(responseContext) {
    let providerResponse = responseContext.providerResponse;
    while (providerResponse.stop_reason === "tool_use") {
      const toolUseBlocks = providerResponse.content.filter(
        (block) => block.type === "tool_use"
      );
      this.request.messages.push({
        role: "assistant",
        content: providerResponse.content
      });
      const toolResults = await this.executeToolCalls(this.tools, toolUseBlocks);
      this.request.messages.push({
        role: "user",
        content: toolResults
      });
      const toolCallingResponse = await this.client.send(this.request);
      responseContext.addUsageEntry(
        new UsageEntry(
          toolCallingResponse.usage.input_tokens,
          toolCallingResponse.usage.output_tokens,
          toolCallingResponse.model
        )
      );
      responseContext.setProviderResponse(toolCallingResponse);
      providerResponse = toolCallingResponse;
    }
    return await this.nextHandler.handle(responseContext);
  }
};
|
|
720
|
+
|
|
721
|
+
// src/providers/anthropic/response-handler/usage.ts
/** Records token usage from the initial response, then always delegates. */
var UsageHandler2 = class extends ResponseHandler {
  async handle(responseContext) {
    const { providerResponse } = responseContext;
    const { usage } = providerResponse;
    if (usage) {
      const entry = new UsageEntry(usage.input_tokens, usage.output_tokens, providerResponse.model);
      responseContext.addUsageEntry(entry);
    }
    return await this.nextHandler.handle(responseContext);
  }
};
|
|
734
|
+
|
|
735
|
+
// src/providers/anthropic/response-handler/undhandled.ts
/** Terminal chain link: nothing matched, so fail with identifying details. */
var UnhandledResponseHandler = class extends ResponseHandler {
  async handle(responseContext) {
    const providerResponse = responseContext.providerResponse;
    const id = providerResponse?.id ?? "unknown";
    const model = providerResponse?.model ?? "unknown";
    throw new Error(`No response handler matched. response_id=${id} model=${model}`);
  }
};
|
|
745
|
+
|
|
746
|
+
// src/providers/anthropic/endpoint.ts
/**
 * Anthropic Messages API endpoint: builds the request, sends it, then runs
 * the response through a handler chain (usage -> tool use -> parsed
 * output -> text content -> unhandled error).
 */
var AnthropicEndpoint = class extends Endpoint {
  constructor() {
    super(...arguments);
    this.requestBuilder = new AnthropicRequestBuilder();
  }
  /** Send `command` to Anthropic and return the processed ResponseContext. */
  async sendRequest(command) {
    const request = this.buildRequest(command);
    // Structured-output requests resolve to the beta parsing client.
    const client = AnthropicClientResolver.resolve(request);
    const response = await client.send(request);
    const responseContext = new ResponseContext();
    responseContext.setProviderResponse(response);
    const usageHandler = new UsageHandler2();
    const toolUseHandler = new ToolUseHandler(request, command.tools ? command.tools : []);
    const parsedOutputHandler = new ParsedOutputHandler();
    const contentHandler = new ContentHandler2();
    const unhandledResponseHandler = new UnhandledResponseHandler();
    // Chain order matters: usage first, tool loops before extraction, with
    // a terminal handler that raises on unmatched responses.
    usageHandler.setNextHandler(toolUseHandler).setNextHandler(parsedOutputHandler).setNextHandler(contentHandler).setNextHandler(unhandledResponseHandler);
    const responseHandler = usageHandler;
    return await responseHandler.handle(responseContext);
  }
};
|
|
768
|
+
|
|
769
|
+
// src/providers/endpoint-resolver.ts
/** Maps a model id to the provider endpoint that serves it. */
var DefaultEndpointResolver = class extends EndpointResolver {
  /** True when `value` is a known OpenAI model id. */
  isOpenAIModel(value) {
    return OPENAI_MODELS.includes(value);
  }
  /** True when `value` is a known Anthropic model alias. */
  isAnthropicModel(value) {
    return ANTHROPIC_MODELS.includes(value);
  }
  /**
   * Return the endpoint for `model`.
   * @throws {Error} when no provider claims the model; the message names
   *   the offending model to ease debugging.
   */
  resolve(model) {
    if (this.isOpenAIModel(model)) {
      return new OpenAIResponses();
    }
    if (this.isAnthropicModel(model)) {
      return new AnthropicEndpoint();
    }
    throw new Error(`Provider not found for model: ${model}`);
  }
};
|
|
786
|
+
|
|
787
|
+
// src/core/agents/agent.ts
/**
 * High-level facade: holds a command and dispatches it through the
 * default gateway/resolver pipeline.
 */
var Agent = class {
  constructor(command) {
    this.gateway = new RequestGateway(new DefaultEndpointResolver());
    this.command = command;
  }
  /** Select the model used for subsequent calls. */
  setModel(model) {
    this.command.model = model;
  }
  /** Replace the conversation messages. */
  setMessages(messages) {
    this.command.messages = messages;
  }
  /** Replace the task prompt. */
  setTask(task) {
    this.command.task = task;
  }
  /** Attach a structured-output schema. */
  setStructuredOutput(schema) {
    this.command.structuredOutput = schema;
  }
  /**
   * Execute the command, optionally overriding the task first.
   * @returns the result of the provider pipeline for this command.
   */
  async act(task) {
    if (task) {
      this.setTask(task);
    }
    return await this.gateway.invoke(this.command);
  }
};
|
|
812
|
+
|
|
813
|
+
// src/core/workflow/schema/plan.ts
import z from "zod";
// Any model identifier accepted by the planner (OpenAI + Anthropic aliases).
var ModelSchema = z.enum([...OPENAI_MODELS, ...ANTHROPIC_MODELS]);
// Leaf node: a single task assigned to one model.
var TaskPlanNodeSchema = z.object({
  kind: z.literal("task"),
  task: z.string(),
  model: ModelSchema
});
// Recursive node: a workflow contains further plan nodes, so the schema is
// wrapped in z.lazy to defer the reference to PlanNodeSchema (declared below;
// `var` hoisting makes the binding visible by the time the thunk runs).
var WorkflowPlanNodeSchema = z.lazy(
  () => z.object({
    kind: z.literal("workflow"),
    mode: z.enum(["sequential", "parallel"]),
    units: z.array(PlanNodeSchema)
  })
);
// A plan node is either a task leaf or a nested workflow.
var PlanNodeSchema = z.union([TaskPlanNodeSchema, WorkflowPlanNodeSchema]);
// Top-level plan: a single root node.
var PlanSchema = z.object({
  root: PlanNodeSchema
});
|
|
832
|
+
|
|
833
|
+
// src/core/workflow/execution/parallel.ts
var ParallelExecution = class {
  /**
   * Starts every unit of the workflow at once and resolves with their
   * results in unit order (fail-fast: one rejection rejects the whole run).
   */
  async execute(workflow, hook) {
    const pending = workflow.units.map((unit) => unit.execute(hook));
    return await Promise.all(pending);
  }
};
|
|
844
|
+
|
|
845
|
+
// src/core/workflow/execution/sequential.ts
var SequentalExecution = class {
  /**
   * Runs the workflow's units one after another, awaiting each unit
   * before starting the next, and returns their results in order.
   */
  async execute(workflow, hook) {
    const outputs = [];
    for (let i = 0; i < workflow.units.length; i += 1) {
      outputs.push(await workflow.units[i].execute(hook));
    }
    return outputs;
  }
};
|
|
855
|
+
|
|
856
|
+
// src/core/workflow/execution/strategy-factory.ts
var ExecutionStrategyFactory = class {
  /**
   * Maps a workflow mode to its execution strategy.
   * "parallel" yields ParallelExecution; any other value falls back to
   * SequentalExecution (the original default branch).
   * Fix: use strict === instead of loose == (identical for the string
   * modes this receives, but avoids accidental type coercion).
   */
  static create(mode) {
    return mode === "parallel" ? new ParallelExecution() : new SequentalExecution();
  }
};
|
|
868
|
+
|
|
869
|
+
// src/core/workflow/hooks/cluster.ts
var ClusterHook = class {
  /** Composite hook: fans every lifecycle callback out to `hooks` in order. */
  constructor(hooks) {
    this.hooks = hooks;
  }
  beforeTask(task) {
    for (const hook of this.hooks) {
      hook.beforeTask(task);
    }
  }
  afterTask(task, result) {
    for (const hook of this.hooks) {
      hook.afterTask(task, result);
    }
  }
  beforeWorkflow(wf) {
    for (const hook of this.hooks) {
      hook.beforeWorkflow(wf);
    }
  }
  afterWorkflow(wf, result) {
    for (const hook of this.hooks) {
      hook.afterWorkflow(wf, result);
    }
  }
};
|
|
887
|
+
|
|
888
|
+
// src/core/workflow/hooks/logger.ts
var Logger = class {
  /** Console hook that traces workflow and task lifecycle events. */
  beforeWorkflow(workflow) {
    console.log(`[Workflow:start], Mode: ${workflow.mode}`);
  }
  afterWorkflow(workflow) {
    console.log(`[Workflow:end], Mode: ${workflow.mode}`);
  }
  // Fix: this logged "[Task:end]" before the task ran (copy-paste from
  // afterTask); the start of a task is now labeled "[Task:start]".
  beforeTask(task) {
    console.log(`[Task:start]: Model: ${task.getModel()} Task: ${task.getTask()}`);
  }
  afterTask(task, result) {
    console.log(`[Task:end]: Model: ${task.getModel()} Task: ${task.getTask()}`);
  }
};
|
|
903
|
+
|
|
904
|
+
// src/core/workflow/hooks/index.ts
// Default hook wiring: a single console Logger wrapped in a ClusterHook.
// DEFAULT_CLUSTER_HOOK is the default `hook` argument of Workflow.execute
// and Task.execute below.
var loggingHook = new Logger();
var defaultHooks = [loggingHook];
var clusterHook = new ClusterHook(defaultHooks);
var DEFAULT_CLUSTER_HOOK = clusterHook;
|
|
909
|
+
|
|
910
|
+
// src/core/workflow/workflow.ts
var Workflow = class extends WorkUnit {
  /**
   * A composite work unit: `units` executed according to `mode`
   * ("sequential" or "parallel").
   */
  constructor(mode, units) {
    super();
    this.mode = mode;
    this.units = units;
  }
  /**
   * Executes all units with the strategy matching this.mode, surrounding
   * the run with the hook's workflow lifecycle callbacks.
   */
  async execute(hook = DEFAULT_CLUSTER_HOOK) {
    hook.beforeWorkflow(this);
    const strategy = ExecutionStrategyFactory.create(this.mode);
    const results = await strategy.execute(this, hook);
    hook.afterWorkflow(this, results);
    return results;
  }
};
|
|
925
|
+
|
|
926
|
+
// src/core/workflow/task.ts
var Task = class extends WorkUnit {
  /** Leaf work unit: a single task prompt bound to one model. */
  constructor(task, model) {
    super();
    this.task = task;
    this.model = model;
  }
  /** The task prompt text. */
  getTask() {
    return this.task;
  }
  /** The model identifier this task runs on. */
  getModel() {
    return this.model;
  }
  /**
   * Builds a one-off Agent for this task's command and runs it,
   * surrounding the run with the hook's task lifecycle callbacks.
   */
  async execute(hook = DEFAULT_CLUSTER_HOOK) {
    hook.beforeTask(this);
    const agent = new Agent({ model: this.model, task: this.task });
    const result = await agent.act(this.task);
    hook.afterTask(this, result);
    return result;
  }
};
|
|
951
|
+
|
|
952
|
+
// src/core/workflow/mapper.ts
var PlanWorkflowMapper = class {
  /** Converts a validated plan into an executable Workflow tree. */
  static fromPlan(plan) {
    return this.fromNode(plan.root);
  }
  /** Maps the root node; the root must be a workflow, not a bare task. */
  static fromNode(node) {
    if (node.kind !== "workflow") {
      throw new Error("Root must be workflow");
    }
    // Fix: the original passed `this.mapNode` unbound into Array.map, so a
    // nested workflow node's recursive `this.mapNode` call ran with
    // `this === undefined` (ES modules are strict) and threw. Calling
    // through the class name keeps recursion working at any depth.
    return new Workflow(node.mode, node.units.map((child) => PlanWorkflowMapper.mapNode(child)));
  }
  /** Recursively maps a plan node to a Task leaf or nested Workflow. */
  static mapNode(node) {
    if (node.kind === "task") {
      return new Task(node.task, node.model);
    }
    return new Workflow(node.mode, node.units.map((child) => PlanWorkflowMapper.mapNode(child)));
  }
};
|
|
970
|
+
|
|
971
|
+
// src/core/agents/planner.ts
// System prompt prepended to every planning request (kept byte-identical —
// it is runtime behavior, not documentation).
var PROMPT = `You are a planner.

Rules:
- Use 'parallel' for the task that can be run in parallel.
- Pick model based on the task complexity
- Keep prompts actionable.
- Don't ask user for any input, just do the thing with available data you have.
`;
// Fix: removed the redundant constructor that only forwarded `command` to
// super — the implicit derived-class constructor does exactly that.
var PlanningAgent = class extends Agent {
  /**
   * Asks the model (with PlanSchema structured output) to plan `goal`,
   * then maps the returned plan into an executable Workflow.
   */
  async planFromGoal(goal) {
    this.setStructuredOutput(PlanSchema);
    const plan = await this.act(`${PROMPT}
Goal: ${goal}`);
    return PlanWorkflowMapper.fromPlan(plan);
  }
};
|
|
991
|
+
// Public API of the bundle.
export {
  Agent,
  PlanningAgent,
  Task,
  WorkUnit,
  Workflow
};
//# sourceMappingURL=index.mjs.map
|