@q1k-oss/behaviour-tree-workflows 0.0.2 → 0.0.4
This diff reflects the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- package/dist/ai-sdk/index.cjs +296 -0
- package/dist/ai-sdk/index.d.cts +82 -0
- package/dist/ai-sdk/index.d.ts +82 -0
- package/dist/ai-sdk/index.js +269 -0
- package/dist/index.cjs +516 -12
- package/dist/index.d.cts +370 -821
- package/dist/index.d.ts +370 -821
- package/dist/index.js +510 -12
- package/dist/types-BJPlUisg.d.cts +931 -0
- package/dist/types-BJPlUisg.d.ts +931 -0
- package/package.json +48 -3
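
At a glance, the headline change is a new `dist/ai-sdk` build output exporting a `createAIActivities` factory (shown in the hunks below). A minimal consumption sketch, assuming the expanded `package.json` (its contents are not shown in this diff) adds an `./ai-sdk` subpath export matching the import path used in the bundled JSDoc:

```typescript
// Sketch only — the "./ai-sdk" subpath and the provider wiring are assumptions
// taken from the JSDoc examples further down in this diff.
import { createAIActivities } from "@q1k-oss/behaviour-tree-workflows/ai-sdk";
import { createOpenAI } from "@ai-sdk/openai";

const aiActivities = createAIActivities({
  providers: {
    openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }),
  },
});
// aiActivities.llmChat and aiActivities.agentLoopTurn can then be merged into
// the package's BtreeActivities object alongside other activity handlers.
```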
package/dist/ai-sdk/index.cjs
@@ -0,0 +1,296 @@
+"use strict";
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __export = (target, all) => {
+  for (var name in all)
+    __defProp(target, name, { get: all[name], enumerable: true });
+};
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") {
+    for (let key of __getOwnPropNames(from))
+      if (!__hasOwnProp.call(to, key) && key !== except)
+        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+  }
+  return to;
+};
+var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+// src/ai-sdk/index.ts
+var ai_sdk_exports = {};
+__export(ai_sdk_exports, {
+  createAIActivities: () => createAIActivities
+});
+module.exports = __toCommonJS(ai_sdk_exports);
+
+// src/ai-sdk/llm-chat-activity.ts
+var import_ai = require("ai");
+var import_ai2 = require("ai");
+
+// src/ai-sdk/provider-resolver.ts
+function resolveModel(providers, provider, modelId) {
+  const providerFn = providers[provider];
+  if (!providerFn) {
+    const configured = Object.keys(providers).filter(
+      (k) => providers[k] != null
+    );
+    throw new Error(
+      `Provider "${provider}" is not configured. Configured providers: [${configured.join(", ") || "none"}]. Pass it in createAIActivities({ providers: { ${provider}: ... } })`
+    );
+  }
+  return providerFn(modelId);
+}
+
+// src/ai-sdk/message-mapper.ts
+function mapLLMMessages(messages, systemPrompt) {
+  const result = [];
+  if (systemPrompt) {
+    result.push({ role: "system", content: systemPrompt });
+  }
+  for (const msg of messages) {
+    if (msg.role === "system" && systemPrompt) continue;
+    result.push({ role: msg.role, content: msg.content });
+  }
+  return result;
+}
+function mapAgentMessages(messages, systemPrompt) {
+  const result = [];
+  if (systemPrompt) {
+    result.push({ role: "system", content: systemPrompt });
+  }
+  for (const msg of messages) {
+    if (msg.role === "system") {
+      if (!systemPrompt) {
+        result.push({ role: "system", content: msg.content });
+      }
+      continue;
+    }
+    if (typeof msg.content === "string") {
+      result.push({
+        role: msg.role,
+        content: msg.content
+      });
+      continue;
+    }
+    const blocks = msg.content;
+    if (msg.role === "assistant") {
+      mapAssistantMessage(blocks, result);
+    } else if (msg.role === "user") {
+      mapUserMessage(blocks, result);
+    }
+  }
+  return result;
+}
+function mapAssistantMessage(blocks, result) {
+  const contentParts = [];
+  for (const block of blocks) {
+    if (block.type === "text") {
+      contentParts.push({ type: "text", text: block.text });
+    } else if (block.type === "tool_use") {
+      contentParts.push({
+        type: "tool-call",
+        toolCallId: block.id,
+        toolName: block.name,
+        input: block.input
+      });
+    }
+  }
+  if (contentParts.length > 0) {
+    result.push({ role: "assistant", content: contentParts });
+  }
+}
+function mapUserMessage(blocks, result) {
+  const toolResults = blocks.filter(
+    (b) => b.type === "tool_result"
+  );
+  if (toolResults.length > 0) {
+    const toolResultParts = toolResults.map((b) => ({
+      type: "tool-result",
+      toolCallId: b.tool_use_id,
+      toolName: b.tool_use_id,
+      // AI SDK v6 requires toolName; use toolCallId as fallback
+      output: b.is_error ? { type: "error-text", value: b.content } : { type: "text", value: b.content }
+    }));
+    result.push({ role: "tool", content: toolResultParts });
+  } else {
+    const text = blocks.filter(
+      (b) => b.type === "text"
+    ).map((b) => b.text).join("\n");
+    result.push({ role: "user", content: text });
+  }
+}
+
+// src/ai-sdk/llm-chat-activity.ts
+var FINISH_REASON_MAP = {
+  stop: "stop",
+  length: "length",
+  "content-filter": "content_filter",
+  "tool-calls": "tool_calls",
+  error: "error"
+};
+function mapUsage(usage) {
+  return {
+    promptTokens: usage?.inputTokens ?? 0,
+    completionTokens: usage?.outputTokens ?? 0,
+    totalTokens: usage?.totalTokens ?? (usage?.inputTokens ?? 0) + (usage?.outputTokens ?? 0)
+  };
+}
+function createLLMChatActivity(options) {
+  return async (request) => {
+    const model = resolveModel(options.providers, request.provider, request.model);
+    const messages = mapLLMMessages(request.messages, request.systemPrompt);
+    const timeout = request.timeout ?? options.defaultTimeout ?? 6e4;
+    const abortController = new AbortController();
+    const timer = setTimeout(() => abortController.abort(), timeout);
+    try {
+      if (request.responseFormat === "json" && request.jsonSchema) {
+        const result2 = await (0, import_ai.generateObject)({
+          model,
+          messages,
+          schema: (0, import_ai2.jsonSchema)(request.jsonSchema),
+          temperature: request.temperature,
+          maxOutputTokens: request.maxTokens,
+          abortSignal: abortController.signal
+        });
+        return {
+          content: JSON.stringify(result2.object),
+          parsed: result2.object,
+          usage: mapUsage(result2.usage),
+          model: request.model,
+          finishReason: FINISH_REASON_MAP[result2.finishReason] ?? "stop"
+        };
+      }
+      const result = await (0, import_ai.generateText)({
+        model,
+        messages,
+        temperature: request.temperature,
+        maxOutputTokens: request.maxTokens,
+        abortSignal: abortController.signal
+      });
+      let parsed;
+      if (request.responseFormat === "json") {
+        try {
+          parsed = JSON.parse(result.text);
+        } catch {
+        }
+      }
+      return {
+        content: result.text,
+        parsed,
+        usage: mapUsage(result.usage),
+        model: request.model,
+        finishReason: FINISH_REASON_MAP[result.finishReason] ?? "stop"
+      };
+    } finally {
+      clearTimeout(timer);
+    }
+  };
+}
+
+// src/ai-sdk/agent-loop-turn-activity.ts
+var import_ai4 = require("ai");
+
+// src/ai-sdk/tool-mapper.ts
+var import_ai3 = require("ai");
+function mapToolDefinitions(tools) {
+  const result = {};
+  for (const t of tools) {
+    result[t.name] = (0, import_ai3.tool)({
+      description: t.description,
+      inputSchema: (0, import_ai3.jsonSchema)(t.inputSchema)
+    });
+  }
+  return result;
+}
+
+// src/ai-sdk/agent-loop-turn-activity.ts
+function mapStopReason(finishReason) {
+  if (finishReason === "tool-calls") return "tool_use";
+  if (finishReason === "length") return "max_tokens";
+  return "end_turn";
+}
+function extractToolCalls(toolCalls) {
+  if (!toolCalls || toolCalls.length === 0) return void 0;
+  return toolCalls.map((tc) => ({
+    id: tc.toolCallId,
+    name: tc.toolName,
+    input: tc.input
+  }));
+}
+function mapUsage2(usage) {
+  return {
+    promptTokens: usage?.inputTokens ?? 0,
+    completionTokens: usage?.outputTokens ?? 0,
+    totalTokens: usage?.totalTokens ?? (usage?.inputTokens ?? 0) + (usage?.outputTokens ?? 0)
+  };
+}
+function createAgentLoopTurnActivity(options) {
+  return async (request) => {
+    const model = resolveModel(
+      options.providers,
+      request.provider,
+      request.model
+    );
+    const messages = mapAgentMessages(request.messages, request.systemPrompt);
+    const tools = request.tools && request.tools.length > 0 ? mapToolDefinitions(request.tools) : void 0;
+    if (request.streamChannelId && options.onStreamToken) {
+      return executeWithStreaming(
+        model,
+        messages,
+        tools,
+        request,
+        options
+      );
+    }
+    const result = await (0, import_ai4.generateText)({
+      model,
+      messages,
+      tools,
+      temperature: request.temperature,
+      maxOutputTokens: request.maxTokens
+    });
+    return {
+      content: result.text,
+      toolCalls: extractToolCalls(result.toolCalls),
+      stopReason: mapStopReason(result.finishReason),
+      usage: mapUsage2(result.usage)
+    };
+  };
+}
+async function executeWithStreaming(model, messages, tools, request, options) {
+  const channelId = request.streamChannelId;
+  const result = (0, import_ai4.streamText)({
+    model,
+    messages,
+    tools,
+    temperature: request.temperature,
+    maxOutputTokens: request.maxTokens
+  });
+  for await (const chunk of result.textStream) {
+    options.onStreamToken(channelId, chunk);
+  }
+  options.onStreamComplete?.(channelId);
+  const finalText = await result.text;
+  const finalToolCalls = await result.toolCalls;
+  const finalFinishReason = await result.finishReason;
+  const finalUsage = await result.usage;
+  return {
+    content: finalText,
+    toolCalls: extractToolCalls(finalToolCalls),
+    stopReason: mapStopReason(finalFinishReason),
+    usage: mapUsage2(finalUsage)
+  };
+}
+
+// src/ai-sdk/index.ts
+function createAIActivities(options) {
+  return {
+    llmChat: createLLMChatActivity(options),
+    agentLoopTurn: createAgentLoopTurnActivity(options)
+  };
+}
+// Annotate the CommonJS export names for ESM import in node:
+0 && (module.exports = {
+  createAIActivities
+});
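
To make the mapping layer in the hunk above concrete, here is a small sketch of the shapes `mapAgentMessages` appears to translate between: Anthropic-style content blocks on the input side and AI SDK model messages on the output side. The input shape is an assumption inferred from the block types the compiled mapper checks ("text", "tool_use", "tool_result"); the real request types live in `types-BJPlUisg.d.ts`, which this hunk does not include.

```typescript
// Hypothetical agent-loop input, roughly Anthropic Messages API shaped:
const input = [
  { role: "assistant", content: [
    { type: "text", text: "Let me check the weather." },
    { type: "tool_use", id: "call_1", name: "get_weather", input: { city: "Oslo" } },
  ]},
  { role: "user", content: [
    { type: "tool_result", tool_use_id: "call_1", content: "4°C, cloudy", is_error: false },
  ]},
];

// What mapAgentMessages(input, "You are helpful.") would return, per the code above:
const output = [
  { role: "system", content: "You are helpful." },
  { role: "assistant", content: [
    { type: "text", text: "Let me check the weather." },
    { type: "tool-call", toolCallId: "call_1", toolName: "get_weather", input: { city: "Oslo" } },
  ]},
  { role: "tool", content: [
    // toolName falls back to the tool call ID, matching the inline comment
    // in the compiled output.
    { type: "tool-result", toolCallId: "call_1", toolName: "call_1",
      output: { type: "text", value: "4°C, cloudy" } },
  ]},
];
```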
package/dist/ai-sdk/index.d.cts
@@ -0,0 +1,82 @@
+import { B as BtreeActivities } from '../types-BJPlUisg.cjs';
+import { LanguageModel } from 'ai';
+
+/**
+ * Configuration types for the AI SDK activity factory
+ */
+
+/**
+ * Provider configuration — maps provider names to AI SDK model factories.
+ * Each entry is a function that takes a model ID and returns a LanguageModel.
+ *
+ * @example
+ * ```typescript
+ * import { createAnthropic } from "@ai-sdk/anthropic";
+ * import { createOpenAI } from "@ai-sdk/openai";
+ *
+ * const providers: AIProviderConfig = {
+ *   anthropic: createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY }),
+ *   openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }),
+ * };
+ * ```
+ */
+interface AIProviderConfig {
+  anthropic?: (modelId: string) => LanguageModel;
+  openai?: (modelId: string) => LanguageModel;
+  google?: (modelId: string) => LanguageModel;
+  ollama?: (modelId: string) => LanguageModel;
+}
+/**
+ * Options for creating AI SDK-based activity implementations
+ */
+interface CreateAIActivitiesOptions {
+  /** Provider configuration — maps provider names to AI SDK model factories */
+  providers: AIProviderConfig;
+  /** Default timeout for LLM calls in milliseconds (default: 60000) */
+  defaultTimeout?: number;
+  /** Callback for streaming tokens — receives (channelId, textDelta) */
+  onStreamToken?: (channelId: string, textDelta: string) => void;
+  /** Callback when stream completes */
+  onStreamComplete?: (channelId: string) => void;
+}
+
+/**
+ * AI SDK Activity Factory
+ *
+ * Creates BtreeActivities implementations for `llmChat` and `agentLoopTurn`
+ * backed by the Vercel AI SDK. Supports Anthropic, OpenAI, Google, and Ollama.
+ *
+ * @example
+ * ```typescript
+ * import { createAIActivities } from "@q1k-oss/behaviour-tree-workflows/ai-sdk";
+ * import { createAnthropic } from "@ai-sdk/anthropic";
+ * import { createOpenAI } from "@ai-sdk/openai";
+ *
+ * const aiActivities = createAIActivities({
+ *   providers: {
+ *     anthropic: createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY }),
+ *     openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }),
+ *   },
+ *   onStreamToken: (channelId, text) => {
+ *     ws.send(JSON.stringify({ channel: channelId, text }));
+ *   },
+ * });
+ *
+ * // Merge with other activities
+ * const activities: BtreeActivities = {
+ *   ...aiActivities,
+ *   executeAgentTool: myToolExecutor,
+ * };
+ * ```
+ */
+
+/**
+ * Create AI SDK-based activity implementations for behavior tree LLM nodes.
+ *
+ * Returns a partial BtreeActivities object with `llmChat` and `agentLoopTurn`
+ * implemented using the Vercel AI SDK. Other activities (executeAgentTool,
+ * executePieceAction, etc.) must be provided separately.
+ */
+declare function createAIActivities(options: CreateAIActivitiesOptions): Required<Pick<BtreeActivities, "llmChat" | "agentLoopTurn">>;
+
+export { type AIProviderConfig, type CreateAIActivitiesOptions, createAIActivities };
package/dist/ai-sdk/index.d.ts
@@ -0,0 +1,82 @@
+import { B as BtreeActivities } from '../types-BJPlUisg.js';
+import { LanguageModel } from 'ai';
+
+/**
+ * Configuration types for the AI SDK activity factory
+ */
+
+/**
+ * Provider configuration — maps provider names to AI SDK model factories.
+ * Each entry is a function that takes a model ID and returns a LanguageModel.
+ *
+ * @example
+ * ```typescript
+ * import { createAnthropic } from "@ai-sdk/anthropic";
+ * import { createOpenAI } from "@ai-sdk/openai";
+ *
+ * const providers: AIProviderConfig = {
+ *   anthropic: createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY }),
+ *   openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }),
+ * };
+ * ```
+ */
+interface AIProviderConfig {
+  anthropic?: (modelId: string) => LanguageModel;
+  openai?: (modelId: string) => LanguageModel;
+  google?: (modelId: string) => LanguageModel;
+  ollama?: (modelId: string) => LanguageModel;
+}
+/**
+ * Options for creating AI SDK-based activity implementations
+ */
+interface CreateAIActivitiesOptions {
+  /** Provider configuration — maps provider names to AI SDK model factories */
+  providers: AIProviderConfig;
+  /** Default timeout for LLM calls in milliseconds (default: 60000) */
+  defaultTimeout?: number;
+  /** Callback for streaming tokens — receives (channelId, textDelta) */
+  onStreamToken?: (channelId: string, textDelta: string) => void;
+  /** Callback when stream completes */
+  onStreamComplete?: (channelId: string) => void;
+}
+
+/**
+ * AI SDK Activity Factory
+ *
+ * Creates BtreeActivities implementations for `llmChat` and `agentLoopTurn`
+ * backed by the Vercel AI SDK. Supports Anthropic, OpenAI, Google, and Ollama.
+ *
+ * @example
+ * ```typescript
+ * import { createAIActivities } from "@q1k-oss/behaviour-tree-workflows/ai-sdk";
+ * import { createAnthropic } from "@ai-sdk/anthropic";
+ * import { createOpenAI } from "@ai-sdk/openai";
+ *
+ * const aiActivities = createAIActivities({
+ *   providers: {
+ *     anthropic: createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY }),
+ *     openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }),
+ *   },
+ *   onStreamToken: (channelId, text) => {
+ *     ws.send(JSON.stringify({ channel: channelId, text }));
+ *   },
+ * });
+ *
+ * // Merge with other activities
+ * const activities: BtreeActivities = {
+ *   ...aiActivities,
+ *   executeAgentTool: myToolExecutor,
+ * };
+ * ```
+ */
+
+/**
+ * Create AI SDK-based activity implementations for behavior tree LLM nodes.
+ *
+ * Returns a partial BtreeActivities object with `llmChat` and `agentLoopTurn`
+ * implemented using the Vercel AI SDK. Other activities (executeAgentTool,
+ * executePieceAction, etc.) must be provided separately.
+ */
+declare function createAIActivities(options: CreateAIActivitiesOptions): Required<Pick<BtreeActivities, "llmChat" | "agentLoopTurn">>;
+
+export { type AIProviderConfig, type CreateAIActivitiesOptions, createAIActivities };
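
For completeness, a hedged sketch of calling the returned `llmChat` activity in JSON mode. The request field names (`provider`, `model`, `messages`, `responseFormat`, `jsonSchema`, `maxTokens`) are taken from the compiled activity in `dist/ai-sdk/index.cjs` above; the exact request and response types are declared in `types-BJPlUisg.d.ts`, which this diff does not expand, so treat the shapes below as assumptions.

```typescript
import { createAIActivities } from "@q1k-oss/behaviour-tree-workflows/ai-sdk";
import { createAnthropic } from "@ai-sdk/anthropic";

const { llmChat } = createAIActivities({
  providers: { anthropic: createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY }) },
});

// JSON mode: the compiled code routes this through generateObject() and
// returns the object both stringified (content) and parsed (parsed).
const response = await llmChat({
  provider: "anthropic",
  model: "claude-sonnet-4-5", // hypothetical model ID
  messages: [{ role: "user", content: "Summarise this diff as JSON." }],
  responseFormat: "json",
  jsonSchema: {
    type: "object",
    properties: { summary: { type: "string" } },
    required: ["summary"],
  },
  maxTokens: 512,
});
// response.parsed holds the schema-constrained object; response.usage carries
// prompt/completion token counts mapped from the AI SDK usage fields.
```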