@q1k-oss/behaviour-tree-workflows 0.0.2 → 0.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/ai-sdk/index.cjs +296 -0
- package/dist/ai-sdk/index.d.cts +82 -0
- package/dist/ai-sdk/index.d.ts +82 -0
- package/dist/ai-sdk/index.js +269 -0
- package/dist/index.cjs +516 -12
- package/dist/index.d.cts +370 -821
- package/dist/index.d.ts +370 -821
- package/dist/index.js +510 -12
- package/dist/types-BJPlUisg.d.cts +931 -0
- package/dist/types-BJPlUisg.d.ts +931 -0
- package/package.json +48 -3
|
@@ -0,0 +1,269 @@
|
|
|
1
|
+
// src/ai-sdk/llm-chat-activity.ts
|
|
2
|
+
import { generateText, generateObject } from "ai";
|
|
3
|
+
import { jsonSchema } from "ai";
|
|
4
|
+
|
|
5
|
+
// src/ai-sdk/provider-resolver.ts
|
|
6
|
+
/**
 * Resolve a language-model instance from the configured provider map.
 *
 * @param {Record<string, Function>} providers - Map of provider name → model factory.
 * @param {string} provider - Name of the provider requested by the activity.
 * @param {string} modelId - Model identifier passed to the provider factory.
 * @returns The model produced by the matching provider factory.
 * @throws {Error} When the requested provider is not configured.
 */
function resolveModel(providers, provider, modelId) {
  const factory = providers[provider];
  if (factory) {
    return factory(modelId);
  }
  // Build a helpful list of which providers ARE configured for the error text.
  const available = Object.keys(providers).filter((name) => providers[name] != null);
  const listed = available.join(", ") || "none";
  throw new Error(
    `Provider "${provider}" is not configured. Configured providers: [${listed}]. Pass it in createAIActivities({ providers: { ${provider}: ... } })`
  );
}
|
|
18
|
+
|
|
19
|
+
// src/ai-sdk/message-mapper.ts
|
|
20
|
+
/**
 * Map plain chat messages to AI SDK message shape.
 *
 * When an explicit systemPrompt is given it is prepended and any inline
 * system messages in the history are dropped (the explicit prompt wins);
 * otherwise inline system messages pass through unchanged.
 *
 * @param {Array<{role: string, content: any}>} messages - Chat history.
 * @param {string} [systemPrompt] - Optional explicit system prompt.
 * @returns {Array<{role: string, content: any}>} AI SDK-ready messages.
 */
function mapLLMMessages(messages, systemPrompt) {
  const mapped = systemPrompt ? [{ role: "system", content: systemPrompt }] : [];
  for (const message of messages) {
    if (systemPrompt && message.role === "system") {
      continue;
    }
    mapped.push({ role: message.role, content: message.content });
  }
  return mapped;
}
|
|
31
|
+
/**
 * Map agent-loop messages (which may carry structured content blocks) to
 * AI SDK message shape.
 *
 * An explicit systemPrompt is prepended and suppresses inline system
 * messages; string content passes through as-is; block content is
 * delegated to the role-specific mappers.
 *
 * @param {Array<{role: string, content: string | Array<object>}>} messages
 * @param {string} [systemPrompt] - Optional explicit system prompt.
 * @returns {Array<object>} AI SDK-ready messages.
 */
function mapAgentMessages(messages, systemPrompt) {
  const mapped = [];
  if (systemPrompt) {
    mapped.push({ role: "system", content: systemPrompt });
  }
  for (const message of messages) {
    const { role, content } = message;
    if (role === "system") {
      // Only keep inline system messages when no explicit prompt was given.
      if (!systemPrompt) {
        mapped.push({ role: "system", content });
      }
      continue;
    }
    if (typeof content === "string") {
      mapped.push({ role, content });
      continue;
    }
    // Structured content blocks: role-specific translation.
    if (role === "assistant") {
      mapAssistantMessage(content, mapped);
    } else if (role === "user") {
      mapUserMessage(content, mapped);
    }
  }
  return mapped;
}
|
|
59
|
+
/**
 * Translate assistant content blocks into a single AI SDK assistant message
 * appended to `result` (mutated in place).
 *
 * `text` blocks become text parts; `tool_use` blocks become `tool-call`
 * parts. Unrecognized block types are ignored, and if nothing maps, no
 * message is appended.
 *
 * @param {Array<object>} blocks - Assistant content blocks.
 * @param {Array<object>} result - Output message list (mutated).
 */
function mapAssistantMessage(blocks, result) {
  const parts = blocks.flatMap((block) => {
    if (block.type === "text") {
      return [{ type: "text", text: block.text }];
    }
    if (block.type === "tool_use") {
      return [{
        type: "tool-call",
        toolCallId: block.id,
        toolName: block.name,
        input: block.input
      }];
    }
    return [];
  });
  if (parts.length > 0) {
    result.push({ role: "assistant", content: parts });
  }
}
|
|
77
|
+
/**
 * Translate user content blocks into an AI SDK message appended to
 * `result` (mutated in place).
 *
 * If any `tool_result` block is present, a single `tool` role message is
 * emitted carrying all tool results; otherwise the `text` blocks are
 * joined with newlines into one user message.
 *
 * @param {Array<object>} blocks - User content blocks.
 * @param {Array<object>} result - Output message list (mutated).
 */
function mapUserMessage(blocks, result) {
  const toolResults = [];
  const textParts = [];
  for (const block of blocks) {
    if (block.type === "tool_result") {
      toolResults.push(block);
    } else if (block.type === "text") {
      textParts.push(block.text);
    }
  }
  if (toolResults.length === 0) {
    result.push({ role: "user", content: textParts.join("\n") });
    return;
  }
  const toolResultParts = toolResults.map((block) => ({
    type: "tool-result",
    toolCallId: block.tool_use_id,
    // AI SDK v6 requires toolName; use toolCallId as fallback
    toolName: block.tool_use_id,
    output: block.is_error
      ? { type: "error-text", value: block.content }
      : { type: "text", value: block.content }
  }));
  result.push({ role: "tool", content: toolResultParts });
}
|
|
97
|
+
|
|
98
|
+
// src/ai-sdk/llm-chat-activity.ts
|
|
99
|
+
// Translates AI SDK finish reasons (kebab-case) into this package's
// snake_case finish reasons; call sites fall back to "stop" for any
// reason not listed here (`FINISH_REASON_MAP[x] ?? "stop"`).
var FINISH_REASON_MAP = {
  stop: "stop",
  length: "length",
  "content-filter": "content_filter",
  "tool-calls": "tool_calls",
  error: "error"
};
|
|
106
|
+
/**
 * Normalize AI SDK token usage into {promptTokens, completionTokens,
 * totalTokens}, tolerating a missing usage object or missing fields
 * (each defaults to 0; total falls back to the sum of the parts).
 *
 * @param {{inputTokens?: number, outputTokens?: number, totalTokens?: number}} [usage]
 * @returns {{promptTokens: number, completionTokens: number, totalTokens: number}}
 */
function mapUsage(usage) {
  const promptTokens = usage?.inputTokens ?? 0;
  const completionTokens = usage?.outputTokens ?? 0;
  return {
    promptTokens,
    completionTokens,
    totalTokens: usage?.totalTokens ?? promptTokens + completionTokens
  };
}
|
|
113
|
+
/**
 * Build the llmChat activity: a single chat completion against the
 * resolved provider model, with an overall timeout enforced through
 * an AbortController.
 *
 * When responseFormat is "json" AND a jsonSchema is supplied, structured
 * output is produced via generateObject; otherwise plain generateText is
 * used, with a best-effort JSON.parse of the text when "json" was asked
 * for without a schema.
 *
 * @param {{providers: object, defaultTimeout?: number}} options
 * @returns {(request: object) => Promise<object>} The activity function.
 */
function createLLMChatActivity(options) {
  return async (request) => {
    const model = resolveModel(options.providers, request.provider, request.model);
    const messages = mapLLMMessages(request.messages, request.systemPrompt);
    // Per-request timeout wins, then the factory default, then 60s.
    const timeoutMs = request.timeout ?? options.defaultTimeout ?? 60000;
    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), timeoutMs);
    try {
      if (request.responseFormat === "json" && request.jsonSchema) {
        const objectResult = await generateObject({
          model,
          messages,
          schema: jsonSchema(request.jsonSchema),
          temperature: request.temperature,
          maxOutputTokens: request.maxTokens,
          abortSignal: controller.signal
        });
        return {
          content: JSON.stringify(objectResult.object),
          parsed: objectResult.object,
          usage: mapUsage(objectResult.usage),
          model: request.model,
          finishReason: FINISH_REASON_MAP[objectResult.finishReason] ?? "stop"
        };
      }
      const textResult = await generateText({
        model,
        messages,
        temperature: request.temperature,
        maxOutputTokens: request.maxTokens,
        abortSignal: controller.signal
      });
      let parsed;
      if (request.responseFormat === "json") {
        try {
          parsed = JSON.parse(textResult.text);
        } catch {
          // Best-effort: leave `parsed` undefined when the model emitted
          // invalid JSON; the raw text is still returned as `content`.
        }
      }
      return {
        content: textResult.text,
        parsed,
        usage: mapUsage(textResult.usage),
        model: request.model,
        finishReason: FINISH_REASON_MAP[textResult.finishReason] ?? "stop"
      };
    } finally {
      // Always disarm the timeout, on success and on abort/error alike.
      clearTimeout(timer);
    }
  };
}
|
|
164
|
+
|
|
165
|
+
// src/ai-sdk/agent-loop-turn-activity.ts
|
|
166
|
+
import { generateText as generateText2, streamText } from "ai";
|
|
167
|
+
|
|
168
|
+
// src/ai-sdk/tool-mapper.ts
|
|
169
|
+
import { tool, jsonSchema as jsonSchema2 } from "ai";
|
|
170
|
+
/**
 * Convert this package's tool definitions into the AI SDK tool map
 * (name → tool()), wrapping each input schema with jsonSchema validation.
 *
 * @param {Array<{name: string, description: string, inputSchema: object}>} tools
 * @returns {Record<string, object>} Tools keyed by name.
 */
function mapToolDefinitions(tools) {
  return Object.fromEntries(
    tools.map((definition) => [
      definition.name,
      tool({
        description: definition.description,
        inputSchema: jsonSchema2(definition.inputSchema)
      })
    ])
  );
}
|
|
180
|
+
|
|
181
|
+
// src/ai-sdk/agent-loop-turn-activity.ts
|
|
182
|
+
/**
 * Map an AI SDK finish reason to this package's agent-loop stop reason.
 * Anything other than "tool-calls" or "length" counts as a normal end
 * of turn.
 *
 * @param {string} finishReason - AI SDK finish reason.
 * @returns {"tool_use" | "max_tokens" | "end_turn"}
 */
function mapStopReason(finishReason) {
  switch (finishReason) {
    case "tool-calls":
      return "tool_use";
    case "length":
      return "max_tokens";
    default:
      return "end_turn";
  }
}
|
|
187
|
+
/**
 * Project AI SDK tool calls into this package's {id, name, input} shape.
 * Returns undefined (rather than an empty array) when there are no calls.
 *
 * @param {Array<{toolCallId: string, toolName: string, input: any}>} [toolCalls]
 * @returns {Array<{id: string, name: string, input: any}> | undefined}
 */
function extractToolCalls(toolCalls) {
  if (!toolCalls?.length) {
    return undefined;
  }
  return toolCalls.map(({ toolCallId, toolName, input }) => ({
    id: toolCallId,
    name: toolName,
    input
  }));
}
|
|
195
|
+
/**
 * Normalize AI SDK token usage for agent-loop results (bundler-duplicated
 * twin of the chat-activity usage mapper). Missing fields default to 0;
 * total falls back to input + output.
 *
 * @param {{inputTokens?: number, outputTokens?: number, totalTokens?: number}} [usage]
 * @returns {{promptTokens: number, completionTokens: number, totalTokens: number}}
 */
function mapUsage2(usage) {
  const input = usage?.inputTokens ?? 0;
  const output = usage?.outputTokens ?? 0;
  const total = usage?.totalTokens ?? input + output;
  return { promptTokens: input, completionTokens: output, totalTokens: total };
}
|
|
202
|
+
/**
 * Build the agentLoopTurn activity: one model turn of the agent loop,
 * optionally with tools, streamed token-by-token when the request names
 * a stream channel AND the factory was given an onStreamToken sink.
 *
 * @param {{providers: object, onStreamToken?: Function, onStreamComplete?: Function}} options
 * @returns {(request: object) => Promise<object>} The activity function.
 */
function createAgentLoopTurnActivity(options) {
  return async (request) => {
    const model = resolveModel(options.providers, request.provider, request.model);
    const messages = mapAgentMessages(request.messages, request.systemPrompt);
    // Only build the tool map when tools were actually supplied.
    const tools = request.tools && request.tools.length > 0
      ? mapToolDefinitions(request.tools)
      : undefined;
    if (request.streamChannelId && options.onStreamToken) {
      return executeWithStreaming(model, messages, tools, request, options);
    }
    const result = await generateText2({
      model,
      messages,
      tools,
      temperature: request.temperature,
      maxOutputTokens: request.maxTokens
    });
    return {
      content: result.text,
      toolCalls: extractToolCalls(result.toolCalls),
      stopReason: mapStopReason(result.finishReason),
      usage: mapUsage2(result.usage)
    };
  };
}
|
|
235
|
+
/**
 * Run one agent turn with streaming: forward each text delta to the
 * caller's onStreamToken sink, signal completion via onStreamComplete,
 * then assemble the final turn result from the stream's settled fields.
 *
 * Callers guarantee options.onStreamToken exists before invoking this.
 *
 * @param model - Resolved AI SDK model.
 * @param {Array<object>} messages - Mapped messages for this turn.
 * @param {Record<string, object>|undefined} tools - Mapped tool map.
 * @param {object} request - The activity request (channel id, sampling params).
 * @param {object} options - Factory options carrying the stream callbacks.
 * @returns {Promise<object>} {content, toolCalls, stopReason, usage}.
 */
async function executeWithStreaming(model, messages, tools, request, options) {
  const channelId = request.streamChannelId;
  const stream = streamText({
    model,
    messages,
    tools,
    temperature: request.temperature,
    maxOutputTokens: request.maxTokens
  });
  for await (const token of stream.textStream) {
    options.onStreamToken(channelId, token);
  }
  options.onStreamComplete?.(channelId);
  // The stream has fully drained, so these promises are already settled;
  // collect them together rather than awaiting one by one.
  const [content, toolCalls, finishReason, usage] = await Promise.all([
    stream.text,
    stream.toolCalls,
    stream.finishReason,
    stream.usage
  ]);
  return {
    content,
    toolCalls: extractToolCalls(toolCalls),
    stopReason: mapStopReason(finishReason),
    usage: mapUsage2(usage)
  };
}
|
|
259
|
+
|
|
260
|
+
// src/ai-sdk/index.ts
|
|
261
|
+
/**
 * Public entry point for the AI SDK integration: wires the configured
 * providers (and optional stream callbacks) into the two activity
 * implementations consumed by the workflows.
 *
 * @param {{providers: object, defaultTimeout?: number, onStreamToken?: Function, onStreamComplete?: Function}} options
 * @returns {{llmChat: Function, agentLoopTurn: Function}}
 */
function createAIActivities(options) {
  const llmChat = createLLMChatActivity(options);
  const agentLoopTurn = createAgentLoopTurnActivity(options);
  return { llmChat, agentLoopTurn };
}
|
|
267
|
+
export {
|
|
268
|
+
createAIActivities
|
|
269
|
+
};
|