@page-agent/llms 0.0.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Alibaba
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,41 @@
1
+ # @page-agent/llms
2
+
3
+ LLM client with a **reflection-before-action** mental model for page-agent.
4
+
5
+ ## Why This Package Exists
6
+
7
+ The LLM module and the agent logic are inherently coupled. This package exists not to decouple them, but to **define the interface contract** between the LLM and the agent.
8
+
9
+ The core abstraction is the `MacroToolInput` — a structured output format that **forces the model to reflect before acting**.
10
+
11
+ ## The Reflection-Before-Action Model
12
+
13
+ Every tool call must first output its reasoning state before the actual action:
14
+
15
+ ```typescript
16
+ interface MacroToolInput {
17
+ // Reflection (mandatory before any action)
18
+ evaluation_previous_goal: string // How well did the previous action work?
19
+ memory: string // Key information to remember
20
+ next_goal: string // What to accomplish next
21
+
22
+ // Action (the actual operation)
23
+ action: Record<string, any>
24
+ }
25
+ ```
26
+
27
+ This design ensures that:
28
+
29
+ 1. **The model evaluates its previous action** before deciding the next step
30
+ 2. **Working memory is explicitly maintained** across conversation turns
31
+ 3. **Goals are clearly stated**, making the agent's reasoning transparent and debuggable
32
+
33
+ ## Key Components
34
+
35
+ | Export | Description |
36
+ |--------|-------------|
37
+ | `LLM` | Main LLM client class with retry logic |
38
+ | `MacroToolInput` | The reflection-before-action input schema |
39
+ | `AgentBrain` | Agent's thinking state (eval, memory, goal) |
40
+ | `LLMConfig` | Configuration for LLM connection |
41
+ | `parseLLMConfig` | Parse and apply defaults to config |
@@ -0,0 +1,116 @@
1
import { z } from 'zod';

/**
 * Agent brain state - the reflection-before-action model
 *
 * Every tool call must first reflect on:
 * - evaluation_previous_goal: How well did the previous action achieve its goal?
 * - memory: Key information to remember for future steps
 * - next_goal: What should be accomplished in the next action?
 */
export declare interface AgentBrain {
    evaluation_previous_goal: string;
    memory: string;
    next_goal: string;
}

/**
 * Invoke result (strict typing, supports generics): the tool call that was
 * made, the tool's return value, and token accounting for the round.
 */
export declare interface InvokeResult<TResult = unknown> {
    toolCall: {
        name: string;
        args: any;
    };
    toolResult: TResult;
    usage: {
        promptTokens: number;
        completionTokens: number;
        totalTokens: number;
        // Optional provider-specific details; only present when reported.
        cachedTokens?: number;
        reasoningTokens?: number;
    };
    // Raw provider response, kept for debugging.
    rawResponse?: unknown;
}

/**
 * Stateless one-round LLM caller with built-in retry.
 * Dispatches "retry" and "error" CustomEvents (hence extends EventTarget).
 */
export declare class LLM extends EventTarget {
    config: Required<LLMConfig>;
    client: LLMClient;
    constructor(config: LLMConfig);
    /**
     * - call llm api *once*
     * - invoke tool call *once*
     * - return the result of the tool
     */
    invoke(messages: Message[], tools: Record<string, Tool>, abortSignal: AbortSignal): Promise<InvokeResult>;
}

/**
 * LLM Client interface
 * Note: Does not use generics because each tool in the tools array has different types
 */
export declare interface LLMClient {
    invoke(messages: Message[], tools: Record<string, Tool>, abortSignal?: AbortSignal): Promise<InvokeResult>;
}

/**
 * LLM configuration for PageAgent.
 * Every field is optional; omitted fields fall back to package defaults
 * (see parseLLMConfig).
 */
export declare interface LLMConfig {
    baseURL?: string;
    apiKey?: string;
    model?: string;
    temperature?: number;
    maxTokens?: number;
    maxRetries?: number;
}

/**
 * MacroTool input structure
 *
 * This is the core abstraction that enforces the "reflection-before-action" mental model.
 * Before executing any action, the LLM must output its reasoning state
 * (the inherited AgentBrain fields) alongside the action to perform.
 */
export declare interface MacroToolInput extends AgentBrain {
    action: Record<string, any>;
}

/**
 * MacroTool output structure: the parsed input paired with the tool's
 * textual output.
 */
export declare interface MacroToolResult {
    input: MacroToolInput;
    output: string;
}

/**
 * Message format - OpenAI standard (industry standard)
 */
export declare interface Message {
    role: 'system' | 'user' | 'assistant' | 'tool';
    content?: string | null;
    tool_calls?: {
        id: string;
        type: 'function';
        function: {
            name: string;
            // JSON-encoded argument object, per the OpenAI wire format.
            arguments: string;
        };
    }[];
    tool_call_id?: string;
    name?: string;
}

/** Fill an LLMConfig with package defaults for every omitted field. */
export declare function parseLLMConfig(config: LLMConfig): Required<LLMConfig>;

/**
 * Tool definition - uses Zod schema (LLM-agnostic)
 * Supports generics for type-safe parameters and return values
 */
export declare interface Tool<TParams = any, TResult = any> {
    description?: string;
    inputSchema: z.ZodType<TParams>;
    execute: (args: TParams) => Promise<TResult>;
}

export { }
@@ -0,0 +1,387 @@
1
// esbuild "keep names" helpers: restore .name on minified functions/classes.
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import chalk from "chalk";
import { z } from "zod";
// Fallback model/key/URL route through the public free-testing proxy; callers
// normally override all three via LLMConfig (applied in parseLLMConfig).
const DEFAULT_MODEL_NAME = "PAGE-AGENT-FREE-TESTING-RANDOM";
const DEFAULT_API_KEY = "PAGE-AGENT-FREE-TESTING-RANDOM";
const DEFAULT_BASE_URL = "https://hwcxiuzfylggtcktqgij.supabase.co/functions/v1/llm-testing-proxy";
// internal tuning
const LLM_MAX_RETRIES = 2;
const DEFAULT_TEMPERATURE = 0.7; // higher randomness helps auto-recovery
const DEFAULT_MAX_TOKENS = 4096;
11
+ const InvokeErrorType = {
12
+ // Retryable
13
+ NETWORK_ERROR: "network_error",
14
+ // Network error, retry
15
+ RATE_LIMIT: "rate_limit",
16
+ // Rate limit, retry
17
+ SERVER_ERROR: "server_error",
18
+ // 5xx, retry
19
+ NO_TOOL_CALL: "no_tool_call",
20
+ // Model did not call tool
21
+ INVALID_TOOL_ARGS: "invalid_tool_args",
22
+ // Tool args don't match schema
23
+ TOOL_EXECUTION_ERROR: "tool_execution_error",
24
+ // Tool execution error
25
+ UNKNOWN: "unknown",
26
+ // Non-retryable
27
+ AUTH_ERROR: "auth_error",
28
+ // Authentication failed
29
+ CONTEXT_LENGTH: "context_length",
30
+ // Prompt too long
31
+ CONTENT_FILTER: "content_filter"
32
+ // Content filtered
33
+ };
34
+ const _InvokeError = class _InvokeError extends Error {
35
+ type;
36
+ retryable;
37
+ statusCode;
38
+ rawError;
39
+ constructor(type, message, rawError) {
40
+ super(message);
41
+ this.name = "InvokeError";
42
+ this.type = type;
43
+ this.retryable = this.isRetryable(type);
44
+ this.rawError = rawError;
45
+ }
46
+ isRetryable(type) {
47
+ const retryableTypes = [
48
+ InvokeErrorType.NETWORK_ERROR,
49
+ InvokeErrorType.RATE_LIMIT,
50
+ InvokeErrorType.SERVER_ERROR,
51
+ InvokeErrorType.NO_TOOL_CALL,
52
+ InvokeErrorType.INVALID_TOOL_ARGS,
53
+ InvokeErrorType.TOOL_EXECUTION_ERROR,
54
+ InvokeErrorType.UNKNOWN
55
+ ];
56
+ return retryableTypes.includes(type);
57
+ }
58
+ };
59
+ __name(_InvokeError, "InvokeError");
60
+ let InvokeError = _InvokeError;
61
+ function zodToOpenAITool(name, tool) {
62
+ return {
63
+ type: "function",
64
+ function: {
65
+ name,
66
+ description: tool.description,
67
+ parameters: z.toJSONSchema(tool.inputSchema, { target: "openapi-3.0" })
68
+ }
69
+ };
70
+ }
71
+ __name(zodToOpenAITool, "zodToOpenAITool");
72
+ function lenientParseMacroToolCall(responseData, inputSchema) {
73
+ const choice = responseData.choices?.[0];
74
+ if (!choice) {
75
+ throw new InvokeError(InvokeErrorType.UNKNOWN, "No choices in response", responseData);
76
+ }
77
+ switch (choice.finish_reason) {
78
+ case "tool_calls":
79
+ case "function_call":
80
+ // gemini
81
+ case "stop":
82
+ break;
83
+ case "length":
84
+ throw new InvokeError(
85
+ InvokeErrorType.CONTEXT_LENGTH,
86
+ "Response truncated: max tokens reached"
87
+ );
88
+ case "content_filter":
89
+ throw new InvokeError(InvokeErrorType.CONTENT_FILTER, "Content filtered by safety system");
90
+ default:
91
+ throw new InvokeError(
92
+ InvokeErrorType.UNKNOWN,
93
+ `Unexpected finish_reason: ${choice.finish_reason}`
94
+ );
95
+ }
96
+ const actionSchema = inputSchema.shape.action;
97
+ if (!actionSchema) {
98
+ throw new Error('inputSchema must have an "action" field');
99
+ }
100
+ let arg = null;
101
+ const toolCall = choice.message?.tool_calls?.[0]?.function;
102
+ arg = toolCall?.arguments ?? null;
103
+ if (arg && toolCall.name !== "AgentOutput") {
104
+ console.log(chalk.yellow("lenientParseMacroToolCall: #1 fixing incorrect tool call"));
105
+ let tmpArg;
106
+ try {
107
+ tmpArg = JSON.parse(arg);
108
+ } catch (error) {
109
+ throw new InvokeError(
110
+ InvokeErrorType.INVALID_TOOL_ARGS,
111
+ "Failed to parse tool arguments as JSON",
112
+ error
113
+ );
114
+ }
115
+ arg = JSON.stringify({ action: { [toolCall.name]: tmpArg } });
116
+ }
117
+ if (!arg) {
118
+ arg = choice.message?.content.trim() || null;
119
+ }
120
+ if (!arg) {
121
+ throw new InvokeError(
122
+ InvokeErrorType.NO_TOOL_CALL,
123
+ "No tool call or content found in response",
124
+ responseData
125
+ );
126
+ }
127
+ let parsedArgs;
128
+ try {
129
+ parsedArgs = JSON.parse(arg);
130
+ } catch (error) {
131
+ throw new InvokeError(
132
+ InvokeErrorType.INVALID_TOOL_ARGS,
133
+ "Failed to parse tool arguments as JSON",
134
+ error
135
+ );
136
+ }
137
+ if (parsedArgs.action || parsedArgs.evaluation_previous_goal || parsedArgs.next_goal) {
138
+ if (!parsedArgs.action) {
139
+ console.log(chalk.yellow("lenientParseMacroToolCall: #2 fixing incorrect tool call"));
140
+ parsedArgs.action = {
141
+ wait: { seconds: 1 }
142
+ };
143
+ }
144
+ } else if (parsedArgs.type && parsedArgs.function) {
145
+ if (parsedArgs.function.name !== "AgentOutput")
146
+ throw new InvokeError(
147
+ InvokeErrorType.INVALID_TOOL_ARGS,
148
+ `Expected function name "AgentOutput", got "${parsedArgs.function.name}"`,
149
+ null
150
+ );
151
+ console.log(chalk.yellow("lenientParseMacroToolCall: #3 fixing incorrect tool call"));
152
+ parsedArgs = parsedArgs.function.arguments;
153
+ } else if (parsedArgs.name && parsedArgs.arguments) {
154
+ if (parsedArgs.name !== "AgentOutput")
155
+ throw new InvokeError(
156
+ InvokeErrorType.INVALID_TOOL_ARGS,
157
+ `Expected function name "AgentOutput", got "${parsedArgs.name}"`,
158
+ null
159
+ );
160
+ console.log(chalk.yellow("lenientParseMacroToolCall: #4 fixing incorrect tool call"));
161
+ parsedArgs = parsedArgs.arguments;
162
+ } else {
163
+ console.log(chalk.yellow("lenientParseMacroToolCall: #5 fixing incorrect tool call"));
164
+ parsedArgs = { action: parsedArgs };
165
+ }
166
+ if (typeof parsedArgs === "string") {
167
+ console.log(chalk.yellow("lenientParseMacroToolCall: #6 fixing incorrect tool call"));
168
+ try {
169
+ parsedArgs = JSON.parse(parsedArgs);
170
+ } catch (error) {
171
+ throw new InvokeError(
172
+ InvokeErrorType.INVALID_TOOL_ARGS,
173
+ "Failed to parse nested tool arguments as JSON",
174
+ error
175
+ );
176
+ }
177
+ }
178
+ const validation = inputSchema.safeParse(parsedArgs);
179
+ if (validation.success) {
180
+ return validation.data;
181
+ } else {
182
+ const action = parsedArgs.action ?? {};
183
+ const actionName = Object.keys(action)[0] || "unknown";
184
+ const actionArgs = JSON.stringify(action[actionName] || "unknown");
185
+ throw new InvokeError(
186
+ InvokeErrorType.INVALID_TOOL_ARGS,
187
+ `Tool arguments validation failed: action "${actionName}" with args ${actionArgs}`,
188
+ validation.error
189
+ );
190
+ }
191
+ }
192
+ __name(lenientParseMacroToolCall, "lenientParseMacroToolCall");
193
+ function modelPatch(body) {
194
+ const model = body.model || "";
195
+ if (model.toLowerCase().startsWith("claude")) {
196
+ body.tool_choice = { type: "tool", name: "AgentOutput" };
197
+ body.thinking = { type: "disabled" };
198
+ }
199
+ if (model.toLowerCase().includes("grok")) {
200
+ console.log("Applying Grok patch: removing tool_choice");
201
+ delete body.tool_choice;
202
+ console.log("Applying Grok patch: disable reasoning and thinking");
203
+ body.thinking = { type: "disabled", effort: "minimal" };
204
+ body.reasoning = { enabled: false, effort: "low" };
205
+ }
206
+ return body;
207
+ }
208
+ __name(modelPatch, "modelPatch");
209
+ const _OpenAIClient = class _OpenAIClient {
210
+ config;
211
+ constructor(config) {
212
+ this.config = config;
213
+ }
214
+ async invoke(messages, tools, abortSignal) {
215
+ const openaiTools = Object.entries(tools).map(([name, tool2]) => zodToOpenAITool(name, tool2));
216
+ let response;
217
+ try {
218
+ response = await fetch(`${this.config.baseURL}/chat/completions`, {
219
+ method: "POST",
220
+ headers: {
221
+ "Content-Type": "application/json",
222
+ Authorization: `Bearer ${this.config.apiKey}`
223
+ },
224
+ body: JSON.stringify(
225
+ modelPatch({
226
+ model: this.config.model,
227
+ temperature: this.config.temperature,
228
+ max_tokens: this.config.maxTokens,
229
+ messages,
230
+ tools: openaiTools,
231
+ // tool_choice: 'required',
232
+ tool_choice: { type: "function", function: { name: "AgentOutput" } },
233
+ // model specific params
234
+ // reasoning_effort: 'minimal',
235
+ // verbosity: 'low',
236
+ parallel_tool_calls: false
237
+ })
238
+ ),
239
+ signal: abortSignal
240
+ });
241
+ } catch (error) {
242
+ throw new InvokeError(InvokeErrorType.NETWORK_ERROR, "Network request failed", error);
243
+ }
244
+ if (!response.ok) {
245
+ const errorData = await response.json().catch();
246
+ const errorMessage = errorData.error?.message || response.statusText;
247
+ if (response.status === 401 || response.status === 403) {
248
+ throw new InvokeError(
249
+ InvokeErrorType.AUTH_ERROR,
250
+ `Authentication failed: ${errorMessage}`,
251
+ errorData
252
+ );
253
+ }
254
+ if (response.status === 429) {
255
+ throw new InvokeError(
256
+ InvokeErrorType.RATE_LIMIT,
257
+ `Rate limit exceeded: ${errorMessage}`,
258
+ errorData
259
+ );
260
+ }
261
+ if (response.status >= 500) {
262
+ throw new InvokeError(
263
+ InvokeErrorType.SERVER_ERROR,
264
+ `Server error: ${errorMessage}`,
265
+ errorData
266
+ );
267
+ }
268
+ throw new InvokeError(
269
+ InvokeErrorType.UNKNOWN,
270
+ `HTTP ${response.status}: ${errorMessage}`,
271
+ errorData
272
+ );
273
+ }
274
+ const data = await response.json();
275
+ const tool = tools.AgentOutput;
276
+ const macroToolInput = lenientParseMacroToolCall(data, tool.inputSchema);
277
+ let toolResult;
278
+ try {
279
+ toolResult = await tool.execute(macroToolInput);
280
+ } catch (e) {
281
+ throw new InvokeError(
282
+ InvokeErrorType.TOOL_EXECUTION_ERROR,
283
+ `Tool execution failed: ${e.message}`,
284
+ e
285
+ );
286
+ }
287
+ return {
288
+ toolCall: {
289
+ // id: toolCall.id,
290
+ name: "AgentOutput",
291
+ args: macroToolInput
292
+ },
293
+ toolResult,
294
+ usage: {
295
+ promptTokens: data.usage?.prompt_tokens ?? 0,
296
+ completionTokens: data.usage?.completion_tokens ?? 0,
297
+ totalTokens: data.usage?.total_tokens ?? 0,
298
+ cachedTokens: data.usage?.prompt_tokens_details?.cached_tokens,
299
+ reasoningTokens: data.usage?.completion_tokens_details?.reasoning_tokens
300
+ },
301
+ rawResponse: data
302
+ };
303
+ }
304
+ };
305
+ __name(_OpenAIClient, "OpenAIClient");
306
+ let OpenAIClient = _OpenAIClient;
307
+ function parseLLMConfig(config) {
308
+ return {
309
+ baseURL: config.baseURL ?? DEFAULT_BASE_URL,
310
+ apiKey: config.apiKey ?? DEFAULT_API_KEY,
311
+ model: config.model ?? DEFAULT_MODEL_NAME,
312
+ temperature: config.temperature ?? DEFAULT_TEMPERATURE,
313
+ maxTokens: config.maxTokens ?? DEFAULT_MAX_TOKENS,
314
+ maxRetries: config.maxRetries ?? LLM_MAX_RETRIES
315
+ };
316
+ }
317
+ __name(parseLLMConfig, "parseLLMConfig");
318
+ const _LLM = class _LLM extends EventTarget {
319
+ config;
320
+ client;
321
+ constructor(config) {
322
+ super();
323
+ this.config = parseLLMConfig(config);
324
+ this.client = new OpenAIClient({
325
+ model: this.config.model,
326
+ apiKey: this.config.apiKey,
327
+ baseURL: this.config.baseURL,
328
+ temperature: this.config.temperature,
329
+ maxTokens: this.config.maxTokens
330
+ });
331
+ }
332
+ /**
333
+ * - call llm api *once*
334
+ * - invoke tool call *once*
335
+ * - return the result of the tool
336
+ */
337
+ async invoke(messages, tools, abortSignal) {
338
+ return await withRetry(
339
+ async () => {
340
+ const result = await this.client.invoke(messages, tools, abortSignal);
341
+ return result;
342
+ },
343
+ // retry settings
344
+ {
345
+ maxRetries: this.config.maxRetries,
346
+ onRetry: /* @__PURE__ */ __name((current) => {
347
+ this.dispatchEvent(
348
+ new CustomEvent("retry", { detail: { current, max: this.config.maxRetries } })
349
+ );
350
+ }, "onRetry"),
351
+ onError: /* @__PURE__ */ __name((error) => {
352
+ this.dispatchEvent(new CustomEvent("error", { detail: { error } }));
353
+ }, "onError")
354
+ }
355
+ );
356
+ }
357
+ };
358
+ __name(_LLM, "LLM");
359
+ let LLM = _LLM;
360
+ async function withRetry(fn, settings) {
361
+ let retries = 0;
362
+ let lastError = null;
363
+ while (retries <= settings.maxRetries) {
364
+ if (retries > 0) {
365
+ settings.onRetry(retries);
366
+ await new Promise((resolve) => setTimeout(resolve, 100));
367
+ }
368
+ try {
369
+ return await fn();
370
+ } catch (error) {
371
+ console.error(error);
372
+ settings.onError(error);
373
+ if (error?.name === "AbortError") throw error;
374
+ if (error instanceof InvokeError && !error.retryable) throw error;
375
+ lastError = error;
376
+ retries++;
377
+ await new Promise((resolve) => setTimeout(resolve, 100));
378
+ }
379
+ }
380
+ throw lastError;
381
+ }
382
+ __name(withRetry, "withRetry");
383
// Public runtime surface; types are shipped separately in the .d.ts.
export {
  LLM,
  parseLLMConfig
};
//# sourceMappingURL=page-agent-llms.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"page-agent-llms.js","sources":["../../src/constants.ts","../../src/errors.ts","../../src/utils.ts","../../src/OpenAILenientClient.ts","../../src/index.ts"],"sourcesContent":["// Dev environment: use .env config if available, otherwise fallback to testing api\nexport const DEFAULT_MODEL_NAME: string =\n\timport.meta.env.DEV && import.meta.env.LLM_MODEL_NAME\n\t\t? import.meta.env.LLM_MODEL_NAME\n\t\t: 'PAGE-AGENT-FREE-TESTING-RANDOM'\n\nexport const DEFAULT_API_KEY: string =\n\timport.meta.env.DEV && import.meta.env.LLM_API_KEY\n\t\t? import.meta.env.LLM_API_KEY\n\t\t: 'PAGE-AGENT-FREE-TESTING-RANDOM'\n\nexport const DEFAULT_BASE_URL: string =\n\timport.meta.env.DEV && import.meta.env.LLM_BASE_URL\n\t\t? import.meta.env.LLM_BASE_URL\n\t\t: 'https://hwcxiuzfylggtcktqgij.supabase.co/functions/v1/llm-testing-proxy'\n\n// internal\n\nexport const LLM_MAX_RETRIES = 2\nexport const DEFAULT_TEMPERATURE = 0.7 // higher randomness helps auto-recovery\nexport const DEFAULT_MAX_TOKENS = 4096\n","/**\n * Error types and error handling for LLM invocations\n */\n\nexport const InvokeErrorType = {\n\t// Retryable\n\tNETWORK_ERROR: 'network_error', // Network error, retry\n\tRATE_LIMIT: 'rate_limit', // Rate limit, retry\n\tSERVER_ERROR: 'server_error', // 5xx, retry\n\tNO_TOOL_CALL: 'no_tool_call', // Model did not call tool\n\tINVALID_TOOL_ARGS: 'invalid_tool_args', // Tool args don't match schema\n\tTOOL_EXECUTION_ERROR: 'tool_execution_error', // Tool execution error\n\n\tUNKNOWN: 'unknown',\n\n\t// Non-retryable\n\tAUTH_ERROR: 'auth_error', // Authentication failed\n\tCONTEXT_LENGTH: 'context_length', // Prompt too long\n\tCONTENT_FILTER: 'content_filter', // Content filtered\n} as const\n\nexport type InvokeErrorType = (typeof InvokeErrorType)[keyof typeof InvokeErrorType]\n\nexport class InvokeError extends Error {\n\ttype: InvokeErrorType\n\tretryable: boolean\n\tstatusCode?: number\n\trawError?: unknown\n\n\tconstructor(type: InvokeErrorType, 
message: string, rawError?: unknown) {\n\t\tsuper(message)\n\t\tthis.name = 'InvokeError'\n\t\tthis.type = type\n\t\tthis.retryable = this.isRetryable(type)\n\t\tthis.rawError = rawError\n\t}\n\n\tprivate isRetryable(type: InvokeErrorType): boolean {\n\t\tconst retryableTypes: InvokeErrorType[] = [\n\t\t\tInvokeErrorType.NETWORK_ERROR,\n\t\t\tInvokeErrorType.RATE_LIMIT,\n\t\t\tInvokeErrorType.SERVER_ERROR,\n\t\t\tInvokeErrorType.NO_TOOL_CALL,\n\t\t\tInvokeErrorType.INVALID_TOOL_ARGS,\n\t\t\tInvokeErrorType.TOOL_EXECUTION_ERROR,\n\t\t\tInvokeErrorType.UNKNOWN,\n\t\t]\n\t\treturn retryableTypes.includes(type)\n\t}\n}\n","/**\n * Utility functions for LLM integration\n */\nimport chalk from 'chalk'\nimport { z } from 'zod'\n\nimport { InvokeError, InvokeErrorType } from './errors'\nimport type { MacroToolInput, Tool } from './types'\n\n/**\n * Convert Zod schema to OpenAI tool format\n * Uses Zod 4 native z.toJSONSchema()\n */\nexport function zodToOpenAITool(name: string, tool: Tool) {\n\treturn {\n\t\ttype: 'function' as const,\n\t\tfunction: {\n\t\t\tname,\n\t\t\tdescription: tool.description,\n\t\t\tparameters: z.toJSONSchema(tool.inputSchema, { target: 'openapi-3.0' }),\n\t\t},\n\t}\n}\n\n/**\n * Although some models cannot guarantee correct response. Common issues are fixable:\n * - Instead of returning a proper tool call. 
Return the tool call parameters in the message content.\n * - Returned tool calls or messages don't follow the nested MacroToolInput format.\n */\nexport function lenientParseMacroToolCall(\n\tresponseData: any,\n\tinputSchema: z.ZodObject<MacroToolInput & Record<string, any>>\n): MacroToolInput {\n\t// check\n\tconst choice = responseData.choices?.[0]\n\tif (!choice) {\n\t\tthrow new InvokeError(InvokeErrorType.UNKNOWN, 'No choices in response', responseData)\n\t}\n\n\t// check\n\tswitch (choice.finish_reason) {\n\t\tcase 'tool_calls':\n\t\tcase 'function_call': // gemini\n\t\tcase 'stop': // will try a robust parse\n\t\t\t// ✅ Normal\n\t\t\tbreak\n\t\tcase 'length':\n\t\t\t// ⚠️ Token limit reached\n\t\t\tthrow new InvokeError(\n\t\t\t\tInvokeErrorType.CONTEXT_LENGTH,\n\t\t\t\t'Response truncated: max tokens reached'\n\t\t\t)\n\t\tcase 'content_filter':\n\t\t\t// ❌ Content filtered\n\t\t\tthrow new InvokeError(InvokeErrorType.CONTENT_FILTER, 'Content filtered by safety system')\n\t\tdefault:\n\t\t\tthrow new InvokeError(\n\t\t\t\tInvokeErrorType.UNKNOWN,\n\t\t\t\t`Unexpected finish_reason: ${choice.finish_reason}`\n\t\t\t)\n\t}\n\n\t// Extract action schema from MacroToolInput schema\n\tconst actionSchema = inputSchema.shape.action\n\tif (!actionSchema) {\n\t\tthrow new Error('inputSchema must have an \"action\" field')\n\t}\n\n\t// patch stopReason mis-format\n\n\tlet arg: string | null = null\n\n\t// try to use tool call\n\tconst toolCall = choice.message?.tool_calls?.[0]?.function\n\targ = toolCall?.arguments ?? 
null\n\n\tif (arg && toolCall.name !== 'AgentOutput') {\n\t\t// TODO: check if toolCall.name is a valid action name\n\t\t// case: instead of AgentOutput, the model returned a action name as tool call\n\t\tconsole.log(chalk.yellow('lenientParseMacroToolCall: #1 fixing incorrect tool call'))\n\t\tlet tmpArg\n\t\ttry {\n\t\t\ttmpArg = JSON.parse(arg)\n\t\t} catch (error) {\n\t\t\tthrow new InvokeError(\n\t\t\t\tInvokeErrorType.INVALID_TOOL_ARGS,\n\t\t\t\t'Failed to parse tool arguments as JSON',\n\t\t\t\terror\n\t\t\t)\n\t\t}\n\t\targ = JSON.stringify({ action: { [toolCall.name]: tmpArg } })\n\t}\n\n\tif (!arg) {\n\t\t// try to use message content as JSON\n\t\targ = choice.message?.content.trim() || null\n\t}\n\n\tif (!arg) {\n\t\tthrow new InvokeError(\n\t\t\tInvokeErrorType.NO_TOOL_CALL,\n\t\t\t'No tool call or content found in response',\n\t\t\tresponseData\n\t\t)\n\t}\n\n\t// make sure is valid JSON\n\n\tlet parsedArgs: any\n\ttry {\n\t\tparsedArgs = JSON.parse(arg)\n\t} catch (error) {\n\t\tthrow new InvokeError(\n\t\t\tInvokeErrorType.INVALID_TOOL_ARGS,\n\t\t\t'Failed to parse tool arguments as JSON',\n\t\t\terror\n\t\t)\n\t}\n\n\t// patch incomplete formats\n\n\tif (parsedArgs.action || parsedArgs.evaluation_previous_goal || parsedArgs.next_goal) {\n\t\t// case: nested MacroToolInput format (correct format)\n\n\t\t// some models may give a empty action (they may think reasoning and action should be separate)\n\t\tif (!parsedArgs.action) {\n\t\t\tconsole.log(chalk.yellow('lenientParseMacroToolCall: #2 fixing incorrect tool call'))\n\t\t\tparsedArgs.action = {\n\t\t\t\twait: { seconds: 1 },\n\t\t\t}\n\t\t}\n\t} else if (parsedArgs.type && parsedArgs.function) {\n\t\t// case: upper level function call format provided. 
only keep its arguments\n\t\t// TODO: check if function name is a valid action name\n\t\tif (parsedArgs.function.name !== 'AgentOutput')\n\t\t\tthrow new InvokeError(\n\t\t\t\tInvokeErrorType.INVALID_TOOL_ARGS,\n\t\t\t\t`Expected function name \"AgentOutput\", got \"${parsedArgs.function.name}\"`,\n\t\t\t\tnull\n\t\t\t)\n\n\t\tconsole.log(chalk.yellow('lenientParseMacroToolCall: #3 fixing incorrect tool call'))\n\t\tparsedArgs = parsedArgs.function.arguments\n\t} else if (parsedArgs.name && parsedArgs.arguments) {\n\t\t// case: upper level function call format provided. only keep its arguments\n\t\t// TODO: check if function name is a valid action name\n\t\tif (parsedArgs.name !== 'AgentOutput')\n\t\t\tthrow new InvokeError(\n\t\t\t\tInvokeErrorType.INVALID_TOOL_ARGS,\n\t\t\t\t`Expected function name \"AgentOutput\", got \"${parsedArgs.name}\"`,\n\t\t\t\tnull\n\t\t\t)\n\n\t\tconsole.log(chalk.yellow('lenientParseMacroToolCall: #4 fixing incorrect tool call'))\n\t\tparsedArgs = parsedArgs.arguments\n\t} else {\n\t\t// case: only action parameters provided, wrap into MacroToolInput\n\t\t// TODO: check if action name is valid\n\t\tconsole.log(chalk.yellow('lenientParseMacroToolCall: #5 fixing incorrect tool call'))\n\t\tparsedArgs = { action: parsedArgs } as MacroToolInput\n\t}\n\n\t// make sure it's not wrapped as string\n\tif (typeof parsedArgs === 'string') {\n\t\tconsole.log(chalk.yellow('lenientParseMacroToolCall: #6 fixing incorrect tool call'))\n\t\ttry {\n\t\t\tparsedArgs = JSON.parse(parsedArgs)\n\t\t} catch (error) {\n\t\t\tthrow new InvokeError(\n\t\t\t\tInvokeErrorType.INVALID_TOOL_ARGS,\n\t\t\t\t'Failed to parse nested tool arguments as JSON',\n\t\t\t\terror\n\t\t\t)\n\t\t}\n\t}\n\n\tconst validation = inputSchema.safeParse(parsedArgs)\n\tif (validation.success) {\n\t\treturn validation.data as unknown as MacroToolInput\n\t} else {\n\t\tconst action = parsedArgs.action ?? 
{}\n\t\tconst actionName = Object.keys(action)[0] || 'unknown'\n\t\tconst actionArgs = JSON.stringify(action[actionName] || 'unknown')\n\n\t\t// TODO: check if action name is valid. give a readable error message\n\n\t\tthrow new InvokeError(\n\t\t\tInvokeErrorType.INVALID_TOOL_ARGS,\n\t\t\t`Tool arguments validation failed: action \"${actionName}\" with args ${actionArgs}`,\n\t\t\tvalidation.error\n\t\t)\n\t}\n}\n\nexport function modelPatch(body: Record<string, any>) {\n\tconst model: string = body.model || ''\n\n\tif (model.toLowerCase().startsWith('claude')) {\n\t\tbody.tool_choice = { type: 'tool', name: 'AgentOutput' }\n\t\tbody.thinking = { type: 'disabled' }\n\t\t// body.reasoning = { enabled: 'disabled' }\n\t}\n\n\tif (model.toLowerCase().includes('grok')) {\n\t\tconsole.log('Applying Grok patch: removing tool_choice')\n\t\tdelete body.tool_choice\n\t\tconsole.log('Applying Grok patch: disable reasoning and thinking')\n\t\tbody.thinking = { type: 'disabled', effort: 'minimal' }\n\t\tbody.reasoning = { enabled: false, effort: 'low' }\n\t}\n\n\treturn body\n}\n","/**\n * OpenAI Client implementation\n */\nimport { InvokeError, InvokeErrorType } from './errors'\nimport type {\n\tInvokeResult,\n\tLLMClient,\n\tMacroToolInput,\n\tMessage,\n\tOpenAIClientConfig,\n\tTool,\n} from './types'\nimport { lenientParseMacroToolCall, modelPatch, zodToOpenAITool } from './utils'\n\nexport class OpenAIClient implements LLMClient {\n\tconfig: OpenAIClientConfig\n\n\tconstructor(config: OpenAIClientConfig) {\n\t\tthis.config = config\n\t}\n\n\tasync invoke(\n\t\tmessages: Message[],\n\t\ttools: { AgentOutput: Tool<MacroToolInput> },\n\t\tabortSignal?: AbortSignal\n\t): Promise<InvokeResult> {\n\t\t// 1. Convert tools to OpenAI format\n\t\tconst openaiTools = Object.entries(tools).map(([name, tool]) => zodToOpenAITool(name, tool))\n\n\t\t// 2. 
Call API\n\t\tlet response: Response\n\t\ttry {\n\t\t\tresponse = await fetch(`${this.config.baseURL}/chat/completions`, {\n\t\t\t\tmethod: 'POST',\n\t\t\t\theaders: {\n\t\t\t\t\t'Content-Type': 'application/json',\n\t\t\t\t\tAuthorization: `Bearer ${this.config.apiKey}`,\n\t\t\t\t},\n\t\t\t\tbody: JSON.stringify(\n\t\t\t\t\tmodelPatch({\n\t\t\t\t\t\tmodel: this.config.model,\n\t\t\t\t\t\ttemperature: this.config.temperature,\n\t\t\t\t\t\tmax_tokens: this.config.maxTokens,\n\t\t\t\t\t\tmessages,\n\n\t\t\t\t\t\ttools: openaiTools,\n\t\t\t\t\t\t// tool_choice: 'required',\n\t\t\t\t\t\ttool_choice: { type: 'function', function: { name: 'AgentOutput' } },\n\n\t\t\t\t\t\t// model specific params\n\n\t\t\t\t\t\t// reasoning_effort: 'minimal',\n\t\t\t\t\t\t// verbosity: 'low',\n\t\t\t\t\t\tparallel_tool_calls: false,\n\t\t\t\t\t})\n\t\t\t\t),\n\t\t\t\tsignal: abortSignal,\n\t\t\t})\n\t\t} catch (error: unknown) {\n\t\t\t// Network error\n\t\t\tthrow new InvokeError(InvokeErrorType.NETWORK_ERROR, 'Network request failed', error)\n\t\t}\n\n\t\t// 3. 
Handle HTTP errors\n\t\tif (!response.ok) {\n\t\t\tconst errorData = await response.json().catch()\n\t\t\tconst errorMessage =\n\t\t\t\t(errorData as { error?: { message?: string } }).error?.message || response.statusText\n\n\t\t\tif (response.status === 401 || response.status === 403) {\n\t\t\t\tthrow new InvokeError(\n\t\t\t\t\tInvokeErrorType.AUTH_ERROR,\n\t\t\t\t\t`Authentication failed: ${errorMessage}`,\n\t\t\t\t\terrorData\n\t\t\t\t)\n\t\t\t}\n\t\t\tif (response.status === 429) {\n\t\t\t\tthrow new InvokeError(\n\t\t\t\t\tInvokeErrorType.RATE_LIMIT,\n\t\t\t\t\t`Rate limit exceeded: ${errorMessage}`,\n\t\t\t\t\terrorData\n\t\t\t\t)\n\t\t\t}\n\t\t\tif (response.status >= 500) {\n\t\t\t\tthrow new InvokeError(\n\t\t\t\t\tInvokeErrorType.SERVER_ERROR,\n\t\t\t\t\t`Server error: ${errorMessage}`,\n\t\t\t\t\terrorData\n\t\t\t\t)\n\t\t\t}\n\t\t\tthrow new InvokeError(\n\t\t\t\tInvokeErrorType.UNKNOWN,\n\t\t\t\t`HTTP ${response.status}: ${errorMessage}`,\n\t\t\t\terrorData\n\t\t\t)\n\t\t}\n\n\t\t// parse response\n\n\t\tconst data = await response.json()\n\t\tconst tool = tools.AgentOutput\n\t\tconst macroToolInput = lenientParseMacroToolCall(data, tool.inputSchema as any)\n\n\t\t// Execute tool\n\t\tlet toolResult: unknown\n\t\ttry {\n\t\t\ttoolResult = await tool.execute(macroToolInput)\n\t\t} catch (e) {\n\t\t\tthrow new InvokeError(\n\t\t\t\tInvokeErrorType.TOOL_EXECUTION_ERROR,\n\t\t\t\t`Tool execution failed: ${(e as Error).message}`,\n\t\t\t\te\n\t\t\t)\n\t\t}\n\n\t\t// Return result (including cache tokens)\n\t\treturn {\n\t\t\ttoolCall: {\n\t\t\t\t// id: toolCall.id,\n\t\t\t\tname: 'AgentOutput',\n\t\t\t\targs: macroToolInput,\n\t\t\t},\n\t\t\ttoolResult,\n\t\t\tusage: {\n\t\t\t\tpromptTokens: data.usage?.prompt_tokens ?? 0,\n\t\t\t\tcompletionTokens: data.usage?.completion_tokens ?? 0,\n\t\t\t\ttotalTokens: data.usage?.total_tokens ?? 
0,\n\t\t\t\tcachedTokens: data.usage?.prompt_tokens_details?.cached_tokens,\n\t\t\t\treasoningTokens: data.usage?.completion_tokens_details?.reasoning_tokens,\n\t\t\t},\n\t\t\trawResponse: data,\n\t\t}\n\t}\n}\n","/**\n * @topic LLM 与主流程的隔离\n * @reasoning\n * 将 llm 的调用和主流程分开是复杂的,\n * 因为 agent 的 tool call 通常集成在 llm 模块中,而而先得到 llm 返回,然后处理工具调用\n * tools 和 llm 调用的逻辑不可避免地耦合在一起,tool 的执行又和主流程耦合在一起\n * 而 history 的维护和更新逻辑,又必须嵌入多轮 tool call 中\n * @reasoning\n * - 放弃框架提供的自动的多轮调用,每轮调用都由主流程发起\n * - 理想情况下,llm 调用应该获得 structured output,然后由额外的模块触发 tool call,目前模型和框架都无法实现\n * - 当前只能将 llm api 和 本地 tool call 耦合在一起,不关心其中的衔接方式\n * @conclusion\n * - @llm responsibility boundary:\n * - call llm api with given messages and tools\n * - invoke tool call and get the result of the tool\n * - return the result to main loop\n * - @main_loop responsibility boundary:\n * - maintain all behaviors of an **agent**\n * @conclusion\n * - 这里的 llm 模块不是 agent,只负责一轮 llm 调用和工具调用,无状态\n */\n/**\n * @topic 结构化输出\n * @facts\n * - 几乎所有模型都支持 tool call schema\n * - 几乎所有模型都支持返回 json\n * - 只有 openAI/grok/gemini 支持 schema 并保证格式\n * - 主流模型都支持 tool_choice: required\n * - 除了 qwen 必须指定一个函数名 (9月上新后支持)\n * @conclusion\n * - 永远使用 tool call 来返回结构化数据,禁止模型直接返回(视为出错)\n * - 不能假设 tool 参数合法,必须有修复机制,而且修复也应该使用 tool call 返回\n */\nimport {\n\tDEFAULT_API_KEY,\n\tDEFAULT_BASE_URL,\n\tDEFAULT_MAX_TOKENS,\n\tDEFAULT_MODEL_NAME,\n\tDEFAULT_TEMPERATURE,\n\tLLM_MAX_RETRIES,\n} from './constants'\nimport { InvokeError } from './errors'\nimport { OpenAIClient } from './OpenAILenientClient'\nimport type {\n\tAgentBrain,\n\tInvokeResult,\n\tLLMClient,\n\tLLMConfig,\n\tMacroToolInput,\n\tMacroToolResult,\n\tMessage,\n\tTool,\n} from './types'\n\nexport type {\n\tAgentBrain,\n\tInvokeResult,\n\tLLMClient,\n\tLLMConfig,\n\tMacroToolInput,\n\tMacroToolResult,\n\tMessage,\n\tTool,\n}\n\nexport function parseLLMConfig(config: LLMConfig): Required<LLMConfig> {\n\treturn {\n\t\tbaseURL: config.baseURL ?? DEFAULT_BASE_URL,\n\t\tapiKey: config.apiKey ?? 
DEFAULT_API_KEY,\n\t\tmodel: config.model ?? DEFAULT_MODEL_NAME,\n\t\ttemperature: config.temperature ?? DEFAULT_TEMPERATURE,\n\t\tmaxTokens: config.maxTokens ?? DEFAULT_MAX_TOKENS,\n\t\tmaxRetries: config.maxRetries ?? LLM_MAX_RETRIES,\n\t}\n}\n\nexport class LLM extends EventTarget {\n\tconfig: Required<LLMConfig>\n\tclient: LLMClient\n\n\tconstructor(config: LLMConfig) {\n\t\tsuper()\n\t\tthis.config = parseLLMConfig(config)\n\n\t\t// Default to OpenAI client\n\t\tthis.client = new OpenAIClient({\n\t\t\tmodel: this.config.model,\n\t\t\tapiKey: this.config.apiKey,\n\t\t\tbaseURL: this.config.baseURL,\n\t\t\ttemperature: this.config.temperature,\n\t\t\tmaxTokens: this.config.maxTokens,\n\t\t})\n\t}\n\n\t/**\n\t * - call llm api *once*\n\t * - invoke tool call *once*\n\t * - return the result of the tool\n\t */\n\tasync invoke(\n\t\tmessages: Message[],\n\t\ttools: Record<string, Tool>,\n\t\tabortSignal: AbortSignal\n\t): Promise<InvokeResult> {\n\t\treturn await withRetry(\n\t\t\tasync () => {\n\t\t\t\tconst result = await this.client.invoke(messages, tools, abortSignal)\n\n\t\t\t\treturn result\n\t\t\t},\n\t\t\t// retry settings\n\t\t\t{\n\t\t\t\tmaxRetries: this.config.maxRetries,\n\t\t\t\tonRetry: (current: number) => {\n\t\t\t\t\tthis.dispatchEvent(\n\t\t\t\t\t\tnew CustomEvent('retry', { detail: { current, max: this.config.maxRetries } })\n\t\t\t\t\t)\n\t\t\t\t},\n\t\t\t\tonError: (error: Error) => {\n\t\t\t\t\tthis.dispatchEvent(new CustomEvent('error', { detail: { error } }))\n\t\t\t\t},\n\t\t\t}\n\t\t)\n\t}\n}\n\nasync function withRetry<T>(\n\tfn: () => Promise<T>,\n\tsettings: {\n\t\tmaxRetries: number\n\t\tonRetry: (retries: number) => void\n\t\tonError: (error: Error) => void\n\t}\n): Promise<T> {\n\tlet retries = 0\n\tlet lastError: Error | null = null\n\twhile (retries <= settings.maxRetries) {\n\t\tif (retries > 0) {\n\t\t\tsettings.onRetry(retries)\n\t\t\tawait new Promise((resolve) => setTimeout(resolve, 100))\n\t\t}\n\n\t\ttry {\n\t\t\treturn 
await fn()\n\t\t} catch (error: unknown) {\n\t\t\tconsole.error(error)\n\t\t\tsettings.onError(error as Error)\n\n\t\t\t// do not retry if aborted by user\n\t\t\tif ((error as { name?: string })?.name === 'AbortError') throw error\n\n\t\t\t// do not retry if error is not retryable (InvokeError)\n\t\t\tif (error instanceof InvokeError && !error.retryable) throw error\n\n\t\t\tlastError = error as Error\n\t\t\tretries++\n\n\t\t\tawait new Promise((resolve) => setTimeout(resolve, 100))\n\t\t}\n\t}\n\n\tthrow lastError!\n}\n"],"names":["tool"],"mappings":";;;;AACO,MAAM,qBAGT;AAEG,MAAM,kBAGT;AAEG,MAAM,mBAGT;AAIG,MAAM,kBAAkB;AACxB,MAAM,sBAAsB;AAC5B,MAAM,qBAAqB;AChB3B,MAAM,kBAAkB;AAAA;AAAA,EAE9B,eAAe;AAAA;AAAA,EACf,YAAY;AAAA;AAAA,EACZ,cAAc;AAAA;AAAA,EACd,cAAc;AAAA;AAAA,EACd,mBAAmB;AAAA;AAAA,EACnB,sBAAsB;AAAA;AAAA,EAEtB,SAAS;AAAA;AAAA,EAGT,YAAY;AAAA;AAAA,EACZ,gBAAgB;AAAA;AAAA,EAChB,gBAAgB;AAAA;AACjB;AAIO,MAAM,eAAN,MAAM,qBAAoB,MAAM;AAAA,EACtC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAEA,YAAY,MAAuB,SAAiB,UAAoB;AACvE,UAAM,OAAO;AACb,SAAK,OAAO;AACZ,SAAK,OAAO;AACZ,SAAK,YAAY,KAAK,YAAY,IAAI;AACtC,SAAK,WAAW;AAAA,EACjB;AAAA,EAEQ,YAAY,MAAgC;AACnD,UAAM,iBAAoC;AAAA,MACzC,gBAAgB;AAAA,MAChB,gBAAgB;AAAA,MAChB,gBAAgB;AAAA,MAChB,gBAAgB;AAAA,MAChB,gBAAgB;AAAA,MAChB,gBAAgB;AAAA,MAChB,gBAAgB;AAAA,IAAA;AAEjB,WAAO,eAAe,SAAS,IAAI;AAAA,EACpC;AACD;AA1BuC;AAAhC,IAAM,cAAN;ACVA,SAAS,gBAAgB,MAAc,MAAY;AACzD,SAAO;AAAA,IACN,MAAM;AAAA,IACN,UAAU;AAAA,MACT;AAAA,MACA,aAAa,KAAK;AAAA,MAClB,YAAY,EAAE,aAAa,KAAK,aAAa,EAAE,QAAQ,eAAe;AAAA,IAAA;AAAA,EACvE;AAEF;AATgB;AAgBT,SAAS,0BACf,cACA,aACiB;AAEjB,QAAM,SAAS,aAAa,UAAU,CAAC;AACvC,MAAI,CAAC,QAAQ;AACZ,UAAM,IAAI,YAAY,gBAAgB,SAAS,0BAA0B,YAAY;AAAA,EACtF;AAGA,UAAQ,OAAO,eAAA;AAAA,IACd,KAAK;AAAA,IACL,KAAK;AAAA;AAAA,IACL,KAAK;AAEJ;AAAA,IACD,KAAK;AAEJ,YAAM,IAAI;AAAA,QACT,gBAAgB;AAAA,QAChB;AAAA,MAAA;AAAA,IAEF,KAAK;AAEJ,YAAM,IAAI,YAAY,gBAAgB,gBAAgB,mCAAmC;AAAA,IAC1F;AACC,YAAM,IAAI;AAAA,QACT,gBAAgB;AAAA,QAChB,6BAA6B,OAAO,aAAa;AAAA,MAAA;AAAA,EAClD;AAIF,QAAM,eAAe,YAAY,MAAM;AACvC,MAAI,CAA
C,cAAc;AAClB,UAAM,IAAI,MAAM,yCAAyC;AAAA,EAC1D;AAIA,MAAI,MAAqB;AAGzB,QAAM,WAAW,OAAO,SAAS,aAAa,CAAC,GAAG;AAClD,QAAM,UAAU,aAAa;AAE7B,MAAI,OAAO,SAAS,SAAS,eAAe;AAG3C,YAAQ,IAAI,MAAM,OAAO,0DAA0D,CAAC;AACpF,QAAI;AACJ,QAAI;AACH,eAAS,KAAK,MAAM,GAAG;AAAA,IACxB,SAAS,OAAO;AACf,YAAM,IAAI;AAAA,QACT,gBAAgB;AAAA,QAChB;AAAA,QACA;AAAA,MAAA;AAAA,IAEF;AACA,UAAM,KAAK,UAAU,EAAE,QAAQ,EAAE,CAAC,SAAS,IAAI,GAAG,OAAA,GAAU;AAAA,EAC7D;AAEA,MAAI,CAAC,KAAK;AAET,UAAM,OAAO,SAAS,QAAQ,KAAA,KAAU;AAAA,EACzC;AAEA,MAAI,CAAC,KAAK;AACT,UAAM,IAAI;AAAA,MACT,gBAAgB;AAAA,MAChB;AAAA,MACA;AAAA,IAAA;AAAA,EAEF;AAIA,MAAI;AACJ,MAAI;AACH,iBAAa,KAAK,MAAM,GAAG;AAAA,EAC5B,SAAS,OAAO;AACf,UAAM,IAAI;AAAA,MACT,gBAAgB;AAAA,MAChB;AAAA,MACA;AAAA,IAAA;AAAA,EAEF;AAIA,MAAI,WAAW,UAAU,WAAW,4BAA4B,WAAW,WAAW;AAIrF,QAAI,CAAC,WAAW,QAAQ;AACvB,cAAQ,IAAI,MAAM,OAAO,0DAA0D,CAAC;AACpF,iBAAW,SAAS;AAAA,QACnB,MAAM,EAAE,SAAS,EAAA;AAAA,MAAE;AAAA,IAErB;AAAA,EACD,WAAW,WAAW,QAAQ,WAAW,UAAU;AAGlD,QAAI,WAAW,SAAS,SAAS;AAChC,YAAM,IAAI;AAAA,QACT,gBAAgB;AAAA,QAChB,8CAA8C,WAAW,SAAS,IAAI;AAAA,QACtE;AAAA,MAAA;AAGF,YAAQ,IAAI,MAAM,OAAO,0DAA0D,CAAC;AACpF,iBAAa,WAAW,SAAS;AAAA,EAClC,WAAW,WAAW,QAAQ,WAAW,WAAW;AAGnD,QAAI,WAAW,SAAS;AACvB,YAAM,IAAI;AAAA,QACT,gBAAgB;AAAA,QAChB,8CAA8C,WAAW,IAAI;AAAA,QAC7D;AAAA,MAAA;AAGF,YAAQ,IAAI,MAAM,OAAO,0DAA0D,CAAC;AACpF,iBAAa,WAAW;AAAA,EACzB,OAAO;AAGN,YAAQ,IAAI,MAAM,OAAO,0DAA0D,CAAC;AACpF,iBAAa,EAAE,QAAQ,WAAA;AAAA,EACxB;AAGA,MAAI,OAAO,eAAe,UAAU;AACnC,YAAQ,IAAI,MAAM,OAAO,0DAA0D,CAAC;AACpF,QAAI;AACH,mBAAa,KAAK,MAAM,UAAU;AAAA,IACnC,SAAS,OAAO;AACf,YAAM,IAAI;AAAA,QACT,gBAAgB;AAAA,QAChB;AAAA,QACA;AAAA,MAAA;AAAA,IAEF;AAAA,EACD;AAEA,QAAM,aAAa,YAAY,UAAU,UAAU;AACnD,MAAI,WAAW,SAAS;AACvB,WAAO,WAAW;AAAA,EACnB,OAAO;AACN,UAAM,SAAS,WAAW,UAAU,CAAA;AACpC,UAAM,aAAa,OAAO,KAAK,MAAM,EAAE,CAAC,KAAK;AAC7C,UAAM,aAAa,KAAK,UAAU,OAAO,UAAU,KAAK,SAAS;AAIjE,UAAM,IAAI;AAAA,MACT,gBAAgB;AAAA,MAChB,6CAA6C,UAAU,eAAe,UAAU;AAAA,MAChF,WAAW;AAAA,IAAA;AAAA,EAEb;AACD;AAnKgB;AAqKT,SAAS,WAAW,MAA2B;AACrD,QAAM,QAAgB,KAAK,SAAS;AAEpC,MAAI,MAAM,YAAA,EAAc,WAAW,QAAQ,GAAG;AAC7C,SAAK,cA
Ac,EAAE,MAAM,QAAQ,MAAM,cAAA;AACzC,SAAK,WAAW,EAAE,MAAM,WAAA;AAAA,EAEzB;AAEA,MAAI,MAAM,YAAA,EAAc,SAAS,MAAM,GAAG;AACzC,YAAQ,IAAI,2CAA2C;AACvD,WAAO,KAAK;AACZ,YAAQ,IAAI,qDAAqD;AACjE,SAAK,WAAW,EAAE,MAAM,YAAY,QAAQ,UAAA;AAC5C,SAAK,YAAY,EAAE,SAAS,OAAO,QAAQ,MAAA;AAAA,EAC5C;AAEA,SAAO;AACR;AAlBgB;ACpLT,MAAM,gBAAN,MAAM,cAAkC;AAAA,EAC9C;AAAA,EAEA,YAAY,QAA4B;AACvC,SAAK,SAAS;AAAA,EACf;AAAA,EAEA,MAAM,OACL,UACA,OACA,aACwB;AAExB,UAAM,cAAc,OAAO,QAAQ,KAAK,EAAE,IAAI,CAAC,CAAC,MAAMA,KAAI,MAAM,gBAAgB,MAAMA,KAAI,CAAC;AAG3F,QAAI;AACJ,QAAI;AACH,iBAAW,MAAM,MAAM,GAAG,KAAK,OAAO,OAAO,qBAAqB;AAAA,QACjE,QAAQ;AAAA,QACR,SAAS;AAAA,UACR,gBAAgB;AAAA,UAChB,eAAe,UAAU,KAAK,OAAO,MAAM;AAAA,QAAA;AAAA,QAE5C,MAAM,KAAK;AAAA,UACV,WAAW;AAAA,YACV,OAAO,KAAK,OAAO;AAAA,YACnB,aAAa,KAAK,OAAO;AAAA,YACzB,YAAY,KAAK,OAAO;AAAA,YACxB;AAAA,YAEA,OAAO;AAAA;AAAA,YAEP,aAAa,EAAE,MAAM,YAAY,UAAU,EAAE,MAAM,gBAAc;AAAA;AAAA;AAAA;AAAA,YAMjE,qBAAqB;AAAA,UAAA,CACrB;AAAA,QAAA;AAAA,QAEF,QAAQ;AAAA,MAAA,CACR;AAAA,IACF,SAAS,OAAgB;AAExB,YAAM,IAAI,YAAY,gBAAgB,eAAe,0BAA0B,KAAK;AAAA,IACrF;AAGA,QAAI,CAAC,SAAS,IAAI;AACjB,YAAM,YAAY,MAAM,SAAS,KAAA,EAAO,MAAA;AACxC,YAAM,eACJ,UAA+C,OAAO,WAAW,SAAS;AAE5E,UAAI,SAAS,WAAW,OAAO,SAAS,WAAW,KAAK;AACvD,cAAM,IAAI;AAAA,UACT,gBAAgB;AAAA,UAChB,0BAA0B,YAAY;AAAA,UACtC;AAAA,QAAA;AAAA,MAEF;AACA,UAAI,SAAS,WAAW,KAAK;AAC5B,cAAM,IAAI;AAAA,UACT,gBAAgB;AAAA,UAChB,wBAAwB,YAAY;AAAA,UACpC;AAAA,QAAA;AAAA,MAEF;AACA,UAAI,SAAS,UAAU,KAAK;AAC3B,cAAM,IAAI;AAAA,UACT,gBAAgB;AAAA,UAChB,iBAAiB,YAAY;AAAA,UAC7B;AAAA,QAAA;AAAA,MAEF;AACA,YAAM,IAAI;AAAA,QACT,gBAAgB;AAAA,QAChB,QAAQ,SAAS,MAAM,KAAK,YAAY;AAAA,QACxC;AAAA,MAAA;AAAA,IAEF;AAIA,UAAM,OAAO,MAAM,SAAS,KAAA;AAC5B,UAAM,OAAO,MAAM;AACnB,UAAM,iBAAiB,0BAA0B,MAAM,KAAK,WAAkB;AAG9E,QAAI;AACJ,QAAI;AACH,mBAAa,MAAM,KAAK,QAAQ,cAAc;AAAA,IAC/C,SAAS,GAAG;AACX,YAAM,IAAI;AAAA,QACT,gBAAgB;AAAA,QAChB,0BAA2B,EAAY,OAAO;AAAA,QAC9C;AAAA,MAAA;AAAA,IAEF;AAGA,WAAO;AAAA,MACN,UAAU;AAAA;AAAA,QAET,MAAM;AAAA,QACN,MAAM;AAAA,MAAA;AAAA,MAEP;AAAA,MACA,OAAO;AAAA,QACN,cAAc,KAAK,OAAO,iBAAiB;AAAA,QAC3C,kBAAkB,KAAK,OAAO,qBAAqB;AAAA,Q
ACnD,aAAa,KAAK,OAAO,gBAAgB;AAAA,QACzC,cAAc,KAAK,OAAO,uBAAuB;AAAA,QACjD,iBAAiB,KAAK,OAAO,2BAA2B;AAAA,MAAA;AAAA,MAEzD,aAAa;AAAA,IAAA;AAAA,EAEf;AACD;AAvH+C;AAAxC,IAAM,eAAN;ACmDA,SAAS,eAAe,QAAwC;AACtE,SAAO;AAAA,IACN,SAAS,OAAO,WAAW;AAAA,IAC3B,QAAQ,OAAO,UAAU;AAAA,IACzB,OAAO,OAAO,SAAS;AAAA,IACvB,aAAa,OAAO,eAAe;AAAA,IACnC,WAAW,OAAO,aAAa;AAAA,IAC/B,YAAY,OAAO,cAAc;AAAA,EAAA;AAEnC;AATgB;AAWT,MAAM,OAAN,MAAM,aAAY,YAAY;AAAA,EACpC;AAAA,EACA;AAAA,EAEA,YAAY,QAAmB;AAC9B,UAAA;AACA,SAAK,SAAS,eAAe,MAAM;AAGnC,SAAK,SAAS,IAAI,aAAa;AAAA,MAC9B,OAAO,KAAK,OAAO;AAAA,MACnB,QAAQ,KAAK,OAAO;AAAA,MACpB,SAAS,KAAK,OAAO;AAAA,MACrB,aAAa,KAAK,OAAO;AAAA,MACzB,WAAW,KAAK,OAAO;AAAA,IAAA,CACvB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,OACL,UACA,OACA,aACwB;AACxB,WAAO,MAAM;AAAA,MACZ,YAAY;AACX,cAAM,SAAS,MAAM,KAAK,OAAO,OAAO,UAAU,OAAO,WAAW;AAEpE,eAAO;AAAA,MACR;AAAA;AAAA,MAEA;AAAA,QACC,YAAY,KAAK,OAAO;AAAA,QACxB,SAAS,wBAAC,YAAoB;AAC7B,eAAK;AAAA,YACJ,IAAI,YAAY,SAAS,EAAE,QAAQ,EAAE,SAAS,KAAK,KAAK,OAAO,aAAW,CAAG;AAAA,UAAA;AAAA,QAE/E,GAJS;AAAA,QAKT,SAAS,wBAAC,UAAiB;AAC1B,eAAK,cAAc,IAAI,YAAY,SAAS,EAAE,QAAQ,EAAE,MAAA,EAAM,CAAG,CAAC;AAAA,QACnE,GAFS;AAAA,MAET;AAAA,IACD;AAAA,EAEF;AACD;AAhDqC;AAA9B,IAAM,MAAN;AAkDP,eAAe,UACd,IACA,UAKa;AACb,MAAI,UAAU;AACd,MAAI,YAA0B;AAC9B,SAAO,WAAW,SAAS,YAAY;AACtC,QAAI,UAAU,GAAG;AAChB,eAAS,QAAQ,OAAO;AACxB,YAAM,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,GAAG,CAAC;AAAA,IACxD;AAEA,QAAI;AACH,aAAO,MAAM,GAAA;AAAA,IACd,SAAS,OAAgB;AACxB,cAAQ,MAAM,KAAK;AACnB,eAAS,QAAQ,KAAc;AAG/B,UAAK,OAA6B,SAAS,aAAc,OAAM;AAG/D,UAAI,iBAAiB,eAAe,CAAC,MAAM,UAAW,OAAM;AAE5D,kBAAY;AACZ;AAEA,YAAM,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,GAAG,CAAC;AAAA,IACxD;AAAA,EACD;AAEA,QAAM;AACP;AApCe;"}
package/package.json ADDED
@@ -0,0 +1,43 @@
1
+ {
2
+ "name": "@page-agent/llms",
3
+ "version": "0.0.14",
4
+ "type": "module",
5
+ "main": "./dist/lib/page-agent-llms.js",
6
+ "module": "./dist/lib/page-agent-llms.js",
7
+ "types": "./dist/lib/index.d.ts",
8
+ "exports": {
9
+ ".": {
10
+ "types": "./dist/lib/index.d.ts",
11
+ "import": "./dist/lib/page-agent-llms.js",
12
+ "default": "./dist/lib/page-agent-llms.js"
13
+ }
14
+ },
15
+ "files": [
16
+ "dist/"
17
+ ],
18
+ "description": "LLM client with reflection-before-action mental model for page-agent",
19
+ "keywords": [
20
+ "page-agent",
21
+ "llm",
22
+ "openai",
23
+ "tool-calling",
24
+ "agent"
25
+ ],
26
+ "author": "Simon<gaomeng1900>",
27
+ "license": "MIT",
28
+ "repository": {
29
+ "type": "git",
30
+ "url": "https://github.com/alibaba/page-agent.git",
31
+ "directory": "packages/llms"
32
+ },
33
+ "homepage": "https://alibaba.github.io/page-agent/",
34
+ "scripts": {
35
+ "build": "vite build",
36
+ "prepublishOnly": "node -e \"const fs=require('fs');['LICENSE'].forEach(f=>fs.copyFileSync('../../'+f,f))\"",
37
+ "postpublish": "node -e \"['LICENSE'].forEach(f=>{try{require('fs').unlinkSync(f)}catch{}})\""
38
+ },
39
+ "dependencies": {
40
+ "chalk": "^5.6.2",
41
+ "zod": "^4.2.0"
42
+ }
43
+ }