llmist 6.2.0 → 8.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +901 -1661
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +6105 -34
- package/dist/index.d.ts +6105 -34
- package/dist/index.js +11955 -115
- package/dist/index.js.map +1 -1
- package/package.json +9 -36
- package/LICENSE +0 -21
- package/README.md +0 -506
- package/dist/chunk-36YSBSGB.js +0 -12346
- package/dist/chunk-36YSBSGB.js.map +0 -1
- package/dist/chunk-EJEP5MHQ.js +0 -1182
- package/dist/chunk-EJEP5MHQ.js.map +0 -1
- package/dist/cli.cjs +0 -18581
- package/dist/cli.cjs.map +0 -1
- package/dist/cli.d.cts +0 -1
- package/dist/cli.d.ts +0 -1
- package/dist/cli.js +0 -8042
- package/dist/cli.js.map +0 -1
- package/dist/mock-stream-DG4wF-NH.d.cts +0 -6345
- package/dist/mock-stream-DG4wF-NH.d.ts +0 -6345
- package/dist/testing/index.cjs +0 -12220
- package/dist/testing/index.cjs.map +0 -1
- package/dist/testing/index.d.cts +0 -710
- package/dist/testing/index.d.ts +0 -710
- package/dist/testing/index.js +0 -83
- package/dist/testing/index.js.map +0 -1
package/dist/index.cjs
CHANGED
@@ -45,6 +45,19 @@ var init_constants = __esm({
   }
 });
 
+// src/providers/constants.ts
+var ANTHROPIC_DEFAULT_MAX_OUTPUT_TOKENS, FALLBACK_CHARS_PER_TOKEN, OPENAI_MESSAGE_OVERHEAD_TOKENS, OPENAI_REPLY_PRIMING_TOKENS, OPENAI_NAME_FIELD_OVERHEAD_TOKENS;
+var init_constants2 = __esm({
+  "src/providers/constants.ts"() {
+    "use strict";
+    ANTHROPIC_DEFAULT_MAX_OUTPUT_TOKENS = 4096;
+    FALLBACK_CHARS_PER_TOKEN = 4;
+    OPENAI_MESSAGE_OVERHEAD_TOKENS = 4;
+    OPENAI_REPLY_PRIMING_TOKENS = 2;
+    OPENAI_NAME_FIELD_OVERHEAD_TOKENS = 1;
+  }
+});
+
 // src/core/input-content.ts
 function isTextPart(part) {
   return part.type === "text";
@@ -1877,6 +1890,9 @@ function parseEnvBoolean(value) {
   if (normalized === "false" || normalized === "0") return false;
   return void 0;
 }
+function stripAnsi(str) {
+  return str.replace(/\x1b\[[0-9;]*m/g, "");
+}
 function createLogger(options = {}) {
   const envMinLevel = parseLogLevel(process.env.LLMIST_LOG_LEVEL);
   const envLogFile = process.env.LLMIST_LOG_FILE?.trim() ?? "";
@@ -1885,36 +1901,62 @@ function createLogger(options = {}) {
   const defaultType = options.type ?? "pretty";
   const name = options.name ?? "llmist";
   const logReset = options.logReset ?? envLogReset ?? false;
-
-  let finalType = defaultType;
-  if (envLogFile) {
+  if (envLogFile && (!logFileInitialized || sharedLogFilePath !== envLogFile)) {
     try {
+      if (sharedLogFileStream) {
+        sharedLogFileStream.end();
+        sharedLogFileStream = void 0;
+      }
       (0, import_node_fs.mkdirSync)((0, import_node_path2.dirname)(envLogFile), { recursive: true });
       const flags = logReset ? "w" : "a";
-
-
+      sharedLogFileStream = (0, import_node_fs.createWriteStream)(envLogFile, { flags });
+      sharedLogFilePath = envLogFile;
+      logFileInitialized = true;
+      writeErrorCount = 0;
+      writeErrorReported = false;
+      sharedLogFileStream.on("error", (error) => {
+        writeErrorCount++;
+        if (!writeErrorReported) {
+          console.error(`[llmist] Log file write error: ${error.message}`);
+          writeErrorReported = true;
+        }
+        if (writeErrorCount >= MAX_WRITE_ERRORS_BEFORE_DISABLE) {
+          console.error(
+            `[llmist] Too many log file errors (${writeErrorCount}), disabling file logging`
+          );
+          sharedLogFileStream?.end();
+          sharedLogFileStream = void 0;
+        }
+      });
     } catch (error) {
       console.error("Failed to initialize LLMIST_LOG_FILE output:", error);
     }
   }
+  const useFileLogging = Boolean(sharedLogFileStream);
   const logger = new import_tslog.Logger({
     name,
     minLevel,
-    type:
-    //
-    hideLogPositionForProduction:
-
-
+    type: useFileLogging ? "pretty" : defaultType,
+    // Hide log position for file logging and non-pretty types
+    hideLogPositionForProduction: useFileLogging || defaultType !== "pretty",
+    prettyLogTemplate: LOG_TEMPLATE,
+    // Use overwrite to redirect tslog's formatted output to file instead of console
+    overwrite: useFileLogging ? {
+      transportFormatted: (logMetaMarkup, logArgs, _logErrors) => {
+        if (!sharedLogFileStream) return;
+        const meta = stripAnsi(logMetaMarkup);
+        const args = logArgs.map(
+          (arg) => typeof arg === "string" ? stripAnsi(arg) : JSON.stringify(arg)
+        );
+        const line = `${meta}${args.join(" ")}
+`;
+        sharedLogFileStream.write(line);
+      }
+    } : void 0
   });
-  if (logFileStream) {
-    logger.attachTransport((logObj) => {
-      logFileStream?.write(`${JSON.stringify(logObj)}
-`);
-    });
-  }
   return logger;
 }
-var import_node_fs, import_node_path2, import_tslog, LEVEL_NAME_TO_ID, defaultLogger;
+var import_node_fs, import_node_path2, import_tslog, LEVEL_NAME_TO_ID, sharedLogFilePath, sharedLogFileStream, logFileInitialized, writeErrorCount, writeErrorReported, MAX_WRITE_ERRORS_BEFORE_DISABLE, LOG_TEMPLATE, defaultLogger;
 var init_logger = __esm({
   "src/logging/logger.ts"() {
     "use strict";
@@ -1930,6 +1972,11 @@ var init_logger = __esm({
       error: 5,
       fatal: 6
     };
+    logFileInitialized = false;
+    writeErrorCount = 0;
+    writeErrorReported = false;
+    MAX_WRITE_ERRORS_BEFORE_DISABLE = 5;
+    LOG_TEMPLATE = "{{yyyy}}-{{mm}}-{{dd}} {{hh}}:{{MM}}:{{ss}}:{{ms}} {{logLevelName}} [{{name}}] ";
     defaultLogger = createLogger();
   }
 });
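The logger changes above route tslog output through a single shared write stream, strip ANSI color codes before writing, and disable file logging after MAX_WRITE_ERRORS_BEFORE_DISABLE (5) write errors. The following is a minimal standalone sketch of that sink pattern only; apart from the regex and the constant value taken from the diff, the function names and module layout are illustrative, not the package's API.

```typescript
// Sketch of an error-capped log-file sink, assuming the same behavior as the diff above.
import { createWriteStream, mkdirSync, type WriteStream } from "node:fs";
import { dirname } from "node:path";

const MAX_WRITE_ERRORS_BEFORE_DISABLE = 5;           // value from the diff
const stripAnsi = (s: string) => s.replace(/\x1b\[[0-9;]*m/g, "");

let stream: WriteStream | undefined;
let errorCount = 0;

export function openLogFile(path: string, reset = false): void {
  mkdirSync(dirname(path), { recursive: true });
  stream = createWriteStream(path, { flags: reset ? "w" : "a" });
  stream.on("error", (err) => {
    errorCount++;
    console.error(`log file write error: ${err.message}`);
    if (errorCount >= MAX_WRITE_ERRORS_BEFORE_DISABLE) {
      stream?.end();
      stream = undefined; // file logging stays disabled from here on
    }
  });
}

export function writeLine(text: string): void {
  // Strip terminal color codes so the file stays plain text.
  stream?.write(`${stripAnsi(text)}\n`);
}
```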
@@ -2543,7 +2590,138 @@ var AGENT_INTERNAL_KEY;
 var init_agent_internal_key = __esm({
   "src/agent/agent-internal-key.ts"() {
     "use strict";
-    AGENT_INTERNAL_KEY = Symbol("AGENT_INTERNAL_KEY");
+    AGENT_INTERNAL_KEY = /* @__PURE__ */ Symbol("AGENT_INTERNAL_KEY");
+  }
+});
+
+// src/core/retry.ts
+function resolveRetryConfig(config) {
+  if (!config) {
+    return { ...DEFAULT_RETRY_CONFIG };
+  }
+  return {
+    enabled: config.enabled ?? DEFAULT_RETRY_CONFIG.enabled,
+    retries: config.retries ?? DEFAULT_RETRY_CONFIG.retries,
+    minTimeout: config.minTimeout ?? DEFAULT_RETRY_CONFIG.minTimeout,
+    maxTimeout: config.maxTimeout ?? DEFAULT_RETRY_CONFIG.maxTimeout,
+    factor: config.factor ?? DEFAULT_RETRY_CONFIG.factor,
+    randomize: config.randomize ?? DEFAULT_RETRY_CONFIG.randomize,
+    onRetry: config.onRetry,
+    onRetriesExhausted: config.onRetriesExhausted,
+    shouldRetry: config.shouldRetry
+  };
+}
+function isRetryableError(error) {
+  const message = error.message.toLowerCase();
+  const name = error.name;
+  if (message.includes("429") || message.includes("rate limit") || message.includes("rate_limit")) {
+    return true;
+  }
+  if (message.includes("500") || message.includes("502") || message.includes("503") || message.includes("504") || message.includes("internal server error") || message.includes("bad gateway") || message.includes("service unavailable") || message.includes("gateway timeout")) {
+    return true;
+  }
+  if (message.includes("timeout") || message.includes("etimedout") || message.includes("timed out")) {
+    return true;
+  }
+  if (message.includes("econnreset") || message.includes("econnrefused") || message.includes("enotfound") || message.includes("connection") || message.includes("network")) {
+    return true;
+  }
+  if (name === "APIConnectionError" || name === "RateLimitError" || name === "InternalServerError" || name === "ServiceUnavailableError" || name === "APITimeoutError") {
+    return true;
+  }
+  if (message.includes("overloaded") || message.includes("capacity")) {
+    return true;
+  }
+  if (message.includes("401") || message.includes("403") || message.includes("400") || message.includes("404") || message.includes("authentication") || message.includes("unauthorized") || message.includes("forbidden") || message.includes("invalid") || message.includes("content policy") || name === "AuthenticationError" || name === "BadRequestError" || name === "NotFoundError" || name === "PermissionDeniedError") {
+    return false;
+  }
+  return false;
+}
+function formatLLMError(error) {
+  const message = error.message;
+  const name = error.name;
+  if (message.includes("RESOURCE_EXHAUSTED") || message.includes("429")) {
+    return "Rate limit exceeded (429) - retry after a few seconds";
+  }
+  if (message.toLowerCase().includes("rate limit") || message.toLowerCase().includes("rate_limit")) {
+    return "Rate limit exceeded - retry after a few seconds";
+  }
+  if (message.toLowerCase().includes("overloaded") || message.toLowerCase().includes("capacity")) {
+    return "API overloaded - retry later";
+  }
+  if (message.includes("500") || message.toLowerCase().includes("internal server error")) {
+    return "Internal server error (500) - the API is experiencing issues";
+  }
+  if (message.includes("502") || message.toLowerCase().includes("bad gateway")) {
+    return "Bad gateway (502) - the API is temporarily unavailable";
+  }
+  if (message.includes("503") || message.toLowerCase().includes("service unavailable")) {
+    return "Service unavailable (503) - the API is temporarily down";
+  }
+  if (message.includes("504") || message.toLowerCase().includes("gateway timeout")) {
+    return "Gateway timeout (504) - the request took too long";
+  }
+  if (message.toLowerCase().includes("timeout") || message.toLowerCase().includes("timed out")) {
+    return "Request timed out - the API took too long to respond";
+  }
+  if (message.toLowerCase().includes("econnrefused")) {
+    return "Connection refused - unable to reach the API";
+  }
+  if (message.toLowerCase().includes("econnreset")) {
+    return "Connection reset - the API closed the connection";
+  }
+  if (message.toLowerCase().includes("enotfound")) {
+    return "DNS error - unable to resolve API hostname";
+  }
+  if (message.includes("401") || message.toLowerCase().includes("unauthorized") || name === "AuthenticationError") {
+    return "Authentication failed - check your API key";
+  }
+  if (message.includes("403") || message.toLowerCase().includes("forbidden") || name === "PermissionDeniedError") {
+    return "Permission denied - your API key lacks required permissions";
+  }
+  if (message.includes("400") || name === "BadRequestError") {
+    const match = message.match(/message['":\s]+['"]?([^'"}\]]+)/i);
+    if (match) {
+      return `Bad request: ${match[1].trim()}`;
+    }
+    return "Bad request - check your input parameters";
+  }
+  if (message.toLowerCase().includes("content policy") || message.toLowerCase().includes("safety")) {
+    return "Content policy violation - the request was blocked";
+  }
+  try {
+    const parsed = JSON.parse(message);
+    const extractedMessage = parsed?.error?.message || parsed?.message;
+    if (typeof extractedMessage === "string" && extractedMessage.length > 0) {
+      return extractedMessage.trim();
+    }
+  } catch {
+  }
+  const jsonMatch = message.match(/["']?message["']?\s*[:=]\s*["']([^"']+)["']/i);
+  if (jsonMatch) {
+    return jsonMatch[1].trim();
+  }
+  if (message.length > 200) {
+    const firstPart = message.split(/[.!?\n]/)[0];
+    if (firstPart && firstPart.length > 10 && firstPart.length < 150) {
+      return firstPart.trim();
+    }
+    return message.slice(0, 150).trim() + "...";
+  }
+  return message;
+}
+var DEFAULT_RETRY_CONFIG;
+var init_retry = __esm({
+  "src/core/retry.ts"() {
+    "use strict";
+    DEFAULT_RETRY_CONFIG = {
+      enabled: true,
+      retries: 3,
+      minTimeout: 1e3,
+      maxTimeout: 3e4,
+      factor: 2,
+      randomize: true
+    };
   }
 });
 
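DEFAULT_RETRY_CONFIG above implies an exponential backoff of roughly 1 s, 2 s, 4 s (capped at 30 s), with jitter when randomize is true; later hunks show the agent wiring this config into the p-retry package. The sketch below only reproduces the delay schedule those numbers describe; the helper function is illustrative and not part of the package, and the jitter comment assumes the [1, 2) multiplier used by the retry/p-retry family.

```typescript
// Illustrative only: the backoff schedule implied by DEFAULT_RETRY_CONFIG.
interface BackoffConfig {
  retries: number;     // 3
  minTimeout: number;  // 1000 ms
  maxTimeout: number;  // 30000 ms
  factor: number;      // 2
  randomize: boolean;  // assumed: multiply by a random factor in [1, 2)
}

function backoffDelays(cfg: BackoffConfig): number[] {
  const delays: number[] = [];
  for (let attempt = 0; attempt < cfg.retries; attempt++) {
    const jitter = cfg.randomize ? 1 + Math.random() : 1;
    // Exponential growth from minTimeout, capped at maxTimeout.
    delays.push(Math.min(jitter * cfg.minTimeout * cfg.factor ** attempt, cfg.maxTimeout));
  }
  return delays;
}

// With the defaults: roughly [1000, 2000, 4000] ms before jitter.
console.log(backoffDelays({ retries: 3, minTimeout: 1000, maxTimeout: 30000, factor: 2, randomize: true }));
```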
@@ -3013,6 +3191,9 @@ var init_conversation_manager = __esm({
           }
         }
       }
+      getConversationHistory() {
+        return [...this.initialMessages, ...this.historyBuilder.build()];
+      }
     };
   }
 });
@@ -3655,19 +3836,6 @@ var init_base_provider = __esm({
   }
 });
 
-// src/providers/constants.ts
-var ANTHROPIC_DEFAULT_MAX_OUTPUT_TOKENS, FALLBACK_CHARS_PER_TOKEN, OPENAI_MESSAGE_OVERHEAD_TOKENS, OPENAI_REPLY_PRIMING_TOKENS, OPENAI_NAME_FIELD_OVERHEAD_TOKENS;
-var init_constants2 = __esm({
-  "src/providers/constants.ts"() {
-    "use strict";
-    ANTHROPIC_DEFAULT_MAX_OUTPUT_TOKENS = 4096;
-    FALLBACK_CHARS_PER_TOKEN = 4;
-    OPENAI_MESSAGE_OVERHEAD_TOKENS = 4;
-    OPENAI_REPLY_PRIMING_TOKENS = 2;
-    OPENAI_NAME_FIELD_OVERHEAD_TOKENS = 1;
-  }
-});
-
 // src/providers/utils.ts
 function readEnvVar(key) {
   if (typeof process === "undefined" || typeof process.env === "undefined") {
@@ -5900,11 +6068,13 @@ var init_model_registry = __esm({
   }
   /**
    * Get model specification by model ID
-   * @param modelId - Full model identifier
+   * @param modelId - Full model identifier, optionally with provider prefix
+   *   (e.g., 'gpt-5', 'claude-sonnet-4-5-20250929', 'anthropic:claude-sonnet-4-5')
    * @returns ModelSpec if found, undefined otherwise
    */
   getModelSpec(modelId) {
-
+    const normalizedId = modelId.includes(":") ? modelId.split(":")[1] : modelId;
+    return this.modelSpecs.find((model) => model.modelId === normalizedId);
   }
   /**
    * List all models, optionally filtered by provider
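getModelSpec now strips an optional provider prefix before the lookup, so a bare model ID and a provider-qualified one resolve to the same entry. A minimal sketch of that normalization, with ModelSpec reduced to the one field the lookup needs (the surrounding free function is illustrative, not the registry's actual shape):

```typescript
// Sketch of the provider-prefix normalization added to getModelSpec.
interface ModelSpec { modelId: string }

function getModelSpec(modelSpecs: ModelSpec[], modelId: string): ModelSpec | undefined {
  // "anthropic:claude-sonnet-4-5" -> "claude-sonnet-4-5"
  const normalizedId = modelId.includes(":") ? modelId.split(":")[1] : modelId;
  return modelSpecs.find((m) => m.modelId === normalizedId);
}

const specs = [{ modelId: "claude-sonnet-4-5" }];
getModelSpec(specs, "claude-sonnet-4-5");            // found
getModelSpec(specs, "anthropic:claude-sonnet-4-5");  // also found
```

This is also why the cost-reporting hunk below can pass the raw model string straight through: the registry now does the prefix handling itself.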
@@ -7146,9 +7316,8 @@ var init_cost_reporting_client = __esm({
    */
   reportCostFromUsage(model, inputTokens, outputTokens, cachedInputTokens = 0, cacheCreationInputTokens = 0) {
     if (inputTokens === 0 && outputTokens === 0) return;
-    const modelName = model.includes(":") ? model.split(":")[1] : model;
     const estimate = this.client.modelRegistry.estimateCost(
-
+      model,
       inputTokens,
       outputTokens,
       cachedInputTokens,
@@ -7471,10 +7640,11 @@ function getHostExportsInternal() {
   }
   return cachedHostExports;
 }
-var import_zod2, cachedHostExports, GadgetExecutor;
+var import_fast_deep_equal, import_zod2, cachedHostExports, GadgetExecutor;
 var init_executor = __esm({
   "src/gadgets/executor.ts"() {
     "use strict";
+    import_fast_deep_equal = __toESM(require("fast-deep-equal"), 1);
     import_zod2 = require("zod");
     init_builder();
     init_client();
@@ -7585,7 +7755,7 @@ var init_executor = __esm({
       try {
         const cleanedRaw = stripMarkdownFences(call.parametersRaw);
         const initialParse = parseBlockParams(cleanedRaw, { argPrefix: this.argPrefix });
-        const parametersWereModified = !
+        const parametersWereModified = !(0, import_fast_deep_equal.default)(rawParameters, initialParse);
         if (parametersWereModified) {
           this.logger.debug("Parameters modified by interceptor, skipping re-parse", {
             gadgetName: call.gadgetName
@@ -7662,7 +7832,9 @@ var init_executor = __esm({
         nodeId: gadgetNodeId,
         depth: gadgetDepth,
         // Host exports for external gadgets to use host's llmist classes
-        hostExports: getHostExportsInternal()
+        hostExports: getHostExportsInternal(),
+        // Logger for structured logging (respects CLI's log level/file config)
+        logger: this.logger
       };
       let rawResult;
       if (timeoutMs && timeoutMs > 0) {
@@ -7769,14 +7941,16 @@ var init_executor = __esm({
           executionTimeMs: Date.now() - startTime
         };
       }
-
+      const isHumanInputError = error instanceof Error && error.name === "HumanInputRequiredException" && "question" in error;
+      if (isHumanInputError) {
+        const question = error.question;
       this.logger.info("Gadget requested human input", {
         gadgetName: call.gadgetName,
-        question
+        question
       });
       if (this.requestHumanInput) {
         try {
-          const answer = await this.requestHumanInput(
+          const answer = await this.requestHumanInput(question);
           this.logger.debug("Human input received", {
             gadgetName: call.gadgetName,
             answerLength: answer.length
@@ -7832,27 +8006,6 @@ var init_executor = __esm({
   async executeAll(calls) {
     return Promise.all(calls.map((call) => this.execute(call)));
   }
-  /**
-   * Deep equality check for objects/arrays.
-   * Used to detect if parameters were modified by an interceptor.
-   */
-  deepEquals(a, b) {
-    if (a === b) return true;
-    if (a === null || b === null) return a === b;
-    if (typeof a !== typeof b) return false;
-    if (typeof a !== "object") return a === b;
-    if (Array.isArray(a) !== Array.isArray(b)) return false;
-    if (Array.isArray(a) && Array.isArray(b)) {
-      if (a.length !== b.length) return false;
-      return a.every((val, i) => this.deepEquals(val, b[i]));
-    }
-    const aObj = a;
-    const bObj = b;
-    const aKeys = Object.keys(aObj);
-    const bKeys = Object.keys(bObj);
-    if (aKeys.length !== bKeys.length) return false;
-    return aKeys.every((key) => this.deepEquals(aObj[key], bObj[key]));
-  }
 };
   }
 });
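The hand-rolled deepEquals above is replaced by the fast-deep-equal package, which the executor now uses to detect whether an interceptor returned a structurally different parameters object, since reference equality is not enough after an interceptor runs. A hedged usage sketch (the parameter values are made up for illustration):

```typescript
// Sketch: structural comparison with fast-deep-equal, as the executor now does.
import equal from "fast-deep-equal";

const original = { path: "README.md", options: { trim: true } };
const intercepted = { path: "README.md", options: { trim: true } };

original === intercepted;      // false - different object references
equal(original, intercepted);  // true  - structurally identical, so no re-parse is needed

const parametersWereModified = !equal(original, { ...original, path: "LICENSE" }); // true
```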
@@ -7890,6 +8043,11 @@ var init_stream_processor = __esm({
   inFlightExecutions = /* @__PURE__ */ new Map();
   /** Queue of completed gadget results ready to be yielded (for real-time streaming) */
   completedResultsQueue = [];
+  // Cross-iteration dependency tracking
+  /** Invocation IDs completed in previous iterations (read-only reference from Agent) */
+  priorCompletedInvocations;
+  /** Invocation IDs that failed in previous iterations (read-only reference from Agent) */
+  priorFailedInvocations;
   constructor(options) {
     this.iteration = options.iteration;
     this.registry = options.registry;
@@ -7898,6 +8056,8 @@ var init_stream_processor = __esm({
     this.tree = options.tree;
     this.parentNodeId = options.parentNodeId ?? null;
     this.baseDepth = options.baseDepth ?? 0;
+    this.priorCompletedInvocations = options.priorCompletedInvocations ?? /* @__PURE__ */ new Set();
+    this.priorFailedInvocations = options.priorFailedInvocations ?? /* @__PURE__ */ new Set();
     this.parser = new GadgetCallParser({
       startPrefix: options.gadgetStartPrefix,
       endPrefix: options.gadgetEndPrefix,
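With priorCompletedInvocations and priorFailedInvocations threaded in from the Agent, every dependency check in the hunks that follow consults both the current iteration's results and those accumulated from earlier iterations. A minimal sketch of that lookup; the Set and Map names mirror the diff, while the wrapping function and its return values are illustrative only:

```typescript
// Sketch of the cross-iteration dependency check the stream processor now performs.
function dependencyState(
  dep: string,
  completedResults: Map<string, unknown>,   // results from this iteration
  failedInvocations: Set<string>,           // failures from this iteration
  priorCompletedInvocations: Set<string>,   // accumulated by the Agent across iterations
  priorFailedInvocations: Set<string>,
): "satisfied" | "failed" | "pending" {
  if (failedInvocations.has(dep) || priorFailedInvocations.has(dep)) return "failed";
  if (completedResults.has(dep) || priorCompletedInvocations.has(dep)) return "satisfied";
  return "pending"; // the gadget stays queued until a later result arrives
}
```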
@@ -8096,62 +8256,11 @@ var init_stream_processor = __esm({
     }
     return [{ type: "text", content }];
   }
-  /**
-   * Process a gadget call through the full lifecycle, handling dependencies.
-   *
-   * Gadgets without dependencies (or with all dependencies satisfied) execute immediately.
-   * Gadgets with unsatisfied dependencies are queued for later execution.
-   * After each execution, pending gadgets are checked to see if they can now run.
-   */
-  async processGadgetCall(call) {
-    const events = [];
-    events.push({ type: "gadget_call", call });
-    if (call.dependencies.length > 0) {
-      if (call.dependencies.includes(call.invocationId)) {
-        this.logger.warn("Gadget has self-referential dependency (depends on itself)", {
-          gadgetName: call.gadgetName,
-          invocationId: call.invocationId
-        });
-        this.failedInvocations.add(call.invocationId);
-        const skipEvent = {
-          type: "gadget_skipped",
-          gadgetName: call.gadgetName,
-          invocationId: call.invocationId,
-          parameters: call.parameters ?? {},
-          failedDependency: call.invocationId,
-          failedDependencyError: `Gadget "${call.invocationId}" cannot depend on itself (self-referential dependency)`
-        };
-        events.push(skipEvent);
-        return events;
-      }
-      const failedDep = call.dependencies.find((dep) => this.failedInvocations.has(dep));
-      if (failedDep) {
-        const skipEvents = await this.handleFailedDependency(call, failedDep);
-        events.push(...skipEvents);
-        return events;
-      }
-      const unsatisfied = call.dependencies.filter((dep) => !this.completedResults.has(dep));
-      if (unsatisfied.length > 0) {
-        this.logger.debug("Queueing gadget for later - waiting on dependencies", {
-          gadgetName: call.gadgetName,
-          invocationId: call.invocationId,
-          waitingOn: unsatisfied
-        });
-        this.gadgetsAwaitingDependencies.set(call.invocationId, call);
-        return events;
-      }
-    }
-    const executeEvents = await this.executeGadgetWithHooks(call);
-    events.push(...executeEvents);
-    const triggeredEvents = await this.processPendingGadgets();
-    events.push(...triggeredEvents);
-    return events;
-  }
   /**
    * Process a gadget call, yielding events in real-time.
    *
-   *
-   *
+   * Yields gadget_call event IMMEDIATELY when parsed (before execution),
+   * enabling real-time UI feedback.
    */
   async *processGadgetCallGenerator(call) {
     yield { type: "gadget_call", call };
@@ -8182,7 +8291,9 @@ var init_stream_processor = __esm({
       yield skipEvent;
       return;
     }
-    const failedDep = call.dependencies.find(
+    const failedDep = call.dependencies.find(
+      (dep) => this.failedInvocations.has(dep) || this.priorFailedInvocations.has(dep)
+    );
     if (failedDep) {
       const skipEvents = await this.handleFailedDependency(call, failedDep);
       for (const evt of skipEvents) {
@@ -8190,7 +8301,9 @@ var init_stream_processor = __esm({
       }
       return;
     }
-    const unsatisfied = call.dependencies.filter(
+    const unsatisfied = call.dependencies.filter(
+      (dep) => !this.completedResults.has(dep) && !this.priorCompletedInvocations.has(dep)
+    );
     if (unsatisfied.length > 0) {
       this.logger.debug("Queueing gadget for later - waiting on dependencies", {
         gadgetName: call.gadgetName,
@@ -8212,12 +8325,11 @@ var init_stream_processor = __esm({
     this.inFlightExecutions.set(call.invocationId, executionPromise);
   }
   /**
-   * Execute a gadget through the full hook lifecycle.
-   *
-   *
+   * Execute a gadget through the full hook lifecycle and yield events.
+   * Handles parameter interception, before/after controllers, observers,
+   * execution, result interception, and tree tracking.
    */
-  async
-    const events = [];
+  async *executeGadgetGenerator(call) {
     if (call.parseError) {
       this.logger.warn("Gadget has parse error", {
         gadgetName: call.gadgetName,
@@ -8270,6 +8382,12 @@ var init_stream_processor = __esm({
       });
     }
     await this.runObserversInParallel(startObservers);
+    if (this.tree) {
+      const gadgetNode = this.tree.getNodeByInvocationId(call.invocationId);
+      if (gadgetNode) {
+        this.tree.startGadget(gadgetNode.id);
+      }
+    }
     let result;
     if (shouldSkip) {
       result = {
@@ -8339,256 +8457,117 @@ var init_stream_processor = __esm({
       });
     }
     await this.runObserversInParallel(completeObservers);
+    if (this.tree) {
+      const gadgetNode = this.tree.getNodeByInvocationId(result.invocationId);
+      if (gadgetNode) {
+        if (result.error) {
+          this.tree.completeGadget(gadgetNode.id, {
+            error: result.error,
+            executionTimeMs: result.executionTimeMs,
+            cost: result.cost
+          });
+        } else {
+          this.tree.completeGadget(gadgetNode.id, {
+            result: result.result,
+            executionTimeMs: result.executionTimeMs,
+            cost: result.cost,
+            media: result.media
+          });
+        }
+      }
+    }
     this.completedResults.set(result.invocationId, result);
     if (result.error) {
       this.failedInvocations.add(result.invocationId);
     }
-
-
+    yield { type: "gadget_result", result };
+  }
+  /**
+   * Execute a gadget and push events to the completed results queue (non-blocking).
+   * Used for fire-and-forget parallel execution of independent gadgets.
+   * Results are pushed to completedResultsQueue for real-time streaming to the caller.
+   */
+  async executeGadgetAndCollect(call) {
+    for await (const evt of this.executeGadgetGenerator(call)) {
+      this.completedResultsQueue.push(evt);
+    }
   }
   /**
-   *
-   *
+   * Drain all completed results from the queue.
+   * Used to yield results as they complete during stream processing.
+   * @returns Generator that yields all events currently in the queue
    */
-
-
-    this.
-      gadgetName: call.gadgetName,
-      error: call.parseError,
-      rawParameters: call.parametersRaw
-    });
+  *drainCompletedResults() {
+    while (this.completedResultsQueue.length > 0) {
+      yield this.completedResultsQueue.shift();
     }
-
-
+  }
+  /**
+   * Wait for all in-flight gadget executions to complete, yielding events in real-time.
+   * Called at stream end to ensure all parallel executions finish.
+   * Results and subagent events are pushed to completedResultsQueue during execution.
+   * This generator yields queued events while polling, enabling real-time display
+   * of subagent activity (LLM calls, nested gadgets) during long-running gadgets.
+   * Clears the inFlightExecutions map after all gadgets complete.
+   */
+  async *waitForInFlightExecutions() {
+    if (this.inFlightExecutions.size === 0) {
+      return;
+    }
+    this.logger.debug("Waiting for in-flight gadget executions", {
+      count: this.inFlightExecutions.size,
+      invocationIds: Array.from(this.inFlightExecutions.keys())
+    });
+    const allDone = Promise.all(this.inFlightExecutions.values()).then(() => "done");
+    const POLL_INTERVAL_MS = 100;
+    while (true) {
+      const result = await Promise.race([
+        allDone,
+        new Promise((resolve) => setTimeout(() => resolve("poll"), POLL_INTERVAL_MS))
+      ]);
+      yield* this.drainCompletedResults();
+      if (result === "done") {
+        break;
+      }
+    }
+    this.inFlightExecutions.clear();
+  }
+  /**
+   * Handle a gadget that cannot execute because a dependency failed.
+   * Calls the onDependencySkipped controller to allow customization.
+   */
+  async handleFailedDependency(call, failedDep) {
+    const events = [];
+    const depResult = this.completedResults.get(failedDep);
+    const depError = depResult?.error ?? "Dependency failed";
+    let action = { action: "skip" };
+    if (this.hooks.controllers?.onDependencySkipped) {
       const context = {
         iteration: this.iteration,
         gadgetName: call.gadgetName,
         invocationId: call.invocationId,
+        parameters: call.parameters ?? {},
+        failedDependency: failedDep,
+        failedDependencyError: depError,
         logger: this.logger
       };
-
+      action = await this.hooks.controllers.onDependencySkipped(context);
     }
-
-
-
-
-
-
+    if (action.action === "skip") {
+      this.failedInvocations.add(call.invocationId);
+      if (this.tree) {
+        const gadgetNode = this.tree.getNodeByInvocationId(call.invocationId);
+        if (gadgetNode) {
+          this.tree.skipGadget(gadgetNode.id, failedDep, depError, "dependency_failed");
+        }
+      }
+      const skipEvent = {
+        type: "gadget_skipped",
         gadgetName: call.gadgetName,
         invocationId: call.invocationId,
-        parameters,
-
-
-      const action = await this.hooks.controllers.beforeGadgetExecution(context);
-      validateBeforeGadgetExecutionAction(action);
-      if (action.action === "skip") {
-        shouldSkip = true;
-        syntheticResult = action.syntheticResult;
-        this.logger.info("Controller skipped gadget execution", {
-          gadgetName: call.gadgetName
-        });
-      }
-    }
-    const startObservers = [];
-    if (this.hooks.observers?.onGadgetExecutionStart) {
-      startObservers.push(async () => {
-        const context = {
-          iteration: this.iteration,
-          gadgetName: call.gadgetName,
-          invocationId: call.invocationId,
-          parameters,
-          logger: this.logger
-        };
-        await this.hooks.observers?.onGadgetExecutionStart?.(context);
-      });
-    }
-    await this.runObserversInParallel(startObservers);
-    if (this.tree) {
-      const gadgetNode = this.tree.getNodeByInvocationId(call.invocationId);
-      if (gadgetNode) {
-        this.tree.startGadget(gadgetNode.id);
-      }
-    }
-    let result;
-    if (shouldSkip) {
-      result = {
-        gadgetName: call.gadgetName,
-        invocationId: call.invocationId,
-        parameters,
-        result: syntheticResult ?? "Execution skipped",
-        executionTimeMs: 0
-      };
-    } else {
-      result = await this.executor.execute(call);
-    }
-    const originalResult = result.result;
-    if (result.result && this.hooks.interceptors?.interceptGadgetResult) {
-      const context = {
-        iteration: this.iteration,
-        gadgetName: result.gadgetName,
-        invocationId: result.invocationId,
-        parameters,
-        executionTimeMs: result.executionTimeMs,
-        logger: this.logger
-      };
-      result.result = this.hooks.interceptors.interceptGadgetResult(result.result, context);
-    }
-    if (this.hooks.controllers?.afterGadgetExecution) {
-      const context = {
-        iteration: this.iteration,
-        gadgetName: result.gadgetName,
-        invocationId: result.invocationId,
-        parameters,
-        result: result.result,
-        error: result.error,
-        executionTimeMs: result.executionTimeMs,
-        logger: this.logger
-      };
-      const action = await this.hooks.controllers.afterGadgetExecution(context);
-      validateAfterGadgetExecutionAction(action);
-      if (action.action === "recover" && result.error) {
-        this.logger.info("Controller recovered from gadget error", {
-          gadgetName: result.gadgetName,
-          originalError: result.error
-        });
-        result = {
-          ...result,
-          error: void 0,
-          result: action.fallbackResult
-        };
-      }
-    }
-    const completeObservers = [];
-    if (this.hooks.observers?.onGadgetExecutionComplete) {
-      completeObservers.push(async () => {
-        const context = {
-          iteration: this.iteration,
-          gadgetName: result.gadgetName,
-          invocationId: result.invocationId,
-          parameters,
-          originalResult,
-          finalResult: result.result,
-          error: result.error,
-          executionTimeMs: result.executionTimeMs,
-          breaksLoop: result.breaksLoop,
-          cost: result.cost,
-          logger: this.logger
-        };
-        await this.hooks.observers?.onGadgetExecutionComplete?.(context);
-      });
-    }
-    await this.runObserversInParallel(completeObservers);
-    if (this.tree) {
-      const gadgetNode = this.tree.getNodeByInvocationId(result.invocationId);
-      if (gadgetNode) {
-        if (result.error) {
-          this.tree.completeGadget(gadgetNode.id, {
-            error: result.error,
-            executionTimeMs: result.executionTimeMs,
-            cost: result.cost
-          });
-        } else {
-          this.tree.completeGadget(gadgetNode.id, {
-            result: result.result,
-            executionTimeMs: result.executionTimeMs,
-            cost: result.cost,
-            media: result.media
-          });
-        }
-      }
-    }
-    this.completedResults.set(result.invocationId, result);
-    if (result.error) {
-      this.failedInvocations.add(result.invocationId);
-    }
-    yield { type: "gadget_result", result };
-  }
-  /**
-   * Execute a gadget and push events to the completed results queue (non-blocking).
-   * Used for fire-and-forget parallel execution of independent gadgets.
-   * Results are pushed to completedResultsQueue for real-time streaming to the caller.
-   */
-  async executeGadgetAndCollect(call) {
-    for await (const evt of this.executeGadgetGenerator(call)) {
-      this.completedResultsQueue.push(evt);
-    }
-  }
-  /**
-   * Drain all completed results from the queue.
-   * Used to yield results as they complete during stream processing.
-   * @returns Generator that yields all events currently in the queue
-   */
-  *drainCompletedResults() {
-    while (this.completedResultsQueue.length > 0) {
-      yield this.completedResultsQueue.shift();
-    }
-  }
-  /**
-   * Wait for all in-flight gadget executions to complete, yielding events in real-time.
-   * Called at stream end to ensure all parallel executions finish.
-   * Results and subagent events are pushed to completedResultsQueue during execution.
-   * This generator yields queued events while polling, enabling real-time display
-   * of subagent activity (LLM calls, nested gadgets) during long-running gadgets.
-   * Clears the inFlightExecutions map after all gadgets complete.
-   */
-  async *waitForInFlightExecutions() {
-    if (this.inFlightExecutions.size === 0) {
-      return;
-    }
-    this.logger.debug("Waiting for in-flight gadget executions", {
-      count: this.inFlightExecutions.size,
-      invocationIds: Array.from(this.inFlightExecutions.keys())
-    });
-    const allDone = Promise.all(this.inFlightExecutions.values()).then(() => "done");
-    const POLL_INTERVAL_MS = 100;
-    while (true) {
-      const result = await Promise.race([
-        allDone,
-        new Promise((resolve) => setTimeout(() => resolve("poll"), POLL_INTERVAL_MS))
-      ]);
-      yield* this.drainCompletedResults();
-      if (result === "done") {
-        break;
-      }
-    }
-    this.inFlightExecutions.clear();
-  }
-  /**
-   * Handle a gadget that cannot execute because a dependency failed.
-   * Calls the onDependencySkipped controller to allow customization.
-   */
-  async handleFailedDependency(call, failedDep) {
-    const events = [];
-    const depResult = this.completedResults.get(failedDep);
-    const depError = depResult?.error ?? "Dependency failed";
-    let action = { action: "skip" };
-    if (this.hooks.controllers?.onDependencySkipped) {
-      const context = {
-        iteration: this.iteration,
-        gadgetName: call.gadgetName,
-        invocationId: call.invocationId,
-        parameters: call.parameters ?? {},
-        failedDependency: failedDep,
-        failedDependencyError: depError,
-        logger: this.logger
-      };
-      action = await this.hooks.controllers.onDependencySkipped(context);
-    }
-    if (action.action === "skip") {
-      this.failedInvocations.add(call.invocationId);
-      if (this.tree) {
-        const gadgetNode = this.tree.getNodeByInvocationId(call.invocationId);
-        if (gadgetNode) {
-          this.tree.skipGadget(gadgetNode.id, failedDep, depError, "dependency_failed");
-        }
-      }
-      const skipEvent = {
-        type: "gadget_skipped",
-        gadgetName: call.gadgetName,
-        invocationId: call.invocationId,
-        parameters: call.parameters ?? {},
-        failedDependency: failedDep,
-        failedDependencyError: depError
+        parameters: call.parameters ?? {},
+        failedDependency: failedDep,
+        failedDependencyError: depError
       };
       events.push(skipEvent);
       if (this.hooks.observers?.onGadgetSkipped) {
@@ -8614,8 +8593,9 @@ var init_stream_processor = __esm({
         invocationId: call.invocationId,
         failedDependency: failedDep
       });
-      const
-
+      for await (const evt of this.executeGadgetGenerator(call)) {
+        events.push(evt);
+      }
     } else if (action.action === "use_fallback") {
       const fallbackResult = {
         gadgetName: call.gadgetName,
@@ -8636,22 +8616,28 @@ var init_stream_processor = __esm({
   }
   /**
    * Process pending gadgets whose dependencies are now satisfied.
-   *
+   * Yields events in real-time as gadgets complete.
+   *
+   * Gadgets are executed in parallel for efficiency,
+   * but results are yielded as they become available.
    */
-  async
-    const events = [];
+  async *processPendingGadgetsGenerator() {
     let progress = true;
     while (progress && this.gadgetsAwaitingDependencies.size > 0) {
       progress = false;
       const readyToExecute = [];
       const readyToSkip = [];
-      for (const [
-        const failedDep = call.dependencies.find(
+      for (const [_invocationId, call] of this.gadgetsAwaitingDependencies) {
+        const failedDep = call.dependencies.find(
+          (dep) => this.failedInvocations.has(dep) || this.priorFailedInvocations.has(dep)
+        );
        if (failedDep) {
          readyToSkip.push({ call, failedDep });
          continue;
        }
-        const allSatisfied = call.dependencies.every(
+        const allSatisfied = call.dependencies.every(
+          (dep) => this.completedResults.has(dep) || this.priorCompletedInvocations.has(dep)
+        );
        if (allSatisfied) {
          readyToExecute.push(call);
        }
@@ -8659,7 +8645,9 @@ var init_stream_processor = __esm({
       for (const { call, failedDep } of readyToSkip) {
         this.gadgetsAwaitingDependencies.delete(call.invocationId);
         const skipEvents = await this.handleFailedDependency(call, failedDep);
-
+        for (const evt of skipEvents) {
+          yield evt;
+        }
         progress = true;
       }
       if (readyToExecute.length > 0) {
@@ -8670,10 +8658,19 @@ var init_stream_processor = __esm({
         for (const call of readyToExecute) {
           this.gadgetsAwaitingDependencies.delete(call.invocationId);
         }
-        const
-
-
-
+        const eventSets = await Promise.all(
+          readyToExecute.map(async (call) => {
+            const events = [];
+            for await (const evt of this.executeGadgetGenerator(call)) {
+              events.push(evt);
+            }
+            return events;
+          })
+        );
+        for (const events of eventSets) {
+          for (const evt of events) {
+            yield evt;
+          }
         }
         progress = true;
       }
@@ -8681,7 +8678,9 @@ var init_stream_processor = __esm({
     if (this.gadgetsAwaitingDependencies.size > 0) {
       const pendingIds = new Set(this.gadgetsAwaitingDependencies.keys());
       for (const [invocationId, call] of this.gadgetsAwaitingDependencies) {
-        const missingDeps = call.dependencies.filter(
+        const missingDeps = call.dependencies.filter(
+          (dep) => !this.completedResults.has(dep) && !this.priorCompletedInvocations.has(dep)
+        );
         const circularDeps = missingDeps.filter((dep) => pendingIds.has(dep));
         const trulyMissingDeps = missingDeps.filter((dep) => !pendingIds.has(dep));
         let errorMessage;
@@ -8709,108 +8708,14 @@ var init_stream_processor = __esm({
           failedDependency: missingDeps[0],
           failedDependencyError: errorMessage
         };
-
+        yield skipEvent;
       }
       this.gadgetsAwaitingDependencies.clear();
     }
-    return events;
   }
   /**
-   *
-   *
-   *
-   * Note: Gadgets are still executed in parallel for efficiency,
-   * but results are yielded as they become available.
-   */
-  async *processPendingGadgetsGenerator() {
-    let progress = true;
-    while (progress && this.gadgetsAwaitingDependencies.size > 0) {
-      progress = false;
-      const readyToExecute = [];
-      const readyToSkip = [];
-      for (const [_invocationId, call] of this.gadgetsAwaitingDependencies) {
-        const failedDep = call.dependencies.find((dep) => this.failedInvocations.has(dep));
-        if (failedDep) {
-          readyToSkip.push({ call, failedDep });
-          continue;
-        }
-        const allSatisfied = call.dependencies.every((dep) => this.completedResults.has(dep));
-        if (allSatisfied) {
-          readyToExecute.push(call);
-        }
-      }
-      for (const { call, failedDep } of readyToSkip) {
-        this.gadgetsAwaitingDependencies.delete(call.invocationId);
-        const skipEvents = await this.handleFailedDependency(call, failedDep);
-        for (const evt of skipEvents) {
-          yield evt;
-        }
-        progress = true;
-      }
-      if (readyToExecute.length > 0) {
-        this.logger.debug("Executing ready gadgets in parallel", {
-          count: readyToExecute.length,
-          invocationIds: readyToExecute.map((c) => c.invocationId)
-        });
-        for (const call of readyToExecute) {
-          this.gadgetsAwaitingDependencies.delete(call.invocationId);
-        }
-        const eventSets = await Promise.all(
-          readyToExecute.map(async (call) => {
-            const events = [];
-            for await (const evt of this.executeGadgetGenerator(call)) {
-              events.push(evt);
-            }
-            return events;
-          })
-        );
-        for (const events of eventSets) {
-          for (const evt of events) {
-            yield evt;
-          }
-        }
-        progress = true;
-      }
-    }
-    if (this.gadgetsAwaitingDependencies.size > 0) {
-      const pendingIds = new Set(this.gadgetsAwaitingDependencies.keys());
-      for (const [invocationId, call] of this.gadgetsAwaitingDependencies) {
-        const missingDeps = call.dependencies.filter((dep) => !this.completedResults.has(dep));
-        const circularDeps = missingDeps.filter((dep) => pendingIds.has(dep));
-        const trulyMissingDeps = missingDeps.filter((dep) => !pendingIds.has(dep));
-        let errorMessage;
-        let logLevel = "warn";
-        if (circularDeps.length > 0 && trulyMissingDeps.length > 0) {
-          errorMessage = `Dependencies unresolvable: circular=[${circularDeps.join(", ")}], missing=[${trulyMissingDeps.join(", ")}]`;
-          logLevel = "error";
-        } else if (circularDeps.length > 0) {
-          errorMessage = `Circular dependency detected: "${invocationId}" depends on "${circularDeps[0]}" which also depends on "${invocationId}" (directly or indirectly)`;
-        } else {
-          errorMessage = `Dependency "${missingDeps[0]}" was never executed - check that the invocation ID exists and is spelled correctly`;
-        }
-        this.logger[logLevel]("Gadget has unresolvable dependencies", {
-          gadgetName: call.gadgetName,
-          invocationId,
-          circularDependencies: circularDeps,
-          missingDependencies: trulyMissingDeps
-        });
-        this.failedInvocations.add(invocationId);
-        const skipEvent = {
-          type: "gadget_skipped",
-          gadgetName: call.gadgetName,
-          invocationId,
-          parameters: call.parameters ?? {},
-          failedDependency: missingDeps[0],
-          failedDependencyError: errorMessage
-        };
-        yield skipEvent;
-      }
-      this.gadgetsAwaitingDependencies.clear();
-    }
-  }
-  /**
-   * Safely execute an observer, catching and logging any errors.
-   * Observers are non-critical, so errors are logged but don't crash the system.
+   * Safely execute an observer, catching and logging any errors.
+   * Observers are non-critical, so errors are logged but don't crash the system.
    */
   async safeObserve(fn) {
     try {
@@ -8829,16 +8734,33 @@ var init_stream_processor = __esm({
   */
   async runObserversInParallel(observers) {
     if (observers.length === 0) return;
-
+    await Promise.allSettled(
       observers.map((observer) => this.safeObserve(observer))
     );
   }
+  // ==========================================================================
+  // Public accessors for cross-iteration dependency tracking
+  // ==========================================================================
+  /**
+   * Get all invocation IDs that completed successfully in this iteration.
+   * Used by Agent to accumulate completed IDs across iterations.
+   */
+  getCompletedInvocationIds() {
+    return new Set(this.completedResults.keys());
+  }
+  /**
+   * Get all invocation IDs that failed in this iteration.
+   * Used by Agent to accumulate failed IDs across iterations.
+   */
+  getFailedInvocationIds() {
+    return new Set(this.failedInvocations);
+  }
 };
   }
 });
 
 // src/agent/agent.ts
-var Agent;
+var import_p_retry, Agent;
 var init_agent = __esm({
   "src/agent/agent.ts"() {
     "use strict";
@@ -8850,6 +8772,8 @@ var init_agent = __esm({
     init_output_viewer();
     init_logger();
     init_agent_internal_key();
+    init_retry();
+    import_p_retry = __toESM(require("p-retry"), 1);
     init_manager();
     init_conversation_manager();
     init_event_handlers();
|
|
|
8884
8808
|
mediaStore;
|
|
8885
8809
|
// Cancellation
|
|
8886
8810
|
signal;
|
|
8811
|
+
// Retry configuration
|
|
8812
|
+
retryConfig;
|
|
8887
8813
|
// Subagent configuration
|
|
8888
8814
|
agentContextConfig;
|
|
8889
8815
|
subagentConfig;
|
|
@@ -8895,6 +8821,11 @@ var init_agent = __esm({
|
|
|
8895
8821
|
onSubagentEvent;
|
|
8896
8822
|
// Counter for generating synthetic invocation IDs for wrapped text content
|
|
8897
8823
|
syntheticInvocationCounter = 0;
|
|
8824
|
+
// Cross-iteration dependency tracking - allows gadgets to depend on results from prior iterations
|
|
8825
|
+
completedInvocationIds = /* @__PURE__ */ new Set();
|
|
8826
|
+
failedInvocationIds = /* @__PURE__ */ new Set();
|
|
8827
|
+
// Queue for user messages injected during agent execution (REPL mid-session input)
|
|
8828
|
+
pendingUserMessages = [];
|
|
8898
8829
|
// Execution Tree - first-class model for nested subagent support
|
|
8899
8830
|
tree;
|
|
8900
8831
|
parentNodeId;
|
|
@@ -8969,6 +8900,7 @@ var init_agent = __esm({
|
|
|
8969
8900
|
);
|
|
8970
8901
|
}
|
|
8971
8902
|
this.signal = options.signal;
|
|
8903
|
+
this.retryConfig = resolveRetryConfig(options.retryConfig);
|
|
8972
8904
|
this.agentContextConfig = {
|
|
8973
8905
|
model: this.model,
|
|
8974
8906
|
temperature: this.temperature
|
|
@@ -9186,6 +9118,44 @@ var init_agent = __esm({
|
|
|
9186
9118
|
getCompactionStats() {
|
|
9187
9119
|
return this.compactionManager?.getStats() ?? null;
|
|
9188
9120
|
}
|
|
9121
|
+
/**
|
|
9122
|
+
* Get the conversation manager for this agent.
|
|
9123
|
+
* Used by REPL mode to extract session history for continuation.
|
|
9124
|
+
*
|
|
9125
|
+
* @returns The conversation manager containing all messages
|
|
9126
|
+
*
|
|
9127
|
+
* @example
|
|
9128
|
+
* ```typescript
|
|
9129
|
+
* // After running agent, extract history for next session
|
|
9130
|
+
* const history = agent.getConversation().getConversationHistory();
|
|
9131
|
+
* // Pass to next agent via builder.withHistory()
|
|
9132
|
+
* ```
|
|
9133
|
+
*/
|
|
9134
|
+
getConversation() {
|
|
9135
|
+
return this.conversation;
|
|
9136
|
+
}
|
|
9137
|
+
/**
|
|
9138
|
+
* Inject a user message to be processed in the next iteration.
|
|
9139
|
+
* Used by REPL mode to allow user input during a running session.
|
|
9140
|
+
*
|
|
9141
|
+
* The message is queued and will be added to the conversation before
|
|
9142
|
+
* the next LLM call. This allows users to provide additional context
|
|
9143
|
+
* or instructions while the agent is executing.
|
|
9144
|
+
*
|
|
9145
|
+
* @param message - The user message to inject
|
|
9146
|
+
*
|
|
9147
|
+
* @example
|
|
9148
|
+
* ```typescript
|
|
9149
|
+
* // While agent is running in TUI:
|
|
9150
|
+
* tui.onMidSessionInput((msg) => {
|
|
9151
|
+
* agent.injectUserMessage(msg);
|
|
9152
|
+
* });
|
|
9153
|
+
* ```
|
|
9154
|
+
*/
|
|
9155
|
+
injectUserMessage(message) {
|
|
9156
|
+
this.pendingUserMessages.push(message);
|
|
9157
|
+
this.logger.debug("User message queued for injection", { message });
|
|
9158
|
+
}
|
|
9189
9159
|
/**
|
|
9190
9160
|
* Run the agent loop.
|
|
9191
9161
|
* Clean, simple orchestration - all complexity is in StreamProcessor.
|
|
@@ -9204,96 +9174,30 @@ var init_agent = __esm({
|
|
|
9204
9174
|
maxIterations: this.maxIterations
|
|
9205
9175
|
});
|
|
9206
9176
|
while (currentIteration < this.maxIterations) {
|
|
9207
|
-
if (this.
|
|
9208
|
-
|
|
9177
|
+
if (await this.checkAbortAndNotify(currentIteration)) {
|
|
9178
|
+
return;
|
|
9179
|
+
}
|
|
9180
|
+
while (this.pendingUserMessages.length > 0) {
|
|
9181
|
+
const msg = this.pendingUserMessages.shift();
|
|
9182
|
+
this.conversation.addUserMessage(msg);
|
|
9183
|
+
this.logger.info("Injected user message into conversation", {
|
|
9209
9184
|
iteration: currentIteration,
|
|
9210
|
-
|
|
9211
|
-
});
|
|
9212
|
-
await this.safeObserve(async () => {
|
|
9213
|
-
if (this.hooks.observers?.onAbort) {
|
|
9214
|
-
const context = {
|
|
9215
|
-
iteration: currentIteration,
|
|
9216
|
-
reason: this.signal?.reason,
|
|
9217
|
-
logger: this.logger
|
|
9218
|
-
};
|
|
9219
|
-
await this.hooks.observers.onAbort(context);
|
|
9220
|
-
}
|
|
9185
|
+
messageLength: msg.length
|
|
9221
9186
|
});
|
|
9222
|
-
return;
|
|
9223
9187
|
}
|
|
9224
9188
|
this.logger.debug("Starting iteration", { iteration: currentIteration });
|
|
9225
9189
|
try {
|
|
9226
|
-
|
|
9227
|
-
|
|
9228
|
-
|
|
9229
|
-
currentIteration
|
|
9230
|
-
);
|
|
9231
|
-
if (compactionEvent) {
|
|
9232
|
-
this.logger.info("Context compacted", {
|
|
9233
|
-
strategy: compactionEvent.strategy,
|
|
9234
|
-
tokensBefore: compactionEvent.tokensBefore,
|
|
9235
|
-
tokensAfter: compactionEvent.tokensAfter
|
|
9236
|
-
});
|
|
9237
|
-
yield { type: "compaction", event: compactionEvent };
|
|
9238
|
-
await this.safeObserve(async () => {
|
|
9239
|
-
if (this.hooks.observers?.onCompaction) {
|
|
9240
|
-
await this.hooks.observers.onCompaction({
|
|
9241
|
-
iteration: currentIteration,
|
|
9242
|
-
event: compactionEvent,
|
|
9243
|
-
// biome-ignore lint/style/noNonNullAssertion: compactionManager exists if compactionEvent is truthy
|
|
9244
|
-
stats: this.compactionManager.getStats(),
|
|
9245
|
-
logger: this.logger
|
|
9246
|
-
});
|
|
9247
|
-
}
|
|
9248
|
-
});
|
|
9249
|
-
}
|
|
9190
|
+
const compactionEvent = await this.checkAndPerformCompaction(currentIteration);
|
|
9191
|
+
if (compactionEvent) {
|
|
9192
|
+
yield compactionEvent;
|
|
9250
9193
|
}
|
|
9251
|
-
|
|
9252
|
-
|
|
9253
|
-
|
|
9254
|
-
|
|
9255
|
-
|
|
9256
|
-
|
|
9257
|
-
};
|
|
9258
|
-
await this.safeObserve(async () => {
|
|
9259
|
-
if (this.hooks.observers?.onLLMCallStart) {
|
|
9260
|
-
const context = {
|
|
9261
|
-
iteration: currentIteration,
|
|
9262
|
-
options: llmOptions,
|
|
9263
|
-
logger: this.logger
|
|
9264
|
-
};
|
|
9265
|
-
await this.hooks.observers.onLLMCallStart(context);
|
|
9266
|
-
}
|
|
9267
|
-
});
|
|
9268
|
-
if (this.hooks.controllers?.beforeLLMCall) {
|
|
9269
|
-
const context = {
|
|
9270
|
-
iteration: currentIteration,
|
|
9271
|
-
maxIterations: this.maxIterations,
|
|
9272
|
-
options: llmOptions,
|
|
9273
|
-
logger: this.logger
|
|
9274
|
-
};
|
|
9275
|
-
const action = await this.hooks.controllers.beforeLLMCall(context);
|
|
9276
|
-
validateBeforeLLMCallAction(action);
|
|
9277
|
-
if (action.action === "skip") {
|
|
9278
|
-
this.logger.info("Controller skipped LLM call, using synthetic response");
|
|
9279
|
-
this.conversation.addAssistantMessage(action.syntheticResponse);
|
|
9280
|
-
yield { type: "text", content: action.syntheticResponse };
|
|
9281
|
-
break;
|
|
9282
|
-
} else if (action.action === "proceed" && action.modifiedOptions) {
|
|
9283
|
-
llmOptions = { ...llmOptions, ...action.modifiedOptions };
|
|
9284
|
-
}
|
|
9194
|
+
const prepared = await this.prepareLLMCall(currentIteration);
|
|
9195
|
+
const llmOptions = prepared.options;
|
|
9196
|
+
if (prepared.skipWithSynthetic !== void 0) {
|
|
9197
|
+
this.conversation.addAssistantMessage(prepared.skipWithSynthetic);
|
|
9198
|
+
yield { type: "text", content: prepared.skipWithSynthetic };
|
|
9199
|
+
break;
|
|
9285
9200
|
}
|
|
9286
|
-
await this.safeObserve(async () => {
|
|
9287
|
-
if (this.hooks.observers?.onLLMCallReady) {
|
|
9288
|
-
const context = {
|
|
9289
|
-
iteration: currentIteration,
|
|
9290
|
-
maxIterations: this.maxIterations,
|
|
9291
|
-
options: llmOptions,
|
|
9292
|
-
logger: this.logger
|
|
9293
|
-
};
|
|
9294
|
-
await this.hooks.observers.onLLMCallReady(context);
|
|
9295
|
-
}
|
|
9296
|
-
});
|
|
9297
9201
|
this.logger.info("Calling LLM", { model: this.model });
|
|
9298
9202
|
this.logger.silly("LLM request details", {
|
|
9299
9203
|
model: llmOptions.model,
|
|
@@ -9309,7 +9213,7 @@ var init_agent = __esm({
request: llmOptions.messages
});
const currentLLMNodeId = llmNode.id;
-const stream2 = this.
+const stream2 = await this.createStreamWithRetry(llmOptions, currentIteration);
const processor = new StreamProcessor({
iteration: currentIteration,
registry: this.registry,
@@ -9329,7 +9233,10 @@ var init_agent = __esm({
tree: this.tree,
parentNodeId: currentLLMNodeId,
// Gadgets are children of this LLM call
-baseDepth: this.baseDepth
+baseDepth: this.baseDepth,
+// Cross-iteration dependency tracking
+priorCompletedInvocations: this.completedInvocationIds,
+priorFailedInvocations: this.failedInvocationIds
});
let streamMetadata = null;
let gadgetCallCount = 0;
@@ -9352,6 +9259,12 @@ var init_agent = __esm({
if (!streamMetadata) {
throw new Error("Stream processing completed without metadata event");
}
+for (const id of processor.getCompletedInvocationIds()) {
+this.completedInvocationIds.add(id);
+}
+for (const id of processor.getFailedInvocationIds()) {
+this.failedInvocationIds.add(id);
+}
const result = streamMetadata;
this.logger.info("LLM response completed", {
finishReason: result.finishReason,
@@ -9375,81 +9288,21 @@ var init_agent = __esm({
await this.hooks.observers.onLLMCallComplete(context);
}
});
-this.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-logger: this.logger
-};
-const action = await this.hooks.controllers.afterLLMCall(context);
-validateAfterLLMCallAction(action);
-if (action.action === "modify_and_continue" || action.action === "append_and_modify") {
-finalMessage = action.modifiedMessage;
-}
-if (action.action === "append_messages" || action.action === "append_and_modify") {
-for (const msg of action.messages) {
-if (msg.role === "user") {
-this.conversation.addUserMessage(msg.content);
-} else if (msg.role === "assistant") {
-this.conversation.addAssistantMessage(extractMessageText(msg.content));
-} else if (msg.role === "system") {
-this.conversation.addUserMessage(`[System] ${extractMessageText(msg.content)}`);
-}
-}
-}
-}
-if (result.didExecuteGadgets) {
-if (this.textWithGadgetsHandler) {
-const textContent = textOutputs.join("");
-if (textContent.trim()) {
-const { gadgetName, parameterMapping, resultMapping } = this.textWithGadgetsHandler;
-const syntheticId = `gc_text_${++this.syntheticInvocationCounter}`;
-this.conversation.addGadgetCallResult(
-gadgetName,
-parameterMapping(textContent),
-resultMapping ? resultMapping(textContent) : textContent,
-syntheticId
-);
-}
-}
-for (const output of gadgetResults) {
-if (output.type === "gadget_result") {
-const gadgetResult = output.result;
-this.conversation.addGadgetCallResult(
-gadgetResult.gadgetName,
-gadgetResult.parameters,
-gadgetResult.error ?? gadgetResult.result ?? "",
-gadgetResult.invocationId,
-gadgetResult.media,
-gadgetResult.mediaIds
-);
-}
-}
-} else {
-if (finalMessage.trim()) {
-const syntheticId = `gc_tell_${++this.syntheticInvocationCounter}`;
-this.conversation.addGadgetCallResult(
-"TellUser",
-{ message: finalMessage, done: false, type: "info" },
-`\u2139\uFE0F ${finalMessage}`,
-syntheticId
-);
-}
-const shouldBreak = await this.handleTextOnlyResponse(finalMessage);
-if (shouldBreak) {
-break;
-}
+this.completeLLMCallInTree(currentLLMNodeId, result);
+const finalMessage = await this.processAfterLLMCallController(
+currentIteration,
+llmOptions,
+result,
+gadgetCallCount
+);
+const shouldBreakFromTextOnly = await this.updateConversationWithResults(
+result.didExecuteGadgets,
+textOutputs,
+gadgetResults,
+finalMessage
+);
+if (shouldBreakFromTextOnly) {
+break;
}
if (result.shouldBreakLoop) {
this.logger.info("Loop terminated by gadget or processor");
@@ -9485,6 +9338,53 @@ var init_agent = __esm({
reason: currentIteration >= this.maxIterations ? "max_iterations" : "natural_completion"
});
}
+/**
+ * Create LLM stream with retry logic.
+ * Wraps the stream creation with exponential backoff for transient failures.
+ */
+async createStreamWithRetry(llmOptions, iteration) {
+if (!this.retryConfig.enabled) {
+return this.client.stream(llmOptions);
+}
+const { retries, minTimeout, maxTimeout, factor, randomize, onRetry, onRetriesExhausted, shouldRetry } = this.retryConfig;
+try {
+return await (0, import_p_retry.default)(
+async (attemptNumber) => {
+this.logger.debug("Creating LLM stream", { attempt: attemptNumber, maxAttempts: retries + 1 });
+return this.client.stream(llmOptions);
+},
+{
+retries,
+minTimeout,
+maxTimeout,
+factor,
+randomize,
+signal: this.signal,
+onFailedAttempt: (context) => {
+const { error, attemptNumber, retriesLeft } = context;
+this.logger.warn(
+`LLM call failed (attempt ${attemptNumber}/${attemptNumber + retriesLeft}), retrying...`,
+{ error: error.message, retriesLeft }
+);
+onRetry?.(error, attemptNumber);
+},
+shouldRetry: (context) => {
+if (shouldRetry) {
+return shouldRetry(context.error);
+}
+return isRetryableError(context.error);
+}
+}
+);
+} catch (error) {
+this.logger.error(`LLM call failed after ${retries + 1} attempts`, {
+error: error.message,
+iteration
+});
+onRetriesExhausted?.(error, retries + 1);
+throw error;
+}
+}
/**
* Handle LLM error through controller.
*/
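
> Note (not part of the published diff): `createStreamWithRetry` above delegates to p-retry with the options carried on `retryConfig`. A standalone sketch of the same pattern, assuming the p-retry major targeted by this bundle (where callbacks receive a context object with `error`, `attemptNumber`, and `retriesLeft`); `isTransient` is a hypothetical predicate standing in for the library's `isRetryableError`:

```typescript
import pRetry from "p-retry";

// Sketch of the exponential-backoff wrapper used above, not the library's exact implementation.
async function withBackoff<T>(make: () => Promise<T>, isTransient: (e: unknown) => boolean): Promise<T> {
  return pRetry(make, {
    retries: 3,          // total attempts = retries + 1
    minTimeout: 1_000,   // first backoff delay in ms
    maxTimeout: 30_000,  // ceiling for the delay
    factor: 2,           // exponential growth per attempt
    randomize: true,     // add jitter
    shouldRetry: ({ error }) => isTransient(error),
  });
}
```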
@@ -9602,6 +9502,210 @@ var init_agent = __esm({
}
};
}
+// ==========================================================================
+// Agent Loop Helper Methods (extracted from run() for readability)
+// ==========================================================================
+/**
+ * Check abort signal and notify observers if aborted.
+ * @returns true if agent should terminate
+ */
+async checkAbortAndNotify(iteration) {
+if (!this.signal?.aborted) return false;
+this.logger.info("Agent loop terminated by abort signal", {
+iteration,
+reason: this.signal.reason
+});
+await this.safeObserve(async () => {
+if (this.hooks.observers?.onAbort) {
+const context = {
+iteration,
+reason: this.signal?.reason,
+logger: this.logger
+};
+await this.hooks.observers.onAbort(context);
+}
+});
+return true;
+}
+/**
+ * Check and perform context compaction if needed.
+ * @returns compaction stream event if compaction occurred, null otherwise
+ */
+async checkAndPerformCompaction(iteration) {
+if (!this.compactionManager) return null;
+const compactionEvent = await this.compactionManager.checkAndCompact(
+this.conversation,
+iteration
+);
+if (!compactionEvent) return null;
+this.logger.info("Context compacted", {
+strategy: compactionEvent.strategy,
+tokensBefore: compactionEvent.tokensBefore,
+tokensAfter: compactionEvent.tokensAfter
+});
+await this.safeObserve(async () => {
+if (this.hooks.observers?.onCompaction) {
+await this.hooks.observers.onCompaction({
+iteration,
+event: compactionEvent,
+// biome-ignore lint/style/noNonNullAssertion: compactionManager exists if compactionEvent is truthy
+stats: this.compactionManager.getStats(),
+logger: this.logger
+});
+}
+});
+return { type: "compaction", event: compactionEvent };
+}
+/**
+ * Prepare LLM call options and process beforeLLMCall controller.
+ * @returns options and optional skipWithSynthetic response if controller wants to skip
+ */
+async prepareLLMCall(iteration) {
+let llmOptions = {
+model: this.model,
+messages: this.conversation.getMessages(),
+temperature: this.temperature,
+maxTokens: this.defaultMaxTokens,
+signal: this.signal
+};
+await this.safeObserve(async () => {
+if (this.hooks.observers?.onLLMCallStart) {
+const context = {
+iteration,
+options: llmOptions,
+logger: this.logger
+};
+await this.hooks.observers.onLLMCallStart(context);
+}
+});
+if (this.hooks.controllers?.beforeLLMCall) {
+const context = {
+iteration,
+maxIterations: this.maxIterations,
+options: llmOptions,
+logger: this.logger
+};
+const action = await this.hooks.controllers.beforeLLMCall(context);
+validateBeforeLLMCallAction(action);
+if (action.action === "skip") {
+this.logger.info("Controller skipped LLM call, using synthetic response");
+return { options: llmOptions, skipWithSynthetic: action.syntheticResponse };
+} else if (action.action === "proceed" && action.modifiedOptions) {
+llmOptions = { ...llmOptions, ...action.modifiedOptions };
+}
+}
+await this.safeObserve(async () => {
+if (this.hooks.observers?.onLLMCallReady) {
+const context = {
+iteration,
+maxIterations: this.maxIterations,
+options: llmOptions,
+logger: this.logger
+};
+await this.hooks.observers.onLLMCallReady(context);
+}
+});
+return { options: llmOptions };
+}
+/**
+ * Calculate cost and complete LLM call in execution tree.
+ */
+completeLLMCallInTree(nodeId, result) {
+const llmCost = this.client.modelRegistry?.estimateCost?.(
+this.model,
+result.usage?.inputTokens ?? 0,
+result.usage?.outputTokens ?? 0,
+result.usage?.cachedInputTokens ?? 0,
+result.usage?.cacheCreationInputTokens ?? 0
+)?.totalCost;
+this.tree.completeLLMCall(nodeId, {
+response: result.rawResponse,
+usage: result.usage,
+finishReason: result.finishReason,
+cost: llmCost
+});
+}
+/**
+ * Process afterLLMCall controller and return modified final message.
+ */
+async processAfterLLMCallController(iteration, llmOptions, result, gadgetCallCount) {
+let finalMessage = result.finalMessage;
+if (!this.hooks.controllers?.afterLLMCall) {
+return finalMessage;
+}
+const context = {
+iteration,
+maxIterations: this.maxIterations,
+options: llmOptions,
+finishReason: result.finishReason,
+usage: result.usage,
+finalMessage: result.finalMessage,
+gadgetCallCount,
+logger: this.logger
+};
+const action = await this.hooks.controllers.afterLLMCall(context);
+validateAfterLLMCallAction(action);
+if (action.action === "modify_and_continue" || action.action === "append_and_modify") {
+finalMessage = action.modifiedMessage;
+}
+if (action.action === "append_messages" || action.action === "append_and_modify") {
+for (const msg of action.messages) {
+if (msg.role === "user") {
+this.conversation.addUserMessage(msg.content);
+} else if (msg.role === "assistant") {
+this.conversation.addAssistantMessage(extractMessageText(msg.content));
+} else if (msg.role === "system") {
+this.conversation.addUserMessage(`[System] ${extractMessageText(msg.content)}`);
+}
+}
+}
+return finalMessage;
+}
+/**
+ * Update conversation history with gadget results or text-only response.
+ * @returns true if loop should break (text-only handler requested termination)
+ */
+async updateConversationWithResults(didExecuteGadgets, textOutputs, gadgetResults, finalMessage) {
+if (didExecuteGadgets) {
+if (this.textWithGadgetsHandler) {
+const textContent = textOutputs.join("");
+if (textContent.trim()) {
+const { gadgetName, parameterMapping, resultMapping } = this.textWithGadgetsHandler;
+const syntheticId = `gc_text_${++this.syntheticInvocationCounter}`;
+this.conversation.addGadgetCallResult(
+gadgetName,
+parameterMapping(textContent),
+resultMapping ? resultMapping(textContent) : textContent,
+syntheticId
+);
+}
+}
+for (const output of gadgetResults) {
+if (output.type === "gadget_result") {
+const gadgetResult = output.result;
+this.conversation.addGadgetCallResult(
+gadgetResult.gadgetName,
+gadgetResult.parameters,
+gadgetResult.error ?? gadgetResult.result ?? "",
+gadgetResult.invocationId,
+gadgetResult.media,
+gadgetResult.mediaIds
+);
+}
+}
+return false;
+}
+if (finalMessage.trim()) {
+const syntheticId = `gc_tell_${++this.syntheticInvocationCounter}`;
+this.conversation.addGadgetCallResult(
+"TellUser",
+{ message: finalMessage, done: false, type: "info" },
+`\u2139\uFE0F ${finalMessage}`,
+syntheticId
+);
+}
+return await this.handleTextOnlyResponse(finalMessage);
+}
/**
* Run agent with named event handlers (syntactic sugar).
*
@@ -9658,6 +9762,7 @@ var init_builder = __esm({
gadgetOutputLimit;
gadgetOutputLimitPercent;
compactionConfig;
+retryConfig;
signal;
trailingMessage;
subagentConfig;
@@ -9828,37 +9933,90 @@
return this.withHistory([message]);
}
/**
-*
+ * Clear any previously set conversation history.
+ * Used before setting new cumulative history in REPL mode.
*
-* @param handler - Function to handle human input requests
* @returns This builder for chaining
*
* @example
* ```typescript
-*
-*
-* })
+ * // Reset history before setting new cumulative history
+ * builder.clearHistory().withHistory(cumulativeHistory);
* ```
*/
-
-this.
+clearHistory() {
+this.initialMessages = [];
return this;
}
/**
-*
+ * Continue conversation from a previous agent's history.
+ * Extracts full conversation history and sets it as initial messages.
*
-*
+ * This is the recommended way to implement REPL session continuation.
+ * It automatically handles history extraction and format conversion.
+ *
+ * @param agent - The previous agent to continue from
* @returns This builder for chaining
*
* @example
* ```typescript
-*
+ * // REPL loop with session continuity
+ * let previousAgent: Agent | null = null;
+ *
+ * while (true) {
+ *   if (previousAgent) {
+ *     builder.continueFrom(previousAgent);
+ *   }
+ *   const agent = builder.ask(prompt);
+ *   await runAgent(agent);
+ *   previousAgent = agent;
+ * }
* ```
*/
-
-
-
-
+continueFrom(agent) {
+const history = agent.getConversation().getConversationHistory();
+this.clearHistory();
+for (const msg of history) {
+if (msg.role === "user") {
+this.initialMessages.push({ role: "user", content: msg.content });
+} else if (msg.role === "assistant") {
+this.initialMessages.push({ role: "assistant", content: msg.content });
+}
+}
+return this;
+}
+/**
+ * Set the human input handler for interactive conversations.
+ *
+ * @param handler - Function to handle human input requests
+ * @returns This builder for chaining
+ *
+ * @example
+ * ```typescript
+ * .onHumanInput(async (question) => {
+ *   return await promptUser(question);
+ * })
+ * ```
+ */
+onHumanInput(handler) {
+this.requestHumanInput = handler;
+return this;
+}
+/**
+ * Set custom gadget marker prefix.
+ *
+ * @param prefix - Custom start prefix for gadget markers
+ * @returns This builder for chaining
+ *
+ * @example
+ * ```typescript
+ * .withGadgetStartPrefix("<<GADGET_START>>")
+ * ```
+ */
+withGadgetStartPrefix(prefix) {
+this.gadgetStartPrefix = prefix;
+return this;
+}
/**
* Set custom gadget marker suffix.
*
@@ -10055,6 +10213,60 @@
this.compactionConfig = { enabled: false };
return this;
}
+/**
+ * Configure retry behavior for LLM API calls.
+ *
+ * Retry is enabled by default with conservative settings (3 retries, exponential backoff).
+ * Use this method to customize retry behavior for rate limits, timeouts, and transient errors.
+ *
+ * @param config - Retry configuration options
+ * @returns This builder for chaining
+ *
+ * @example
+ * ```typescript
+ * // Custom retry configuration
+ * .withRetry({
+ *   retries: 5,
+ *   minTimeout: 2000,
+ *   maxTimeout: 60000,
+ * })
+ *
+ * // With monitoring callbacks
+ * .withRetry({
+ *   onRetry: (error, attempt) => {
+ *     console.log(`Retry ${attempt}: ${error.message}`);
+ *   },
+ *   onRetriesExhausted: (error, attempts) => {
+ *     alerting.warn(`Failed after ${attempts} attempts`);
+ *   }
+ * })
+ *
+ * // Custom retry logic
+ * .withRetry({
+ *   shouldRetry: (error) => error.message.includes('429'),
+ * })
+ * ```
+ */
+withRetry(config) {
+this.retryConfig = { ...config, enabled: config.enabled ?? true };
+return this;
+}
+/**
+ * Disable automatic retry for LLM API calls.
+ *
+ * By default, retry is enabled. Use this method to explicitly disable it.
+ *
+ * @returns This builder for chaining
+ *
+ * @example
+ * ```typescript
+ * .withoutRetry() // Disable automatic retry
+ * ```
+ */
+withoutRetry() {
+this.retryConfig = { enabled: false };
+return this;
+}
/**
* Set an abort signal for cancelling requests mid-flight.
*
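
> Note (not part of the published diff): on the builder side, `withRetry`/`withoutRetry` above simply populate `retryConfig`, which the agent constructor resolves via `resolveRetryConfig`. A hedged usage sketch, assuming an `AgentBuilder` instance named `builder` and the `ask()` terminal method shown in the JSDoc examples:

```typescript
// Illustrative chaining only; `builder` is assumed to be an AgentBuilder from this package.
const agent = builder
  .withRetry({
    retries: 5,
    minTimeout: 2_000,
    maxTimeout: 60_000,
    onRetry: (error, attempt) => console.warn(`retry ${attempt}: ${error.message}`),
  })
  .ask("Summarize the latest build failure");

// Or opt out of automatic retries entirely:
// builder.withoutRetry().ask("...");
```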
@@ -10372,6 +10584,7 @@ ${endPrefix}`
gadgetOutputLimit: this.gadgetOutputLimit,
gadgetOutputLimitPercent: this.gadgetOutputLimitPercent,
compactionConfig: this.compactionConfig,
+retryConfig: this.retryConfig,
signal: this.signal,
subagentConfig: this.subagentConfig,
onSubagentEvent: this.subagentEventCallback,
@@ -10557,6 +10770,7 @@ ${endPrefix}`
gadgetOutputLimit: this.gadgetOutputLimit,
gadgetOutputLimitPercent: this.gadgetOutputLimitPercent,
compactionConfig: this.compactionConfig,
+retryConfig: this.retryConfig,
signal: this.signal,
subagentConfig: this.subagentConfig,
onSubagentEvent: this.subagentEventCallback,
@@ -10576,6 +10790,7 @@ var index_exports = {};
__export(index_exports, {
AbortException: () => AbortException,
AbstractGadget: () => AbstractGadget,
+Agent: () => Agent,
AgentBuilder: () => AgentBuilder,
AnthropicMessagesProvider: () => AnthropicMessagesProvider,
CompactionManager: () => CompactionManager,
@@ -10583,8 +10798,13 @@ __export(index_exports, {
DEFAULT_COMPACTION_CONFIG: () => DEFAULT_COMPACTION_CONFIG,
DEFAULT_HINTS: () => DEFAULT_HINTS,
DEFAULT_PROMPTS: () => DEFAULT_PROMPTS,
+DEFAULT_RETRY_CONFIG: () => DEFAULT_RETRY_CONFIG,
DEFAULT_SUMMARIZATION_PROMPT: () => DEFAULT_SUMMARIZATION_PROMPT,
ExecutionTree: () => ExecutionTree,
+FALLBACK_CHARS_PER_TOKEN: () => FALLBACK_CHARS_PER_TOKEN,
+GADGET_ARG_PREFIX: () => GADGET_ARG_PREFIX,
+GADGET_END_PREFIX: () => GADGET_END_PREFIX,
+GADGET_START_PREFIX: () => GADGET_START_PREFIX,
Gadget: () => Gadget,
GadgetCallParser: () => GadgetCallParser,
GadgetExecutor: () => GadgetExecutor,
@@ -10598,9 +10818,6 @@ __export(index_exports, {
LLMist: () => LLMist,
MODEL_ALIASES: () => MODEL_ALIASES,
MediaStore: () => MediaStore,
-MockBuilder: () => MockBuilder,
-MockManager: () => MockManager,
-MockProviderAdapter: () => MockProviderAdapter,
ModelIdentifierParser: () => ModelIdentifierParser,
ModelRegistry: () => ModelRegistry,
OpenAIChatProvider: () => OpenAIChatProvider,
@@ -10621,11 +10838,7 @@ __export(index_exports, {
createHints: () => createHints,
createLogger: () => createLogger,
createMediaOutput: () => createMediaOutput,
-createMockAdapter: () => createMockAdapter,
-createMockClient: () => createMockClient,
-createMockStream: () => createMockStream,
createOpenAIProviderFromEnv: () => createOpenAIProviderFromEnv,
-createTextMockStream: () => createTextMockStream,
defaultLogger: () => defaultLogger,
detectAudioMimeType: () => detectAudioMimeType,
detectImageMimeType: () => detectImageMimeType,
@@ -10634,8 +10847,8 @@ __export(index_exports, {
filterByDepth: () => filterByDepth,
filterByParent: () => filterByParent,
filterRootEvents: () => filterRootEvents,
+formatLLMError: () => formatLLMError,
getHostExports: () => getHostExports,
-getMockManager: () => getMockManager,
getModelId: () => getModelId,
getProvider: () => getProvider,
groupByParent: () => groupByParent,
@@ -10643,16 +10856,17 @@ __export(index_exports, {
imageFromBase64: () => imageFromBase64,
imageFromBuffer: () => imageFromBuffer,
imageFromUrl: () => imageFromUrl,
+isAbortError: () => isAbortError,
isAudioPart: () => isAudioPart,
isDataUrl: () => isDataUrl,
isGadgetEvent: () => isGadgetEvent,
isImagePart: () => isImagePart,
isLLMEvent: () => isLLMEvent,
+isRetryableError: () => isRetryableError,
isRootEvent: () => isRootEvent,
isSubagentEvent: () => isSubagentEvent,
isTextPart: () => isTextPart,
iterationProgressHint: () => iterationProgressHint,
-mockLLM: () => mockLLM,
normalizeMessageContent: () => normalizeMessageContent,
parallelGadgetHint: () => parallelGadgetHint,
parseDataUrl: () => parseDataUrl,
@@ -10660,6 +10874,7 @@ __export(index_exports, {
resolveHintTemplate: () => resolveHintTemplate,
resolveModel: () => resolveModel,
resolvePromptTemplate: () => resolvePromptTemplate,
+resolveRetryConfig: () => resolveRetryConfig,
resolveRulesTemplate: () => resolveRulesTemplate,
resolveSubagentModel: () => resolveSubagentModel,
resolveValue: () => resolveValue,
@@ -10669,15 +10884,19 @@ __export(index_exports, {
resultWithImages: () => resultWithImages,
resultWithMedia: () => resultWithMedia,
runWithHandlers: () => runWithHandlers,
+schemaToJSONSchema: () => schemaToJSONSchema,
stream: () => stream,
text: () => text,
toBase64: () => toBase64,
validateAndApplyDefaults: () => validateAndApplyDefaults,
validateGadgetParams: () => validateGadgetParams,
+validateGadgetSchema: () => validateGadgetSchema,
z: () => import_zod3.z
});
module.exports = __toCommonJS(index_exports);
var import_zod3 = require("zod");
+init_constants();
+init_constants2();
init_builder();
init_event_handlers();

@@ -11460,7 +11679,6 @@ var HookPresets = class _HookPresets {
init_config();
init_manager();
init_strategies();
-init_strategy();

// src/agent/index.ts
init_conversation_manager();
@@ -11568,6 +11786,23 @@ init_stream_processor();

// src/index.ts
init_client();
+
+// src/core/errors.ts
+function isAbortError(error) {
+if (!(error instanceof Error)) return false;
+if (error.name === "AbortError") return true;
+if (error.name === "APIConnectionAbortedError") return true;
+if (error.name === "APIUserAbortError") return true;
+const message = error.message.toLowerCase();
+if (message.includes("abort")) return true;
+if (message.includes("cancelled")) return true;
+if (message.includes("canceled")) return true;
+return false;
+}
+
+// src/index.ts
+init_agent();
+init_retry();
init_execution_tree();

// src/core/execution-events.ts
@@ -11765,6 +12000,8 @@ function validateGadgetParams(gadget, params) {
}

// src/index.ts
+init_schema_to_json();
+init_schema_validator();
init_logger();

// src/utils/config-resolver.ts
@@ -11816,1006 +12053,6 @@ init_anthropic();
|
|
|
11816
12053
|
init_discovery();
|
|
11817
12054
|
init_gemini();
|
|
11818
12055
|
init_openai();
|
|
11819
|
-
|
|
11820
|
-
// src/testing/cli-helpers.ts
|
|
11821
|
-
var import_node_stream = require("stream");
|
|
11822
|
-
|
|
11823
|
-
// src/testing/mock-manager.ts
|
|
11824
|
-
init_logger();
|
|
11825
|
-
var MockManager = class _MockManager {
|
|
11826
|
-
static instance = null;
|
|
11827
|
-
mocks = /* @__PURE__ */ new Map();
|
|
11828
|
-
stats = /* @__PURE__ */ new Map();
|
|
11829
|
-
options;
|
|
11830
|
-
logger;
|
|
11831
|
-
nextId = 1;
|
|
11832
|
-
constructor(options = {}) {
|
|
11833
|
-
this.options = {
|
|
11834
|
-
strictMode: options.strictMode ?? false,
|
|
11835
|
-
debug: options.debug ?? false,
|
|
11836
|
-
recordStats: options.recordStats ?? true
|
|
11837
|
-
};
|
|
11838
|
-
this.logger = createLogger({ name: "MockManager", minLevel: this.options.debug ? 2 : 3 });
|
|
11839
|
-
}
|
|
11840
|
-
/**
|
|
11841
|
-
* Get the global MockManager instance.
|
|
11842
|
-
* Creates one if it doesn't exist.
|
|
11843
|
-
*/
|
|
11844
|
-
static getInstance(options) {
|
|
11845
|
-
if (!_MockManager.instance) {
|
|
11846
|
-
_MockManager.instance = new _MockManager(options);
|
|
11847
|
-
} else if (options) {
|
|
11848
|
-
console.warn(
|
|
11849
|
-
"MockManager.getInstance() called with options, but instance already exists. Options are ignored. Use setOptions() to update options or reset() to reinitialize."
|
|
11850
|
-
);
|
|
11851
|
-
}
|
|
11852
|
-
return _MockManager.instance;
|
|
11853
|
-
}
|
|
11854
|
-
/**
|
|
11855
|
-
* Reset the global instance (useful for testing).
|
|
11856
|
-
*/
|
|
11857
|
-
static reset() {
|
|
11858
|
-
_MockManager.instance = null;
|
|
11859
|
-
}
|
|
11860
|
-
/**
|
|
11861
|
-
* Register a new mock.
|
|
11862
|
-
*
|
|
11863
|
-
* @param registration - The mock registration configuration
|
|
11864
|
-
* @returns The ID of the registered mock
|
|
11865
|
-
*
|
|
11866
|
-
* @example
|
|
11867
|
-
* const manager = MockManager.getInstance();
|
|
11868
|
-
* const mockId = manager.register({
|
|
11869
|
-
* label: 'GPT-4 mock',
|
|
11870
|
-
* matcher: (ctx) => ctx.modelName.includes('gpt-4'),
|
|
11871
|
-
* response: { text: 'Mocked response' }
|
|
11872
|
-
* });
|
|
11873
|
-
*/
|
|
11874
|
-
register(registration) {
|
|
11875
|
-
const id = registration.id ?? `mock-${this.nextId++}`;
|
|
11876
|
-
const mock = {
|
|
11877
|
-
id,
|
|
11878
|
-
matcher: registration.matcher,
|
|
11879
|
-
response: registration.response,
|
|
11880
|
-
label: registration.label,
|
|
11881
|
-
once: registration.once
|
|
11882
|
-
};
|
|
11883
|
-
this.mocks.set(id, mock);
|
|
11884
|
-
if (this.options.recordStats) {
|
|
11885
|
-
this.stats.set(id, { matchCount: 0 });
|
|
11886
|
-
}
|
|
11887
|
-
this.logger.debug(
|
|
11888
|
-
`Registered mock: ${id}${mock.label ? ` (${mock.label})` : ""}${mock.once ? " [once]" : ""}`
|
|
11889
|
-
);
|
|
11890
|
-
return id;
|
|
11891
|
-
}
|
|
11892
|
-
/**
|
|
11893
|
-
* Unregister a mock by ID.
|
|
11894
|
-
*/
|
|
11895
|
-
unregister(id) {
|
|
11896
|
-
const deleted = this.mocks.delete(id);
|
|
11897
|
-
if (deleted) {
|
|
11898
|
-
this.stats.delete(id);
|
|
11899
|
-
this.logger.debug(`Unregistered mock: ${id}`);
|
|
11900
|
-
}
|
|
11901
|
-
return deleted;
|
|
11902
|
-
}
|
|
11903
|
-
/**
|
|
11904
|
-
* Clear all registered mocks.
|
|
11905
|
-
*/
|
|
11906
|
-
clear() {
|
|
11907
|
-
this.mocks.clear();
|
|
11908
|
-
this.stats.clear();
|
|
11909
|
-
this.logger.debug("Cleared all mocks");
|
|
11910
|
-
}
|
|
11911
|
-
/**
|
|
11912
|
-
* Find and return a matching mock for the given context.
|
|
11913
|
-
* Returns the mock response if found, null otherwise.
|
|
11914
|
-
*/
|
|
11915
|
-
async findMatch(context) {
|
|
11916
|
-
this.logger.debug(
|
|
11917
|
-
`Finding match for: ${context.provider}:${context.modelName} (${this.mocks.size} mocks registered)`
|
|
11918
|
-
);
|
|
11919
|
-
for (const [id, mock] of this.mocks.entries()) {
|
|
11920
|
-
let matches = false;
|
|
11921
|
-
try {
|
|
11922
|
-
matches = await Promise.resolve(mock.matcher(context));
|
|
11923
|
-
} catch (error) {
|
|
11924
|
-
this.logger.warn(`Error in matcher ${id}:`, error);
|
|
11925
|
-
if (this.options.strictMode) {
|
|
11926
|
-
throw new Error(`Matcher error in mock ${id}: ${error}`);
|
|
11927
|
-
}
|
|
11928
|
-
continue;
|
|
11929
|
-
}
|
|
11930
|
-
if (matches) {
|
|
11931
|
-
this.logger.debug(`Mock matched: ${id}${mock.label ? ` (${mock.label})` : ""}`);
|
|
11932
|
-
if (this.options.recordStats) {
|
|
11933
|
-
const stats = this.stats.get(id);
|
|
11934
|
-
if (stats) {
|
|
11935
|
-
stats.matchCount++;
|
|
11936
|
-
stats.lastUsed = /* @__PURE__ */ new Date();
|
|
11937
|
-
}
|
|
11938
|
-
}
|
|
11939
|
-
if (mock.once) {
|
|
11940
|
-
this.mocks.delete(id);
|
|
11941
|
-
this.stats.delete(id);
|
|
11942
|
-
this.logger.debug(`Removed one-time mock: ${id}`);
|
|
11943
|
-
}
|
|
11944
|
-
const response = typeof mock.response === "function" ? await Promise.resolve(mock.response(context)) : mock.response;
|
|
11945
|
-
return response;
|
|
11946
|
-
}
|
|
11947
|
-
}
|
|
11948
|
-
this.logger.debug("No mock matched");
|
|
11949
|
-
if (this.options.strictMode) {
|
|
11950
|
-
throw new Error(
|
|
11951
|
-
`No mock registered for ${context.provider}:${context.modelName}. Register a mock using MockManager.getInstance().register() or disable strictMode.`
|
|
11952
|
-
);
|
|
11953
|
-
}
|
|
11954
|
-
return {
|
|
11955
|
-
text: "",
|
|
11956
|
-
usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
|
|
11957
|
-
finishReason: "stop"
|
|
11958
|
-
};
|
|
11959
|
-
}
|
|
11960
|
-
/**
|
|
11961
|
-
* Get statistics for a specific mock.
|
|
11962
|
-
*/
|
|
11963
|
-
getStats(id) {
|
|
11964
|
-
return this.stats.get(id);
|
|
11965
|
-
}
|
|
11966
|
-
/**
|
|
11967
|
-
* Get all registered mock IDs.
|
|
11968
|
-
*/
|
|
11969
|
-
getMockIds() {
|
|
11970
|
-
return Array.from(this.mocks.keys());
|
|
11971
|
-
}
|
|
11972
|
-
/**
|
|
11973
|
-
* Get the number of registered mocks.
|
|
11974
|
-
*/
|
|
11975
|
-
getCount() {
|
|
11976
|
-
return this.mocks.size;
|
|
11977
|
-
}
|
|
11978
|
-
/**
|
|
11979
|
-
* Update the mock manager options.
|
|
11980
|
-
*/
|
|
11981
|
-
setOptions(options) {
|
|
11982
|
-
this.options = { ...this.options, ...options };
|
|
11983
|
-
this.logger = createLogger({ name: "MockManager", minLevel: this.options.debug ? 2 : 3 });
|
|
11984
|
-
}
|
|
11985
|
-
};
|
|
11986
|
-
function getMockManager(options) {
|
|
11987
|
-
return MockManager.getInstance(options);
|
|
11988
|
-
}
|
|
11989
|
-
|
|
11990
|
-
// src/testing/mock-stream.ts
|
|
11991
|
-
init_constants();
|
|
11992
|
-
function sleep(ms) {
|
|
11993
|
-
return new Promise((resolve) => setTimeout(resolve, ms));
|
|
11994
|
-
}
|
|
11995
|
-
function generateInvocationId() {
|
|
11996
|
-
return `inv-${Date.now()}-${Math.random().toString(36).substring(2, 9)}`;
|
|
11997
|
-
}
|
|
11998
|
-
function splitIntoChunks(text3, minChunkSize = 5, maxChunkSize = 30) {
|
|
11999
|
-
const chunks = [];
|
|
12000
|
-
let remaining = text3;
|
|
12001
|
-
while (remaining.length > 0) {
|
|
12002
|
-
const chunkSize = Math.min(
|
|
12003
|
-
Math.floor(Math.random() * (maxChunkSize - minChunkSize + 1)) + minChunkSize,
|
|
12004
|
-
remaining.length
|
|
12005
|
-
);
|
|
12006
|
-
let chunk;
|
|
12007
|
-
if (chunkSize < remaining.length) {
|
|
12008
|
-
const substr = remaining.substring(0, chunkSize);
|
|
12009
|
-
const lastSpace = substr.lastIndexOf(" ");
|
|
12010
|
-
if (lastSpace > minChunkSize / 2) {
|
|
12011
|
-
chunk = substr.substring(0, lastSpace + 1);
|
|
12012
|
-
} else {
|
|
12013
|
-
chunk = substr;
|
|
12014
|
-
}
|
|
12015
|
-
} else {
|
|
12016
|
-
chunk = remaining;
|
|
12017
|
-
}
|
|
12018
|
-
chunks.push(chunk);
|
|
12019
|
-
remaining = remaining.substring(chunk.length);
|
|
12020
|
-
}
|
|
12021
|
-
return chunks;
|
|
12022
|
-
}
|
|
12023
|
-
function serializeToBlockFormat(obj, prefix = "") {
|
|
12024
|
-
let result = "";
|
|
12025
|
-
for (const [key, value] of Object.entries(obj)) {
|
|
12026
|
-
const pointer = prefix ? `${prefix}/${key}` : key;
|
|
12027
|
-
if (value === null || value === void 0) {
|
|
12028
|
-
continue;
|
|
12029
|
-
}
|
|
12030
|
-
if (Array.isArray(value)) {
|
|
12031
|
-
for (let i = 0; i < value.length; i++) {
|
|
12032
|
-
const item = value[i];
|
|
12033
|
-
const itemPointer = `${pointer}/${i}`;
|
|
12034
|
-
if (typeof item === "object" && item !== null && !Array.isArray(item)) {
|
|
12035
|
-
result += serializeToBlockFormat(item, itemPointer);
|
|
12036
|
-
} else if (Array.isArray(item)) {
|
|
12037
|
-
for (let j = 0; j < item.length; j++) {
|
|
12038
|
-
result += `${GADGET_ARG_PREFIX}${itemPointer}/${j}
|
|
12039
|
-
${String(item[j])}
|
|
12040
|
-
`;
|
|
12041
|
-
}
|
|
12042
|
-
} else {
|
|
12043
|
-
result += `${GADGET_ARG_PREFIX}${itemPointer}
|
|
12044
|
-
${String(item)}
|
|
12045
|
-
`;
|
|
12046
|
-
}
|
|
12047
|
-
}
|
|
12048
|
-
} else if (typeof value === "object") {
|
|
12049
|
-
result += serializeToBlockFormat(value, pointer);
|
|
12050
|
-
} else {
|
|
12051
|
-
result += `${GADGET_ARG_PREFIX}${pointer}
|
|
12052
|
-
${String(value)}
|
|
12053
|
-
`;
|
|
12054
|
-
}
|
|
12055
|
-
}
|
|
12056
|
-
return result;
|
|
12057
|
-
}
|
|
12058
|
-
function formatGadgetCalls(gadgetCalls) {
|
|
12059
|
-
let text3 = "";
|
|
12060
|
-
const calls = [];
|
|
12061
|
-
for (const call of gadgetCalls) {
|
|
12062
|
-
const invocationId = call.invocationId ?? generateInvocationId();
|
|
12063
|
-
calls.push({ name: call.gadgetName, invocationId });
|
|
12064
|
-
const blockParams = serializeToBlockFormat(call.parameters);
|
|
12065
|
-
text3 += `
|
|
12066
|
-
${GADGET_START_PREFIX}${call.gadgetName}
|
|
12067
|
-
${blockParams}${GADGET_END_PREFIX}`;
|
|
12068
|
-
}
|
|
12069
|
-
return { text: text3, calls };
|
|
12070
|
-
}
|
|
12071
|
-
async function* createMockStream(response) {
|
|
12072
|
-
if (response.delayMs) {
|
|
12073
|
-
await sleep(response.delayMs);
|
|
12074
|
-
}
|
|
12075
|
-
const streamDelay = response.streamDelayMs ?? 0;
|
|
12076
|
-
let fullText = response.text ?? "";
|
|
12077
|
-
if (response.gadgetCalls && response.gadgetCalls.length > 0) {
|
|
12078
|
-
const { text: gadgetText } = formatGadgetCalls(response.gadgetCalls);
|
|
12079
|
-
fullText += gadgetText;
|
|
12080
|
-
}
|
|
12081
|
-
if (fullText.length > 0) {
|
|
12082
|
-
const chunks = streamDelay > 0 ? splitIntoChunks(fullText) : [fullText];
|
|
12083
|
-
for (let i = 0; i < chunks.length; i++) {
|
|
12084
|
-
const isLast = i === chunks.length - 1;
|
|
12085
|
-
const chunk = {
|
|
12086
|
-
text: chunks[i]
|
|
12087
|
-
};
|
|
12088
|
-
if (isLast) {
|
|
12089
|
-
if (response.finishReason !== void 0) {
|
|
12090
|
-
chunk.finishReason = response.finishReason;
|
|
12091
|
-
}
|
|
12092
|
-
if (response.usage) {
|
|
12093
|
-
chunk.usage = response.usage;
|
|
12094
|
-
}
|
|
12095
|
-
}
|
|
12096
|
-
yield chunk;
|
|
12097
|
-
if (streamDelay > 0 && !isLast) {
|
|
12098
|
-
await sleep(streamDelay);
|
|
12099
|
-
}
|
|
12100
|
-
}
|
|
12101
|
-
} else {
|
|
12102
|
-
yield {
|
|
12103
|
-
text: "",
|
|
12104
|
-
finishReason: response.finishReason ?? "stop",
|
|
12105
|
-
usage: response.usage ?? { inputTokens: 0, outputTokens: 0, totalTokens: 0 }
|
|
12106
|
-
};
|
|
12107
|
-
}
|
|
12108
|
-
}
|
|
12109
|
-
function createTextMockStream(text3, options) {
|
|
12110
|
-
return createMockStream({
|
|
12111
|
-
text: text3,
|
|
12112
|
-
delayMs: options?.delayMs,
|
|
12113
|
-
streamDelayMs: options?.streamDelayMs,
|
|
12114
|
-
usage: options?.usage,
|
|
12115
|
-
finishReason: "stop"
|
|
12116
|
-
});
|
|
12117
|
-
}
|
|
12118
|
-
|
|
12119
|
-
// src/testing/mock-adapter.ts
|
|
12120
|
-
var MockProviderAdapter = class {
|
|
12121
|
-
providerId = "mock";
|
|
12122
|
-
priority = 100;
|
|
12123
|
-
// High priority: check mocks before real providers
|
|
12124
|
-
mockManager;
|
|
12125
|
-
constructor(options) {
|
|
12126
|
-
this.mockManager = getMockManager(options);
|
|
12127
|
-
}
|
|
12128
|
-
supports(_descriptor) {
|
|
12129
|
-
return true;
|
|
12130
|
-
}
|
|
12131
|
-
stream(options, descriptor, _spec) {
|
|
12132
|
-
const context = {
|
|
12133
|
-
model: options.model,
|
|
12134
|
-
provider: descriptor.provider,
|
|
12135
|
-
modelName: descriptor.name,
|
|
12136
|
-
options,
|
|
12137
|
-
messages: options.messages
|
|
12138
|
-
};
|
|
12139
|
-
return this.createMockStreamFromContext(context);
|
|
12140
|
-
}
|
|
12141
|
-
async *createMockStreamFromContext(context) {
|
|
12142
|
-
const mockResponse = await this.mockManager.findMatch(context);
|
|
12143
|
-
if (!mockResponse) {
|
|
12144
|
-
yield {
|
|
12145
|
-
text: "",
|
|
12146
|
-
finishReason: "stop",
|
|
12147
|
-
usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 }
|
|
12148
|
-
};
|
|
12149
|
-
return;
|
|
12150
|
-
}
|
|
12151
|
-
yield* createMockStream(mockResponse);
|
|
12152
|
-
}
|
|
12153
|
-
// ==========================================================================
|
|
12154
|
-
// Image Generation Support
|
|
12155
|
-
// ==========================================================================
|
|
12156
|
-
/**
|
|
12157
|
-
* Check if this adapter supports image generation for a given model.
|
|
12158
|
-
* Returns true if there's a registered mock with images for this model.
|
|
12159
|
-
*/
|
|
12160
|
-
supportsImageGeneration(_modelId) {
|
|
12161
|
-
return true;
|
|
12162
|
-
}
|
|
12163
|
-
/**
|
|
12164
|
-
* Generate mock images based on registered mocks.
|
|
12165
|
-
*
|
|
12166
|
-
* @param options - Image generation options
|
|
12167
|
-
* @returns Mock image generation result
|
|
12168
|
-
*/
|
|
12169
|
-
async generateImage(options) {
|
|
12170
|
-
const context = {
|
|
12171
|
-
model: options.model,
|
|
12172
|
-
provider: "mock",
|
|
12173
|
-
modelName: options.model,
|
|
12174
|
-
options: {
|
|
12175
|
-
model: options.model,
|
|
12176
|
-
messages: [{ role: "user", content: options.prompt }]
|
|
12177
|
-
},
|
|
12178
|
-
messages: [{ role: "user", content: options.prompt }]
|
|
12179
|
-
};
|
|
12180
|
-
const mockResponse = await this.mockManager.findMatch(context);
|
|
12181
|
-
if (!mockResponse?.images || mockResponse.images.length === 0) {
|
|
12182
|
-
throw new Error(
|
|
12183
|
-
`No mock registered for image generation with model "${options.model}". Use mockLLM().forModel("${options.model}").returnsImage(...).register() to add one.`
|
|
12184
|
-
);
|
|
12185
|
-
}
|
|
12186
|
-
return this.createImageResult(options, mockResponse);
|
|
12187
|
-
}
|
|
12188
|
-
/**
|
|
12189
|
-
* Transform mock response into ImageGenerationResult format.
|
|
12190
|
-
*
|
|
12191
|
-
* @param options - Original image generation options
|
|
12192
|
-
* @param mockResponse - Mock response containing image data
|
|
12193
|
-
* @returns ImageGenerationResult with mock data and zero cost
|
|
12194
|
-
*/
|
|
12195
|
-
createImageResult(options, mockResponse) {
|
|
12196
|
-
const images = mockResponse.images ?? [];
|
|
12197
|
-
return {
|
|
12198
|
-
images: images.map((img) => ({
|
|
12199
|
-
b64Json: img.data,
|
|
12200
|
-
revisedPrompt: img.revisedPrompt
|
|
12201
|
-
})),
|
|
12202
|
-
model: options.model,
|
|
12203
|
-
usage: {
|
|
12204
|
-
imagesGenerated: images.length,
|
|
12205
|
-
size: options.size ?? "1024x1024",
|
|
12206
|
-
quality: options.quality ?? "standard"
|
|
12207
|
-
},
|
|
12208
|
-
cost: 0
|
|
12209
|
-
// Mock cost is always 0
|
|
12210
|
-
};
|
|
12211
|
-
}
|
|
12212
|
-
// ==========================================================================
|
|
12213
|
-
// Speech Generation Support
|
|
12214
|
-
// ==========================================================================
|
|
12215
|
-
/**
|
|
12216
|
-
* Check if this adapter supports speech generation for a given model.
|
|
12217
|
-
* Returns true if there's a registered mock with audio for this model.
|
|
12218
|
-
*/
|
|
12219
|
-
supportsSpeechGeneration(_modelId) {
|
|
12220
|
-
return true;
|
|
12221
|
-
}
|
|
12222
|
-
/**
|
|
12223
|
-
* Generate mock speech based on registered mocks.
|
|
12224
|
-
*
|
|
12225
|
-
* @param options - Speech generation options
|
|
12226
|
-
* @returns Mock speech generation result
|
|
12227
|
-
*/
|
|
12228
|
-
async generateSpeech(options) {
|
|
12229
|
-
const context = {
|
|
12230
|
-
model: options.model,
|
|
12231
|
-
provider: "mock",
|
|
12232
|
-
modelName: options.model,
|
|
12233
|
-
options: {
|
|
12234
|
-
model: options.model,
|
|
12235
|
-
messages: [{ role: "user", content: options.input }]
|
|
12236
|
-
},
|
|
12237
|
-
messages: [{ role: "user", content: options.input }]
|
|
12238
|
-
};
|
|
12239
|
-
const mockResponse = await this.mockManager.findMatch(context);
|
|
12240
|
-
if (!mockResponse?.audio) {
|
|
12241
|
-
throw new Error(
|
|
12242
|
-
`No mock registered for speech generation with model "${options.model}". Use mockLLM().forModel("${options.model}").returnsAudio(...).register() to add one.`
|
|
12243
|
-
);
|
|
12244
|
-
}
|
|
12245
|
-
return this.createSpeechResult(options, mockResponse);
|
|
12246
|
-
}
|
|
12247
|
-
/**
|
|
12248
|
-
* Transform mock response into SpeechGenerationResult format.
|
|
12249
|
-
* Converts base64 audio data to ArrayBuffer.
|
|
12250
|
-
*
|
|
12251
|
-
* @param options - Original speech generation options
|
|
12252
|
-
* @param mockResponse - Mock response containing audio data
|
|
12253
|
-
* @returns SpeechGenerationResult with mock data and zero cost
|
|
12254
|
-
*/
|
|
12255
|
-
createSpeechResult(options, mockResponse) {
|
|
12256
|
-
const audio = mockResponse.audio;
|
|
12257
|
-
const binaryString = atob(audio.data);
|
|
12258
|
-
const bytes = new Uint8Array(binaryString.length);
|
|
12259
|
-
for (let i = 0; i < binaryString.length; i++) {
|
|
12260
|
-
bytes[i] = binaryString.charCodeAt(i);
|
|
12261
|
-
}
|
|
12262
|
-
const format = this.mimeTypeToAudioFormat(audio.mimeType);
|
|
12263
|
-
return {
|
|
12264
|
-
audio: bytes.buffer,
|
|
12265
|
-
model: options.model,
|
|
12266
|
-
usage: {
|
|
12267
|
-
characterCount: options.input.length
|
|
12268
|
-
},
|
|
12269
|
-
cost: 0,
|
|
12270
|
-
// Mock cost is always 0
|
|
12271
|
-
format
|
|
12272
|
-
};
|
|
12273
|
-
}
|
|
12274
|
-
/**
|
|
12275
|
-
* Map MIME type to audio format for SpeechGenerationResult.
|
|
12276
|
-
* Defaults to "mp3" for unknown MIME types.
|
|
12277
|
-
*
|
|
12278
|
-
* @param mimeType - Audio MIME type string
|
|
12279
|
-
* @returns Audio format identifier
|
|
12280
|
-
*/
|
|
12281
|
-
mimeTypeToAudioFormat(mimeType) {
|
|
12282
|
-
const mapping = {
|
|
12283
|
-
"audio/mp3": "mp3",
|
|
12284
|
-
"audio/mpeg": "mp3",
|
|
12285
|
-
"audio/wav": "wav",
|
|
12286
|
-
"audio/webm": "opus",
|
|
12287
|
-
"audio/ogg": "opus"
|
|
12288
|
-
};
|
|
12289
|
-
return mapping[mimeType] ?? "mp3";
|
|
12290
|
-
}
|
|
12291
|
-
};
|
|
12292
|
-
function createMockAdapter(options) {
|
|
12293
|
-
return new MockProviderAdapter(options);
|
|
12294
|
-
}
|
|
12295  -
12296  -  // src/testing/mock-builder.ts
12297  -  init_input_content();
12298  -  init_messages();
12299  -  function hasImageContent(content) {
12300  -  if (typeof content === "string") return false;
12301  -  return content.some((part) => isImagePart(part));
12302  -  }
12303  -  function hasAudioContent(content) {
12304  -  if (typeof content === "string") return false;
12305  -  return content.some((part) => isAudioPart(part));
12306  -  }
12307  -  function countImages(content) {
12308  -  if (typeof content === "string") return 0;
12309  -  return content.filter((part) => isImagePart(part)).length;
12310  -  }
12311  -  var MockBuilder = class {
12312  -  matchers = [];
12313  -  response = {};
12314  -  label;
12315  -  isOnce = false;
12316  -  id;
12317  -  /**
12318  -  * Match calls to a specific model (by name, supports partial matching).
12319  -  *
12320  -  * @example
12321  -  * mockLLM().forModel('gpt-5')
12322  -  * mockLLM().forModel('claude') // matches any Claude model
12323  -  */
12324  -  forModel(modelName) {
12325  -  if (!modelName || modelName.trim() === "") {
12326  -  throw new Error("Model name cannot be empty");
12327  -  }
12328  -  this.matchers.push((ctx) => ctx.modelName.includes(modelName));
12329  -  return this;
12330  -  }
12331  -  /**
12332  -  * Match calls to any model.
12333  -  * Useful when you want to mock responses regardless of the model used.
12334  -  *
12335  -  * @example
12336  -  * mockLLM().forAnyModel()
12337  -  */
12338  -  forAnyModel() {
12339  -  this.matchers.push(() => true);
12340  -  return this;
12341  -  }
12342  -  /**
12343  -  * Match calls to a specific provider.
12344  -  *
12345  -  * @example
12346  -  * mockLLM().forProvider('openai')
12347  -  * mockLLM().forProvider('anthropic')
12348  -  */
12349  -  forProvider(provider) {
12350  -  if (!provider || provider.trim() === "") {
12351  -  throw new Error("Provider name cannot be empty");
12352  -  }
12353  -  this.matchers.push((ctx) => ctx.provider === provider);
12354  -  return this;
12355  -  }
12356  -  /**
12357  -  * Match calls to any provider.
12358  -  * Useful when you want to mock responses regardless of the provider used.
12359  -  *
12360  -  * @example
12361  -  * mockLLM().forAnyProvider()
12362  -  */
12363  -  forAnyProvider() {
12364  -  this.matchers.push(() => true);
12365  -  return this;
12366  -  }
12367  -  /**
12368  -  * Match when any message contains the given text (case-insensitive).
12369  -  *
12370  -  * @example
12371  -  * mockLLM().whenMessageContains('hello')
12372  -  */
12373  -  whenMessageContains(text3) {
12374  -  this.matchers.push(
12375  -  (ctx) => ctx.messages.some(
12376  -  (msg) => extractMessageText(msg.content).toLowerCase().includes(text3.toLowerCase())
12377  -  )
12378  -  );
12379  -  return this;
12380  -  }
12381  -  /**
12382  -  * Match when the last message contains the given text (case-insensitive).
12383  -  *
12384  -  * @example
12385  -  * mockLLM().whenLastMessageContains('goodbye')
12386  -  */
12387  -  whenLastMessageContains(text3) {
12388  -  this.matchers.push((ctx) => {
12389  -  const lastMsg = ctx.messages[ctx.messages.length - 1];
12390  -  if (!lastMsg) return false;
12391  -  return extractMessageText(lastMsg.content).toLowerCase().includes(text3.toLowerCase());
12392  -  });
12393  -  return this;
12394  -  }
12395  -  /**
12396  -  * Match when any message matches the given regex.
12397  -  *
12398  -  * @example
12399  -  * mockLLM().whenMessageMatches(/calculate \d+/)
12400  -  */
12401  -  whenMessageMatches(regex) {
12402  -  this.matchers.push((ctx) => ctx.messages.some((msg) => regex.test(extractMessageText(msg.content))));
12403  -  return this;
12404  -  }
12405  -  /**
12406  -  * Match when a message with a specific role contains text.
12407  -  *
12408  -  * @example
12409  -  * mockLLM().whenRoleContains('system', 'You are a helpful assistant')
12410  -  */
12411  -  whenRoleContains(role, text3) {
12412  -  this.matchers.push(
12413  -  (ctx) => ctx.messages.some(
12414  -  (msg) => msg.role === role && extractMessageText(msg.content).toLowerCase().includes(text3.toLowerCase())
12415  -  )
12416  -  );
12417  -  return this;
12418  -  }
12419  -  /**
12420  -  * Match based on the number of messages in the conversation.
12421  -  *
12422  -  * @example
12423  -  * mockLLM().whenMessageCount((count) => count > 10)
12424  -  */
12425  -  whenMessageCount(predicate) {
12426  -  this.matchers.push((ctx) => predicate(ctx.messages.length));
12427  -  return this;
12428  -  }
12429  -  /**
12430  -  * Add a custom matcher function.
12431  -  * This provides full control over matching logic.
12432  -  *
12433  -  * @example
12434  -  * mockLLM().when((ctx) => {
12435  -  * return ctx.options.temperature > 0.8;
12436  -  * })
12437  -  */
12438  -  when(matcher) {
12439  -  this.matchers.push(matcher);
12440  -  return this;
12441  -  }
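The methods above are MockBuilder's matcher half: each pushes a predicate onto this.matchers and returns this, and (as build() later in this hunk shows) every accumulated predicate must pass for the mock to apply. A usage sketch assembled from the @example lines above (llmist 6.x API, removed from this bundle in 8.0.0):

  mockLLM()
    .forProvider("anthropic")                // provider must equal "anthropic"
    .forModel("claude")                      // model name must contain "claude"
    .whenLastMessageContains("goodbye")      // case-insensitive substring match
    .when((ctx) => ctx.messages.length > 2)  // custom predicate; all matchers are ANDed
    .returns("See you!")                     // response setters follow below
    .register();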
12442  -  // ==========================================================================
12443  -  // Multimodal Matchers
12444  -  // ==========================================================================
12445  -  /**
12446  -  * Match when any message contains an image.
12447  -  *
12448  -  * @example
12449  -  * mockLLM().whenMessageHasImage().returns("I see an image of a sunset.")
12450  -  */
12451  -  whenMessageHasImage() {
12452  -  this.matchers.push((ctx) => ctx.messages.some((msg) => hasImageContent(msg.content)));
12453  -  return this;
12454  -  }
12455  -  /**
12456  -  * Match when any message contains audio.
12457  -  *
12458  -  * @example
12459  -  * mockLLM().whenMessageHasAudio().returns("I hear music playing.")
12460  -  */
12461  -  whenMessageHasAudio() {
12462  -  this.matchers.push((ctx) => ctx.messages.some((msg) => hasAudioContent(msg.content)));
12463  -  return this;
12464  -  }
12465  -  /**
12466  -  * Match based on the number of images in the last message.
12467  -  *
12468  -  * @example
12469  -  * mockLLM().whenImageCount((n) => n >= 2).returns("Comparing multiple images...")
12470  -  */
12471  -  whenImageCount(predicate) {
12472  -  this.matchers.push((ctx) => {
12473  -  const lastMsg = ctx.messages[ctx.messages.length - 1];
12474  -  if (!lastMsg) return false;
12475  -  return predicate(countImages(lastMsg.content));
12476  -  });
12477  -  return this;
12478  -  }
12479  -  /**
12480  -  * Set the text response to return.
12481  -  * Can be a static string or a function that returns a string dynamically.
12482  -  *
12483  -  * @example
12484  -  * mockLLM().returns('Hello, world!')
12485  -  * mockLLM().returns(() => `Response at ${Date.now()}`)
12486  -  * mockLLM().returns((ctx) => `You said: ${ctx.messages[0]?.content}`)
12487  -  */
12488  -  returns(text3) {
12489  -  if (typeof text3 === "function") {
12490  -  this.response = async (ctx) => {
12491  -  const resolvedText = await Promise.resolve().then(() => text3(ctx));
12492  -  return { text: resolvedText };
12493  -  };
12494  -  } else {
12495  -  if (typeof this.response === "function") {
12496  -  throw new Error("Cannot use returns() after withResponse() with a function");
12497  -  }
12498  -  this.response.text = text3;
12499  -  }
12500  -  return this;
12501  -  }
12502  -  /**
12503  -  * Set gadget calls to include in the response.
12504  -  *
12505  -  * @example
12506  -  * mockLLM().returnsGadgetCalls([
12507  -  * { gadgetName: 'calculator', parameters: { op: 'add', a: 1, b: 2 } }
12508  -  * ])
12509  -  */
12510  -  returnsGadgetCalls(calls) {
12511  -  if (typeof this.response === "function") {
12512  -  throw new Error("Cannot use returnsGadgetCalls() after withResponse() with a function");
12513  -  }
12514  -  this.response.gadgetCalls = calls;
12515  -  return this;
12516  -  }
12517  -  /**
12518  -  * Add a single gadget call to the response.
12519  -  *
12520  -  * @example
12521  -  * mockLLM()
12522  -  * .returnsGadgetCall('calculator', { op: 'add', a: 1, b: 2 })
12523  -  * .returnsGadgetCall('logger', { message: 'Done!' })
12524  -  */
12525  -  returnsGadgetCall(gadgetName, parameters) {
12526  -  if (typeof this.response === "function") {
12527  -  throw new Error("Cannot use returnsGadgetCall() after withResponse() with a function");
12528  -  }
12529  -  if (!this.response.gadgetCalls) {
12530  -  this.response.gadgetCalls = [];
12531  -  }
12532  -  this.response.gadgetCalls.push({ gadgetName, parameters });
12533  -  return this;
12534  -  }
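returns() and returnsGadgetCall()/returnsGadgetCalls() above fill in the mock response's text and gadgetCalls fields. A sketch based on the @example blocks above (6.x API):

  mockLLM()
    .forAnyModel()
    .returns("Calculating...")                                    // static text
    .returnsGadgetCall("calculator", { op: "add", a: 1, b: 2 })
    .returnsGadgetCall("logger", { message: "Done!" })            // appended to gadgetCalls
    .register();

  // Dynamic text is also possible, but it replaces the whole response object,
  // so per the guards above it cannot be combined with the gadget-call setters:
  mockLLM().forAnyModel().returns((ctx) => `You said: ${ctx.messages[0]?.content}`).register();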
12535  -  // ==========================================================================
12536  -  // Multimodal Response Helpers
12537  -  // ==========================================================================
12538  -  /**
12539  -  * Return a single image in the response.
12540  -  * Useful for mocking image generation endpoints.
12541  -  *
12542  -  * @param data - Image data (base64 string or Buffer)
12543  -  * @param mimeType - MIME type (auto-detected if Buffer provided without type)
12544  -  *
12545  -  * @example
12546  -  * mockLLM()
12547  -  * .forModel('dall-e-3')
12548  -  * .returnsImage(pngBuffer)
12549  -  * .register();
12550  -  */
12551  -  returnsImage(data, mimeType) {
12552  -  if (typeof this.response === "function") {
12553  -  throw new Error("Cannot use returnsImage() after withResponse() with a function");
12554  -  }
12555  -  let imageData;
12556  -  let imageMime;
12557  -  if (typeof data === "string") {
12558  -  imageData = data;
12559  -  if (!mimeType) {
12560  -  throw new Error("MIME type is required when providing base64 string data");
12561  -  }
12562  -  imageMime = mimeType;
12563  -  } else {
12564  -  imageData = toBase64(data);
12565  -  const detected = mimeType ?? detectImageMimeType(data);
12566  -  if (!detected) {
12567  -  throw new Error(
12568  -  "Could not detect image MIME type. Please provide the mimeType parameter explicitly."
12569  -  );
12570  -  }
12571  -  imageMime = detected;
12572  -  }
12573  -  if (!this.response.images) {
12574  -  this.response.images = [];
12575  -  }
12576  -  this.response.images.push({ data: imageData, mimeType: imageMime });
12577  -  return this;
12578  -  }
12579  -  /**
12580  -  * Return multiple images in the response.
12581  -  *
12582  -  * @example
12583  -  * mockLLM()
12584  -  * .forModel('dall-e-3')
12585  -  * .returnsImages([
12586  -  * { data: pngBuffer1 },
12587  -  * { data: pngBuffer2 },
12588  -  * ])
12589  -  * .register();
12590  -  */
12591  -  returnsImages(images) {
12592  -  for (const img of images) {
12593  -  this.returnsImage(img.data, img.mimeType);
12594  -  if (img.revisedPrompt && this.response && typeof this.response !== "function") {
12595  -  const lastImage = this.response.images?.[this.response.images.length - 1];
12596  -  if (lastImage) {
12597  -  lastImage.revisedPrompt = img.revisedPrompt;
12598  -  }
12599  -  }
12600  -  }
12601  -  return this;
12602  -  }
12603  -  /**
12604  -  * Return audio data in the response.
12605  -  * Useful for mocking speech synthesis endpoints.
12606  -  *
12607  -  * @param data - Audio data (base64 string or Buffer)
12608  -  * @param mimeType - MIME type (auto-detected if Buffer provided without type)
12609  -  *
12610  -  * @example
12611  -  * mockLLM()
12612  -  * .forModel('tts-1')
12613  -  * .returnsAudio(mp3Buffer)
12614  -  * .register();
12615  -  */
12616  -  returnsAudio(data, mimeType) {
12617  -  if (typeof this.response === "function") {
12618  -  throw new Error("Cannot use returnsAudio() after withResponse() with a function");
12619  -  }
12620  -  let audioData;
12621  -  let audioMime;
12622  -  if (typeof data === "string") {
12623  -  audioData = data;
12624  -  if (!mimeType) {
12625  -  throw new Error("MIME type is required when providing base64 string data");
12626  -  }
12627  -  audioMime = mimeType;
12628  -  } else {
12629  -  audioData = toBase64(data);
12630  -  const detected = mimeType ?? detectAudioMimeType(data);
12631  -  if (!detected) {
12632  -  throw new Error(
12633  -  "Could not detect audio MIME type. Please provide the mimeType parameter explicitly."
12634  -  );
12635  -  }
12636  -  audioMime = detected;
12637  -  }
12638  -  this.response.audio = { data: audioData, mimeType: audioMime };
12639  -  return this;
12640  -  }
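The image and audio helpers above mirror each other: a base64 string requires an explicit MIME type, while a Buffer is base64-encoded via toBase64() and sniffed with detectImageMimeType()/detectAudioMimeType(), throwing if detection fails. Sketch per the @example blocks (6.x API; pngBuffer, mp3Buffer and base64Png are placeholders):

  mockLLM().forModel("dall-e-3").returnsImage(pngBuffer).register();        // MIME sniffed from the Buffer
  mockLLM().forModel("tts-1").returnsAudio(mp3Buffer).register();
  mockLLM().forAnyModel().returnsImage(base64Png, "image/png").register();  // string data: MIME type required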
12641  -  /**
12642  -  * Set the complete mock response object.
12643  -  * This allows full control over all response properties.
12644  -  * Can also be a function that generates the response dynamically based on context.
12645  -  *
12646  -  * @example
12647  -  * // Static response
12648  -  * mockLLM().withResponse({
12649  -  * text: 'Hello',
12650  -  * usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
12651  -  * finishReason: 'stop'
12652  -  * })
12653  -  *
12654  -  * @example
12655  -  * // Dynamic response
12656  -  * mockLLM().withResponse((ctx) => ({
12657  -  * text: `You said: ${ctx.messages[ctx.messages.length - 1]?.content}`,
12658  -  * usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }
12659  -  * }))
12660  -  */
12661  -  withResponse(response) {
12662  -  this.response = response;
12663  -  return this;
12664  -  }
12665  -  /**
12666  -  * Set simulated token usage.
12667  -  *
12668  -  * @example
12669  -  * mockLLM().withUsage({ inputTokens: 100, outputTokens: 50, totalTokens: 150 })
12670  -  */
12671  -  withUsage(usage) {
12672  -  if (typeof this.response === "function") {
12673  -  throw new Error("Cannot use withUsage() after withResponse() with a function");
12674  -  }
12675  -  if (usage.inputTokens < 0 || usage.outputTokens < 0 || usage.totalTokens < 0) {
12676  -  throw new Error("Token counts cannot be negative");
12677  -  }
12678  -  if (usage.totalTokens !== usage.inputTokens + usage.outputTokens) {
12679  -  throw new Error("totalTokens must equal inputTokens + outputTokens");
12680  -  }
12681  -  this.response.usage = usage;
12682  -  return this;
12683  -  }
12684  -  /**
12685  -  * Set the finish reason.
12686  -  *
12687  -  * @example
12688  -  * mockLLM().withFinishReason('stop')
12689  -  * mockLLM().withFinishReason('length')
12690  -  */
12691  -  withFinishReason(reason) {
12692  -  if (typeof this.response === "function") {
12693  -  throw new Error("Cannot use withFinishReason() after withResponse() with a function");
12694  -  }
12695  -  this.response.finishReason = reason;
12696  -  return this;
12697  -  }
12698  -  /**
12699  -  * Set initial delay before streaming starts (simulates network latency).
12700  -  *
12701  -  * @example
12702  -  * mockLLM().withDelay(100) // 100ms delay
12703  -  */
12704  -  withDelay(ms) {
12705  -  if (typeof this.response === "function") {
12706  -  throw new Error("Cannot use withDelay() after withResponse() with a function");
12707  -  }
12708  -  if (ms < 0) {
12709  -  throw new Error("Delay must be non-negative");
12710  -  }
12711  -  this.response.delayMs = ms;
12712  -  return this;
12713  -  }
12714  -  /**
12715  -  * Set delay between stream chunks (simulates realistic streaming).
12716  -  *
12717  -  * @example
12718  -  * mockLLM().withStreamDelay(10) // 10ms between chunks
12719  -  */
12720  -  withStreamDelay(ms) {
12721  -  if (typeof this.response === "function") {
12722  -  throw new Error("Cannot use withStreamDelay() after withResponse() with a function");
12723  -  }
12724  -  if (ms < 0) {
12725  -  throw new Error("Stream delay must be non-negative");
12726  -  }
12727  -  this.response.streamDelayMs = ms;
12728  -  return this;
12729  -  }
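withResponse() swaps in a complete response object (optionally a context function), after which the granular setters above refuse to run; withUsage() additionally rejects negative counts and requires totalTokens to equal inputTokens + outputTokens. Sketch from the @example blocks (6.x API):

  mockLLM()
    .forModel("gpt-5")
    .returns("Hello")
    .withUsage({ inputTokens: 100, outputTokens: 50, totalTokens: 150 })  // 100 + 50 must equal 150
    .withFinishReason("stop")
    .withDelay(100)        // ms before the mock stream starts
    .withStreamDelay(10)   // ms between chunks
    .register();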
12730  -  /**
12731  -  * Set a label for this mock (useful for debugging).
12732  -  *
12733  -  * @example
12734  -  * mockLLM().withLabel('greeting mock')
12735  -  */
12736  -  withLabel(label) {
12737  -  this.label = label;
12738  -  return this;
12739  -  }
12740  -  /**
12741  -  * Set a specific ID for this mock.
12742  -  *
12743  -  * @example
12744  -  * mockLLM().withId('my-custom-mock-id')
12745  -  */
12746  -  withId(id) {
12747  -  this.id = id;
12748  -  return this;
12749  -  }
12750  -  /**
12751  -  * Mark this mock as one-time use (will be removed after first match).
12752  -  *
12753  -  * @example
12754  -  * mockLLM().once()
12755  -  */
12756  -  once() {
12757  -  this.isOnce = true;
12758  -  return this;
12759  -  }
12760  -  /**
12761  -  * Build the mock registration without registering it.
12762  -  * Useful if you want to register it manually later.
12763  -  *
12764  -  * @returns The built MockRegistration object (without id if not specified)
12765  -  */
12766  -  build() {
12767  -  if (this.matchers.length === 0) {
12768  -  throw new Error(
12769  -  "Mock must have at least one matcher. Use .when(), .forModel(), .forProvider(), etc."
12770  -  );
12771  -  }
12772  -  const combinedMatcher = async (ctx) => {
12773  -  for (const matcher of this.matchers) {
12774  -  const matches = await Promise.resolve(matcher(ctx));
12775  -  if (!matches) return false;
12776  -  }
12777  -  return true;
12778  -  };
12779  -  return {
12780  -  id: this.id,
12781  -  matcher: combinedMatcher,
12782  -  response: this.response,
12783  -  label: this.label,
12784  -  once: this.isOnce
12785  -  };
12786  -  }
12787  -  /**
12788  -  * Register this mock with the global MockManager.
12789  -  * Returns the ID of the registered mock.
12790  -  *
12791  -  * @example
12792  -  * const mockId = mockLLM().forModel('gpt-5').returns('Hello!').register();
12793  -  * // Later: getMockManager().unregister(mockId);
12794  -  */
12795  -  register() {
12796  -  const mockManager = getMockManager();
12797  -  const registration = this.build();
12798  -  return mockManager.register(registration);
12799  -  }
12800  -  };
12801  -  function mockLLM() {
12802  -  return new MockBuilder();
12803  -  }
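build() folds every accumulated matcher into one async AND-combined matcher and returns the registration object; register() hands that to the global MockManager and returns the mock's id, and once() marks the registration for removal after its first match. Sketch per the @example above (6.x API; getMockManager was also a 6.x export, dropped further down):

  const mockId = mockLLM()
    .forModel("gpt-5")
    .once()
    .withLabel("greeting mock")
    .returns("Hello!")
    .register();
  // Later:
  getMockManager().unregister(mockId);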
12804  -
12805  -  // src/testing/mock-client.ts
12806  -  init_client();
12807  -  function createMockClient(options) {
12808  -  return new LLMist({
12809  -  adapters: [new MockProviderAdapter(options)],
12810  -  autoDiscoverProviders: false,
12811  -  defaultProvider: "mock"
12812  -  });
12813  -  }
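createMockClient() above built a complete LLMist client whose only adapter was a MockProviderAdapter, with provider auto-discovery disabled and "mock" as the default provider, so every call was answered by registered mocks. Sketch (6.x API; mockOptions is a placeholder, its shape is not shown in this hunk):

  const client = createMockClient(mockOptions);  // options are forwarded to new MockProviderAdapter(options)
  mockLLM().forAnyModel().returns("stubbed").register();
  // client now resolves calls against the "mock" provider instead of a real backend.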
12814  -
12815  -  // src/testing/mock-gadget.ts
12816  -  init_gadget();
12817  -
12818  -  // src/index.ts
12819  12056  function getHostExports(ctx) {
12820  12057  if (!ctx?.hostExports) {
12821  12058  throw new Error(
@@ -12828,6 +12065,7 @@ function getHostExports(ctx) {
12828  12065  0 && (module.exports = {
12829  12066  AbortException,
12830  12067  AbstractGadget,
12068  +  Agent,
12831  12069  AgentBuilder,
12832  12070  AnthropicMessagesProvider,
12833  12071  CompactionManager,
@@ -12835,8 +12073,13 @@ function getHostExports(ctx) {
12835  12073  DEFAULT_COMPACTION_CONFIG,
12836  12074  DEFAULT_HINTS,
12837  12075  DEFAULT_PROMPTS,
12076  +  DEFAULT_RETRY_CONFIG,
12838  12077  DEFAULT_SUMMARIZATION_PROMPT,
12839  12078  ExecutionTree,
12079  +  FALLBACK_CHARS_PER_TOKEN,
12080  +  GADGET_ARG_PREFIX,
12081  +  GADGET_END_PREFIX,
12082  +  GADGET_START_PREFIX,
12840  12083  Gadget,
12841  12084  GadgetCallParser,
12842  12085  GadgetExecutor,
@@ -12850,9 +12093,6 @@ function getHostExports(ctx) {
12850  12093  LLMist,
12851  12094  MODEL_ALIASES,
12852  12095  MediaStore,
12853  -  MockBuilder,
12854  -  MockManager,
12855  -  MockProviderAdapter,
12856  12096  ModelIdentifierParser,
12857  12097  ModelRegistry,
12858  12098  OpenAIChatProvider,
@@ -12873,11 +12113,7 @@ function getHostExports(ctx) {
12873  12113  createHints,
12874  12114  createLogger,
12875  12115  createMediaOutput,
12876  -  createMockAdapter,
12877  -  createMockClient,
12878  -  createMockStream,
12879  12116  createOpenAIProviderFromEnv,
12880  -  createTextMockStream,
12881  12117  defaultLogger,
12882  12118  detectAudioMimeType,
12883  12119  detectImageMimeType,
@@ -12886,8 +12122,8 @@ function getHostExports(ctx) {
12886  12122  filterByDepth,
12887  12123  filterByParent,
12888  12124  filterRootEvents,
12125  +  formatLLMError,
12889  12126  getHostExports,
12890  -  getMockManager,
12891  12127  getModelId,
12892  12128  getProvider,
12893  12129  groupByParent,
@@ -12895,16 +12131,17 @@ function getHostExports(ctx) {
12895  12131  imageFromBase64,
12896  12132  imageFromBuffer,
12897  12133  imageFromUrl,
12134  +  isAbortError,
12898  12135  isAudioPart,
12899  12136  isDataUrl,
12900  12137  isGadgetEvent,
12901  12138  isImagePart,
12902  12139  isLLMEvent,
12140  +  isRetryableError,
12903  12141  isRootEvent,
12904  12142  isSubagentEvent,
12905  12143  isTextPart,
12906  12144  iterationProgressHint,
12907  -  mockLLM,
12908  12145  normalizeMessageContent,
12909  12146  parallelGadgetHint,
12910  12147  parseDataUrl,
@@ -12912,6 +12149,7 @@ function getHostExports(ctx) {
12912  12149  resolveHintTemplate,
12913  12150  resolveModel,
12914  12151  resolvePromptTemplate,
12152  +  resolveRetryConfig,
12915  12153  resolveRulesTemplate,
12916  12154  resolveSubagentModel,
12917  12155  resolveValue,
@@ -12921,11 +12159,13 @@ function getHostExports(ctx) {
12921  12159  resultWithImages,
12922  12160  resultWithMedia,
12923  12161  runWithHandlers,
12162  +  schemaToJSONSchema,
12924  12163  stream,
12925  12164  text,
12926  12165  toBase64,
12927  12166  validateAndApplyDefaults,
12928  12167  validateGadgetParams,
12168  +  validateGadgetSchema,
12929  12169  z
12930  12170  });
12931  12171  //# sourceMappingURL=index.cjs.map
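Net effect of the export hunks above on the main CJS entry: the testing surface (MockBuilder, MockManager, MockProviderAdapter, createMockAdapter, createMockClient, createMockStream, createTextMockStream, getMockManager, mockLLM) is dropped, while Agent, DEFAULT_RETRY_CONFIG, FALLBACK_CHARS_PER_TOKEN, the GADGET_*_PREFIX constants, formatLLMError, isAbortError, isRetryableError, resolveRetryConfig, schemaToJSONSchema and validateGadgetSchema are added. A before/after import sketch (whether the mock utilities ship from some other entry point in 8.0.0 is not shown in this file):

  // llmist 6.x — per the removed export rows above
  import { mockLLM, createMockClient, getMockManager } from "llmist";

  // llmist 8.0.0 — per the added export rows; the three imports above no longer resolve from the main entry
  import { Agent, formatLLMError, isRetryableError, resolveRetryConfig } from "llmist";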