@inkeep/agents-core 0.48.2 → 0.48.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/auth/auth.d.ts +18 -18
- package/dist/auth/permissions.d.ts +9 -9
- package/dist/data-access/manage/agents.d.ts +21 -21
- package/dist/data-access/manage/artifactComponents.d.ts +6 -6
- package/dist/data-access/manage/contextConfigs.d.ts +8 -8
- package/dist/data-access/manage/dataComponents.d.ts +2 -2
- package/dist/data-access/manage/functionTools.d.ts +8 -8
- package/dist/data-access/manage/skills.d.ts +8 -8
- package/dist/data-access/manage/subAgentExternalAgentRelations.d.ts +12 -12
- package/dist/data-access/manage/subAgentRelations.d.ts +16 -16
- package/dist/data-access/manage/subAgentTeamAgentRelations.d.ts +12 -12
- package/dist/data-access/manage/subAgents.d.ts +15 -15
- package/dist/data-access/manage/tools.d.ts +12 -12
- package/dist/data-access/manage/triggers.d.ts +2 -2
- package/dist/data-access/runtime/apiKeys.d.ts +8 -8
- package/dist/data-access/runtime/conversations.d.ts +16 -16
- package/dist/data-access/runtime/messages.d.ts +6 -6
- package/dist/data-access/runtime/tasks.d.ts +5 -5
- package/dist/db/manage/manage-schema.d.ts +4 -4
- package/dist/db/runtime/runtime-schema.d.ts +2 -2
- package/dist/index.d.ts +4 -2
- package/dist/index.js +4 -2
- package/dist/types/@vercel__functions/index.d.ts +11 -0
- package/dist/utils/index.d.ts +4 -2
- package/dist/utils/index.js +4 -2
- package/dist/utils/mock-provider.d.ts +41 -0
- package/dist/utils/mock-provider.js +117 -0
- package/dist/utils/model-factory.js +5 -2
- package/dist/utils/tracer-factory.d.ts +13 -1
- package/dist/utils/tracer-factory.js +21 -1
- package/dist/utils/wait-until.d.ts +22 -0
- package/dist/utils/wait-until.js +40 -0
- package/dist/validation/schemas.d.ts +150 -150
- package/package.json +7 -5
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
//#region src/utils/mock-provider.ts
|
|
2
|
+
/**
 * Pull a short preview of the most recent user message out of a prompt.
 *
 * Walks the prompt from newest to oldest, returns the first text part of the
 * most recent user message that has one, truncated to 200 characters with an
 * ellipsis. Falls back to a placeholder when no user text exists.
 */
function extractLastUserMessage(prompt) {
  // Iterate a reversed copy so the most recent user turn wins.
  for (const msg of [...prompt].reverse()) {
    if (msg.role !== "user") continue;
    const textPart = msg.content.find((part) => part.type === "text");
    if (textPart) {
      const { text } = textPart;
      return text.length > 200 ? `${text.slice(0, 200)}...` : text;
    }
  }
  return "(no user message)";
}
|
|
11
|
+
/**
 * Count the total characters of textual content across all prompt messages.
 *
 * String contents count their full length; array contents count only the
 * `text` field of parts that have one. Messages without `content` contribute
 * nothing.
 */
function countInputChars(prompt) {
  return prompt.reduce((total, msg) => {
    if (!("content" in msg)) return total;
    const { content } = msg;
    if (typeof content === "string") return total + content.length;
    if (!Array.isArray(content)) return total;
    return content.reduce(
      (sum, part) => ("text" in part ? sum + part.text.length : sum),
      total
    );
  }, 0);
}
|
|
21
|
+
/**
 * Compose the canned multi-line mock response body.
 *
 * Includes the mock model name, the prompt message count, a preview of the
 * last user message, and an ISO timestamp — one field per line.
 */
function buildMockResponse(modelName, prompt) {
  const summaryLines = [
    "Mock response.",
    `Model: mock/${modelName}`,
    `Input messages: ${prompt.length}`,
    `Last user message: "${extractLastUserMessage(prompt)}"`,
    `Timestamp: ${new Date().toISOString()}`,
  ];
  return summaryLines.join("\n");
}
|
|
32
|
+
/**
 * Deterministic mock implementation of the AI SDK LanguageModelV2 interface.
 *
 * Returns a canned response describing its own input instead of calling any
 * real provider — useful for tests and offline development. Token usage is
 * approximated at ~4 characters per token.
 */
var MockLanguageModel = class {
  // LanguageModelV2 interface metadata.
  specificationVersion = "v2";
  defaultObjectGenerationMode = undefined;
  supportsImageUrls = false;
  supportedUrls = {};
  provider = "mock";
  modelId;

  constructor(modelId) {
    this.modelId = modelId;
  }

  /** Rough usage estimate (~4 chars per token) for both sides of the call. */
  #estimateUsage(prompt, responseText) {
    const inputTokens = Math.ceil(countInputChars(prompt) / 4);
    const outputTokens = Math.ceil(responseText.length / 4);
    return { inputTokens, outputTokens, totalTokens: inputTokens + outputTokens };
  }

  /** Non-streaming generation: one text part plus estimated usage. */
  async doGenerate(options) {
    const responseText = buildMockResponse(this.modelId, options.prompt);
    return {
      content: [{ type: "text", text: responseText }],
      finishReason: "stop",
      usage: this.#estimateUsage(options.prompt, responseText),
      rawCall: { rawPrompt: options.prompt, rawSettings: {} },
      warnings: [],
    };
  }

  /**
   * Streaming generation: emits the canned response line by line as
   * text-delta events (5ms apart to simulate latency), framed by
   * stream-start / text-start / text-end / finish events.
   */
  async doStream(options) {
    const responseText = buildMockResponse(this.modelId, options.prompt);
    const usage = this.#estimateUsage(options.prompt, responseText);
    const lines = responseText.split("\n");
    const textId = "mock-text-0";
    const stream = new ReadableStream({
      async start(controller) {
        controller.enqueue({ type: "stream-start", warnings: [] });
        controller.enqueue({ type: "text-start", id: textId });
        let index = 0;
        for (const line of lines) {
          const isLast = index === lines.length - 1;
          // Re-attach the newline stripped by split(), except on the last line.
          controller.enqueue({
            type: "text-delta",
            id: textId,
            delta: isLast ? line : `${line}\n`,
          });
          if (!isLast) await new Promise((resolve) => setTimeout(resolve, 5));
          index += 1;
        }
        controller.enqueue({ type: "text-end", id: textId });
        controller.enqueue({ type: "finish", finishReason: "stop", usage });
        controller.close();
      },
    });
    return {
      stream,
      rawCall: { rawPrompt: options.prompt, rawSettings: {} },
      warnings: [],
    };
  }
};
|
|
112
|
+
/** Factory helper so callers can obtain a mock model without using the class directly. */
function createMockModel(modelId) {
  const model = new MockLanguageModel(modelId);
  return model;
}
|
|
115
|
+
|
|
116
|
+
//#endregion
|
|
117
|
+
export { MockLanguageModel, createMockModel };
|
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
import { getLogger } from "./logger.js";
|
|
2
|
+
import { createMockModel } from "./mock-provider.js";
|
|
2
3
|
import { anthropic, createAnthropic } from "@ai-sdk/anthropic";
|
|
3
4
|
import { createAzure } from "@ai-sdk/azure";
|
|
4
5
|
import { createGateway, gateway } from "@ai-sdk/gateway";
|
|
@@ -97,7 +98,7 @@ var ModelFactory = class ModelFactory {
|
|
|
97
98
|
hasProviderOptions: !!modelSettings.providerOptions
|
|
98
99
|
}, "Creating language model from config");
|
|
99
100
|
const providerConfig = ModelFactory.extractProviderConfig(modelSettings.providerOptions);
|
|
100
|
-
if (provider === "azure" || Object.keys(providerConfig).length > 0) {
|
|
101
|
+
if (provider !== "mock" && (provider === "azure" || Object.keys(providerConfig).length > 0)) {
|
|
101
102
|
logger.info({ config: providerConfig }, `Applying custom ${provider} provider configuration`);
|
|
102
103
|
return ModelFactory.createProvider(provider, providerConfig).languageModel(modelName);
|
|
103
104
|
}
|
|
@@ -108,6 +109,7 @@ var ModelFactory = class ModelFactory {
|
|
|
108
109
|
case "openrouter": return openrouter(modelName);
|
|
109
110
|
case "gateway": return gateway(modelName);
|
|
110
111
|
case "nim": return nimDefault(modelName);
|
|
112
|
+
case "mock": return createMockModel(modelName);
|
|
111
113
|
case "custom": throw new Error("Custom provider requires configuration. Please provide baseURL in providerOptions.custom.baseURL or providerOptions.baseURL");
|
|
112
114
|
default: throw new Error(`Unsupported provider: ${provider}. Supported providers are: ${ModelFactory.BUILT_IN_PROVIDERS.join(", ")}. To access other models, use OpenRouter (openrouter/model-id), Vercel AI Gateway (gateway/model-id), NVIDIA NIM (nim/model-id), or Custom OpenAI-compatible (custom/model-id).`);
|
|
113
115
|
}
|
|
@@ -123,7 +125,8 @@ var ModelFactory = class ModelFactory {
|
|
|
123
125
|
"openrouter",
|
|
124
126
|
"gateway",
|
|
125
127
|
"nim",
|
|
126
|
-
"custom"
|
|
128
|
+
"custom",
|
|
129
|
+
"mock"
|
|
127
130
|
];
|
|
128
131
|
/**
|
|
129
132
|
* Parse model string to extract provider and model name
|
|
@@ -23,5 +23,17 @@ declare function setSpanWithError(span: Span, error: Error, logger?: {
|
|
|
23
23
|
* Returns a no-op tracer if OpenTelemetry is not available
|
|
24
24
|
*/
|
|
25
25
|
declare function getTracer(serviceName: string, serviceVersion?: string): Tracer;
|
|
26
|
+
/**
|
|
27
|
+
* Force-flush all pending trace spans to the configured exporter.
|
|
28
|
+
*
|
|
29
|
+
* Uses the global TracerProvider registered by the OpenTelemetry SDK.
|
|
30
|
+
* This is safe to call from any package — it accesses the same provider
|
|
31
|
+
* that was set up in the host application's instrumentation.
|
|
32
|
+
*
|
|
33
|
+
* Use this in fire-and-forget handlers (e.g. Slack webhooks) where the
|
|
34
|
+
* HTTP response is sent before background work completes, so the
|
|
35
|
+
* per-request flush middleware runs too early to capture those spans.
|
|
36
|
+
*/
|
|
37
|
+
declare function flushTraces(): Promise<void>;
|
|
26
38
|
//#endregion
|
|
27
|
-
export { getTracer, setSpanWithError, unwrapError };
|
|
39
|
+
export { flushTraces, getTracer, setSpanWithError, unwrapError };
|
|
@@ -74,6 +74,26 @@ function getTracer(serviceName, serviceVersion) {
|
|
|
74
74
|
return noopTracer;
|
|
75
75
|
}
|
|
76
76
|
}
|
|
77
|
+
/**
|
|
78
|
+
* Force-flush all pending trace spans to the configured exporter.
|
|
79
|
+
*
|
|
80
|
+
* Uses the global TracerProvider registered by the OpenTelemetry SDK.
|
|
81
|
+
* This is safe to call from any package — it accesses the same provider
|
|
82
|
+
* that was set up in the host application's instrumentation.
|
|
83
|
+
*
|
|
84
|
+
* Use this in fire-and-forget handlers (e.g. Slack webhooks) where the
|
|
85
|
+
* HTTP response is sent before background work completes, so the
|
|
86
|
+
* per-request flush middleware runs too early to capture those spans.
|
|
87
|
+
*/
|
|
88
|
+
/**
 * Force-flush all pending trace spans to the configured exporter.
 *
 * Uses the global TracerProvider registered by the OpenTelemetry SDK.
 * This is safe to call from any package — it accesses the same provider
 * that was set up in the host application's instrumentation.
 *
 * Use this in fire-and-forget handlers (e.g. Slack webhooks) where the
 * HTTP response is sent before background work completes, so the
 * per-request flush middleware runs too early to capture those spans.
 */
async function flushTraces() {
  try {
    const globalProvider = trace.getTracerProvider();
    // The global provider may be a proxy wrapper; unwrap to the real one.
    const actual =
      typeof globalProvider.getDelegate === "function"
        ? globalProvider.getDelegate()
        : globalProvider;
    // No-op providers expose no forceFlush; nothing to do then.
    if (typeof actual.forceFlush !== "function") return;
    await actual.forceFlush();
  } catch (error) {
    logger.warn({ error }, "Failed to flush traces");
  }
}
|
|
77
97
|
|
|
78
98
|
//#endregion
|
|
79
|
-
export { getTracer, setSpanWithError, unwrapError };
|
|
99
|
+
export { flushTraces, getTracer, setSpanWithError, unwrapError };
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
//#region src/utils/wait-until.d.ts
|
|
2
|
+
type WaitUntilFn = (promise: Promise<unknown>) => void;
|
|
3
|
+
/**
|
|
4
|
+
* Lazy-load and cache Vercel's `waitUntil` function.
|
|
5
|
+
*
|
|
6
|
+
* - On Vercel (`process.env.VERCEL` set): dynamically imports `@vercel/functions`
|
|
7
|
+
* and returns `waitUntil`, which extends the serverless function lifetime
|
|
8
|
+
* past the HTTP response so background work can complete.
|
|
9
|
+
* - Outside Vercel: returns `undefined`. Callers should let the promise
|
|
10
|
+
* execute naturally via the Node.js event loop (fire-and-forget with
|
|
11
|
+
* error handling).
|
|
12
|
+
* - Import failure: logs a warning and returns `undefined` (graceful degradation).
|
|
13
|
+
* - Result is cached after first call (lazy singleton). Concurrent callers
|
|
14
|
+
* share the same import promise to avoid duplicate imports.
|
|
15
|
+
*/
|
|
16
|
+
declare function getWaitUntil(): Promise<WaitUntilFn | undefined>;
|
|
17
|
+
/**
|
|
18
|
+
* Reset internal cache. Exposed only for testing.
|
|
19
|
+
*/
|
|
20
|
+
declare function _resetWaitUntilCache(): void;
|
|
21
|
+
//#endregion
|
|
22
|
+
export { _resetWaitUntilCache, getWaitUntil };
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
import { getLogger } from "./logger.js";
|
|
2
|
+
|
|
3
|
+
//#region src/utils/wait-until.ts
|
|
4
|
+
const logger = getLogger("wait-until");
|
|
5
|
+
let _importPromise;
|
|
6
|
+
/**
|
|
7
|
+
* Lazy-load and cache Vercel's `waitUntil` function.
|
|
8
|
+
*
|
|
9
|
+
* - On Vercel (`process.env.VERCEL` set): dynamically imports `@vercel/functions`
|
|
10
|
+
* and returns `waitUntil`, which extends the serverless function lifetime
|
|
11
|
+
* past the HTTP response so background work can complete.
|
|
12
|
+
* - Outside Vercel: returns `undefined`. Callers should let the promise
|
|
13
|
+
* execute naturally via the Node.js event loop (fire-and-forget with
|
|
14
|
+
* error handling).
|
|
15
|
+
* - Import failure: logs a warning and returns `undefined` (graceful degradation).
|
|
16
|
+
* - Result is cached after first call (lazy singleton). Concurrent callers
|
|
17
|
+
* share the same import promise to avoid duplicate imports.
|
|
18
|
+
*/
|
|
19
|
+
/**
 * Lazy-load and cache Vercel's `waitUntil` function.
 *
 * - On Vercel (`process.env.VERCEL` set): dynamically imports `@vercel/functions`
 *   and returns `waitUntil`, which extends the serverless function lifetime
 *   past the HTTP response so background work can complete.
 * - Outside Vercel: resolves to `undefined` — callers fall back to normal
 *   fire-and-forget execution on the event loop.
 * - Import failure: logs a warning and resolves to `undefined`.
 * - The in-flight promise is cached, so concurrent callers share one import.
 */
async function getWaitUntil() {
  // ??= assigns only on the first call; _importPromise is otherwise a Promise.
  _importPromise ??= (async () => {
    if (!process.env.VERCEL) return undefined;
    try {
      const vercelFunctions = await import("@vercel/functions");
      return vercelFunctions.waitUntil;
    } catch (error) {
      logger.warn({ error }, "Failed to import @vercel/functions, waitUntil unavailable");
      return undefined;
    }
  })();
  return _importPromise;
}
|
|
32
|
+
/**
|
|
33
|
+
* Reset internal cache. Exposed only for testing.
|
|
34
|
+
*/
|
|
35
|
+
/** Test-only escape hatch: clears the cached import so the next call re-resolves. */
function _resetWaitUntilCache() {
  _importPromise = undefined;
}
|
|
38
|
+
|
|
39
|
+
//#endregion
|
|
40
|
+
export { _resetWaitUntilCache, getWaitUntil };
|