@inkeep/agents-run-api 0.0.0-dev-20260106194315 → 0.0.0-dev-20260106221934

This diff shows the changes between publicly released versions of this package as published to one of the supported registries. The information it contains is provided for informational purposes only and reflects the package versions exactly as they appear in their respective public registries.
@@ -5,7 +5,6 @@ import { toolSessionManager } from "./ToolSessionManager.js";
5
5
  import { getCompressionConfigForModel } from "../utils/model-context-utils.js";
6
6
  import { setSpanWithError as setSpanWithError$1, tracer } from "../utils/tracer.js";
7
7
  import { getModelAwareCompressionConfig } from "../services/BaseCompressor.js";
8
- import "../services/ConversationCompressor.js";
9
8
  import { createDefaultConversationHistoryConfig, getConversationHistoryWithCompression } from "../data/conversations.js";
10
9
  import { getStreamHelper } from "../utils/stream-registry.js";
11
10
  import { agentSessionManager } from "../services/AgentSession.js";
@@ -25,7 +24,7 @@ import { Phase2Config } from "./versions/v1/Phase2Config.js";
25
24
  import { z } from "@hono/zod-openapi";
26
25
  import { ContextResolver, CredentialStuffer, MCPServerType, MCPTransportType, McpClient, ModelFactory, TemplateEngine, agentHasArtifactComponents, createMessage, generateId, getContextConfigById, getCredentialReference, getFullAgentDefinition, getFunction, getFunctionToolsForSubAgent, getLedgerArtifacts, getToolsForAgent, getUserScopedCredentialReference, listTaskIdsByContextId, parseEmbeddedJson } from "@inkeep/agents-core";
27
26
  import { SpanStatusCode, trace } from "@opentelemetry/api";
28
- import { generateObject, generateText, streamObject, streamText, tool } from "ai";
27
+ import { Output, generateText, streamText, tool } from "ai";
29
28
 
30
29
  //#region src/agents/Agent.ts
31
30
  /**
@@ -1808,15 +1807,15 @@ ${output}${structureHintsFormatted}`;
1808
1807
  return phase2Messages;
1809
1808
  }
1810
1809
  async executeStreamingPhase2(structuredModelSettings, phase2Messages, dataComponentsSchema, phase2TimeoutMs, sessionId, contextId, response) {
1811
- const streamResult = streamObject({
1810
+ const streamResult = streamText({
1812
1811
  ...structuredModelSettings,
1813
1812
  messages: phase2Messages,
1814
- schema: z.object({ dataComponents: z.array(dataComponentsSchema) }),
1813
+ output: Output.object({ schema: z.object({ dataComponents: z.array(dataComponentsSchema) }) }),
1815
1814
  experimental_telemetry: this.buildTelemetryConfig("structured_generation"),
1816
1815
  abortSignal: AbortSignal.timeout(phase2TimeoutMs)
1817
1816
  });
1818
1817
  const parser = this.setupStreamParser(sessionId, contextId);
1819
- for await (const delta of streamResult.partialObjectStream) if (delta) await parser.processObjectDelta(delta);
1818
+ for await (const delta of streamResult.partialOutputStream) if (delta) await parser.processObjectDelta(delta);
1820
1819
  await parser.finalize();
1821
1820
  const structuredResponse = await streamResult;
1822
1821
  const collectedParts = parser.getCollectedParts();
@@ -1827,22 +1826,22 @@ ${output}${structureHintsFormatted}`;
1827
1826
  })) };
1828
1827
  return {
1829
1828
  ...response,
1830
- object: structuredResponse.object,
1831
- textResponse: JSON.stringify(structuredResponse.object, null, 2)
1829
+ object: structuredResponse.output,
1830
+ textResponse: JSON.stringify(structuredResponse.output, null, 2)
1832
1831
  };
1833
1832
  }
1834
1833
  async executeNonStreamingPhase2(structuredModelSettings, phase2Messages, dataComponentsSchema, phase2TimeoutMs, response) {
1835
- const structuredResponse = await generateObject(withJsonPostProcessing({
1834
+ const structuredResponse = await generateText(withJsonPostProcessing({
1836
1835
  ...structuredModelSettings,
1837
1836
  messages: phase2Messages,
1838
- schema: z.object({ dataComponents: z.array(dataComponentsSchema) }),
1837
+ output: Output.object({ schema: z.object({ dataComponents: z.array(dataComponentsSchema) }) }),
1839
1838
  experimental_telemetry: this.buildTelemetryConfig("structured_generation"),
1840
1839
  abortSignal: AbortSignal.timeout(phase2TimeoutMs)
1841
1840
  }));
1842
1841
  return {
1843
1842
  ...response,
1844
- object: structuredResponse.object,
1845
- textResponse: JSON.stringify(structuredResponse.object, null, 2)
1843
+ object: structuredResponse.output,
1844
+ textResponse: JSON.stringify(structuredResponse.output, null, 2)
1846
1845
  };
1847
1846
  }
1848
1847
  async formatFinalResponse(response, textResponse, sessionId, contextId) {
@@ -11,7 +11,7 @@ import { ArtifactParser } from "./ArtifactParser.js";
11
11
  import { z } from "@hono/zod-openapi";
12
12
  import { CONVERSATION_HISTORY_DEFAULT_LIMIT, CONVERSATION_HISTORY_MAX_OUTPUT_TOKENS_DEFAULT, ModelFactory, getLedgerArtifacts, getSubAgentById } from "@inkeep/agents-core";
13
13
  import { SpanStatusCode } from "@opentelemetry/api";
14
- import { generateObject } from "ai";
14
+ import { Output, generateText } from "ai";
15
15
 
16
16
  //#region src/services/AgentSession.ts
17
17
  const logger = getLogger("AgentSession");
@@ -577,10 +577,10 @@ ${this.statusUpdateState?.config.prompt?.trim() || ""}`;
577
577
  modelToUse = this.statusUpdateState.baseModel;
578
578
  }
579
579
  if (!modelToUse) throw new Error("No model configuration available");
580
- const { object } = await generateObject({
580
+ const { output: object } = await generateText({
581
581
  model: ModelFactory.createModel(modelToUse),
582
582
  prompt,
583
- schema: selectionSchema,
583
+ output: Output.object({ schema: selectionSchema }),
584
584
  experimental_telemetry: {
585
585
  isEnabled: true,
586
586
  functionId: `structured_update_${this.sessionId}`,
@@ -875,7 +875,7 @@ Make the name extremely specific to what this tool call actually returned, not g
875
875
  name: z.string().describe("Concise, descriptive name for the artifact"),
876
876
  description: z.string().describe("Brief description of the artifact's relevance to the user's question")
877
877
  });
878
- const { object } = await tracer.startActiveSpan("agent_session.generate_artifact_metadata", { attributes: {
878
+ const { output: object } = await tracer.startActiveSpan("agent_session.generate_artifact_metadata", { attributes: {
879
879
  "llm.model": this.statusUpdateState?.summarizerModel?.model,
880
880
  "llm.operation": "generate_object",
881
881
  "artifact.id": artifactData.artifactId,
@@ -887,10 +887,10 @@ Make the name extremely specific to what this tool call actually returned, not g
887
887
  const maxRetries = 3;
888
888
  let lastError = null;
889
889
  for (let attempt = 1; attempt <= maxRetries; attempt++) try {
890
- const result$1 = await generateObject({
890
+ const result$1 = await generateText({
891
891
  model,
892
892
  prompt,
893
- schema,
893
+ output: Output.object({ schema }),
894
894
  experimental_telemetry: {
895
895
  isEnabled: true,
896
896
  functionId: `artifact_processing_${artifactData.artifactId}`,
@@ -906,12 +906,12 @@ Make the name extremely specific to what this tool call actually returned, not g
906
906
  generationSpan.setAttributes({
907
907
  "artifact.id": artifactData.artifactId,
908
908
  "artifact.type": artifactData.artifactType,
909
- "artifact.name": result$1.object.name,
910
- "artifact.description": result$1.object.description,
909
+ "artifact.name": result$1.output.name,
910
+ "artifact.description": result$1.output.description,
911
911
  "artifact.summary": JSON.stringify(artifactData.summaryData, null, 2),
912
912
  "artifact.full": JSON.stringify(artifactData.data || artifactData.summaryData, null, 2),
913
- "generation.name_length": result$1.object.name.length,
914
- "generation.description_length": result$1.object.description.length,
913
+ "generation.name_length": result$1.output.name.length,
914
+ "generation.description_length": result$1.output.description.length,
915
915
  "generation.attempts": attempt
916
916
  });
917
917
  generationSpan.setStatus({ code: SpanStatusCode.OK });
@@ -1,6 +1,6 @@
1
1
  import { getLogger } from "../logger.js";
2
2
  import { ModelFactory } from "@inkeep/agents-core";
3
- import { generateObject } from "ai";
3
+ import { Output, generateText } from "ai";
4
4
  import { z } from "zod";
5
5
 
6
6
  //#region src/tools/distill-conversation-history-tool.ts
@@ -57,7 +57,7 @@ async function distillConversationHistory(params) {
57
57
  const { messages, conversationId, summarizerModel, toolCallToArtifactMap } = params;
58
58
  try {
59
59
  if (!summarizerModel?.model?.trim()) throw new Error("Summarizer model is required");
60
- const { object: summary } = await generateObject({
60
+ const { output: summary } = await generateText({
61
61
  model: ModelFactory.createModel(summarizerModel),
62
62
  prompt: `You are a conversation history summarization assistant. Your job is to create a comprehensive summary that can COMPLETELY REPLACE the original conversation history while preserving all essential context.
63
63
 
@@ -158,7 +158,7 @@ Create a comprehensive summary using this exact JSON schema:
158
158
  **REMEMBER**: This summary is REPLACING the entire conversation history. Include everything essential for context continuation.
159
159
 
160
160
  Return **only** valid JSON.`,
161
- schema: ConversationHistorySummarySchema
161
+ output: Output.object({ schema: ConversationHistorySummarySchema })
162
162
  });
163
163
  summary.session_id = conversationId;
164
164
  return summary;
@@ -1,6 +1,6 @@
1
1
  import { getLogger } from "../logger.js";
2
2
  import { ModelFactory } from "@inkeep/agents-core";
3
- import { generateObject, tool } from "ai";
3
+ import { Output, generateText } from "ai";
4
4
  import { z } from "zod";
5
5
 
6
6
  //#region src/tools/distill-conversation-tool.ts
@@ -36,7 +36,7 @@ async function distillConversation(params) {
36
36
  try {
37
37
  const modelToUse = summarizerModel;
38
38
  if (!modelToUse?.model?.trim()) throw new Error("Summarizer model is required");
39
- const { object: summary } = await generateObject({
39
+ const { output: summary } = await generateText({
40
40
  model: ModelFactory.createModel(modelToUse),
41
41
  prompt: `You are a conversation summarization assistant. Your job is to create or update a compact, structured summary that captures VALUABLE CONTENT and FINDINGS, not just operational details.
42
42
 
@@ -111,7 +111,7 @@ Create/update a summary using this exact JSON schema:
111
111
  **Focus on WHAT WAS LEARNED, not HOW IT WAS LEARNED**
112
112
 
113
113
  Return **only** valid JSON.`,
114
- schema: ConversationSummarySchema
114
+ output: Output.object({ schema: ConversationSummarySchema })
115
115
  });
116
116
  summary.session_id = conversationId;
117
117
  return summary;
@@ -4,7 +4,7 @@
4
4
  */
5
5
  declare function stripJsonCodeBlocks(text: string): string;
6
6
  /**
7
- * Configuration helper to add JSON post-processing to generateObject calls
7
+ * Configuration helper to add JSON post-processing to structured output generateText calls
8
8
  */
9
9
  declare function withJsonPostProcessing<T extends Record<string, any>>(config: T): T & {
10
10
  experimental_transform?: (text: string) => string;
@@ -6,7 +6,7 @@ function stripJsonCodeBlocks(text) {
6
6
  return text.trim().replace(/^```json\s*/is, "").replace(/^```\s*/s, "").replace(/\s*```$/s, "").replace(/^```json\s*([\s\S]*?)\s*```$/i, "$1").replace(/^```\s*([\s\S]*?)\s*```$/i, "$1").trim();
7
7
  }
8
8
  /**
9
- * Configuration helper to add JSON post-processing to generateObject calls
9
+ * Configuration helper to add JSON post-processing to structured output generateText calls
10
10
  */
11
11
  function withJsonPostProcessing(config) {
12
12
  return {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@inkeep/agents-run-api",
3
- "version": "0.0.0-dev-20260106194315",
3
+ "version": "0.0.0-dev-20260106221934",
4
4
  "description": "Agents Run API for Inkeep Agent Framework - handles chat, agent execution, and streaming",
5
5
  "types": "dist/index.d.ts",
6
6
  "exports": {
@@ -13,13 +13,13 @@
13
13
  "type": "module",
14
14
  "license": "SEE LICENSE IN LICENSE.md",
15
15
  "dependencies": {
16
- "@electric-sql/pglite": "^0.3.13",
17
- "@ai-sdk/anthropic": "3.0.0-beta.66",
18
- "@ai-sdk/gateway": "2.0.0-beta.68",
19
- "@ai-sdk/google": "3.0.0-beta.62",
20
- "@ai-sdk/openai": "3.0.0-beta.74",
21
- "@ai-sdk/openai-compatible": "2.0.0-beta.41",
16
+ "@ai-sdk/anthropic": "3.0.7",
17
+ "@ai-sdk/gateway": "3.0.9",
18
+ "@ai-sdk/google": "3.0.4",
19
+ "@ai-sdk/openai": "3.0.7",
20
+ "@ai-sdk/openai-compatible": "2.0.4",
22
21
  "@alcyone-labs/modelcontextprotocol-sdk": "^1.16.0",
22
+ "@electric-sql/pglite": "^0.3.13",
23
23
  "@hono/otel": "^0.4.0",
24
24
  "@hono/swagger-ui": "^0.5.1",
25
25
  "@openrouter/ai-sdk-provider": "^1.2.0",
@@ -34,14 +34,14 @@
34
34
  "@opentelemetry/sdk-trace-base": "^2.1.0",
35
35
  "@opentelemetry/semantic-conventions": "^1.37.0",
36
36
  "@vercel/sandbox": "^0.0.24",
37
- "ai": "6.0.0-beta.124",
37
+ "ai": "6.0.14",
38
38
  "ajv": "^8.17.1",
39
39
  "drizzle-orm": "^0.44.4",
40
40
  "fetch-to-node": "^2.1.0",
41
41
  "hono": "^4.10.4",
42
42
  "jmespath": "^0.16.0",
43
43
  "llm-info": "^1.0.69",
44
- "@inkeep/agents-core": "^0.0.0-dev-20260106194315"
44
+ "@inkeep/agents-core": "^0.0.0-dev-20260106221934"
45
45
  },
46
46
  "peerDependencies": {
47
47
  "@hono/zod-openapi": "^1.1.5",