@voltagent/core 2.3.2 → 2.3.3

package/dist/index.d.mts CHANGED
@@ -8428,6 +8428,7 @@ declare class Agent {
     private resetOperationAttemptState;
     private getConversationBuffer;
     private getMemoryPersistQueue;
+    private ensureStreamingResponseMessageId;
     private flushPendingMessagesOnError;
     /**
      * Get contextual logger with parent tracking
@@ -8469,6 +8470,7 @@ declare class Agent {
      * Prepare messages with system prompt and memory
      */
     private prepareMessages;
+    private validateIncomingUIMessages;
     /**
      * Get system message with dynamic instructions and retriever context
      */
package/dist/index.d.ts CHANGED
@@ -8428,6 +8428,7 @@ declare class Agent {
     private resetOperationAttemptState;
     private getConversationBuffer;
     private getMemoryPersistQueue;
+    private ensureStreamingResponseMessageId;
     private flushPendingMessagesOnError;
     /**
      * Get contextual logger with parent tracking
@@ -8469,6 +8470,7 @@ declare class Agent {
      * Prepare messages with system prompt and memory
      */
     private prepareMessages;
+    private validateIncomingUIMessages;
     /**
      * Get system message with dynamic instructions and retriever context
      */
package/dist/index.js CHANGED
@@ -19184,6 +19184,32 @@ var TOOL_ROUTING_CONTEXT_KEY = Symbol("toolRoutingConfig");
 var TOOL_ROUTING_SEARCHED_TOOLS_CONTEXT_KEY = Symbol("toolRoutingSearchedTools");
 
 // src/agent/conversation-buffer.ts
+var extractOpenAIItemId = /* @__PURE__ */ __name((metadata) => {
+  if (!metadata || typeof metadata !== "object") {
+    return "";
+  }
+  const openai = metadata.openai;
+  if (!openai || typeof openai !== "object") {
+    return "";
+  }
+  const openaiRecord = openai;
+  const itemId = typeof openaiRecord.itemId === "string" ? openaiRecord.itemId.trim() : "";
+  if (itemId) {
+    return itemId;
+  }
+  const traceId = typeof openaiRecord.reasoning_trace_id === "string" ? openaiRecord.reasoning_trace_id.trim() : "";
+  if (traceId) {
+    return traceId;
+  }
+  const reasoning = openaiRecord.reasoning;
+  if (reasoning && typeof reasoning === "object") {
+    const reasoningId = typeof reasoning.id === "string" ? reasoning.id.trim() : "";
+    if (reasoningId) {
+      return reasoningId;
+    }
+  }
+  return "";
+}, "extractOpenAIItemId");
 var ConversationBuffer = class {
   constructor(initialMessages, logger) {
     this.logger = logger;
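The new extractOpenAIItemId helper gives a reasoning part a stable identity by probing provider metadata in a fixed order: openai.itemId first, then openai.reasoning_trace_id, then openai.reasoning.id. A minimal TypeScript sketch of that lookup order; the sample metadata shapes below are illustrative assumptions, not captured provider output:

type Meta = { openai?: { itemId?: string; reasoning_trace_id?: string; reasoning?: { id?: string } } };

// Same fallback chain as the bundled helper above.
const resolveId = (m?: Meta): string =>
  m?.openai?.itemId?.trim() ||
  m?.openai?.reasoning_trace_id?.trim() ||
  m?.openai?.reasoning?.id?.trim() ||
  "";

resolveId({ openai: { itemId: "rs_abc" } });            // "rs_abc"
resolveId({ openai: { reasoning: { id: "rs_def" } } }); // "rs_def"
resolveId({});                                          // "" (no OpenAI metadata)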
@@ -19256,13 +19282,21 @@ var ConversationBuffer = class {
   getAllMessages() {
     return this.messages.map((message) => this.cloneMessage(message));
   }
-  addMetadataToLastAssistantMessage(metadata) {
+  addMetadataToLastAssistantMessage(metadata, options) {
     if (!metadata || Object.keys(metadata).length === 0) {
-      return;
+      return false;
     }
-    const lastAssistantIndex = this.findLastAssistantIndex();
+    let lastAssistantIndex = this.findLastAssistantIndex({
+      pendingOnly: options?.requirePending
+    });
     if (lastAssistantIndex === -1) {
-      return;
+      if (options?.requirePending) {
+        return false;
+      }
+      lastAssistantIndex = this.findLastAssistantIndex();
+    }
+    if (lastAssistantIndex === -1) {
+      return false;
     }
     const target = this.messages[lastAssistantIndex];
     const existing = typeof target.metadata === "object" && target.metadata !== null ? target.metadata : {};
@@ -19271,6 +19305,7 @@ var ConversationBuffer = class {
       ...metadata
     };
     this.pendingMessageIds.add(target.id);
+    return true;
   }
   appendExistingMessage(message, options = { markAsSaved: true }) {
     const hydrated = this.cloneMessage(message);
@@ -19411,9 +19446,12 @@ var ConversationBuffer = class {
       }
     }
   }
-  findLastAssistantIndex() {
+  findLastAssistantIndex(options) {
     for (let i = this.messages.length - 1; i >= 0; i--) {
       if (this.messages[i].role === "assistant") {
+        if (options?.pendingOnly && !this.pendingMessageIds.has(this.messages[i].id)) {
+          continue;
+        }
         return i;
       }
     }
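Taken together, the two changes above thread a pending-only constraint through the metadata lookup: with requirePending, the buffer first targets the most recent assistant message still awaiting persistence, falls back to any assistant message otherwise, and returns a boolean so callers know whether anything was tagged. A self-contained sketch of the selection logic; the simplified message and buffer shapes are assumptions:

interface Msg { id: string; role: "user" | "assistant"; metadata?: Record<string, unknown> }

class MiniBuffer {
  constructor(private messages: Msg[], private pending = new Set<string>()) {}

  private findLastAssistant(pendingOnly?: boolean): number {
    for (let i = this.messages.length - 1; i >= 0; i--) {
      const m = this.messages[i];
      if (m.role !== "assistant") continue;
      if (pendingOnly && !this.pending.has(m.id)) continue; // skip already-persisted messages
      return i;
    }
    return -1;
  }

  addMetadata(meta: Record<string, unknown>, opts?: { requirePending?: boolean }): boolean {
    let idx = this.findLastAssistant(opts?.requirePending);
    if (idx === -1) {
      if (opts?.requirePending) return false; // caller may re-ingest response messages and retry
      idx = this.findLastAssistant();
    }
    if (idx === -1) return false;
    this.messages[idx].metadata = { ...this.messages[idx].metadata, ...meta };
    return true;
  }
}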
@@ -19541,8 +19579,12 @@ var ConversationBuffer = class {
     switch (part.type) {
       case "text":
         return `text:${part.text}:${JSON.stringify(part.providerMetadata ?? null)}`;
-      case "reasoning":
-        return `reasoning:${part.text}`;
+      case "reasoning": {
+        const reasoningText = typeof part.text === "string" ? part.text : "";
+        const reasoningId = typeof part.reasoningId === "string" ? part.reasoningId.trim() : "";
+        const openaiItemId = extractOpenAIItemId(part.providerMetadata);
+        return `reasoning:${reasoningText}:${reasoningId}:${openaiItemId}`;
+      }
       case "step-start":
         return "step-start";
       default: {
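With the IDs folded into the key, two reasoning parts that share text but carry different provider item IDs no longer collapse into one deduplication bucket. A small illustration with hypothetical IDs:

const keyFor = (text: string, reasoningId = "", openaiItemId = "") =>
  `reasoning:${text}:${reasoningId}:${openaiItemId}`;

keyFor("weighing options", "", "rs_001"); // "reasoning:weighing options::rs_001"
keyFor("weighing options", "", "rs_002"); // different key, so the second part survives dedup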
@@ -19749,6 +19791,13 @@ var safeClone = /* @__PURE__ */ __name((value) => {
     return { ...value };
   }
 }, "safeClone");
+var compactObject = /* @__PURE__ */ __name((value) => {
+  const entries = Object.entries(value).filter(([, entryValue]) => entryValue !== void 0);
+  if (entries.length === Object.keys(value).length) {
+    return value;
+  }
+  return Object.fromEntries(entries);
+}, "compactObject");
 var normalizeText = /* @__PURE__ */ __name((part) => {
   const text = typeof part.text === "string" ? part.text : "";
   if (!text.trim()) {
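compactObject removes entries whose value is undefined while returning the original reference untouched when nothing needs removing, so unchanged metadata keeps object identity. A usage sketch (re-typed standalone version of the helper):

const compact = <T extends Record<string, unknown>>(value: T): Record<string, unknown> => {
  const entries = Object.entries(value).filter(([, v]) => v !== undefined);
  return entries.length === Object.keys(value).length ? value : Object.fromEntries(entries);
};

const a = { keep: 1, drop: undefined };
const b = { keep: 1 };
compact(a);       // { keep: 1 }, a new object with "drop" removed
compact(b) === b; // true, untouched input is returned as-is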
@@ -20153,10 +20202,35 @@ var stripReasoningLinkedProviderMetadata = /* @__PURE__ */ __name((parts) => {
     }
     const cloned = { ...metadata };
     const openaiMetadata = cloned.openai;
-    if (!isObject(openaiMetadata) || !("itemId" in openaiMetadata || "reasoning_trace_id" in openaiMetadata || "reasoning" in openaiMetadata && isObject(openaiMetadata.reasoning))) {
+    if (!isObject(openaiMetadata)) {
       return metadata;
     }
-    const { openai, ...cleanedMetadata } = cloned;
+    const openaiClone = { ...openaiMetadata };
+    let changed = false;
+    if (typeof openaiClone.itemId === "string") {
+      const itemId = openaiClone.itemId.trim();
+      if (itemId && isOpenAIReasoningId(itemId)) {
+        openaiClone.itemId = void 0;
+        changed = true;
+      }
+    }
+    if (typeof openaiClone.reasoning_trace_id === "string") {
+      openaiClone.reasoning_trace_id = void 0;
+      changed = true;
+    }
+    if ("reasoning" in openaiClone) {
+      openaiClone.reasoning = void 0;
+      changed = true;
+    }
+    if (!changed) {
+      return metadata;
+    }
+    const cleanedOpenai = compactObject(openaiClone);
+    const nextMetadata = {
+      ...cloned,
+      openai: Object.keys(cleanedOpenai).length > 0 ? cleanedOpenai : void 0
+    };
+    const cleanedMetadata = compactObject(nextMetadata);
     return Object.keys(cleanedMetadata).length > 0 ? cleanedMetadata : void 0;
   }, "stripMetadata");
   let mutated = false;
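The rewritten stripMetadata is now surgical: rather than deleting the whole openai block whenever any reasoning-linked key is present, it clears only itemId (and only when isOpenAIReasoningId says it looks like a reasoning ID), reasoning_trace_id, and reasoning, then compacts what is left, so unrelated OpenAI metadata survives. A hypothetical before/after; the cacheControl key is an invented stand-in for unrelated metadata:

const before = {
  openai: {
    itemId: "rs_123",                   // reasoning-linked, cleared
    reasoning_trace_id: "trace_9",      // cleared
    cacheControl: { type: "ephemeral" } // unrelated, now survives
  }
};
// 2.3.2 dropped the entire openai block in this case.
// 2.3.3 keeps the remainder:
const after = { openai: { cacheControl: { type: "ephemeral" } } };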
@@ -21944,6 +22018,7 @@ var QUEUE_CONTEXT_KEY = Symbol("memoryPersistQueue");
 var STEP_PERSIST_COUNT_KEY = Symbol("persistedStepCount");
 var ABORT_LISTENER_ATTACHED_KEY = Symbol("abortListenerAttached");
 var MIDDLEWARE_RETRY_FEEDBACK_KEY = Symbol("middlewareRetryFeedback");
+var STREAM_RESPONSE_MESSAGE_ID_KEY = Symbol("streamResponseMessageId");
 var DEFAULT_FEEDBACK_KEY = "satisfaction";
 var DEFAULT_CONVERSATION_TITLE_PROMPT = [
   "You generate concise titles for new conversations.",
@@ -21954,6 +22029,67 @@ var DEFAULT_CONVERSATION_TITLE_MAX_OUTPUT_TOKENS = 32;
 var DEFAULT_CONVERSATION_TITLE_MAX_CHARS = 80;
 var CONVERSATION_TITLE_INPUT_MAX_CHARS = 2e3;
 var DEFAULT_TOOL_SEARCH_TOP_K = 1;
+var isRecord2 = /* @__PURE__ */ __name((value) => typeof value === "object" && value !== null, "isRecord");
+var hasNonEmptyString = /* @__PURE__ */ __name((value) => typeof value === "string" && value.trim().length > 0, "hasNonEmptyString");
+var isAssistantContentPart = /* @__PURE__ */ __name((value) => {
+  if (!isRecord2(value)) {
+    return false;
+  }
+  switch (value.type) {
+    case "text":
+    case "reasoning":
+      return typeof value.text === "string";
+    case "tool-call":
+    case "tool-result":
+      return hasNonEmptyString(value.toolCallId) && hasNonEmptyString(value.toolName);
+    case "tool-approval-request":
+      return hasNonEmptyString(value.toolCallId) && hasNonEmptyString(value.approvalId);
+    case "image":
+      return "image" in value && value.image != null;
+    case "file":
+      return hasNonEmptyString(value.mediaType) && "data" in value && value.data != null;
+    default:
+      return false;
+  }
+}, "isAssistantContentPart");
+var isToolContentPart = /* @__PURE__ */ __name((value) => {
+  if (!isRecord2(value)) {
+    return false;
+  }
+  switch (value.type) {
+    case "tool-result":
+      return hasNonEmptyString(value.toolCallId) && hasNonEmptyString(value.toolName);
+    case "tool-approval-response":
+      return hasNonEmptyString(value.approvalId) && typeof value.approved === "boolean";
+    default:
+      return false;
+  }
+}, "isToolContentPart");
+var isResponseMessage = /* @__PURE__ */ __name((value) => {
+  if (!isRecord2(value)) {
+    return false;
+  }
+  if (value.role === "assistant") {
+    if (typeof value.content === "string") {
+      return true;
+    }
+    if (Array.isArray(value.content)) {
+      return value.content.every(isAssistantContentPart);
+    }
+    return false;
+  }
+  if (value.role === "tool") {
+    return Array.isArray(value.content) && value.content.every(isToolContentPart);
+  }
+  return false;
+}, "isResponseMessage");
+var filterResponseMessages = /* @__PURE__ */ __name((messages) => {
+  if (!Array.isArray(messages)) {
+    return void 0;
+  }
+  const filtered = messages.filter(isResponseMessage);
+  return filtered.length > 0 ? filtered : void 0;
+}, "filterResponseMessages");
 var searchToolsParameters = import_zod3.z.object({
   query: import_zod3.z.string().describe("User request or query to search tools for."),
   topK: import_zod3.z.number().int().positive().optional().describe("Maximum number of tools to return.")
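filterResponseMessages acts as a gate in front of ConversationBuffer.addModelMessages: only structurally sound assistant and tool messages pass, and the function returns undefined rather than an empty array so callers can test responseMessages?.length. A usage sketch with made-up message shapes:

const candidates: unknown[] = [
  { role: "assistant", content: "All done." },                    // kept: string content
  { role: "assistant", content: [{ type: "text", text: "hi" }] }, // kept: valid parts
  { role: "assistant", content: [{ type: "tool-call" }] },        // dropped: missing toolCallId/toolName
  { role: "system", content: "not a response message" }           // dropped: wrong role
];
// filterResponseMessages(candidates) would return the first two entries;
// filterResponseMessages([]) would return undefined.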
@@ -22449,7 +22585,20 @@ var Agent = class {
       feedbackMetadata = await feedbackPromise;
     }
     if (feedbackMetadata) {
-      buffer.addMetadataToLastAssistantMessage({ feedback: feedbackMetadata });
+      const metadataApplied = buffer.addMetadataToLastAssistantMessage(
+        { feedback: feedbackMetadata },
+        { requirePending: true }
+      );
+      if (!metadataApplied) {
+        const responseMessages = filterResponseMessages(result.response?.messages);
+        if (responseMessages?.length) {
+          buffer.addModelMessages(responseMessages, "response");
+          buffer.addMetadataToLastAssistantMessage(
+            { feedback: feedbackMetadata },
+            { requirePending: true }
+          );
+        }
+      }
     }
     if (shouldDeferPersist) {
       await persistQueue.flush(buffer, oc);
@@ -22569,6 +22718,7 @@ var Agent = class {
     let feedbackResolved = false;
     let feedbackFinalizeRequested = false;
     let feedbackApplied = false;
+    let latestResponseMessages;
     const resolveFeedbackDeferred = /* @__PURE__ */ __name((value) => {
       if (!feedbackDeferred || feedbackResolved) {
         return;
@@ -22590,7 +22740,17 @@ var Agent = class {
       return;
     }
     feedbackApplied = true;
-    buffer.addMetadataToLastAssistantMessage({ feedback: metadata });
+    const metadataApplied = buffer.addMetadataToLastAssistantMessage(
+      { feedback: metadata },
+      { requirePending: true }
+    );
+    if (!metadataApplied && latestResponseMessages?.length) {
+      buffer.addModelMessages(latestResponseMessages, "response");
+      buffer.addMetadataToLastAssistantMessage(
+        { feedback: metadata },
+        { requirePending: true }
+      );
+    }
     if (shouldDeferPersist) {
       void persistQueue.flush(buffer, oc).catch((error) => {
         oc.logger?.debug?.("Failed to persist feedback metadata", { error });
@@ -22718,6 +22878,7 @@ var Agent = class {
     } = options || {};
     const forcedToolChoice = oc.systemContext.get(FORCED_TOOL_CHOICE_CONTEXT_KEY);
     applyForcedToolChoice(aiSDKOptions, forcedToolChoice);
+    const responseMessageId = await this.ensureStreamingResponseMessageId(oc, buffer);
     const guardrailStreamingEnabled = guardrailSet.output.length > 0;
     let guardrailPipeline = null;
     let sanitizedTextPromise;
@@ -22842,6 +23003,7 @@ var Agent = class {
       );
     }, "onError"),
     onFinish: /* @__PURE__ */ __name(async (finalResult) => {
+      latestResponseMessages = filterResponseMessages(finalResult.response?.messages);
       const providerUsage = finalResult.usage ? await Promise.resolve(finalResult.usage) : void 0;
       const usageForFinish = resolveFinishUsage({
         providerMetadata: finalResult.providerMetadata,
@@ -23001,6 +23163,15 @@ var Agent = class {
       );
     }
     const agent = this;
+    const applyResponseMessageId = /* @__PURE__ */ __name((streamOptions) => {
+      if (!responseMessageId) {
+        return streamOptions;
+      }
+      return {
+        ...streamOptions ?? {},
+        generateMessageId: /* @__PURE__ */ __name(() => responseMessageId, "generateMessageId")
+      };
+    }, "applyResponseMessageId");
     const createBaseFullStream = /* @__PURE__ */ __name(() => {
       const wrapWithAbortHandling = /* @__PURE__ */ __name(async function* (baseStream) {
         const iterator = baseStream[Symbol.asyncIterator]();
@@ -23115,10 +23286,11 @@ var Agent = class {
       return guardrailPipeline.createUIStream(streamOptions);
     }, "getGuardrailAwareUIStream");
     const createMergedUIStream = /* @__PURE__ */ __name((streamOptions) => {
+      const resolvedStreamOptions = applyResponseMessageId(streamOptions);
       const mergedStream = (0, import_ai7.createUIMessageStream)({
         execute: /* @__PURE__ */ __name(async ({ writer }) => {
           oc.systemContext.set("uiStreamWriter", writer);
-          writer.merge(getGuardrailAwareUIStream(streamOptions));
+          writer.merge(getGuardrailAwareUIStream(resolvedStreamOptions));
         }, "execute"),
         onError: /* @__PURE__ */ __name((error) => String(error), "onError")
       });
@@ -23174,7 +23346,8 @@ var Agent = class {
       });
     }, "attachFeedbackMetadata");
     const toUIMessageStreamSanitized = /* @__PURE__ */ __name((streamOptions) => {
-      const baseStream = agent.subAgentManager.hasSubAgents() ? createMergedUIStream(streamOptions) : getGuardrailAwareUIStream(streamOptions);
+      const resolvedStreamOptions = applyResponseMessageId(streamOptions);
+      const baseStream = agent.subAgentManager.hasSubAgents() ? createMergedUIStream(resolvedStreamOptions) : getGuardrailAwareUIStream(resolvedStreamOptions);
       return attachFeedbackMetadata(baseStream);
     }, "toUIMessageStreamSanitized");
     const toUIMessageStreamResponseSanitized = /* @__PURE__ */ __name((options2) => {
@@ -24085,6 +24258,7 @@ Metadata: ${(0, import_utils28.safeStringify)(metadata)}`;
     oc.systemContext.delete(STEP_PERSIST_COUNT_KEY);
     oc.systemContext.delete("conversationSteps");
     oc.systemContext.delete("bailedResult");
+    oc.systemContext.delete(STREAM_RESPONSE_MESSAGE_ID_KEY);
     oc.conversationSteps = [];
     oc.output = void 0;
   }
@@ -24104,6 +24278,21 @@ Metadata: ${(0, import_utils28.safeStringify)(metadata)}`;
     }
     return queue;
   }
+  async ensureStreamingResponseMessageId(oc, buffer) {
+    const existing = oc.systemContext.get(STREAM_RESPONSE_MESSAGE_ID_KEY);
+    if (typeof existing === "string" && existing.trim().length > 0) {
+      return existing;
+    }
+    const messageId = (0, import_ai7.generateId)();
+    const placeholder = {
+      id: messageId,
+      role: "assistant",
+      parts: []
+    };
+    buffer.ingestUIMessages([placeholder], false);
+    oc.systemContext.set(STREAM_RESPONSE_MESSAGE_ID_KEY, messageId);
+    return messageId;
+  }
   async flushPendingMessagesOnError(oc) {
     const buffer = this.getConversationBuffer(oc);
     const queue = this.getMemoryPersistQueue(oc);
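ensureStreamingResponseMessageId makes the streamed assistant message ID deterministic per operation: the first call mints an ID via generateId, seeds an empty assistant placeholder into the buffer, and caches the ID in systemContext; subsequent calls in the same operation return the cached value, and the same ID is handed to the UI stream through generateMessageId (see applyResponseMessageId above) so the persisted and streamed messages line up. A minimal sketch of the idempotent pattern; the bare Map standing in for systemContext is an assumption:

import { generateId } from "ai";

const KEY = Symbol("streamResponseMessageId");

// Returns the same ID for repeated calls within one operation context.
function ensureMessageId(ctx: Map<symbol, unknown>): string {
  const existing = ctx.get(KEY);
  if (typeof existing === "string" && existing.trim().length > 0) return existing;
  const id = generateId();
  ctx.set(KEY, id);
  return id;
}

const ctx = new Map<symbol, unknown>();
console.log(ensureMessageId(ctx) === ensureMessageId(ctx)); // true, stable across retries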
@@ -24509,8 +24698,9 @@ Metadata: ${(0, import_utils28.safeStringify)(metadata)}`;
    */
   // biome-ignore lint/complexity/noExcessiveCognitiveComplexity: legacy message preparation pipeline
   async prepareMessages(input, oc, options, buffer) {
+    const resolvedInput = await this.validateIncomingUIMessages(input, oc);
     const messages = [];
-    const systemMessage = await this.getSystemMessage(input, oc, options);
+    const systemMessage = await this.getSystemMessage(resolvedInput, oc, options);
     if (systemMessage) {
       const systemMessagesAsUI = (() => {
         if (typeof systemMessage === "string") {
@@ -24555,7 +24745,7 @@ Metadata: ${(0, import_utils28.safeStringify)(metadata)}`;
     const canIUseMemory = options?.userId && options.conversationId;
     if (canIUseMemory) {
       const useSemanticSearch = options?.semanticMemory?.enabled ?? this.hasSemanticSearchSupport();
-      const currentQuery = useSemanticSearch ? this.extractUserQuery(input) : void 0;
+      const currentQuery = useSemanticSearch ? this.extractUserQuery(resolvedInput) : void 0;
       const semanticLimit = options?.semanticMemory?.semanticLimit ?? 5;
       const semanticThreshold = options?.semanticMemory?.semanticThreshold ?? 0.7;
       const mergeStrategy = options?.semanticMemory?.mergeStrategy ?? "append";
@@ -24563,7 +24753,7 @@ Metadata: ${(0, import_utils28.safeStringify)(metadata)}`;
       const traceContext = oc.traceContext;
       if (traceContext) {
         const spanInput = {
-          query: isSemanticSearch ? currentQuery : input,
+          query: isSemanticSearch ? currentQuery : resolvedInput,
           userId: options?.userId,
           conversationId: options?.conversationId
         };
@@ -24601,7 +24791,7 @@ Metadata: ${(0, import_utils28.safeStringify)(metadata)}`;
         buffer.ingestUIMessages(memMessages, true);
         return memMessages;
       }
-      const inputForMemory = typeof input === "string" ? input : Array.isArray(input) && input[0]?.parts ? input : convertModelMessagesToUIMessages(input);
+      const inputForMemory = typeof resolvedInput === "string" ? resolvedInput : Array.isArray(resolvedInput) && resolvedInput[0]?.parts ? resolvedInput : convertModelMessagesToUIMessages(resolvedInput);
       const result = await this.memoryManager.prepareConversationContext(
         oc,
         inputForMemory,
@@ -24626,7 +24816,7 @@ Metadata: ${(0, import_utils28.safeStringify)(metadata)}`;
       messages.push(...memoryResult);
       if (isSemanticSearch && oc.userId && oc.conversationId) {
         try {
-          const inputForMemory = typeof input === "string" ? input : Array.isArray(input) && input[0]?.parts ? input : convertModelMessagesToUIMessages(input);
+          const inputForMemory = typeof resolvedInput === "string" ? resolvedInput : Array.isArray(resolvedInput) && resolvedInput[0]?.parts ? resolvedInput : convertModelMessagesToUIMessages(resolvedInput);
           this.memoryManager.queueSaveInput(oc, inputForMemory, oc.userId, oc.conversationId);
         } catch (_e) {
         }
@@ -24639,16 +24829,16 @@ Metadata: ${(0, import_utils28.safeStringify)(metadata)}`;
         }
       }
     }
-    if (typeof input === "string") {
+    if (typeof resolvedInput === "string") {
       messages.push({
         id: randomUUID(),
         role: "user",
-        parts: [{ type: "text", text: input }]
+        parts: [{ type: "text", text: resolvedInput }]
       });
-    } else if (Array.isArray(input)) {
-      const first = input[0];
+    } else if (Array.isArray(resolvedInput)) {
+      const first = resolvedInput[0];
       if (first && Array.isArray(first.parts)) {
-        const inputMessages = input;
+        const inputMessages = resolvedInput;
         const idsToReplace = new Set(
           inputMessages.map((message) => message.id).filter((id) => typeof id === "string" && id.trim().length > 0)
         );
@@ -24661,7 +24851,7 @@ Metadata: ${(0, import_utils28.safeStringify)(metadata)}`;
        }
        messages.push(...inputMessages);
      } else {
-       messages.push(...convertModelMessagesToUIMessages(input));
+       messages.push(...convertModelMessagesToUIMessages(resolvedInput));
      }
    }
    const sanitizedMessages = sanitizeMessagesForModel(messages);
@@ -24681,9 +24871,25 @@ Metadata: ${(0, import_utils28.safeStringify)(metadata)}`;
        agent: this,
        context: oc
      });
-     return result?.messages || summarizedMessages;
+     const preparedMessages = result?.messages || summarizedMessages;
+     return await (0, import_ai7.validateUIMessages)({ messages: preparedMessages });
+   }
+   return await (0, import_ai7.validateUIMessages)({ messages: summarizedMessages });
+ }
+ async validateIncomingUIMessages(input, oc) {
+   if (!Array.isArray(input) || input.length === 0) {
+     return input;
+   }
+   const first = input[0];
+   if (!first || !Array.isArray(first.parts)) {
+     return input;
+   }
+   try {
+     return await (0, import_ai7.validateUIMessages)({ messages: input });
+   } catch (error) {
+     oc.logger?.error?.("Invalid UI messages", { error });
+     throw error;
    }
-   return summarizedMessages;
  }
  /**
   * Get system message with dynamic instructions and retriever context
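Both exits of prepareMessages and the new validateIncomingUIMessages guard now run messages through validateUIMessages from the ai package, which rejects malformed UIMessage arrays before they reach the model. A sketch of the failure mode it catches; the malformed part below is illustrative:

import { validateUIMessages, type UIMessage } from "ai";

const suspect: unknown[] = [
  { id: "1", role: "user", parts: [{ type: "text", text: "hello" }] },
  { id: "2", role: "user", parts: [{ type: "text" }] } // missing `text`, fails validation
];

try {
  const ok: UIMessage[] = await validateUIMessages({ messages: suspect });
  console.log("validated", ok.length);
} catch (error) {
  // 2.3.3 logs and rethrows instead of passing the bad message along.
  console.error("Invalid UI messages", error);
}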
@@ -26169,7 +26375,7 @@ ${retrieverContext}`;
       }
     }
   }
-  const responseMessages = event.response?.messages;
+  const responseMessages = filterResponseMessages(event.response?.messages);
   if (responseMessages && responseMessages.length > 0) {
     buffer.addModelMessages(responseMessages, "response");
   }