langchain 1.0.5 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (216)
  1. package/CHANGELOG.md +26 -0
  2. package/README.md +1 -1
  3. package/chat_models/universal.cjs +1 -0
  4. package/chat_models/universal.d.cts +1 -0
  5. package/chat_models/universal.d.ts +1 -0
  6. package/chat_models/universal.js +1 -0
  7. package/dist/agents/ReactAgent.cjs +43 -39
  8. package/dist/agents/ReactAgent.cjs.map +1 -1
  9. package/dist/agents/ReactAgent.js +46 -42
  10. package/dist/agents/ReactAgent.js.map +1 -1
  11. package/dist/agents/index.d.cts +0 -2
  12. package/dist/agents/index.d.ts +0 -2
  13. package/dist/agents/middleware/constants.cjs +16 -0
  14. package/dist/agents/middleware/constants.cjs.map +1 -0
  15. package/dist/agents/middleware/constants.js +15 -0
  16. package/dist/agents/middleware/constants.js.map +1 -0
  17. package/dist/agents/middleware/contextEditing.cjs.map +1 -1
  18. package/dist/agents/middleware/contextEditing.d.cts +23 -7
  19. package/dist/agents/middleware/contextEditing.d.ts +23 -7
  20. package/dist/agents/middleware/contextEditing.js.map +1 -1
  21. package/dist/agents/middleware/dynamicSystemPrompt.cjs +5 -2
  22. package/dist/agents/middleware/dynamicSystemPrompt.cjs.map +1 -1
  23. package/dist/agents/middleware/dynamicSystemPrompt.d.cts +2 -1
  24. package/dist/agents/middleware/dynamicSystemPrompt.d.ts +2 -1
  25. package/dist/agents/middleware/dynamicSystemPrompt.js +4 -2
  26. package/dist/agents/middleware/dynamicSystemPrompt.js.map +1 -1
  27. package/dist/agents/middleware/error.cjs +20 -0
  28. package/dist/agents/middleware/error.cjs.map +1 -0
  29. package/dist/agents/middleware/error.js +19 -0
  30. package/dist/agents/middleware/error.js.map +1 -0
  31. package/dist/agents/middleware/index.cjs +4 -2
  32. package/dist/agents/middleware/index.d.ts +18 -0
  33. package/dist/agents/middleware/index.js +4 -2
  34. package/dist/agents/middleware/modelRetry.cjs +162 -0
  35. package/dist/agents/middleware/modelRetry.cjs.map +1 -0
  36. package/dist/agents/middleware/modelRetry.d.cts +134 -0
  37. package/dist/agents/middleware/modelRetry.d.ts +134 -0
  38. package/dist/agents/middleware/modelRetry.js +161 -0
  39. package/dist/agents/middleware/modelRetry.js.map +1 -0
  40. package/dist/agents/middleware/{promptCaching.cjs → provider/anthropic/promptCaching.cjs} +3 -3
  41. package/dist/agents/middleware/provider/anthropic/promptCaching.cjs.map +1 -0
  42. package/dist/agents/middleware/{promptCaching.d.cts → provider/anthropic/promptCaching.d.cts} +2 -2
  43. package/dist/agents/middleware/{promptCaching.d.ts → provider/anthropic/promptCaching.d.ts} +2 -2
  44. package/dist/agents/middleware/{promptCaching.js → provider/anthropic/promptCaching.js} +2 -2
  45. package/dist/agents/middleware/provider/anthropic/promptCaching.js.map +1 -0
  46. package/dist/agents/middleware/provider/openai/moderation.cjs +299 -0
  47. package/dist/agents/middleware/provider/openai/moderation.cjs.map +1 -0
  48. package/dist/agents/middleware/provider/openai/moderation.d.cts +133 -0
  49. package/dist/agents/middleware/provider/openai/moderation.d.ts +133 -0
  50. package/dist/agents/middleware/provider/openai/moderation.js +298 -0
  51. package/dist/agents/middleware/provider/openai/moderation.js.map +1 -0
  52. package/dist/agents/middleware/summarization.d.cts +0 -4
  53. package/dist/agents/middleware/summarization.d.ts +0 -4
  54. package/dist/agents/middleware/todoListMiddleware.cjs +1 -1
  55. package/dist/agents/middleware/todoListMiddleware.cjs.map +1 -1
  56. package/dist/agents/middleware/todoListMiddleware.js +1 -1
  57. package/dist/agents/middleware/todoListMiddleware.js.map +1 -1
  58. package/dist/agents/middleware/toolRetry.cjs +32 -44
  59. package/dist/agents/middleware/toolRetry.cjs.map +1 -1
  60. package/dist/agents/middleware/toolRetry.d.cts +16 -36
  61. package/dist/agents/middleware/toolRetry.d.ts +16 -36
  62. package/dist/agents/middleware/toolRetry.js +32 -44
  63. package/dist/agents/middleware/toolRetry.js.map +1 -1
  64. package/dist/agents/middleware/types.d.cts +9 -10
  65. package/dist/agents/middleware/types.d.ts +9 -10
  66. package/dist/agents/middleware/utils.cjs +23 -0
  67. package/dist/agents/middleware/utils.cjs.map +1 -1
  68. package/dist/agents/middleware/utils.d.ts +2 -0
  69. package/dist/agents/middleware/utils.js +23 -1
  70. package/dist/agents/middleware/utils.js.map +1 -1
  71. package/dist/agents/nodes/AgentNode.cjs +72 -28
  72. package/dist/agents/nodes/AgentNode.cjs.map +1 -1
  73. package/dist/agents/nodes/AgentNode.js +74 -31
  74. package/dist/agents/nodes/AgentNode.js.map +1 -1
  75. package/dist/agents/nodes/ToolNode.cjs +5 -0
  76. package/dist/agents/nodes/ToolNode.cjs.map +1 -1
  77. package/dist/agents/nodes/ToolNode.js +5 -1
  78. package/dist/agents/nodes/ToolNode.js.map +1 -1
  79. package/dist/agents/nodes/types.d.cts +39 -3
  80. package/dist/agents/nodes/types.d.ts +39 -3
  81. package/dist/agents/responses.cjs.map +1 -1
  82. package/dist/agents/responses.d.cts +2 -19
  83. package/dist/agents/responses.d.ts +2 -19
  84. package/dist/agents/responses.js.map +1 -1
  85. package/dist/agents/runtime.d.ts +1 -0
  86. package/dist/agents/tests/utils.cjs +10 -1
  87. package/dist/agents/tests/utils.cjs.map +1 -1
  88. package/dist/agents/tests/utils.js +10 -1
  89. package/dist/agents/tests/utils.js.map +1 -1
  90. package/dist/agents/types.d.cts +68 -2
  91. package/dist/agents/types.d.ts +68 -2
  92. package/dist/agents/utils.cjs +15 -12
  93. package/dist/agents/utils.cjs.map +1 -1
  94. package/dist/agents/utils.js +16 -13
  95. package/dist/agents/utils.js.map +1 -1
  96. package/dist/chat_models/universal.cjs +50 -16
  97. package/dist/chat_models/universal.cjs.map +1 -1
  98. package/dist/chat_models/universal.d.cts +19 -1
  99. package/dist/chat_models/universal.d.ts +19 -1
  100. package/dist/chat_models/universal.js +50 -16
  101. package/dist/chat_models/universal.js.map +1 -1
  102. package/dist/index.cjs +8 -2
  103. package/dist/index.d.cts +5 -3
  104. package/dist/index.d.ts +6 -3
  105. package/dist/index.js +7 -3
  106. package/dist/load/import_constants.cjs +2 -1
  107. package/dist/load/import_constants.cjs.map +1 -1
  108. package/dist/load/import_constants.js +2 -1
  109. package/dist/load/import_constants.js.map +1 -1
  110. package/dist/load/import_map.cjs +2 -19
  111. package/dist/load/import_map.cjs.map +1 -1
  112. package/dist/load/import_map.js +2 -19
  113. package/dist/load/import_map.js.map +1 -1
  114. package/hub/node.cjs +1 -0
  115. package/hub/node.d.cts +1 -0
  116. package/hub/node.d.ts +1 -0
  117. package/hub/node.js +1 -0
  118. package/hub.cjs +1 -0
  119. package/hub.d.cts +1 -0
  120. package/hub.d.ts +1 -0
  121. package/hub.js +1 -0
  122. package/load/serializable.cjs +1 -0
  123. package/load/serializable.d.cts +1 -0
  124. package/load/serializable.d.ts +1 -0
  125. package/load/serializable.js +1 -0
  126. package/load.cjs +1 -0
  127. package/load.d.cts +1 -0
  128. package/load.d.ts +1 -0
  129. package/load.js +1 -0
  130. package/package.json +65 -52
  131. package/storage/encoder_backed.cjs +1 -0
  132. package/storage/encoder_backed.d.cts +1 -0
  133. package/storage/encoder_backed.d.ts +1 -0
  134. package/storage/encoder_backed.js +1 -0
  135. package/storage/file_system.cjs +1 -0
  136. package/storage/file_system.d.cts +1 -0
  137. package/storage/file_system.d.ts +1 -0
  138. package/storage/file_system.js +1 -0
  139. package/storage/in_memory.cjs +1 -0
  140. package/storage/in_memory.d.cts +1 -0
  141. package/storage/in_memory.d.ts +1 -0
  142. package/storage/in_memory.js +1 -0
  143. package/dist/agents/ReactAgent.d.cts.map +0 -1
  144. package/dist/agents/ReactAgent.d.ts.map +0 -1
  145. package/dist/agents/constants.cjs +0 -7
  146. package/dist/agents/constants.cjs.map +0 -1
  147. package/dist/agents/constants.d.cts.map +0 -1
  148. package/dist/agents/constants.d.ts.map +0 -1
  149. package/dist/agents/constants.js +0 -6
  150. package/dist/agents/constants.js.map +0 -1
  151. package/dist/agents/errors.d.cts.map +0 -1
  152. package/dist/agents/errors.d.ts.map +0 -1
  153. package/dist/agents/index.d.cts.map +0 -1
  154. package/dist/agents/index.d.ts.map +0 -1
  155. package/dist/agents/middleware/contextEditing.d.cts.map +0 -1
  156. package/dist/agents/middleware/contextEditing.d.ts.map +0 -1
  157. package/dist/agents/middleware/dynamicSystemPrompt.d.cts.map +0 -1
  158. package/dist/agents/middleware/dynamicSystemPrompt.d.ts.map +0 -1
  159. package/dist/agents/middleware/hitl.d.cts.map +0 -1
  160. package/dist/agents/middleware/hitl.d.ts.map +0 -1
  161. package/dist/agents/middleware/llmToolSelector.d.cts.map +0 -1
  162. package/dist/agents/middleware/llmToolSelector.d.ts.map +0 -1
  163. package/dist/agents/middleware/modelCallLimit.d.cts.map +0 -1
  164. package/dist/agents/middleware/modelCallLimit.d.ts.map +0 -1
  165. package/dist/agents/middleware/modelFallback.d.cts.map +0 -1
  166. package/dist/agents/middleware/modelFallback.d.ts.map +0 -1
  167. package/dist/agents/middleware/pii.d.cts.map +0 -1
  168. package/dist/agents/middleware/pii.d.ts.map +0 -1
  169. package/dist/agents/middleware/piiRedaction.d.cts.map +0 -1
  170. package/dist/agents/middleware/piiRedaction.d.ts.map +0 -1
  171. package/dist/agents/middleware/promptCaching.cjs.map +0 -1
  172. package/dist/agents/middleware/promptCaching.d.cts.map +0 -1
  173. package/dist/agents/middleware/promptCaching.d.ts.map +0 -1
  174. package/dist/agents/middleware/promptCaching.js.map +0 -1
  175. package/dist/agents/middleware/summarization.d.cts.map +0 -1
  176. package/dist/agents/middleware/summarization.d.ts.map +0 -1
  177. package/dist/agents/middleware/todoListMiddleware.d.cts.map +0 -1
  178. package/dist/agents/middleware/todoListMiddleware.d.ts.map +0 -1
  179. package/dist/agents/middleware/toolCallLimit.d.cts.map +0 -1
  180. package/dist/agents/middleware/toolCallLimit.d.ts.map +0 -1
  181. package/dist/agents/middleware/toolEmulator.d.cts.map +0 -1
  182. package/dist/agents/middleware/toolEmulator.d.ts.map +0 -1
  183. package/dist/agents/middleware/toolRetry.d.cts.map +0 -1
  184. package/dist/agents/middleware/toolRetry.d.ts.map +0 -1
  185. package/dist/agents/middleware/types.d.cts.map +0 -1
  186. package/dist/agents/middleware/types.d.ts.map +0 -1
  187. package/dist/agents/middleware/utils.d.cts.map +0 -1
  188. package/dist/agents/middleware/utils.d.ts.map +0 -1
  189. package/dist/agents/middleware.d.cts.map +0 -1
  190. package/dist/agents/middleware.d.ts.map +0 -1
  191. package/dist/agents/nodes/types.d.cts.map +0 -1
  192. package/dist/agents/nodes/types.d.ts.map +0 -1
  193. package/dist/agents/responses.d.cts.map +0 -1
  194. package/dist/agents/responses.d.ts.map +0 -1
  195. package/dist/agents/runtime.d.cts.map +0 -1
  196. package/dist/agents/runtime.d.ts.map +0 -1
  197. package/dist/agents/tests/utils.d.cts.map +0 -1
  198. package/dist/agents/tests/utils.d.ts.map +0 -1
  199. package/dist/agents/types.d.cts.map +0 -1
  200. package/dist/agents/types.d.ts.map +0 -1
  201. package/dist/chat_models/universal.d.cts.map +0 -1
  202. package/dist/chat_models/universal.d.ts.map +0 -1
  203. package/dist/hub/base.d.cts.map +0 -1
  204. package/dist/hub/base.d.ts.map +0 -1
  205. package/dist/hub/index.d.cts.map +0 -1
  206. package/dist/hub/index.d.ts.map +0 -1
  207. package/dist/hub/node.d.cts.map +0 -1
  208. package/dist/hub/node.d.ts.map +0 -1
  209. package/dist/load/import_type.d.cts.map +0 -1
  210. package/dist/load/import_type.d.ts.map +0 -1
  211. package/dist/load/index.d.cts.map +0 -1
  212. package/dist/load/index.d.ts.map +0 -1
  213. package/dist/storage/encoder_backed.d.cts.map +0 -1
  214. package/dist/storage/encoder_backed.d.ts.map +0 -1
  215. package/dist/storage/file_system.d.cts.map +0 -1
  216. package/dist/storage/file_system.d.ts.map +0 -1
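The headline additions in 1.1.0 are the new `modelRetryMiddleware` and `openAIModerationMiddleware`, plus the relocation of the Anthropic prompt-caching middleware under `middleware/provider/anthropic/`. As orientation before the hunks below, here is a minimal sketch of the new retry middleware, based on the JSDoc embedded in the diffed sources; the empty tools array is a placeholder:

```ts
import { createAgent, modelRetryMiddleware } from "langchain";

// Retry failed model calls up to 4 times with exponential backoff,
// returning an AIMessage describing the failure instead of throwing.
const retry = modelRetryMiddleware({
  maxRetries: 4,
  backoffFactor: 1.5,    // multiplier applied to the delay after each attempt
  initialDelayMs: 1000,  // delay before the first retry
  onFailure: "continue", // "error" would re-throw once retries are exhausted
});

const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [], // add your tools here
  middleware: [retry],
});
```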
@@ -0,0 +1 @@
+ {"version":3,"file":"modelRetry.js","names":["config: ModelRetryMiddlewareConfig","error: Error","error","attemptsMade: number","content: string"],"sources":["../../../src/agents/middleware/modelRetry.ts"],"sourcesContent":["/**\n * Model retry middleware for agents.\n */\nimport { z } from \"zod/v3\";\nimport { AIMessage } from \"@langchain/core/messages\";\n\nimport { createMiddleware } from \"../middleware.js\";\nimport type { AgentMiddleware } from \"./types.js\";\nimport { sleep, calculateRetryDelay } from \"./utils.js\";\nimport { RetrySchema } from \"./constants.js\";\nimport { InvalidRetryConfigError } from \"./error.js\";\n\n/**\n * Configuration options for the Model Retry Middleware.\n */\nexport const ModelRetryMiddlewareOptionsSchema = z\n .object({\n /**\n * Behavior when all retries are exhausted. Options:\n * - `\"continue\"` (default): Return an AIMessage with error details, allowing\n * the agent to potentially handle the failure gracefully.\n * - `\"error\"`: Re-raise the exception, stopping agent execution.\n * - Custom function: Function that takes the exception and returns a string\n * for the AIMessage content, allowing custom error formatting.\n */\n onFailure: z\n .union([\n z.literal(\"error\"),\n z.literal(\"continue\"),\n z.function().args(z.instanceof(Error)).returns(z.string()),\n ])\n .default(\"continue\"),\n })\n .merge(RetrySchema);\n\nexport type ModelRetryMiddlewareConfig = z.input<\n typeof ModelRetryMiddlewareOptionsSchema\n>;\n\n/**\n * Middleware that automatically retries failed model calls with configurable backoff.\n *\n * Supports retrying on specific exceptions and exponential backoff.\n *\n * @example Basic usage with default settings (2 retries, exponential backoff)\n * ```ts\n * import { createAgent, modelRetryMiddleware } from \"langchain\";\n *\n * const agent = createAgent({\n * model: \"openai:gpt-4o\",\n * tools: [searchTool],\n * middleware: [modelRetryMiddleware()],\n * });\n * ```\n *\n * @example Retry specific exceptions only\n * ```ts\n * import { modelRetryMiddleware } from \"langchain\";\n *\n * const retry = modelRetryMiddleware({\n * maxRetries: 4,\n * retryOn: [TimeoutError, NetworkError],\n * backoffFactor: 1.5,\n * });\n * ```\n *\n * @example Custom exception filtering\n * ```ts\n * function shouldRetry(error: Error): boolean {\n * // Only retry on rate limit errors\n * if (error.name === \"RateLimitError\") {\n * return true;\n * }\n * // Or check for specific HTTP status codes\n * if (error.name === \"HTTPError\" && \"statusCode\" in error) {\n * const statusCode = (error as any).statusCode;\n * return statusCode === 429 || statusCode === 503;\n * }\n * return false;\n * }\n *\n * const retry = modelRetryMiddleware({\n * maxRetries: 3,\n * retryOn: shouldRetry,\n * });\n * ```\n *\n * @example Return error message instead of raising\n * ```ts\n * const retry = modelRetryMiddleware({\n * maxRetries: 4,\n * onFailure: \"continue\", // Return AIMessage with error instead of throwing\n * });\n * ```\n *\n * @example Custom error message formatting\n * ```ts\n * const formatError = (error: Error) =>\n * `Model call failed: ${error.message}. 
Please try again later.`;\n *\n * const retry = modelRetryMiddleware({\n * maxRetries: 4,\n * onFailure: formatError,\n * });\n * ```\n *\n * @example Constant backoff (no exponential growth)\n * ```ts\n * const retry = modelRetryMiddleware({\n * maxRetries: 5,\n * backoffFactor: 0.0, // No exponential growth\n * initialDelayMs: 2000, // Always wait 2 seconds\n * });\n * ```\n *\n * @example Raise exception on failure\n * ```ts\n * const retry = modelRetryMiddleware({\n * maxRetries: 2,\n * onFailure: \"error\", // Re-raise exception instead of returning message\n * });\n * ```\n *\n * @param config - Configuration options for the retry middleware\n * @returns A middleware instance that handles model failures with retries\n */\nexport function modelRetryMiddleware(\n config: ModelRetryMiddlewareConfig = {}\n): AgentMiddleware {\n const { success, error, data } =\n ModelRetryMiddlewareOptionsSchema.safeParse(config);\n if (!success) {\n throw new InvalidRetryConfigError(error);\n }\n const {\n maxRetries,\n retryOn,\n onFailure,\n backoffFactor,\n initialDelayMs,\n maxDelayMs,\n jitter,\n } = data;\n\n /**\n * Check if the exception should trigger a retry.\n */\n const shouldRetryException = (error: Error): boolean => {\n if (typeof retryOn === \"function\") {\n return retryOn(error);\n }\n // retryOn is an array of error constructors\n return retryOn.some(\n (ErrorConstructor) => error.constructor === ErrorConstructor\n );\n };\n\n // Use the exported calculateRetryDelay function with our config\n const delayConfig = { backoffFactor, initialDelayMs, maxDelayMs, jitter };\n\n /**\n * Format the failure message when retries are exhausted.\n */\n const formatFailureMessage = (error: Error, attemptsMade: number): string => {\n const errorType = error.constructor.name;\n const attemptWord = attemptsMade === 1 ? \"attempt\" : \"attempts\";\n return `Model call failed after ${attemptsMade} ${attemptWord} with ${errorType}: ${error.message}`;\n };\n\n /**\n * Handle failure when all retries are exhausted.\n */\n const handleFailure = (error: Error, attemptsMade: number): AIMessage => {\n if (onFailure === \"error\") {\n throw error;\n }\n\n let content: string;\n if (typeof onFailure === \"function\") {\n content = onFailure(error);\n } else {\n content = formatFailureMessage(error, attemptsMade);\n }\n\n return new AIMessage({\n content,\n });\n };\n\n return createMiddleware({\n name: \"modelRetryMiddleware\",\n contextSchema: ModelRetryMiddlewareOptionsSchema,\n wrapModelCall: async (request, handler) => {\n // Initial attempt + retries\n for (let attempt = 0; attempt <= maxRetries; attempt++) {\n try {\n return await handler(request);\n } catch (error) {\n const attemptsMade = attempt + 1; // attempt is 0-indexed\n\n // Ensure error is an Error instance\n const err =\n error && typeof error === \"object\" && \"message\" in error\n ? 
(error as Error)\n : new Error(String(error));\n\n // Check if we should retry this exception\n if (!shouldRetryException(err)) {\n // Exception is not retryable, handle failure immediately\n return handleFailure(err, attemptsMade);\n }\n\n // Check if we have more retries left\n if (attempt < maxRetries) {\n // Calculate and apply backoff delay\n const delay = calculateRetryDelay(delayConfig, attempt);\n if (delay > 0) {\n await sleep(delay);\n }\n // Continue to next retry\n } else {\n // No more retries, handle failure\n return handleFailure(err, attemptsMade);\n }\n }\n }\n\n // Unreachable: loop always returns via handler success or handleFailure\n throw new Error(\"Unexpected: retry loop completed without returning\");\n },\n });\n}\n"],"mappings":";;;;;;;;;;;AAeA,MAAa,oCAAoC,EAC9C,OAAO,EASN,WAAW,EACR,MAAM;CACL,EAAE,QAAQ,QAAQ;CAClB,EAAE,QAAQ,WAAW;CACrB,EAAE,UAAU,CAAC,KAAK,EAAE,WAAW,MAAM,CAAC,CAAC,QAAQ,EAAE,QAAQ,CAAC;AAC3D,EAAC,CACD,QAAQ,WAAW,CACvB,EAAC,CACD,MAAM,YAAY;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA6FrB,SAAgB,qBACdA,SAAqC,CAAE,GACtB;CACjB,MAAM,EAAE,SAAS,OAAO,MAAM,GAC5B,kCAAkC,UAAU,OAAO;AACrD,KAAI,CAAC,QACH,OAAM,IAAI,wBAAwB;CAEpC,MAAM,EACJ,YACA,SACA,WACA,eACA,gBACA,YACA,QACD,GAAG;;;;CAKJ,MAAM,uBAAuB,CAACC,YAA0B;AACtD,MAAI,OAAO,YAAY,WACrB,QAAO,QAAQC,QAAM;AAGvB,SAAO,QAAQ,KACb,CAAC,qBAAqBA,QAAM,gBAAgB,iBAC7C;CACF;CAGD,MAAM,cAAc;EAAE;EAAe;EAAgB;EAAY;CAAQ;;;;CAKzE,MAAM,uBAAuB,CAACD,SAAcE,iBAAiC;EAC3E,MAAM,YAAYD,QAAM,YAAY;EACpC,MAAM,cAAc,iBAAiB,IAAI,YAAY;AACrD,SAAO,CAAC,wBAAwB,EAAE,aAAa,CAAC,EAAE,YAAY,MAAM,EAAE,UAAU,EAAE,EAAEA,QAAM,SAAS;CACpG;;;;CAKD,MAAM,gBAAgB,CAACD,SAAcE,iBAAoC;AACvE,MAAI,cAAc,QAChB,OAAMD;EAGR,IAAIE;AACJ,MAAI,OAAO,cAAc,YACvB,UAAU,UAAUF,QAAM;OAE1B,UAAU,qBAAqBA,SAAO,aAAa;AAGrD,SAAO,IAAI,UAAU,EACnB,QACD;CACF;AAED,QAAO,iBAAiB;EACtB,MAAM;EACN,eAAe;EACf,eAAe,OAAO,SAAS,YAAY;AAEzC,QAAK,IAAI,UAAU,GAAG,WAAW,YAAY,UAC3C,KAAI;AACF,WAAO,MAAM,QAAQ,QAAQ;GAC9B,SAAQA,SAAO;IACd,MAAM,eAAe,UAAU;IAG/B,MAAM,MACJA,WAAS,OAAOA,YAAU,YAAY,aAAaA,UAC9CA,UACD,IAAI,MAAM,OAAOA,QAAM;AAG7B,QAAI,CAAC,qBAAqB,IAAI,CAE5B,QAAO,cAAc,KAAK,aAAa;AAIzC,QAAI,UAAU,YAAY;KAExB,MAAM,QAAQ,oBAAoB,aAAa,QAAQ;AACvD,SAAI,QAAQ,GACV,MAAM,MAAM,MAAM;IAGrB,MAEC,QAAO,cAAc,KAAK,aAAa;GAE1C;AAIH,SAAM,IAAI,MAAM;EACjB;CACF,EAAC;AACH"}
@@ -1,8 +1,8 @@
- const require_rolldown_runtime = require('../../_virtual/rolldown_runtime.cjs');
- const require_middleware = require('../middleware.cjs');
+ const require_rolldown_runtime = require('../../../../_virtual/rolldown_runtime.cjs');
+ const require_middleware = require('../../../middleware.cjs');
  const zod_v3 = require_rolldown_runtime.__toESM(require("zod/v3"));

- //#region src/agents/middleware/promptCaching.ts
+ //#region src/agents/middleware/provider/anthropic/promptCaching.ts
  const DEFAULT_ENABLE_CACHING = true;
  const DEFAULT_TTL = "5m";
  const DEFAULT_MIN_MESSAGES_TO_CACHE = 3;
@@ -0,0 +1 @@
+ {"version":3,"file":"promptCaching.cjs","names":["z","message: string","middlewareOptions?: PromptCachingMiddlewareConfig","createMiddleware"],"sources":["../../../../../src/agents/middleware/provider/anthropic/promptCaching.ts"],"sourcesContent":["import { z } from \"zod/v3\";\nimport { ContentBlock } from \"@langchain/core/messages\";\nimport { InferInteropZodInput } from \"@langchain/core/utils/types\";\n\nimport { ConfigurableModel } from \"../../../../chat_models/universal.js\";\nimport { createMiddleware } from \"../../../middleware.js\";\n\nconst DEFAULT_ENABLE_CACHING = true;\nconst DEFAULT_TTL = \"5m\";\nconst DEFAULT_MIN_MESSAGES_TO_CACHE = 3;\nconst DEFAULT_UNSUPPORTED_MODEL_BEHAVIOR = \"warn\";\n\nconst contextSchema = z.object({\n /**\n * Whether to enable prompt caching.\n * @default true\n */\n enableCaching: z.boolean().optional(),\n /**\n * The time-to-live for the cached prompt.\n * @default \"5m\"\n */\n ttl: z.enum([\"5m\", \"1h\"]).optional(),\n /**\n * The minimum number of messages required before caching is applied.\n * @default 3\n */\n minMessagesToCache: z.number().optional(),\n /**\n * The behavior to take when an unsupported model is used.\n * - \"ignore\" will ignore the unsupported model and continue without caching.\n * - \"warn\" will warn the user and continue without caching.\n * - \"raise\" will raise an error and stop the agent.\n * @default \"warn\"\n */\n unsupportedModelBehavior: z.enum([\"ignore\", \"warn\", \"raise\"]).optional(),\n});\nexport type PromptCachingMiddlewareConfig = Partial<\n InferInteropZodInput<typeof contextSchema>\n>;\n\nclass PromptCachingMiddlewareError extends Error {\n constructor(message: string) {\n super(message);\n this.name = \"PromptCachingMiddlewareError\";\n }\n}\n\n/**\n * Creates a prompt caching middleware for Anthropic models to optimize API usage.\n *\n * This middleware automatically adds cache control headers to the last messages when using Anthropic models,\n * enabling their prompt caching feature. This can significantly reduce costs for applications with repetitive\n * prompts, long system messages, or extensive conversation histories.\n *\n * ## How It Works\n *\n * The middleware intercepts model requests and adds cache control metadata that tells Anthropic's\n * API to cache processed prompt prefixes. 
On subsequent requests with matching prefixes, the\n * cached representations are reused, skipping redundant token processing.\n *\n * ## Benefits\n *\n * - **Cost Reduction**: Avoid reprocessing the same tokens repeatedly (up to 90% savings on cached portions)\n * - **Lower Latency**: Cached prompts are processed faster as embeddings are pre-computed\n * - **Better Scalability**: Reduced computational load enables handling more requests\n * - **Consistent Performance**: Stable response times for repetitive queries\n *\n * @param middlewareOptions - Configuration options for the caching behavior\n * @param middlewareOptions.enableCaching - Whether to enable prompt caching (default: `true`)\n * @param middlewareOptions.ttl - Cache time-to-live: `\"5m\"` for 5 minutes or `\"1h\"` for 1 hour (default: `\"5m\"`)\n * @param middlewareOptions.minMessagesToCache - Minimum number of messages required before caching is applied (default: `3`)\n * @param middlewareOptions.unsupportedModelBehavior - The behavior to take when an unsupported model is used (default: `\"warn\"`)\n *\n * @returns A middleware instance that can be passed to `createAgent`\n *\n * @throws {Error} If used with non-Anthropic models\n *\n * @example\n * Basic usage with default settings\n * ```typescript\n * import { createAgent } from \"langchain\";\n * import { anthropicPromptCachingMiddleware } from \"langchain\";\n *\n * const agent = createAgent({\n * model: \"anthropic:claude-3-5-sonnet\",\n * middleware: [\n * anthropicPromptCachingMiddleware()\n * ]\n * });\n * ```\n *\n * @example\n * Custom configuration for longer conversations\n * ```typescript\n * const cachingMiddleware = anthropicPromptCachingMiddleware({\n * ttl: \"1h\", // Cache for 1 hour instead of default 5 minutes\n * minMessagesToCache: 5 // Only cache after 5 messages\n * });\n *\n * const agent = createAgent({\n * model: \"anthropic:claude-3-5-sonnet\",\n * systemPrompt: \"You are a helpful assistant with deep knowledge of...\", // Long system prompt\n * middleware: [cachingMiddleware]\n * });\n * ```\n *\n * @example\n * Conditional caching based on runtime context\n * ```typescript\n * const agent = createAgent({\n * model: \"anthropic:claude-3-5-sonnet\",\n * middleware: [\n * anthropicPromptCachingMiddleware({\n * enableCaching: true,\n * ttl: \"5m\"\n * })\n * ]\n * });\n *\n * // Disable caching for specific requests\n * await agent.invoke(\n * { messages: [new HumanMessage(\"Process this without caching\")] },\n * {\n * configurable: {\n * middleware_context: { enableCaching: false }\n * }\n * }\n * );\n * ```\n *\n * @example\n * Optimal setup for customer support chatbot\n * ```typescript\n * const supportAgent = createAgent({\n * model: \"anthropic:claude-3-5-sonnet\",\n * systemPrompt: `You are a customer support agent for ACME Corp.\n *\n * Company policies:\n * - Always be polite and professional\n * - Refer to knowledge base for product information\n * - Escalate billing issues to human agents\n * ... 
(extensive policies and guidelines)\n * `,\n * tools: [searchKnowledgeBase, createTicket, checkOrderStatus],\n * middleware: [\n * anthropicPromptCachingMiddleware({\n * ttl: \"1h\", // Long TTL for stable system prompt\n * minMessagesToCache: 1 // Cache immediately due to large system prompt\n * })\n * ]\n * });\n * ```\n *\n * @remarks\n * - **Anthropic Only**: This middleware only works with Anthropic models and will throw an error if used with other providers\n * - **Automatic Application**: Caching is applied automatically when message count exceeds `minMessagesToCache`\n * - **Cache Scope**: Caches are isolated per API key and cannot be shared across different keys\n * - **TTL Options**: Only supports \"5m\" (5 minutes) and \"1h\" (1 hour) as TTL values per Anthropic's API\n * - **Best Use Cases**: Long system prompts, multi-turn conversations, repetitive queries, RAG applications\n * - **Cost Impact**: Cached tokens are billed at 10% of the base input token price, cache writes are billed at 25% of the base\n *\n * @see {@link createAgent} for agent creation\n * @see {@link https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching} Anthropic's prompt caching documentation\n * @public\n */\nexport function anthropicPromptCachingMiddleware(\n middlewareOptions?: PromptCachingMiddlewareConfig\n) {\n return createMiddleware({\n name: \"PromptCachingMiddleware\",\n contextSchema,\n wrapModelCall: (request, handler) => {\n /**\n * Prefer runtime context values over middleware options values over defaults\n */\n const enableCaching =\n request.runtime.context.enableCaching ??\n middlewareOptions?.enableCaching ??\n DEFAULT_ENABLE_CACHING;\n const ttl =\n request.runtime.context.ttl ?? middlewareOptions?.ttl ?? DEFAULT_TTL;\n const minMessagesToCache =\n request.runtime.context.minMessagesToCache ??\n middlewareOptions?.minMessagesToCache ??\n DEFAULT_MIN_MESSAGES_TO_CACHE;\n const unsupportedModelBehavior =\n request.runtime.context.unsupportedModelBehavior ??\n middlewareOptions?.unsupportedModelBehavior ??\n DEFAULT_UNSUPPORTED_MODEL_BEHAVIOR;\n\n // Skip if caching is disabled\n if (!enableCaching || !request.model) {\n return handler(request);\n }\n\n const isAnthropicModel =\n request.model.getName() === \"ChatAnthropic\" ||\n (request.model.getName() === \"ConfigurableModel\" &&\n (request.model as ConfigurableModel)._defaultConfig?.modelProvider ===\n \"anthropic\");\n if (!isAnthropicModel) {\n // Get model name for better error context\n const modelName = request.model.getName();\n const modelInfo =\n request.model.getName() === \"ConfigurableModel\"\n ? `${modelName} (${\n (request.model as ConfigurableModel)._defaultConfig\n ?.modelProvider\n })`\n : modelName;\n\n const baseMessage = `Unsupported model '${modelInfo}'. Prompt caching requires an Anthropic model`;\n\n if (unsupportedModelBehavior === \"raise\") {\n throw new PromptCachingMiddlewareError(\n `${baseMessage} (e.g., 'anthropic:claude-4-0-sonnet').`\n );\n } else if (unsupportedModelBehavior === \"warn\") {\n console.warn(\n `PromptCachingMiddleware: Skipping caching for ${modelName}. Consider switching to an Anthropic model for caching benefits.`\n );\n }\n return handler(request);\n }\n\n const messagesCount =\n request.state.messages.length + (request.systemPrompt ? 
1 : 0);\n\n if (messagesCount < minMessagesToCache) {\n return handler(request);\n }\n\n /**\n * Add cache_control to the last message\n */\n const lastMessage = request.messages.at(-1);\n if (!lastMessage) {\n return handler(request);\n }\n\n const NewMessageConstructor =\n Object.getPrototypeOf(lastMessage).constructor;\n if (Array.isArray(lastMessage.content)) {\n const newMessage = new NewMessageConstructor({\n ...lastMessage,\n content: [\n ...lastMessage.content.slice(0, -1),\n {\n ...lastMessage.content.at(-1),\n cache_control: {\n type: \"ephemeral\",\n ttl,\n },\n } as ContentBlock,\n ],\n });\n return handler({\n ...request,\n messages: [...request.messages.slice(0, -1), newMessage],\n });\n } else if (typeof lastMessage.content === \"string\") {\n const newMessage = new NewMessageConstructor({\n ...lastMessage,\n content: [\n {\n type: \"text\",\n text: lastMessage.content,\n cache_control: {\n type: \"ephemeral\",\n ttl,\n },\n },\n ],\n });\n return handler({\n ...request,\n messages: [...request.messages.slice(0, -1), newMessage],\n });\n }\n\n throw new PromptCachingMiddlewareError(\n \"Last message content is not a string or array\"\n );\n },\n });\n}\n"],"mappings":";;;;;AAOA,MAAM,yBAAyB;AAC/B,MAAM,cAAc;AACpB,MAAM,gCAAgC;AACtC,MAAM,qCAAqC;AAE3C,MAAM,gBAAgBA,SAAE,OAAO;CAK7B,eAAeA,SAAE,SAAS,CAAC,UAAU;CAKrC,KAAKA,SAAE,KAAK,CAAC,MAAM,IAAK,EAAC,CAAC,UAAU;CAKpC,oBAAoBA,SAAE,QAAQ,CAAC,UAAU;CAQzC,0BAA0BA,SAAE,KAAK;EAAC;EAAU;EAAQ;CAAQ,EAAC,CAAC,UAAU;AACzE,EAAC;AAKF,IAAM,+BAAN,cAA2C,MAAM;CAC/C,YAAYC,SAAiB;EAC3B,MAAM,QAAQ;EACd,KAAK,OAAO;CACb;AACF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAwHD,SAAgB,iCACdC,mBACA;AACA,QAAOC,oCAAiB;EACtB,MAAM;EACN;EACA,eAAe,CAAC,SAAS,YAAY;;;;GAInC,MAAM,gBACJ,QAAQ,QAAQ,QAAQ,iBACxB,mBAAmB,iBACnB;GACF,MAAM,MACJ,QAAQ,QAAQ,QAAQ,OAAO,mBAAmB,OAAO;GAC3D,MAAM,qBACJ,QAAQ,QAAQ,QAAQ,sBACxB,mBAAmB,sBACnB;GACF,MAAM,2BACJ,QAAQ,QAAQ,QAAQ,4BACxB,mBAAmB,4BACnB;AAGF,OAAI,CAAC,iBAAiB,CAAC,QAAQ,MAC7B,QAAO,QAAQ,QAAQ;GAGzB,MAAM,mBACJ,QAAQ,MAAM,SAAS,KAAK,mBAC3B,QAAQ,MAAM,SAAS,KAAK,uBAC1B,QAAQ,MAA4B,gBAAgB,kBACnD;AACN,OAAI,CAAC,kBAAkB;IAErB,MAAM,YAAY,QAAQ,MAAM,SAAS;IACzC,MAAM,YACJ,QAAQ,MAAM,SAAS,KAAK,sBACxB,GAAG,UAAU,EAAE,EACZ,QAAQ,MAA4B,gBACjC,cACL,CAAC,CAAC,GACH;IAEN,MAAM,cAAc,CAAC,mBAAmB,EAAE,UAAU,6CAA6C,CAAC;AAElG,QAAI,6BAA6B,QAC/B,OAAM,IAAI,6BACR,GAAG,YAAY,uCAAuC,CAAC;aAEhD,6BAA6B,QACtC,QAAQ,KACN,CAAC,8CAA8C,EAAE,UAAU,gEAAgE,CAAC,CAC7H;AAEH,WAAO,QAAQ,QAAQ;GACxB;GAED,MAAM,gBACJ,QAAQ,MAAM,SAAS,UAAU,QAAQ,eAAe,IAAI;AAE9D,OAAI,gBAAgB,mBAClB,QAAO,QAAQ,QAAQ;;;;GAMzB,MAAM,cAAc,QAAQ,SAAS,GAAG,GAAG;AAC3C,OAAI,CAAC,YACH,QAAO,QAAQ,QAAQ;GAGzB,MAAM,wBACJ,OAAO,eAAe,YAAY,CAAC;AACrC,OAAI,MAAM,QAAQ,YAAY,QAAQ,EAAE;IACtC,MAAM,aAAa,IAAI,sBAAsB;KAC3C,GAAG;KACH,SAAS,CACP,GAAG,YAAY,QAAQ,MAAM,GAAG,GAAG,EACnC;MACE,GAAG,YAAY,QAAQ,GAAG,GAAG;MAC7B,eAAe;OACb,MAAM;OACN;MACD;KACF,CACF;IACF;AACD,WAAO,QAAQ;KACb,GAAG;KACH,UAAU,CAAC,GAAG,QAAQ,SAAS,MAAM,GAAG,GAAG,EAAE,UAAW;IACzD,EAAC;GACH,WAAU,OAAO,YAAY,YAAY,UAAU;IAClD,MAAM,aAAa,IAAI,sBAAsB;KAC3C,GAAG;KACH,SAAS,CACP;MACE,MAAM;MACN,MAAM,YAAY;MAClB,eAAe;OACb,MAAM;OACN;MACD;KACF,CACF;IACF;AACD,WAAO,QAAQ;KACb,GAAG;KACH,UAAU,CAAC,GAAG,QAAQ,SAAS,MAAM,GAAG,GAAG,EAAE,UAAW;IACzD,EAAC;GACH;AAED,SAAM,IAAI,6BACR;EAEH;CACF,EAAC;AACH"}
@@ -1,8 +1,8 @@
- import { AgentMiddleware } from "./types.cjs";
+ import { AgentMiddleware } from "../../types.cjs";
  import { InferInteropZodInput } from "@langchain/core/utils/types";
  import { z } from "zod/v3";

- //#region src/agents/middleware/promptCaching.d.ts
+ //#region src/agents/middleware/provider/anthropic/promptCaching.d.ts
  declare const contextSchema: z.ZodObject<{
  /**
  * Whether to enable prompt caching.
@@ -1,8 +1,8 @@
- import { AgentMiddleware } from "./types.js";
+ import { AgentMiddleware } from "../../types.js";
  import { z } from "zod/v3";
  import { InferInteropZodInput } from "@langchain/core/utils/types";

- //#region src/agents/middleware/promptCaching.d.ts
+ //#region src/agents/middleware/provider/anthropic/promptCaching.d.ts
  declare const contextSchema: z.ZodObject<{
  /**
  * Whether to enable prompt caching.
@@ -1,7 +1,7 @@
- import { createMiddleware } from "../middleware.js";
+ import { createMiddleware } from "../../../middleware.js";
  import { z } from "zod/v3";

- //#region src/agents/middleware/promptCaching.ts
+ //#region src/agents/middleware/provider/anthropic/promptCaching.ts
  const DEFAULT_ENABLE_CACHING = true;
  const DEFAULT_TTL = "5m";
  const DEFAULT_MIN_MESSAGES_TO_CACHE = 3;
@@ -0,0 +1 @@
+ {"version":3,"file":"promptCaching.js","names":["message: string","middlewareOptions?: PromptCachingMiddlewareConfig"],"sources":["../../../../../src/agents/middleware/provider/anthropic/promptCaching.ts"],"sourcesContent":["import { z } from \"zod/v3\";\nimport { ContentBlock } from \"@langchain/core/messages\";\nimport { InferInteropZodInput } from \"@langchain/core/utils/types\";\n\nimport { ConfigurableModel } from \"../../../../chat_models/universal.js\";\nimport { createMiddleware } from \"../../../middleware.js\";\n\nconst DEFAULT_ENABLE_CACHING = true;\nconst DEFAULT_TTL = \"5m\";\nconst DEFAULT_MIN_MESSAGES_TO_CACHE = 3;\nconst DEFAULT_UNSUPPORTED_MODEL_BEHAVIOR = \"warn\";\n\nconst contextSchema = z.object({\n /**\n * Whether to enable prompt caching.\n * @default true\n */\n enableCaching: z.boolean().optional(),\n /**\n * The time-to-live for the cached prompt.\n * @default \"5m\"\n */\n ttl: z.enum([\"5m\", \"1h\"]).optional(),\n /**\n * The minimum number of messages required before caching is applied.\n * @default 3\n */\n minMessagesToCache: z.number().optional(),\n /**\n * The behavior to take when an unsupported model is used.\n * - \"ignore\" will ignore the unsupported model and continue without caching.\n * - \"warn\" will warn the user and continue without caching.\n * - \"raise\" will raise an error and stop the agent.\n * @default \"warn\"\n */\n unsupportedModelBehavior: z.enum([\"ignore\", \"warn\", \"raise\"]).optional(),\n});\nexport type PromptCachingMiddlewareConfig = Partial<\n InferInteropZodInput<typeof contextSchema>\n>;\n\nclass PromptCachingMiddlewareError extends Error {\n constructor(message: string) {\n super(message);\n this.name = \"PromptCachingMiddlewareError\";\n }\n}\n\n/**\n * Creates a prompt caching middleware for Anthropic models to optimize API usage.\n *\n * This middleware automatically adds cache control headers to the last messages when using Anthropic models,\n * enabling their prompt caching feature. This can significantly reduce costs for applications with repetitive\n * prompts, long system messages, or extensive conversation histories.\n *\n * ## How It Works\n *\n * The middleware intercepts model requests and adds cache control metadata that tells Anthropic's\n * API to cache processed prompt prefixes. 
On subsequent requests with matching prefixes, the\n * cached representations are reused, skipping redundant token processing.\n *\n * ## Benefits\n *\n * - **Cost Reduction**: Avoid reprocessing the same tokens repeatedly (up to 90% savings on cached portions)\n * - **Lower Latency**: Cached prompts are processed faster as embeddings are pre-computed\n * - **Better Scalability**: Reduced computational load enables handling more requests\n * - **Consistent Performance**: Stable response times for repetitive queries\n *\n * @param middlewareOptions - Configuration options for the caching behavior\n * @param middlewareOptions.enableCaching - Whether to enable prompt caching (default: `true`)\n * @param middlewareOptions.ttl - Cache time-to-live: `\"5m\"` for 5 minutes or `\"1h\"` for 1 hour (default: `\"5m\"`)\n * @param middlewareOptions.minMessagesToCache - Minimum number of messages required before caching is applied (default: `3`)\n * @param middlewareOptions.unsupportedModelBehavior - The behavior to take when an unsupported model is used (default: `\"warn\"`)\n *\n * @returns A middleware instance that can be passed to `createAgent`\n *\n * @throws {Error} If used with non-Anthropic models\n *\n * @example\n * Basic usage with default settings\n * ```typescript\n * import { createAgent } from \"langchain\";\n * import { anthropicPromptCachingMiddleware } from \"langchain\";\n *\n * const agent = createAgent({\n * model: \"anthropic:claude-3-5-sonnet\",\n * middleware: [\n * anthropicPromptCachingMiddleware()\n * ]\n * });\n * ```\n *\n * @example\n * Custom configuration for longer conversations\n * ```typescript\n * const cachingMiddleware = anthropicPromptCachingMiddleware({\n * ttl: \"1h\", // Cache for 1 hour instead of default 5 minutes\n * minMessagesToCache: 5 // Only cache after 5 messages\n * });\n *\n * const agent = createAgent({\n * model: \"anthropic:claude-3-5-sonnet\",\n * systemPrompt: \"You are a helpful assistant with deep knowledge of...\", // Long system prompt\n * middleware: [cachingMiddleware]\n * });\n * ```\n *\n * @example\n * Conditional caching based on runtime context\n * ```typescript\n * const agent = createAgent({\n * model: \"anthropic:claude-3-5-sonnet\",\n * middleware: [\n * anthropicPromptCachingMiddleware({\n * enableCaching: true,\n * ttl: \"5m\"\n * })\n * ]\n * });\n *\n * // Disable caching for specific requests\n * await agent.invoke(\n * { messages: [new HumanMessage(\"Process this without caching\")] },\n * {\n * configurable: {\n * middleware_context: { enableCaching: false }\n * }\n * }\n * );\n * ```\n *\n * @example\n * Optimal setup for customer support chatbot\n * ```typescript\n * const supportAgent = createAgent({\n * model: \"anthropic:claude-3-5-sonnet\",\n * systemPrompt: `You are a customer support agent for ACME Corp.\n *\n * Company policies:\n * - Always be polite and professional\n * - Refer to knowledge base for product information\n * - Escalate billing issues to human agents\n * ... 
(extensive policies and guidelines)\n * `,\n * tools: [searchKnowledgeBase, createTicket, checkOrderStatus],\n * middleware: [\n * anthropicPromptCachingMiddleware({\n * ttl: \"1h\", // Long TTL for stable system prompt\n * minMessagesToCache: 1 // Cache immediately due to large system prompt\n * })\n * ]\n * });\n * ```\n *\n * @remarks\n * - **Anthropic Only**: This middleware only works with Anthropic models and will throw an error if used with other providers\n * - **Automatic Application**: Caching is applied automatically when message count exceeds `minMessagesToCache`\n * - **Cache Scope**: Caches are isolated per API key and cannot be shared across different keys\n * - **TTL Options**: Only supports \"5m\" (5 minutes) and \"1h\" (1 hour) as TTL values per Anthropic's API\n * - **Best Use Cases**: Long system prompts, multi-turn conversations, repetitive queries, RAG applications\n * - **Cost Impact**: Cached tokens are billed at 10% of the base input token price, cache writes are billed at 25% of the base\n *\n * @see {@link createAgent} for agent creation\n * @see {@link https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching} Anthropic's prompt caching documentation\n * @public\n */\nexport function anthropicPromptCachingMiddleware(\n middlewareOptions?: PromptCachingMiddlewareConfig\n) {\n return createMiddleware({\n name: \"PromptCachingMiddleware\",\n contextSchema,\n wrapModelCall: (request, handler) => {\n /**\n * Prefer runtime context values over middleware options values over defaults\n */\n const enableCaching =\n request.runtime.context.enableCaching ??\n middlewareOptions?.enableCaching ??\n DEFAULT_ENABLE_CACHING;\n const ttl =\n request.runtime.context.ttl ?? middlewareOptions?.ttl ?? DEFAULT_TTL;\n const minMessagesToCache =\n request.runtime.context.minMessagesToCache ??\n middlewareOptions?.minMessagesToCache ??\n DEFAULT_MIN_MESSAGES_TO_CACHE;\n const unsupportedModelBehavior =\n request.runtime.context.unsupportedModelBehavior ??\n middlewareOptions?.unsupportedModelBehavior ??\n DEFAULT_UNSUPPORTED_MODEL_BEHAVIOR;\n\n // Skip if caching is disabled\n if (!enableCaching || !request.model) {\n return handler(request);\n }\n\n const isAnthropicModel =\n request.model.getName() === \"ChatAnthropic\" ||\n (request.model.getName() === \"ConfigurableModel\" &&\n (request.model as ConfigurableModel)._defaultConfig?.modelProvider ===\n \"anthropic\");\n if (!isAnthropicModel) {\n // Get model name for better error context\n const modelName = request.model.getName();\n const modelInfo =\n request.model.getName() === \"ConfigurableModel\"\n ? `${modelName} (${\n (request.model as ConfigurableModel)._defaultConfig\n ?.modelProvider\n })`\n : modelName;\n\n const baseMessage = `Unsupported model '${modelInfo}'. Prompt caching requires an Anthropic model`;\n\n if (unsupportedModelBehavior === \"raise\") {\n throw new PromptCachingMiddlewareError(\n `${baseMessage} (e.g., 'anthropic:claude-4-0-sonnet').`\n );\n } else if (unsupportedModelBehavior === \"warn\") {\n console.warn(\n `PromptCachingMiddleware: Skipping caching for ${modelName}. Consider switching to an Anthropic model for caching benefits.`\n );\n }\n return handler(request);\n }\n\n const messagesCount =\n request.state.messages.length + (request.systemPrompt ? 
1 : 0);\n\n if (messagesCount < minMessagesToCache) {\n return handler(request);\n }\n\n /**\n * Add cache_control to the last message\n */\n const lastMessage = request.messages.at(-1);\n if (!lastMessage) {\n return handler(request);\n }\n\n const NewMessageConstructor =\n Object.getPrototypeOf(lastMessage).constructor;\n if (Array.isArray(lastMessage.content)) {\n const newMessage = new NewMessageConstructor({\n ...lastMessage,\n content: [\n ...lastMessage.content.slice(0, -1),\n {\n ...lastMessage.content.at(-1),\n cache_control: {\n type: \"ephemeral\",\n ttl,\n },\n } as ContentBlock,\n ],\n });\n return handler({\n ...request,\n messages: [...request.messages.slice(0, -1), newMessage],\n });\n } else if (typeof lastMessage.content === \"string\") {\n const newMessage = new NewMessageConstructor({\n ...lastMessage,\n content: [\n {\n type: \"text\",\n text: lastMessage.content,\n cache_control: {\n type: \"ephemeral\",\n ttl,\n },\n },\n ],\n });\n return handler({\n ...request,\n messages: [...request.messages.slice(0, -1), newMessage],\n });\n }\n\n throw new PromptCachingMiddlewareError(\n \"Last message content is not a string or array\"\n );\n },\n });\n}\n"],"mappings":";;;;AAOA,MAAM,yBAAyB;AAC/B,MAAM,cAAc;AACpB,MAAM,gCAAgC;AACtC,MAAM,qCAAqC;AAE3C,MAAM,gBAAgB,EAAE,OAAO;CAK7B,eAAe,EAAE,SAAS,CAAC,UAAU;CAKrC,KAAK,EAAE,KAAK,CAAC,MAAM,IAAK,EAAC,CAAC,UAAU;CAKpC,oBAAoB,EAAE,QAAQ,CAAC,UAAU;CAQzC,0BAA0B,EAAE,KAAK;EAAC;EAAU;EAAQ;CAAQ,EAAC,CAAC,UAAU;AACzE,EAAC;AAKF,IAAM,+BAAN,cAA2C,MAAM;CAC/C,YAAYA,SAAiB;EAC3B,MAAM,QAAQ;EACd,KAAK,OAAO;CACb;AACF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAwHD,SAAgB,iCACdC,mBACA;AACA,QAAO,iBAAiB;EACtB,MAAM;EACN;EACA,eAAe,CAAC,SAAS,YAAY;;;;GAInC,MAAM,gBACJ,QAAQ,QAAQ,QAAQ,iBACxB,mBAAmB,iBACnB;GACF,MAAM,MACJ,QAAQ,QAAQ,QAAQ,OAAO,mBAAmB,OAAO;GAC3D,MAAM,qBACJ,QAAQ,QAAQ,QAAQ,sBACxB,mBAAmB,sBACnB;GACF,MAAM,2BACJ,QAAQ,QAAQ,QAAQ,4BACxB,mBAAmB,4BACnB;AAGF,OAAI,CAAC,iBAAiB,CAAC,QAAQ,MAC7B,QAAO,QAAQ,QAAQ;GAGzB,MAAM,mBACJ,QAAQ,MAAM,SAAS,KAAK,mBAC3B,QAAQ,MAAM,SAAS,KAAK,uBAC1B,QAAQ,MAA4B,gBAAgB,kBACnD;AACN,OAAI,CAAC,kBAAkB;IAErB,MAAM,YAAY,QAAQ,MAAM,SAAS;IACzC,MAAM,YACJ,QAAQ,MAAM,SAAS,KAAK,sBACxB,GAAG,UAAU,EAAE,EACZ,QAAQ,MAA4B,gBACjC,cACL,CAAC,CAAC,GACH;IAEN,MAAM,cAAc,CAAC,mBAAmB,EAAE,UAAU,6CAA6C,CAAC;AAElG,QAAI,6BAA6B,QAC/B,OAAM,IAAI,6BACR,GAAG,YAAY,uCAAuC,CAAC;aAEhD,6BAA6B,QACtC,QAAQ,KACN,CAAC,8CAA8C,EAAE,UAAU,gEAAgE,CAAC,CAC7H;AAEH,WAAO,QAAQ,QAAQ;GACxB;GAED,MAAM,gBACJ,QAAQ,MAAM,SAAS,UAAU,QAAQ,eAAe,IAAI;AAE9D,OAAI,gBAAgB,mBAClB,QAAO,QAAQ,QAAQ;;;;GAMzB,MAAM,cAAc,QAAQ,SAAS,GAAG,GAAG;AAC3C,OAAI,CAAC,YACH,QAAO,QAAQ,QAAQ;GAGzB,MAAM,wBACJ,OAAO,eAAe,YAAY,CAAC;AACrC,OAAI,MAAM,QAAQ,YAAY,QAAQ,EAAE;IACtC,MAAM,aAAa,IAAI,sBAAsB;KAC3C,GAAG;KACH,SAAS,CACP,GAAG,YAAY,QAAQ,MAAM,GAAG,GAAG,EACnC;MACE,GAAG,YAAY,QAAQ,GAAG,GAAG;MAC7B,eAAe;OACb,MAAM;OACN;MACD;KACF,CACF;IACF;AACD,WAAO,QAAQ;KACb,GAAG;KACH,UAAU,CAAC,GAAG,QAAQ,SAAS,MAAM,GAAG,GAAG,EAAE,UAAW;IACzD,EAAC;GACH,WAAU,OAAO,YAAY,YAAY,UAAU;IAClD,MAAM,aAAa,IAAI,sBAAsB;KAC3C,GAAG;KACH,SAAS,CACP;MACE,MAAM;MACN,MAAM,YAAY;MAClB,eAAe;OACb,MAAM;OACN;MACD;KACF,CACF;IACF;AACD,WAAO,QAAQ;KACb,GAAG;KACH,UAAU,CAAC,GAAG,QAAQ,SAAS,MAAM,GAAG,GAAG,EAAE,UAAW;IACzD,EAAC;GACH;AAED,SAAM,IAAI,6BACR;EAEH;CACF,EAAC;AACH"}
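For reference, the relocated caching middleware (now under `provider/anthropic/`) still works by appending a `cache_control` block to the final content part of the last message, as the diffed implementation above shows. A sketch of the resulting content part, assuming string content and the default 5-minute TTL:

```ts
// Shape produced by anthropicPromptCachingMiddleware for a string-content
// last message (per the wrapModelCall logic in the diff above):
const cachedContentPart = {
  type: "text",
  text: "…original message text…",
  cache_control: { type: "ephemeral", ttl: "5m" }, // ttl may also be "1h"
};
```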
@@ -0,0 +1,299 @@
1
+ const require_rolldown_runtime = require('../../../../_virtual/rolldown_runtime.cjs');
2
+ const require_chat_models_universal = require('../../../../chat_models/universal.cjs');
3
+ const require_middleware = require('../../../middleware.cjs');
4
+ const __langchain_core_messages = require_rolldown_runtime.__toESM(require("@langchain/core/messages"));
5
+
6
+ //#region src/agents/middleware/provider/openai/moderation.ts
7
+ /**
8
+ * Check if the model is an OpenAI model that supports moderation.
9
+ * @param model - The model to check.
10
+ * @returns Whether the model is an OpenAI model that supports moderation.
11
+ */
12
+ function isOpenAIModel(model) {
13
+ if (!model || typeof model !== "object" || model === null || !("client" in model) || !("_getClientOptions" in model) || typeof model._getClientOptions !== "function") return false;
14
+ /**
15
+ * client may not yet be initialized, so we need to check if the model has a _getClientOptions method.
16
+ */
17
+ model._getClientOptions();
18
+ return typeof model.client === "object" && model.client !== null && "moderations" in model.client && typeof model.client.moderations === "object" && model.client.moderations !== null && "create" in model.client.moderations && typeof model.client.moderations.create === "function";
19
+ }
20
+ /**
21
+ * Default template for violation messages.
22
+ */
23
+ const DEFAULT_VIOLATION_TEMPLATE = "I'm sorry, but I can't comply with that request. It was flagged for {categories}.";
24
+ /**
25
+ * Error raised when OpenAI flags content and `exitBehavior` is set to `"error"`.
26
+ */
27
+ var OpenAIModerationError = class extends Error {
28
+ content;
29
+ stage;
30
+ result;
31
+ originalMessage;
32
+ constructor({ content, stage, result, message }) {
33
+ super(message);
34
+ this.name = "OpenAIModerationError";
35
+ this.content = content;
36
+ this.stage = stage;
37
+ this.result = result;
38
+ this.originalMessage = message;
39
+ }
40
+ };
41
+ /**
42
+ * Middleware that moderates agent traffic using OpenAI's moderation endpoint.
43
+ *
44
+ * This middleware checks messages for content policy violations at different stages:
45
+ * - Input: User messages before they reach the model
46
+ * - Output: AI model responses
47
+ * - Tool results: Results returned from tool executions
48
+ *
49
+ * @param options - Configuration options for the middleware
50
+ * @param options.model - OpenAI model to use for moderation. Can be either a model name or a BaseChatModel instance.
51
+ * @param options.moderationModel - Moderation model to use.
52
+ * @param options.checkInput - Whether to check user input messages.
53
+ * @param options.checkOutput - Whether to check model output messages.
54
+ * @param options.checkToolResults - Whether to check tool result messages.
55
+ * @param options.exitBehavior - How to handle violations.
56
+ * @param options.violationMessage - Custom template for violation messages.
57
+ * @returns Middleware function that can be used to moderate agent traffic.
58
+ *
59
+ * @example Using model instance
60
+ * ```ts
61
+ * import { createAgent, openAIModerationMiddleware } from "langchain";
62
+ *
63
+ * const middleware = openAIModerationMiddleware({
64
+ * checkInput: true,
65
+ * checkOutput: true,
66
+ * exitBehavior: "end"
67
+ * });
68
+ *
69
+ * const agent = createAgent({
70
+ * model: "openai:gpt-4o",
71
+ * tools: [...],
72
+ * middleware: [middleware],
73
+ * });
74
+ * ```
75
+ *
76
+ * @example Using model name
77
+ * ```ts
78
+ * import { createAgent, openAIModerationMiddleware } from "langchain";
79
+ *
80
+ * const middleware = openAIModerationMiddleware({
81
+ * model: "gpt-4o-mini",
82
+ * checkInput: true,
83
+ * checkOutput: true,
84
+ * exitBehavior: "end"
85
+ * });
86
+ *
87
+ * const agent = createAgent({
88
+ * model: "openai:gpt-4o",
89
+ * tools: [...],
90
+ * middleware: [middleware],
91
+ * });
92
+ * ```
93
+ *
94
+ * @example Custom violation message
95
+ * ```ts
96
+ * const middleware = openAIModerationMiddleware({
97
+ * violationMessage: "Content flagged: {categories}. Scores: {category_scores}"
98
+ * });
99
+ * ```
100
+ */
101
+ function openAIModerationMiddleware(options) {
102
+ const { model, moderationModel = "omni-moderation-latest", checkInput = true, checkOutput = true, checkToolResults = false, exitBehavior = "end", violationMessage } = options;
103
+ let openaiModel;
104
+ const initModerationModel = async () => {
105
+ if (openaiModel) return openaiModel;
106
+ const resolvedModel = typeof model === "string" ? await require_chat_models_universal.initChatModel(model) : model;
107
+ /**
108
+ * Check if the model is an OpenAI model.
109
+ */
110
+ if (!resolvedModel.getName().includes("ChatOpenAI")) throw new Error(`Model must be an OpenAI model to use moderation middleware. Got: ${resolvedModel.getName()}`);
111
+ /**
112
+ * check if OpenAI model package supports moderation.
113
+ */
114
+ if (!isOpenAIModel(resolvedModel)) throw new Error("Model must support moderation to use moderation middleware.");
115
+ openaiModel = resolvedModel;
116
+ return openaiModel;
117
+ };
118
+ /**
119
+ * Extract text content from a message.
120
+ */
121
+ const extractText = (message) => {
122
+ if (message.content == null) return null;
123
+ const text = message.text;
124
+ return text || null;
125
+ };
126
+ /**
127
+ * Find the last index of a message type in the messages array.
128
+ */
129
+ const findLastIndex = (messages, messageType) => {
130
+ for (let idx = messages.length - 1; idx >= 0; idx--) if (messageType.isInstance(messages[idx])) return idx;
131
+ return null;
132
+ };
133
+ /**
134
+ * Format violation message from moderation result.
135
+ */
136
+ const formatViolationMessage = (content, result) => {
137
+ const categories = [];
138
+ const categoriesObj = result.categories;
139
+ for (const [name, flagged] of Object.entries(categoriesObj)) if (flagged) categories.push(name.replace(/_/g, " "));
140
+ const categoryLabel = categories.length > 0 ? categories.join(", ") : "OpenAI's safety policies";
141
+ const template = violationMessage || DEFAULT_VIOLATION_TEMPLATE;
142
+ const scoresJson = JSON.stringify(result.category_scores, null, 2);
143
+ try {
144
+ return template.replace("{categories}", categoryLabel).replace("{category_scores}", scoresJson).replace("{original_content}", content);
145
+ } catch {
146
+ return template;
147
+ }
148
+ };
149
+ function moderateContent(input, params) {
150
+ const clientOptions = openaiModel?._getClientOptions?.();
151
+ const moderationModel$1 = params?.model ?? "omni-moderation-latest";
152
+ const moderationRequest = {
153
+ input,
154
+ model: moderationModel$1
155
+ };
156
+ return openaiModel.client.moderations.create(moderationRequest, clientOptions);
157
+ }
158
+ /**
159
+ * Apply violation handling based on exit behavior.
160
+ */
161
+ const applyViolation = (messages, index, stage, content, result) => {
162
+ const violationText = formatViolationMessage(content, result);
163
+ if (exitBehavior === "error") throw new OpenAIModerationError({
164
+ content,
165
+ stage,
166
+ result,
167
+ message: violationText
168
+ });
169
+ if (exitBehavior === "end") return {
170
+ jumpTo: "end",
171
+ messages: [new __langchain_core_messages.AIMessage({ content: violationText })]
172
+ };
173
+ if (index == null) return void 0;
174
+ /**
175
+ * Replace the original message with a new message that contains the violation text.
176
+ */
177
+ const newMessages = [...messages];
178
+ const original = newMessages[index];
179
+ const MessageConstructor = Object.getPrototypeOf(original).constructor;
180
+ newMessages[index] = new MessageConstructor({
181
+ ...original,
182
+ content: violationText
183
+ });
184
+ return { messages: newMessages };
185
+ };
186
+ /**
187
+ * Moderate user input messages.
188
+ */
189
+ const moderateUserMessage = async (messages) => {
190
+ const idx = findLastIndex(messages, __langchain_core_messages.HumanMessage);
191
+ if (idx == null) return null;
192
+ const message = messages[idx];
193
+ const text = extractText(message);
194
+ if (!text) return null;
195
+ await initModerationModel();
196
+ const response = await moderateContent(text, { model: moderationModel });
197
+ const flaggedResult = response.results.find((result) => result.flagged);
198
+ if (!flaggedResult) return null;
199
+ return applyViolation(messages, idx, "input", text, flaggedResult);
200
+ };
201
+ /**
202
+ * Moderate tool result messages.
203
+ */
204
+ const moderateToolMessages = async (messages) => {
205
+ const lastAiIdx = findLastIndex(messages, __langchain_core_messages.AIMessage);
206
+ if (lastAiIdx == null) return null;
207
+ const working = [...messages];
208
+ let modified = false;
209
+ for (let idx = lastAiIdx + 1; idx < working.length; idx++) {
210
+ const msg = working[idx];
211
+ if (!__langchain_core_messages.ToolMessage.isInstance(msg)) continue;
212
+ const text = extractText(msg);
213
+ if (!text) continue;
214
+ await initModerationModel();
215
+ const response = await moderateContent(text, { model: moderationModel });
216
+ const flaggedResult = response.results.find((result) => result.flagged);
217
+ if (!flaggedResult) continue;
218
+ const action = applyViolation(working, idx, "tool", text, flaggedResult);
219
+ if (action) {
220
+ if ("jumpTo" in action) return action;
221
+ if ("messages" in action) {
222
+ working.splice(0, working.length, ...action.messages);
223
+ modified = true;
224
+ }
225
+ }
226
+ }
227
+ if (modified) return { messages: working };
228
+ return null;
229
+ };
230
+ /**
231
+ * Moderate model output messages.
232
+ */
233
+ const moderateOutput = async (messages) => {
234
+ const lastAiIdx = findLastIndex(messages, __langchain_core_messages.AIMessage);
235
+ if (lastAiIdx == null) return null;
236
+ const aiMessage = messages[lastAiIdx];
237
+ const text = extractText(aiMessage);
238
+ if (!text) return null;
239
+ await initModerationModel();
240
+ const response = await moderateContent(text, { model: moderationModel });
241
+ const flaggedResult = response.results.find((result) => result.flagged);
242
+ if (!flaggedResult) return null;
243
+ return applyViolation(messages, lastAiIdx, "output", text, flaggedResult);
244
+ };
245
+ /**
246
+ * Moderate inputs (user messages and tool results) before model call.
247
+ */
248
+ const moderateInputs = async (messages) => {
249
+ const working = [...messages];
250
+ let modified = false;
251
+ if (checkToolResults) {
252
+ const action = await moderateToolMessages(working);
253
+ if (action) {
254
+ if ("jumpTo" in action) return action;
255
+ if ("messages" in action) {
256
+ working.splice(0, working.length, ...action.messages);
257
+ modified = true;
258
+ }
259
+ }
260
+ }
261
+ if (checkInput) {
262
+ const action = await moderateUserMessage(working);
263
+ if (action) {
264
+ if ("jumpTo" in action) return action;
265
+ if ("messages" in action) {
266
+ working.splice(0, working.length, ...action.messages);
267
+ modified = true;
268
+ }
269
+ }
270
+ }
271
+ if (modified) return { messages: working };
272
+ return null;
273
+ };
274
+ return require_middleware.createMiddleware({
275
+ name: "OpenAIModerationMiddleware",
276
+ beforeModel: {
277
+ hook: async (state) => {
278
+ if (!checkInput && !checkToolResults) return void 0;
279
+ const messages = state.messages || [];
280
+ if (messages.length === 0) return void 0;
281
+ return await moderateInputs(messages) ?? void 0;
282
+ },
283
+ canJumpTo: ["end"]
284
+ },
285
+ afterModel: {
286
+ hook: async (state) => {
287
+ if (!checkOutput) return void 0;
288
+ const messages = state.messages || [];
289
+ if (messages.length === 0) return void 0;
290
+ return await moderateOutput(messages) ?? void 0;
291
+ },
292
+ canJumpTo: ["end"]
293
+ }
294
+ });
295
+ }
296
+
297
+ //#endregion
298
+ exports.openAIModerationMiddleware = openAIModerationMiddleware;
299
+ //# sourceMappingURL=moderation.cjs.map
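The compiled hooks above funnel every flagged result through `applyViolation`, which maps the configured `exitBehavior` onto one of three outcomes: throw an `OpenAIModerationError`, jump to `end` with a synthetic `AIMessage`, or rewrite the offending message in place. A minimal sketch of that dispatch, using illustrative `Violation` and `Action` types rather than the package's internal `MiddlewareResult` shape:

```ts
// Illustrative stand-ins; the real middleware returns MiddlewareResult objects.
type ExitBehavior = "error" | "end" | "replace";

interface Violation {
  text: string; // formatted violation message
  index: number | null; // position of the flagged message, if known
}

type Action =
  | { kind: "throw"; message: string } // exitBehavior: "error"
  | { kind: "jump"; to: "end"; reply: string } // exitBehavior: "end"
  | { kind: "replace"; index: number; text: string } // exitBehavior: "replace"
  | { kind: "noop" }; // "replace" requested but no message index known

function resolveViolation(behavior: ExitBehavior, v: Violation): Action {
  if (behavior === "error") return { kind: "throw", message: v.text };
  if (behavior === "end") return { kind: "jump", to: "end", reply: v.text };
  if (v.index == null) return { kind: "noop" };
  return { kind: "replace", index: v.index, text: v.text };
}
```

Note the ordering in `moderateInputs` above: tool results are screened before the latest user message, and either check can short-circuit the turn by returning the `jumpTo: "end"` action.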
@@ -0,0 +1 @@
1
+ {"version":3,"file":"moderation.cjs","names":["model: unknown","options: OpenAIModerationMiddlewareOptions","openaiModel: OpenAIModel | undefined","initChatModel","message: BaseMessage","messages: BaseMessage[]","messageType: typeof AIMessage | typeof HumanMessage | typeof ToolMessage","content: string","result: ModerationResult","categories: string[]","input: string | string[]","params?: { model?: ModerationModel; options?: unknown }","moderationModel","index: number | null","stage: ViolationStage","AIMessage","HumanMessage","ToolMessage","createMiddleware"],"sources":["../../../../../src/agents/middleware/provider/openai/moderation.ts"],"sourcesContent":["import type { BaseMessage } from \"@langchain/core/messages\";\nimport { AIMessage, HumanMessage, ToolMessage } from \"@langchain/core/messages\";\nimport type { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { BaseLanguageModel } from \"@langchain/core/language_models/base\";\n\nimport { initChatModel } from \"../../../../chat_models/universal.js\";\nimport { createMiddleware } from \"../../../middleware.js\";\nimport type { MiddlewareResult, AgentMiddleware } from \"../../types.js\";\nimport type { AgentBuiltInState } from \"../../../runtime.js\";\n\n/**\n * OpenAI model interface.\n */\ninterface OpenAIModel extends BaseLanguageModel {\n getName: () => string;\n _getClientOptions: () => unknown;\n client: {\n moderations: {\n create: (\n input: {\n input: string | string[];\n model: string;\n },\n options?: unknown\n ) => Promise<ModerationResponse>;\n };\n };\n}\n\n/**\n * Check if the model is an OpenAI model that supports moderation.\n * @param model - The model to check.\n * @returns Whether the model is an OpenAI model that supports moderation.\n */\nfunction isOpenAIModel(model: unknown): model is OpenAIModel {\n if (\n !model ||\n typeof model !== \"object\" ||\n model === null ||\n !(\"client\" in model) ||\n !(\"_getClientOptions\" in model) ||\n typeof model._getClientOptions !== \"function\"\n ) {\n return false;\n }\n\n /**\n * client may not yet be initialized, so we need to check if the model has a _getClientOptions method.\n */\n model._getClientOptions();\n return (\n typeof model.client === \"object\" &&\n model.client !== null &&\n \"moderations\" in model.client &&\n typeof model.client.moderations === \"object\" &&\n model.client.moderations !== null &&\n \"create\" in model.client.moderations &&\n typeof model.client.moderations.create === \"function\"\n );\n}\n\n/**\n * Stage where a violation occurred.\n */\nexport type ViolationStage = \"input\" | \"output\" | \"tool\";\n\n/**\n * Default template for violation messages.\n */\nconst DEFAULT_VIOLATION_TEMPLATE =\n \"I'm sorry, but I can't comply with that request. 
It was flagged for {categories}.\";\n\n/**\n * Result of moderation.\n * @see https://platform.openai.com/docs/api-reference/moderations/object\n */\ninterface ModerationResult {\n flagged: boolean;\n categories: Record<string, boolean>;\n category_scores: Record<string, number>;\n category_applied_input_types: Record<string, string[]>;\n}\n\n/**\n * Moderation response.\n * @see https://platform.openai.com/docs/api-reference/moderations/create\n */\ninterface ModerationResponse {\n id: string;\n model: string;\n results: ModerationResult[];\n}\n\ntype ModerationModel =\n | \"omni-moderation-latest\"\n | \"omni-moderation-2024-09-26\"\n | \"text-moderation-latest\"\n | \"text-moderation-stable\";\n\n/**\n * Error raised when OpenAI flags content and `exitBehavior` is set to `\"error\"`.\n */\nexport class OpenAIModerationError extends Error {\n content: string;\n stage: ViolationStage;\n result: ModerationResult;\n originalMessage: string;\n\n constructor({\n content,\n stage,\n result,\n message,\n }: {\n content: string;\n stage: ViolationStage;\n result: ModerationResult;\n message: string;\n }) {\n super(message);\n this.name = \"OpenAIModerationError\";\n this.content = content;\n this.stage = stage;\n this.result = result;\n this.originalMessage = message;\n }\n}\n\n/**\n * Options for configuring the OpenAI Moderation middleware.\n */\nexport interface OpenAIModerationMiddlewareOptions {\n /**\n * OpenAI model to use for moderation. Can be either a model name or a BaseChatModel instance.\n * @example\n * ```ts\n * const model = new ChatOpenAI({ model: \"gpt-4o-mini\" });\n * const middleware = openAIModerationMiddleware({ model });\n * const agent = createAgent({\n * model,\n * middleware: [middleware],\n * });\n * ```\n * @example\n * ```ts\n * const middleware = openAIModerationMiddleware({ model: \"gpt-4o-mini\" });\n * const agent = createAgent({\n * model: \"gpt-5\",\n * middleware: [middleware],\n * });\n * ```\n */\n model: string | BaseChatModel;\n\n /**\n * Moderation model to use.\n * @default \"omni-moderation-latest\"\n */\n moderationModel?: ModerationModel;\n\n /**\n * Whether to check user input messages.\n * @default true\n */\n checkInput?: boolean;\n\n /**\n * Whether to check model output messages.\n * @default true\n */\n checkOutput?: boolean;\n\n /**\n * Whether to check tool result messages.\n * @default false\n */\n checkToolResults?: boolean;\n\n /**\n * How to handle violations.\n * - `\"error\"`: Throw an error when content is flagged\n * - `\"end\"`: End the agent execution and return a violation message\n * - `\"replace\"`: Replace the flagged content with a violation message\n * @default \"end\"\n */\n exitBehavior?: \"error\" | \"end\" | \"replace\";\n\n /**\n * Custom template for violation messages.\n * Available placeholders: `{categories}`, `{category_scores}`, `{original_content}`\n */\n violationMessage?: string;\n}\n\n/**\n * Middleware that moderates agent traffic using OpenAI's moderation endpoint.\n *\n * This middleware checks messages for content policy violations at different stages:\n * - Input: User messages before they reach the model\n * - Output: AI model responses\n * - Tool results: Results returned from tool executions\n *\n * @param options - Configuration options for the middleware\n * @param options.model - OpenAI model to use for moderation. 
Can be either a model name or a BaseChatModel instance.\n * @param options.moderationModel - Moderation model to use.\n * @param options.checkInput - Whether to check user input messages.\n * @param options.checkOutput - Whether to check model output messages.\n * @param options.checkToolResults - Whether to check tool result messages.\n * @param options.exitBehavior - How to handle violations.\n * @param options.violationMessage - Custom template for violation messages.\n * @returns Middleware function that can be used to moderate agent traffic.\n *\n * @example Using model instance\n * ```ts\n * import { createAgent, openAIModerationMiddleware } from \"langchain\";\n *\n * const middleware = openAIModerationMiddleware({\n * checkInput: true,\n * checkOutput: true,\n * exitBehavior: \"end\"\n * });\n *\n * const agent = createAgent({\n * model: \"openai:gpt-4o\",\n * tools: [...],\n * middleware: [middleware],\n * });\n * ```\n *\n * @example Using model name\n * ```ts\n * import { createAgent, openAIModerationMiddleware } from \"langchain\";\n *\n * const middleware = openAIModerationMiddleware({\n * model: \"gpt-4o-mini\",\n * checkInput: true,\n * checkOutput: true,\n * exitBehavior: \"end\"\n * });\n *\n * const agent = createAgent({\n * model: \"openai:gpt-4o\",\n * tools: [...],\n * middleware: [middleware],\n * });\n * ```\n *\n * @example Custom violation message\n * ```ts\n * const middleware = openAIModerationMiddleware({\n * violationMessage: \"Content flagged: {categories}. Scores: {category_scores}\"\n * });\n * ```\n */\nexport function openAIModerationMiddleware(\n options: OpenAIModerationMiddlewareOptions\n): AgentMiddleware {\n const {\n model,\n moderationModel = \"omni-moderation-latest\",\n checkInput = true,\n checkOutput = true,\n checkToolResults = false,\n exitBehavior = \"end\",\n violationMessage,\n } = options;\n\n let openaiModel: OpenAIModel | undefined;\n const initModerationModel = async (): Promise<OpenAIModel> => {\n if (openaiModel) {\n return openaiModel;\n }\n\n const resolvedModel =\n typeof model === \"string\" ? await initChatModel(model) : model;\n\n /**\n * Check if the model is an OpenAI model.\n */\n if (!resolvedModel.getName().includes(\"ChatOpenAI\")) {\n throw new Error(\n `Model must be an OpenAI model to use moderation middleware. 
Got: ${resolvedModel.getName()}`\n );\n }\n\n /**\n * check if OpenAI model package supports moderation.\n */\n if (!isOpenAIModel(resolvedModel)) {\n throw new Error(\n \"Model must support moderation to use moderation middleware.\"\n );\n }\n\n openaiModel = resolvedModel as unknown as OpenAIModel;\n return openaiModel;\n };\n\n /**\n * Extract text content from a message.\n */\n const extractText = (message: BaseMessage): string | null => {\n if (message.content == null) {\n return null;\n }\n const text = message.text;\n return text || null;\n };\n\n /**\n * Find the last index of a message type in the messages array.\n */\n const findLastIndex = (\n messages: BaseMessage[],\n messageType: typeof AIMessage | typeof HumanMessage | typeof ToolMessage\n ): number | null => {\n for (let idx = messages.length - 1; idx >= 0; idx--) {\n if (messageType.isInstance(messages[idx])) {\n return idx;\n }\n }\n return null;\n };\n\n /**\n * Format violation message from moderation result.\n */\n const formatViolationMessage = (\n content: string,\n result: ModerationResult\n ): string => {\n // Convert categories to array of flagged category names\n const categories: string[] = [];\n const categoriesObj = result.categories as unknown as Record<\n string,\n boolean\n >;\n for (const [name, flagged] of Object.entries(categoriesObj)) {\n if (flagged) {\n categories.push(name.replace(/_/g, \" \"));\n }\n }\n\n const categoryLabel =\n categories.length > 0\n ? categories.join(\", \")\n : \"OpenAI's safety policies\";\n\n const template = violationMessage || DEFAULT_VIOLATION_TEMPLATE;\n const scoresJson = JSON.stringify(\n result.category_scores as unknown as Record<string, number>,\n null,\n 2\n );\n\n try {\n return template\n .replace(\"{categories}\", categoryLabel)\n .replace(\"{category_scores}\", scoresJson)\n .replace(\"{original_content}\", content);\n } catch {\n return template;\n }\n };\n\n function moderateContent(\n input: string | string[],\n params?: { model?: ModerationModel; options?: unknown }\n ): Promise<ModerationResponse> {\n const clientOptions = openaiModel?._getClientOptions?.();\n const moderationModel = params?.model ?? 
\"omni-moderation-latest\";\n const moderationRequest = {\n input,\n model: moderationModel,\n };\n return openaiModel!.client.moderations.create(\n moderationRequest,\n clientOptions\n );\n }\n\n /**\n * Apply violation handling based on exit behavior.\n */\n const applyViolation = (\n messages: BaseMessage[],\n index: number | null,\n stage: ViolationStage,\n content: string,\n result: ModerationResult\n ): MiddlewareResult<Partial<AgentBuiltInState>> | undefined => {\n const violationText = formatViolationMessage(content, result);\n\n if (exitBehavior === \"error\") {\n throw new OpenAIModerationError({\n content,\n stage,\n result,\n message: violationText,\n });\n }\n\n if (exitBehavior === \"end\") {\n return {\n jumpTo: \"end\",\n messages: [new AIMessage({ content: violationText })],\n };\n }\n\n if (index == null) {\n return undefined;\n }\n\n /**\n * Replace the original message with a new message that contains the violation text.\n */\n const newMessages = [...messages];\n const original = newMessages[index];\n const MessageConstructor = Object.getPrototypeOf(original).constructor;\n newMessages[index] = new MessageConstructor({\n ...original,\n content: violationText,\n });\n\n return { messages: newMessages };\n };\n\n /**\n * Moderate user input messages.\n */\n const moderateUserMessage = async (\n messages: BaseMessage[]\n ): Promise<MiddlewareResult<Partial<AgentBuiltInState>> | null> => {\n const idx = findLastIndex(messages, HumanMessage);\n if (idx == null) {\n return null;\n }\n\n const message = messages[idx];\n const text = extractText(message);\n if (!text) {\n return null;\n }\n\n await initModerationModel();\n const response = await moderateContent(text, {\n model: moderationModel,\n });\n\n const flaggedResult = response.results.find((result) => result.flagged);\n if (!flaggedResult) {\n return null;\n }\n\n return applyViolation(messages, idx, \"input\", text, flaggedResult);\n };\n\n /**\n * Moderate tool result messages.\n */\n const moderateToolMessages = async (\n messages: BaseMessage[]\n ): Promise<MiddlewareResult<Partial<AgentBuiltInState>> | null> => {\n const lastAiIdx = findLastIndex(messages, AIMessage);\n if (lastAiIdx == null) {\n return null;\n }\n\n const working = [...messages];\n let modified = false;\n\n for (let idx = lastAiIdx + 1; idx < working.length; idx++) {\n const msg = working[idx];\n if (!ToolMessage.isInstance(msg)) {\n continue;\n }\n\n const text = extractText(msg);\n if (!text) {\n continue;\n }\n\n await initModerationModel();\n const response = await moderateContent(text, {\n model: moderationModel,\n });\n const flaggedResult = response.results.find((result) => result.flagged);\n if (!flaggedResult) {\n continue;\n }\n\n const action = applyViolation(working, idx, \"tool\", text, flaggedResult);\n if (action) {\n if (\"jumpTo\" in action) {\n return action;\n }\n if (\"messages\" in action) {\n working.splice(\n 0,\n working.length,\n ...(action.messages as BaseMessage[])\n );\n modified = true;\n }\n }\n }\n\n if (modified) {\n return { messages: working };\n }\n\n return null;\n };\n\n /**\n * Moderate model output messages.\n */\n const moderateOutput = async (\n messages: BaseMessage[]\n ): Promise<MiddlewareResult<Partial<AgentBuiltInState>> | null> => {\n const lastAiIdx = findLastIndex(messages, AIMessage);\n if (lastAiIdx == null) {\n return null;\n }\n\n const aiMessage = messages[lastAiIdx];\n const text = extractText(aiMessage);\n if (!text) {\n return null;\n }\n\n await initModerationModel();\n const response = 
await moderateContent(text, {\n model: moderationModel,\n });\n const flaggedResult = response.results.find((result) => result.flagged);\n if (!flaggedResult) {\n return null;\n }\n\n return applyViolation(messages, lastAiIdx, \"output\", text, flaggedResult);\n };\n\n /**\n * Moderate inputs (user messages and tool results) before model call.\n */\n const moderateInputs = async (\n messages: BaseMessage[]\n ): Promise<MiddlewareResult<Partial<AgentBuiltInState>> | null> => {\n const working = [...messages];\n let modified = false;\n\n if (checkToolResults) {\n const action = await moderateToolMessages(working);\n if (action) {\n if (\"jumpTo\" in action) {\n return action;\n }\n if (\"messages\" in action) {\n working.splice(\n 0,\n working.length,\n ...(action.messages as BaseMessage[])\n );\n modified = true;\n }\n }\n }\n\n if (checkInput) {\n const action = await moderateUserMessage(working);\n if (action) {\n if (\"jumpTo\" in action) {\n return action;\n }\n if (\"messages\" in action) {\n working.splice(\n 0,\n working.length,\n ...(action.messages as BaseMessage[])\n );\n modified = true;\n }\n }\n }\n\n if (modified) {\n return { messages: working };\n }\n\n return null;\n };\n\n return createMiddleware({\n name: \"OpenAIModerationMiddleware\",\n beforeModel: {\n hook: async (\n state\n ): Promise<MiddlewareResult<Partial<AgentBuiltInState>> | undefined> => {\n if (!checkInput && !checkToolResults) {\n return undefined;\n }\n\n const messages = state.messages || [];\n if (messages.length === 0) {\n return undefined;\n }\n\n return (await moderateInputs(messages)) ?? undefined;\n },\n canJumpTo: [\"end\"],\n },\n afterModel: {\n hook: async (\n state\n ): Promise<MiddlewareResult<Partial<AgentBuiltInState>> | undefined> => {\n if (!checkOutput) {\n return undefined;\n }\n\n const messages = state.messages || [];\n if (messages.length === 0) {\n return undefined;\n }\n\n return (await moderateOutput(messages)) ?? 
undefined;\n },\n canJumpTo: [\"end\"],\n },\n });\n}\n"],"mappings":";;;;;;;;;;;AAkCA,SAAS,cAAcA,OAAsC;AAC3D,KACE,CAAC,SACD,OAAO,UAAU,YACjB,UAAU,QACV,EAAE,YAAY,UACd,EAAE,uBAAuB,UACzB,OAAO,MAAM,sBAAsB,WAEnC,QAAO;;;;CAMT,MAAM,mBAAmB;AACzB,QACE,OAAO,MAAM,WAAW,YACxB,MAAM,WAAW,QACjB,iBAAiB,MAAM,UACvB,OAAO,MAAM,OAAO,gBAAgB,YACpC,MAAM,OAAO,gBAAgB,QAC7B,YAAY,MAAM,OAAO,eACzB,OAAO,MAAM,OAAO,YAAY,WAAW;AAE9C;;;;AAUD,MAAM,6BACJ;;;;AAgCF,IAAa,wBAAb,cAA2C,MAAM;CAC/C;CACA;CACA;CACA;CAEA,YAAY,EACV,SACA,OACA,QACA,SAMD,EAAE;EACD,MAAM,QAAQ;EACd,KAAK,OAAO;EACZ,KAAK,UAAU;EACf,KAAK,QAAQ;EACb,KAAK,SAAS;EACd,KAAK,kBAAkB;CACxB;AACF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAgID,SAAgB,2BACdC,SACiB;CACjB,MAAM,EACJ,OACA,kBAAkB,0BAClB,aAAa,MACb,cAAc,MACd,mBAAmB,OACnB,eAAe,OACf,kBACD,GAAG;CAEJ,IAAIC;CACJ,MAAM,sBAAsB,YAAkC;AAC5D,MAAI,YACF,QAAO;EAGT,MAAM,gBACJ,OAAO,UAAU,WAAW,MAAMC,4CAAc,MAAM,GAAG;;;;AAK3D,MAAI,CAAC,cAAc,SAAS,CAAC,SAAS,aAAa,CACjD,OAAM,IAAI,MACR,CAAC,iEAAiE,EAAE,cAAc,SAAS,EAAE;;;;AAOjG,MAAI,CAAC,cAAc,cAAc,CAC/B,OAAM,IAAI,MACR;EAIJ,cAAc;AACd,SAAO;CACR;;;;CAKD,MAAM,cAAc,CAACC,YAAwC;AAC3D,MAAI,QAAQ,WAAW,KACrB,QAAO;EAET,MAAM,OAAO,QAAQ;AACrB,SAAO,QAAQ;CAChB;;;;CAKD,MAAM,gBAAgB,CACpBC,UACAC,gBACkB;AAClB,OAAK,IAAI,MAAM,SAAS,SAAS,GAAG,OAAO,GAAG,MAC5C,KAAI,YAAY,WAAW,SAAS,KAAK,CACvC,QAAO;AAGX,SAAO;CACR;;;;CAKD,MAAM,yBAAyB,CAC7BC,SACAC,WACW;EAEX,MAAMC,aAAuB,CAAE;EAC/B,MAAM,gBAAgB,OAAO;AAI7B,OAAK,MAAM,CAAC,MAAM,QAAQ,IAAI,OAAO,QAAQ,cAAc,CACzD,KAAI,SACF,WAAW,KAAK,KAAK,QAAQ,MAAM,IAAI,CAAC;EAI5C,MAAM,gBACJ,WAAW,SAAS,IAChB,WAAW,KAAK,KAAK,GACrB;EAEN,MAAM,WAAW,oBAAoB;EACrC,MAAM,aAAa,KAAK,UACtB,OAAO,iBACP,MACA,EACD;AAED,MAAI;AACF,UAAO,SACJ,QAAQ,gBAAgB,cAAc,CACtC,QAAQ,qBAAqB,WAAW,CACxC,QAAQ,sBAAsB,QAAQ;EAC1C,QAAO;AACN,UAAO;EACR;CACF;CAED,SAAS,gBACPC,OACAC,QAC6B;EAC7B,MAAM,gBAAgB,aAAa,qBAAqB;EACxD,MAAMC,oBAAkB,QAAQ,SAAS;EACzC,MAAM,oBAAoB;GACxB;GACA,OAAOA;EACR;AACD,SAAO,YAAa,OAAO,YAAY,OACrC,mBACA,cACD;CACF;;;;CAKD,MAAM,iBAAiB,CACrBP,UACAQ,OACAC,OACAP,SACAC,WAC6D;EAC7D,MAAM,gBAAgB,uBAAuB,SAAS,OAAO;AAE7D,MAAI,iBAAiB,QACnB,OAAM,IAAI,sBAAsB;GAC9B;GACA;GACA;GACA,SAAS;EACV;AAGH,MAAI,iBAAiB,MACnB,QAAO;GACL,QAAQ;GACR,UAAU,CAAC,IAAIO,oCAAU,EAAE,SAAS,cAAe,EAAE;EACtD;AAGH,MAAI,SAAS,KACX,QAAO;;;;EAMT,MAAM,cAAc,CAAC,GAAG,QAAS;EACjC,MAAM,WAAW,YAAY;EAC7B,MAAM,qBAAqB,OAAO,eAAe,SAAS,CAAC;EAC3D,YAAY,SAAS,IAAI,mBAAmB;GAC1C,GAAG;GACH,SAAS;EACV;AAED,SAAO,EAAE,UAAU,YAAa;CACjC;;;;CAKD,MAAM,sBAAsB,OAC1BV,aACiE;EACjE,MAAM,MAAM,cAAc,UAAUW,uCAAa;AACjD,MAAI,OAAO,KACT,QAAO;EAGT,MAAM,UAAU,SAAS;EACzB,MAAM,OAAO,YAAY,QAAQ;AACjC,MAAI,CAAC,KACH,QAAO;EAGT,MAAM,qBAAqB;EAC3B,MAAM,WAAW,MAAM,gBAAgB,MAAM,EAC3C,OAAO,gBACR,EAAC;EAEF,MAAM,gBAAgB,SAAS,QAAQ,KAAK,CAAC,WAAW,OAAO,QAAQ;AACvE,MAAI,CAAC,cACH,QAAO;AAGT,SAAO,eAAe,UAAU,KAAK,SAAS,MAAM,cAAc;CACnE;;;;CAKD,MAAM,uBAAuB,OAC3BX,aACiE;EACjE,MAAM,YAAY,cAAc,UAAUU,oCAAU;AACpD,MAAI,aAAa,KACf,QAAO;EAGT,MAAM,UAAU,CAAC,GAAG,QAAS;EAC7B,IAAI,WAAW;AAEf,OAAK,IAAI,MAAM,YAAY,GAAG,MAAM,QAAQ,QAAQ,OAAO;GACzD,MAAM,MAAM,QAAQ;AACpB,OAAI,CAACE,sCAAY,WAAW,IAAI,CAC9B;GAGF,MAAM,OAAO,YAAY,IAAI;AAC7B,OAAI,CAAC,KACH;GAGF,MAAM,qBAAqB;GAC3B,MAAM,WAAW,MAAM,gBAAgB,MAAM,EAC3C,OAAO,gBACR,EAAC;GACF,MAAM,gBAAgB,SAAS,QAAQ,KAAK,CAAC,WAAW,OAAO,QAAQ;AACvE,OAAI,CAAC,cACH;GAGF,MAAM,SAAS,eAAe,SAAS,KAAK,QAAQ,MAAM,cAAc;AACxE,OAAI,QAAQ;AACV,QAAI,YAAY,OACd,QAAO;AAET,QAAI,cAAc,QAAQ;KACxB,QAAQ,OACN,GACA,QAAQ,QACR,GAAI,OAAO,SACZ;KACD,WAAW;IACZ;GACF;EACF;AAED,MAAI,SACF,QAAO,EAAE,UAAU,QAAS;AAG9B,SAAO;CACR;;;;CAKD,MAAM,iBAAiB,OACrBZ,aACiE;EACjE,MAAM,YAAY,cAAc,UAAUU,oCAAU;AACpD,MAAI,aAAa,KACf,QAAO;EAGT,MAAM,YAAY,SAAS;EAC3B,MAAM,OAAO,YAAY,UAAU;AACnC,MAAI,CAA
C,KACH,QAAO;EAGT,MAAM,qBAAqB;EAC3B,MAAM,WAAW,MAAM,gBAAgB,MAAM,EAC3C,OAAO,gBACR,EAAC;EACF,MAAM,gBAAgB,SAAS,QAAQ,KAAK,CAAC,WAAW,OAAO,QAAQ;AACvE,MAAI,CAAC,cACH,QAAO;AAGT,SAAO,eAAe,UAAU,WAAW,UAAU,MAAM,cAAc;CAC1E;;;;CAKD,MAAM,iBAAiB,OACrBV,aACiE;EACjE,MAAM,UAAU,CAAC,GAAG,QAAS;EAC7B,IAAI,WAAW;AAEf,MAAI,kBAAkB;GACpB,MAAM,SAAS,MAAM,qBAAqB,QAAQ;AAClD,OAAI,QAAQ;AACV,QAAI,YAAY,OACd,QAAO;AAET,QAAI,cAAc,QAAQ;KACxB,QAAQ,OACN,GACA,QAAQ,QACR,GAAI,OAAO,SACZ;KACD,WAAW;IACZ;GACF;EACF;AAED,MAAI,YAAY;GACd,MAAM,SAAS,MAAM,oBAAoB,QAAQ;AACjD,OAAI,QAAQ;AACV,QAAI,YAAY,OACd,QAAO;AAET,QAAI,cAAc,QAAQ;KACxB,QAAQ,OACN,GACA,QAAQ,QACR,GAAI,OAAO,SACZ;KACD,WAAW;IACZ;GACF;EACF;AAED,MAAI,SACF,QAAO,EAAE,UAAU,QAAS;AAG9B,SAAO;CACR;AAED,QAAOa,oCAAiB;EACtB,MAAM;EACN,aAAa;GACX,MAAM,OACJ,UACsE;AACtE,QAAI,CAAC,cAAc,CAAC,iBAClB,QAAO;IAGT,MAAM,WAAW,MAAM,YAAY,CAAE;AACrC,QAAI,SAAS,WAAW,EACtB,QAAO;AAGT,WAAQ,MAAM,eAAe,SAAS,IAAK;GAC5C;GACD,WAAW,CAAC,KAAM;EACnB;EACD,YAAY;GACV,MAAM,OACJ,UACsE;AACtE,QAAI,CAAC,YACH,QAAO;IAGT,MAAM,WAAW,MAAM,YAAY,CAAE;AACrC,QAAI,SAAS,WAAW,EACtB,QAAO;AAGT,WAAQ,MAAM,eAAe,SAAS,IAAK;GAC5C;GACD,WAAW,CAAC,KAAM;EACnB;CACF,EAAC;AACH"}
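The embedded source in the map shows that `moderateContent` is a thin wrapper over the `moderations.create` method on the chat model's OpenAI client, reusing that client's options rather than constructing a new one. For orientation, roughly the same call made directly with the official `openai` SDK (a standalone sketch, not how the middleware itself obtains its client):

```ts
import OpenAI from "openai";

// Assumes OPENAI_API_KEY is set in the environment.
const client = new OpenAI();

async function isFlagged(text: string): Promise<boolean> {
  const response = await client.moderations.create({
    model: "omni-moderation-latest", // the middleware's default moderationModel
    input: text,
  });
  // Same check the middleware applies: any result with flagged === true.
  return response.results.some((result) => result.flagged);
}
```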
@@ -0,0 +1,133 @@
1
+ import { AgentMiddleware } from "../../types.cjs";
2
+ import { BaseChatModel } from "@langchain/core/language_models/chat_models";
3
+
4
+ //#region src/agents/middleware/provider/openai/moderation.d.ts
5
+
6
+ type ModerationModel = "omni-moderation-latest" | "omni-moderation-2024-09-26" | "text-moderation-latest" | "text-moderation-stable";
7
+ /**
8
+ * Error raised when OpenAI flags content and `exitBehavior` is set to `"error"`.
9
+ */
10
+
11
+ /**
12
+ * Options for configuring the OpenAI Moderation middleware.
13
+ */
14
+ interface OpenAIModerationMiddlewareOptions {
15
+ /**
16
+ * OpenAI model to use for moderation. Can be either a model name or a BaseChatModel instance.
17
+ * @example
18
+ * ```ts
19
+ * const model = new ChatOpenAI({ model: "gpt-4o-mini" });
20
+ * const middleware = openAIModerationMiddleware({ model });
21
+ * const agent = createAgent({
22
+ * model,
23
+ * middleware: [middleware],
24
+ * });
25
+ * ```
26
+ * @example
27
+ * ```ts
28
+ * const middleware = openAIModerationMiddleware({ model: "gpt-4o-mini" });
29
+ * const agent = createAgent({
30
+ * model: "gpt-5",
31
+ * middleware: [middleware],
32
+ * });
33
+ * ```
34
+ */
35
+ model: string | BaseChatModel;
36
+ /**
37
+ * Moderation model to use.
38
+ * @default "omni-moderation-latest"
39
+ */
40
+ moderationModel?: ModerationModel;
41
+ /**
42
+ * Whether to check user input messages.
43
+ * @default true
44
+ */
45
+ checkInput?: boolean;
46
+ /**
47
+ * Whether to check model output messages.
48
+ * @default true
49
+ */
50
+ checkOutput?: boolean;
51
+ /**
52
+ * Whether to check tool result messages.
53
+ * @default false
54
+ */
55
+ checkToolResults?: boolean;
56
+ /**
57
+ * How to handle violations.
58
+ * - `"error"`: Throw an error when content is flagged
59
+ * - `"end"`: End the agent execution and return a violation message
60
+ * - `"replace"`: Replace the flagged content with a violation message
61
+ * @default "end"
62
+ */
63
+ exitBehavior?: "error" | "end" | "replace";
64
+ /**
65
+ * Custom template for violation messages.
66
+ * Available placeholders: `{categories}`, `{category_scores}`, `{original_content}`
67
+ */
68
+ violationMessage?: string;
69
+ }
70
+ /**
71
+ * Middleware that moderates agent traffic using OpenAI's moderation endpoint.
72
+ *
73
+ * This middleware checks messages for content policy violations at different stages:
74
+ * - Input: User messages before they reach the model
75
+ * - Output: AI model responses
76
+ * - Tool results: Results returned from tool executions
77
+ *
78
+ * @param options - Configuration options for the middleware
79
+ * @param options.model - OpenAI model to use for moderation. Can be either a model name or a BaseChatModel instance.
80
+ * @param options.moderationModel - Moderation model to use.
81
+ * @param options.checkInput - Whether to check user input messages.
82
+ * @param options.checkOutput - Whether to check model output messages.
83
+ * @param options.checkToolResults - Whether to check tool result messages.
84
+ * @param options.exitBehavior - How to handle violations.
85
+ * @param options.violationMessage - Custom template for violation messages.
86
+ * @returns Middleware function that can be used to moderate agent traffic.
87
+ *
88
+ * @example Using model instance
89
+ * ```ts
90
+ * import { createAgent, openAIModerationMiddleware } from "langchain";
91
+ * const model = new ChatOpenAI({ model: "gpt-4o-mini" }); // from "@langchain/openai"
92
+ * const middleware = openAIModerationMiddleware({ model,
93
+ * checkInput: true,
94
+ * checkOutput: true,
95
+ * exitBehavior: "end"
96
+ * });
97
+ *
98
+ * const agent = createAgent({
99
+ * model: "openai:gpt-4o",
100
+ * tools: [...],
101
+ * middleware: [middleware],
102
+ * });
103
+ * ```
104
+ *
105
+ * @example Using model name
106
+ * ```ts
107
+ * import { createAgent, openAIModerationMiddleware } from "langchain";
108
+ *
109
+ * const middleware = openAIModerationMiddleware({
110
+ * model: "gpt-4o-mini",
111
+ * checkInput: true,
112
+ * checkOutput: true,
113
+ * exitBehavior: "end"
114
+ * });
115
+ *
116
+ * const agent = createAgent({
117
+ * model: "openai:gpt-4o",
118
+ * tools: [...],
119
+ * middleware: [middleware],
120
+ * });
121
+ * ```
122
+ *
123
+ * @example Custom violation message
124
+ * ```ts
125
+ * const middleware = openAIModerationMiddleware({
126
+ * violationMessage: "Content flagged: {categories}. Scores: {category_scores}"
127
+ * });
128
+ * ```
129
+ */
130
+ declare function openAIModerationMiddleware(options: OpenAIModerationMiddlewareOptions): AgentMiddleware;
131
+ //#endregion
132
+ export { OpenAIModerationMiddlewareOptions, openAIModerationMiddleware };
133
+ //# sourceMappingURL=moderation.d.cts.map
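Read together, the declared options support stricter configurations than the JSDoc examples show. A sketch that also screens tool results and replaces flagged content in place so the run continues (the model strings and empty tool list are placeholders):

```ts
import { createAgent, openAIModerationMiddleware } from "langchain";

const middleware = openAIModerationMiddleware({
  model: "gpt-4o-mini", // required; supplies the OpenAI client used for moderation
  checkInput: true,
  checkOutput: true,
  checkToolResults: true, // off by default
  exitBehavior: "replace", // rewrite flagged messages instead of ending the run
  violationMessage: "Removed content flagged for {categories}.",
});

const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [],
  middleware: [middleware],
});
```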